diff --git a/3rdparty/ffmpeg/.gitignore b/3rdparty/ffmpeg/.gitignore new file mode 100644 index 0000000000..8daae67351 --- /dev/null +++ b/3rdparty/ffmpeg/.gitignore @@ -0,0 +1,3 @@ +downloads/ +*.dll +ffmpeg_version.cmake diff --git a/3rdparty/ffmpeg/build_win32.txt b/3rdparty/ffmpeg/build_win32.txt deleted file mode 100644 index e98b285208..0000000000 --- a/3rdparty/ffmpeg/build_win32.txt +++ /dev/null @@ -1,42 +0,0 @@ -The build script is to be fixed. -Right now it assumes that 32-bit MinGW is in the system path and -64-bit mingw is installed to c:\Apps\MinGW64. - -It is important that gcc is used, not g++! -Otherwise the produced DLL will likely be dependent on libgcc_s_dw2-1.dll or similar DLL. -While we want to make the DLLs with minimum dependencies: Win32 libraries + msvcrt.dll. - -ffopencv.c is really a C++ source, hence -x c++ is used. - -How to update opencv_ffmpeg.dll and opencv_ffmpeg_64.dll when a new version of FFMPEG is release? - -1. Install 32-bit MinGW + MSYS from - http://sourceforge.net/projects/mingw/files/Automated%20MinGW%20Installer/mingw-get-inst/ - Let's assume, it's installed in C:\MSYS32. -2. Install 64-bit MinGW. http://mingw-w64.sourceforge.net/ - Let's assume, it's installed in C:\MSYS64 -3. Copy C:\MSYS32\msys to C:\MSYS64\msys. Edit C:\MSYS64\msys\etc\fstab, change C:\MSYS32 to C:\MSYS64. - -4. Now you have working MSYS32 and MSYS64 environments. - Launch, one by one, C:\MSYS32\msys\msys.bat and C:\MSYS64\msys\msys.bat to create your home directories. - -4. Download ffmpeg-x.y.z.tar.gz (where x.y.z denotes the actual ffmpeg version). - Copy it to C:\MSYS{32|64}\msys\home\ directory. - -5. To build 32-bit ffmpeg libraries, run C:\MSYS32\msys\msys.bat and type the following commands: - - 5.1. tar -xzf ffmpeg-x.y.z.tar.gz - 5.2. mkdir build - 5.3. cd build - 5.4. ../ffmpeg-x.y.z/configure --enable-w32threads - 5.5. make - 5.6. make install - 5.7. cd /local/lib - 5.8. strip -g *.a - -6. Then repeat the same for 64-bit case. The output libs: libavcodec.a etc. need to be renamed to libavcodec64.a etc. - -7. Then, copy all those libs to \3rdparty\lib\, copy the headers to \3rdparty\include\ffmpeg_. - -8. 
Then, go to \3rdparty\ffmpeg, edit make.bat - (change paths to the actual paths to your msys32 and msys64 distributions) and then run make.bat diff --git a/3rdparty/ffmpeg/ffmpeg.cmake b/3rdparty/ffmpeg/ffmpeg.cmake new file mode 100644 index 0000000000..448ad2bb2c --- /dev/null +++ b/3rdparty/ffmpeg/ffmpeg.cmake @@ -0,0 +1,25 @@ +# Binary branch name: ffmpeg/master_20150703 +# Binaries were created for OpenCV: e379ea6ed60b0caad4d4e3eea096e9d850cb8c86 +set(FFMPEG_BINARIES_COMMIT "8aeefc4efe3215de89d8c7e114ae6f7a6091b8eb") +set(FFMPEG_FILE_HASH_BIN32 "89c783eee1c47bfc733f08334ec2e31c") +set(FFMPEG_FILE_HASH_BIN64 "35fe6ccdda6d7a04e9056b0d73b98e76") +set(FFMPEG_FILE_HASH_CMAKE "8606f947a780071f8fcce8cbf39ceef5") + +set(FFMPEG_DOWNLOAD_URL ${OPENCV_FFMPEG_URL};$ENV{OPENCV_FFMPEG_URL};https://raw.githubusercontent.com/Itseez/opencv_3rdparty/${FFMPEG_BINARIES_COMMIT}/ffmpeg/) + +ocv_download(PACKAGE opencv_ffmpeg.dll + HASH ${FFMPEG_FILE_HASH_BIN32} + URL ${FFMPEG_DOWNLOAD_URL} + DESTINATION_DIR ${CMAKE_CURRENT_LIST_DIR}) + +ocv_download(PACKAGE opencv_ffmpeg_64.dll + HASH ${FFMPEG_FILE_HASH_BIN64} + URL ${FFMPEG_DOWNLOAD_URL} + DESTINATION_DIR ${CMAKE_CURRENT_LIST_DIR}) + +ocv_download(PACKAGE ffmpeg_version.cmake + HASH ${FFMPEG_FILE_HASH_CMAKE} + URL ${FFMPEG_DOWNLOAD_URL} + DESTINATION_DIR ${CMAKE_CURRENT_LIST_DIR}) + +include(${CMAKE_CURRENT_LIST_DIR}/ffmpeg_version.cmake) diff --git a/3rdparty/ffmpeg/ffmpeg_version.cmake b/3rdparty/ffmpeg/ffmpeg_version.cmake deleted file mode 100644 index 48fba2b913..0000000000 --- a/3rdparty/ffmpeg/ffmpeg_version.cmake +++ /dev/null @@ -1,13 +0,0 @@ -set(HAVE_FFMPEG 1) -set(HAVE_FFMPEG_CODEC 1) -set(HAVE_FFMPEG_FORMAT 1) -set(HAVE_FFMPEG_UTIL 1) -set(HAVE_FFMPEG_SWSCALE 1) -set(HAVE_FFMPEG_RESAMPLE 0) -set(HAVE_GENTOO_FFMPEG 1) - -set(ALIASOF_libavcodec_VERSION 55.18.102) -set(ALIASOF_libavformat_VERSION 55.12.100) -set(ALIASOF_libavutil_VERSION 52.38.100) -set(ALIASOF_libswscale_VERSION 2.3.100) -set(ALIASOF_libavresample_VERSION 1.0.1) \ No newline at end of file diff --git a/3rdparty/ffmpeg/ffopencv.c b/3rdparty/ffmpeg/ffopencv.c deleted file mode 100644 index b412e90071..0000000000 --- a/3rdparty/ffmpeg/ffopencv.c +++ /dev/null @@ -1 +0,0 @@ -#include "cap_ffmpeg_impl.hpp" diff --git a/3rdparty/ffmpeg/make.bat b/3rdparty/ffmpeg/make.bat deleted file mode 100644 index 318c2fee88..0000000000 --- a/3rdparty/ffmpeg/make.bat +++ /dev/null @@ -1,2 +0,0 @@ -set path=c:\dev\msys32\bin;%path% & gcc -Wall -shared -o opencv_ffmpeg.dll -O2 -x c++ -I../include -I../include/ffmpeg_ -I../../modules/highgui/src ffopencv.c -L../lib -lavformat -lavcodec -lavdevice -lswscale -lavutil -lws2_32 -set path=c:\dev\msys64\bin;%path% & gcc -m64 -Wall -shared -o opencv_ffmpeg_64.dll -O2 -x c++ -I../include -I../include/ffmpeg_ -I../../modules/highgui/src ffopencv.c -L../lib -lavformat64 -lavcodec64 -lavdevice64 -lswscale64 -lavutil64 -lws2_32 \ No newline at end of file diff --git a/3rdparty/ffmpeg/opencv_ffmpeg.dll b/3rdparty/ffmpeg/opencv_ffmpeg.dll deleted file mode 100644 index b1e70df6a3..0000000000 Binary files a/3rdparty/ffmpeg/opencv_ffmpeg.dll and /dev/null differ diff --git a/3rdparty/ffmpeg/opencv_ffmpeg_64.dll b/3rdparty/ffmpeg/opencv_ffmpeg_64.dll deleted file mode 100644 index 37236e5424..0000000000 Binary files a/3rdparty/ffmpeg/opencv_ffmpeg_64.dll and /dev/null differ diff --git a/3rdparty/ffmpeg/readme.txt b/3rdparty/ffmpeg/readme.txt index 84faf7a83b..0a7833a1de 100644 --- a/3rdparty/ffmpeg/readme.txt +++ b/3rdparty/ffmpeg/readme.txt @@ -3,19 +3,19 @@ 
he/she should use --enabled-shared configure flag and make sure that no GPL components are enabled (some notable examples are x264 (H264 encoder) and libac3 (Dolby AC3 audio codec)). See https://www.ffmpeg.org/legal.html for details. - + If you want to play very safe and do not want to use FFMPEG at all, regardless of whether it's installed on your system or not, configure and build OpenCV using CMake with WITH_FFMPEG=OFF flag. OpenCV will then use AVFoundation (OSX), GStreamer (Linux) or other available backends supported by opencv_videoio module. - + There is also our self-contained motion jpeg codec, which you can use without any worries. It handles CV_FOURCC('M', 'J', 'P', 'G') streams within an AVI container (".avi"). - + * On Windows OpenCV uses pre-built ffmpeg binaries, built with proper flags (without GPL components) and wrapped with simple, stable OpenCV-compatible API. The binaries are opencv_ffmpeg.dll (version for 32-bit Windows) and opencv_ffmpeg_64.dll (version for 64-bit Windows). - + See build_win32.txt for the build instructions, if you want to rebuild opencv_ffmpeg*.dll from scratch. The pre-built opencv_ffmpeg*.dll is: @@ -24,9 +24,15 @@ If it succeeds, ffmpeg can be used to decode/encode videos; otherwise, other API is used. + The FFMPEG build contains an H264 encoder based on the OpenH264 library, which should be installed separately. + OpenH264 Video Codec provided by Cisco Systems, Inc. + See https://github.com/cisco/openh264/releases for details and the OpenH264 license. + The downloaded binary file can be placed into the global system path (System32 or SysWOW64) or next to the application binaries. + You can also specify the location of the binary file via the OPENH264_LIBRARY_PATH environment variable. + If LGPL/GPL software can not be supplied with your OpenCV-based product, simply exclude opencv_ffmpeg*.dll from your distribution; OpenCV will stay fully functional except for the ability to decode/encode videos using FFMPEG (though, it may still be able to do that using other API, such as Video for Windows, Windows Media Foundation or our self-contained motion jpeg codec). - + See license.txt for the FFMPEG copyright notice and the licensing terms. diff --git a/3rdparty/include/ffmpeg_/libavcodec/avcodec.h b/3rdparty/include/ffmpeg_/libavcodec/avcodec.h deleted file mode 100644 index f0ccfee940..0000000000 --- a/3rdparty/include/ffmpeg_/libavcodec/avcodec.h +++ /dev/null @@ -1,4863 +0,0 @@ -/* - * copyright (c) 2001 Fabrice Bellard - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details.
- * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#ifndef AVCODEC_AVCODEC_H -#define AVCODEC_AVCODEC_H - -/** - * @file - * @ingroup libavc - * Libavcodec external API header - */ - -#include -#include "libavutil/samplefmt.h" -#include "libavutil/avutil.h" -#include "libavutil/buffer.h" -#include "libavutil/cpu.h" -#include "libavutil/channel_layout.h" -#include "libavutil/dict.h" -#include "libavutil/frame.h" -#include "libavutil/log.h" -#include "libavutil/pixfmt.h" -#include "libavutil/rational.h" - -#include "libavcodec/version.h" -/** - * @defgroup libavc Encoding/Decoding Library - * @{ - * - * @defgroup lavc_decoding Decoding - * @{ - * @} - * - * @defgroup lavc_encoding Encoding - * @{ - * @} - * - * @defgroup lavc_codec Codecs - * @{ - * @defgroup lavc_codec_native Native Codecs - * @{ - * @} - * @defgroup lavc_codec_wrappers External library wrappers - * @{ - * @} - * @defgroup lavc_codec_hwaccel Hardware Accelerators bridge - * @{ - * @} - * @} - * @defgroup lavc_internal Internal - * @{ - * @} - * @} - * - */ - -/** - * @defgroup lavc_core Core functions/structures. - * @ingroup libavc - * - * Basic definitions, functions for querying libavcodec capabilities, - * allocating core structures, etc. - * @{ - */ - - -/** - * Identify the syntax and semantics of the bitstream. - * The principle is roughly: - * Two decoders with the same ID can decode the same streams. - * Two encoders with the same ID can encode compatible streams. - * There may be slight deviations from the principle due to implementation - * details. - * - * If you add a codec ID to this list, add it so that - * 1. no value of a existing codec ID changes (that would break ABI), - * 2. Give it a value which when taken as ASCII is recognized uniquely by a human as this specific codec. - * This ensures that 2 forks can independently add AVCodecIDs without producing conflicts. - * - * After adding new codec IDs, do not forget to add an entry to the codec - * descriptor list and bump libavcodec minor version. 
- */ -enum AVCodecID { - AV_CODEC_ID_NONE, - - /* video codecs */ - AV_CODEC_ID_MPEG1VIDEO, - AV_CODEC_ID_MPEG2VIDEO, ///< preferred ID for MPEG-1/2 video decoding - AV_CODEC_ID_MPEG2VIDEO_XVMC, - AV_CODEC_ID_H261, - AV_CODEC_ID_H263, - AV_CODEC_ID_RV10, - AV_CODEC_ID_RV20, - AV_CODEC_ID_MJPEG, - AV_CODEC_ID_MJPEGB, - AV_CODEC_ID_LJPEG, - AV_CODEC_ID_SP5X, - AV_CODEC_ID_JPEGLS, - AV_CODEC_ID_MPEG4, - AV_CODEC_ID_RAWVIDEO, - AV_CODEC_ID_MSMPEG4V1, - AV_CODEC_ID_MSMPEG4V2, - AV_CODEC_ID_MSMPEG4V3, - AV_CODEC_ID_WMV1, - AV_CODEC_ID_WMV2, - AV_CODEC_ID_H263P, - AV_CODEC_ID_H263I, - AV_CODEC_ID_FLV1, - AV_CODEC_ID_SVQ1, - AV_CODEC_ID_SVQ3, - AV_CODEC_ID_DVVIDEO, - AV_CODEC_ID_HUFFYUV, - AV_CODEC_ID_CYUV, - AV_CODEC_ID_H264, - AV_CODEC_ID_INDEO3, - AV_CODEC_ID_VP3, - AV_CODEC_ID_THEORA, - AV_CODEC_ID_ASV1, - AV_CODEC_ID_ASV2, - AV_CODEC_ID_FFV1, - AV_CODEC_ID_4XM, - AV_CODEC_ID_VCR1, - AV_CODEC_ID_CLJR, - AV_CODEC_ID_MDEC, - AV_CODEC_ID_ROQ, - AV_CODEC_ID_INTERPLAY_VIDEO, - AV_CODEC_ID_XAN_WC3, - AV_CODEC_ID_XAN_WC4, - AV_CODEC_ID_RPZA, - AV_CODEC_ID_CINEPAK, - AV_CODEC_ID_WS_VQA, - AV_CODEC_ID_MSRLE, - AV_CODEC_ID_MSVIDEO1, - AV_CODEC_ID_IDCIN, - AV_CODEC_ID_8BPS, - AV_CODEC_ID_SMC, - AV_CODEC_ID_FLIC, - AV_CODEC_ID_TRUEMOTION1, - AV_CODEC_ID_VMDVIDEO, - AV_CODEC_ID_MSZH, - AV_CODEC_ID_ZLIB, - AV_CODEC_ID_QTRLE, - AV_CODEC_ID_TSCC, - AV_CODEC_ID_ULTI, - AV_CODEC_ID_QDRAW, - AV_CODEC_ID_VIXL, - AV_CODEC_ID_QPEG, - AV_CODEC_ID_PNG, - AV_CODEC_ID_PPM, - AV_CODEC_ID_PBM, - AV_CODEC_ID_PGM, - AV_CODEC_ID_PGMYUV, - AV_CODEC_ID_PAM, - AV_CODEC_ID_FFVHUFF, - AV_CODEC_ID_RV30, - AV_CODEC_ID_RV40, - AV_CODEC_ID_VC1, - AV_CODEC_ID_WMV3, - AV_CODEC_ID_LOCO, - AV_CODEC_ID_WNV1, - AV_CODEC_ID_AASC, - AV_CODEC_ID_INDEO2, - AV_CODEC_ID_FRAPS, - AV_CODEC_ID_TRUEMOTION2, - AV_CODEC_ID_BMP, - AV_CODEC_ID_CSCD, - AV_CODEC_ID_MMVIDEO, - AV_CODEC_ID_ZMBV, - AV_CODEC_ID_AVS, - AV_CODEC_ID_SMACKVIDEO, - AV_CODEC_ID_NUV, - AV_CODEC_ID_KMVC, - AV_CODEC_ID_FLASHSV, - AV_CODEC_ID_CAVS, - AV_CODEC_ID_JPEG2000, - AV_CODEC_ID_VMNC, - AV_CODEC_ID_VP5, - AV_CODEC_ID_VP6, - AV_CODEC_ID_VP6F, - AV_CODEC_ID_TARGA, - AV_CODEC_ID_DSICINVIDEO, - AV_CODEC_ID_TIERTEXSEQVIDEO, - AV_CODEC_ID_TIFF, - AV_CODEC_ID_GIF, - AV_CODEC_ID_DXA, - AV_CODEC_ID_DNXHD, - AV_CODEC_ID_THP, - AV_CODEC_ID_SGI, - AV_CODEC_ID_C93, - AV_CODEC_ID_BETHSOFTVID, - AV_CODEC_ID_PTX, - AV_CODEC_ID_TXD, - AV_CODEC_ID_VP6A, - AV_CODEC_ID_AMV, - AV_CODEC_ID_VB, - AV_CODEC_ID_PCX, - AV_CODEC_ID_SUNRAST, - AV_CODEC_ID_INDEO4, - AV_CODEC_ID_INDEO5, - AV_CODEC_ID_MIMIC, - AV_CODEC_ID_RL2, - AV_CODEC_ID_ESCAPE124, - AV_CODEC_ID_DIRAC, - AV_CODEC_ID_BFI, - AV_CODEC_ID_CMV, - AV_CODEC_ID_MOTIONPIXELS, - AV_CODEC_ID_TGV, - AV_CODEC_ID_TGQ, - AV_CODEC_ID_TQI, - AV_CODEC_ID_AURA, - AV_CODEC_ID_AURA2, - AV_CODEC_ID_V210X, - AV_CODEC_ID_TMV, - AV_CODEC_ID_V210, - AV_CODEC_ID_DPX, - AV_CODEC_ID_MAD, - AV_CODEC_ID_FRWU, - AV_CODEC_ID_FLASHSV2, - AV_CODEC_ID_CDGRAPHICS, - AV_CODEC_ID_R210, - AV_CODEC_ID_ANM, - AV_CODEC_ID_BINKVIDEO, - AV_CODEC_ID_IFF_ILBM, - AV_CODEC_ID_IFF_BYTERUN1, - AV_CODEC_ID_KGV1, - AV_CODEC_ID_YOP, - AV_CODEC_ID_VP8, - AV_CODEC_ID_PICTOR, - AV_CODEC_ID_ANSI, - AV_CODEC_ID_A64_MULTI, - AV_CODEC_ID_A64_MULTI5, - AV_CODEC_ID_R10K, - AV_CODEC_ID_MXPEG, - AV_CODEC_ID_LAGARITH, - AV_CODEC_ID_PRORES, - AV_CODEC_ID_JV, - AV_CODEC_ID_DFA, - AV_CODEC_ID_WMV3IMAGE, - AV_CODEC_ID_VC1IMAGE, - AV_CODEC_ID_UTVIDEO, - AV_CODEC_ID_BMV_VIDEO, - AV_CODEC_ID_VBLE, - AV_CODEC_ID_DXTORY, - AV_CODEC_ID_V410, - AV_CODEC_ID_XWD, - AV_CODEC_ID_CDXL, - AV_CODEC_ID_XBM, - 
AV_CODEC_ID_ZEROCODEC, - AV_CODEC_ID_MSS1, - AV_CODEC_ID_MSA1, - AV_CODEC_ID_TSCC2, - AV_CODEC_ID_MTS2, - AV_CODEC_ID_CLLC, - AV_CODEC_ID_MSS2, - AV_CODEC_ID_VP9, - AV_CODEC_ID_AIC, - AV_CODEC_ID_ESCAPE130_DEPRECATED, - AV_CODEC_ID_G2M_DEPRECATED, - - AV_CODEC_ID_BRENDER_PIX= MKBETAG('B','P','I','X'), - AV_CODEC_ID_Y41P = MKBETAG('Y','4','1','P'), - AV_CODEC_ID_ESCAPE130 = MKBETAG('E','1','3','0'), - AV_CODEC_ID_EXR = MKBETAG('0','E','X','R'), - AV_CODEC_ID_AVRP = MKBETAG('A','V','R','P'), - - AV_CODEC_ID_012V = MKBETAG('0','1','2','V'), - AV_CODEC_ID_G2M = MKBETAG( 0 ,'G','2','M'), - AV_CODEC_ID_AVUI = MKBETAG('A','V','U','I'), - AV_CODEC_ID_AYUV = MKBETAG('A','Y','U','V'), - AV_CODEC_ID_TARGA_Y216 = MKBETAG('T','2','1','6'), - AV_CODEC_ID_V308 = MKBETAG('V','3','0','8'), - AV_CODEC_ID_V408 = MKBETAG('V','4','0','8'), - AV_CODEC_ID_YUV4 = MKBETAG('Y','U','V','4'), - AV_CODEC_ID_SANM = MKBETAG('S','A','N','M'), - AV_CODEC_ID_PAF_VIDEO = MKBETAG('P','A','F','V'), - AV_CODEC_ID_AVRN = MKBETAG('A','V','R','n'), - AV_CODEC_ID_CPIA = MKBETAG('C','P','I','A'), - AV_CODEC_ID_XFACE = MKBETAG('X','F','A','C'), - AV_CODEC_ID_SGIRLE = MKBETAG('S','G','I','R'), - AV_CODEC_ID_MVC1 = MKBETAG('M','V','C','1'), - AV_CODEC_ID_MVC2 = MKBETAG('M','V','C','2'), - AV_CODEC_ID_SNOW = MKBETAG('S','N','O','W'), - AV_CODEC_ID_WEBP = MKBETAG('W','E','B','P'), - AV_CODEC_ID_SMVJPEG = MKBETAG('S','M','V','J'), - - /* various PCM "codecs" */ - AV_CODEC_ID_FIRST_AUDIO = 0x10000, ///< A dummy id pointing at the start of audio codecs - AV_CODEC_ID_PCM_S16LE = 0x10000, - AV_CODEC_ID_PCM_S16BE, - AV_CODEC_ID_PCM_U16LE, - AV_CODEC_ID_PCM_U16BE, - AV_CODEC_ID_PCM_S8, - AV_CODEC_ID_PCM_U8, - AV_CODEC_ID_PCM_MULAW, - AV_CODEC_ID_PCM_ALAW, - AV_CODEC_ID_PCM_S32LE, - AV_CODEC_ID_PCM_S32BE, - AV_CODEC_ID_PCM_U32LE, - AV_CODEC_ID_PCM_U32BE, - AV_CODEC_ID_PCM_S24LE, - AV_CODEC_ID_PCM_S24BE, - AV_CODEC_ID_PCM_U24LE, - AV_CODEC_ID_PCM_U24BE, - AV_CODEC_ID_PCM_S24DAUD, - AV_CODEC_ID_PCM_ZORK, - AV_CODEC_ID_PCM_S16LE_PLANAR, - AV_CODEC_ID_PCM_DVD, - AV_CODEC_ID_PCM_F32BE, - AV_CODEC_ID_PCM_F32LE, - AV_CODEC_ID_PCM_F64BE, - AV_CODEC_ID_PCM_F64LE, - AV_CODEC_ID_PCM_BLURAY, - AV_CODEC_ID_PCM_LXF, - AV_CODEC_ID_S302M, - AV_CODEC_ID_PCM_S8_PLANAR, - AV_CODEC_ID_PCM_S24LE_PLANAR = MKBETAG(24,'P','S','P'), - AV_CODEC_ID_PCM_S32LE_PLANAR = MKBETAG(32,'P','S','P'), - AV_CODEC_ID_PCM_S16BE_PLANAR = MKBETAG('P','S','P',16), - - /* various ADPCM codecs */ - AV_CODEC_ID_ADPCM_IMA_QT = 0x11000, - AV_CODEC_ID_ADPCM_IMA_WAV, - AV_CODEC_ID_ADPCM_IMA_DK3, - AV_CODEC_ID_ADPCM_IMA_DK4, - AV_CODEC_ID_ADPCM_IMA_WS, - AV_CODEC_ID_ADPCM_IMA_SMJPEG, - AV_CODEC_ID_ADPCM_MS, - AV_CODEC_ID_ADPCM_4XM, - AV_CODEC_ID_ADPCM_XA, - AV_CODEC_ID_ADPCM_ADX, - AV_CODEC_ID_ADPCM_EA, - AV_CODEC_ID_ADPCM_G726, - AV_CODEC_ID_ADPCM_CT, - AV_CODEC_ID_ADPCM_SWF, - AV_CODEC_ID_ADPCM_YAMAHA, - AV_CODEC_ID_ADPCM_SBPRO_4, - AV_CODEC_ID_ADPCM_SBPRO_3, - AV_CODEC_ID_ADPCM_SBPRO_2, - AV_CODEC_ID_ADPCM_THP, - AV_CODEC_ID_ADPCM_IMA_AMV, - AV_CODEC_ID_ADPCM_EA_R1, - AV_CODEC_ID_ADPCM_EA_R3, - AV_CODEC_ID_ADPCM_EA_R2, - AV_CODEC_ID_ADPCM_IMA_EA_SEAD, - AV_CODEC_ID_ADPCM_IMA_EA_EACS, - AV_CODEC_ID_ADPCM_EA_XAS, - AV_CODEC_ID_ADPCM_EA_MAXIS_XA, - AV_CODEC_ID_ADPCM_IMA_ISS, - AV_CODEC_ID_ADPCM_G722, - AV_CODEC_ID_ADPCM_IMA_APC, - AV_CODEC_ID_VIMA = MKBETAG('V','I','M','A'), - AV_CODEC_ID_ADPCM_AFC = MKBETAG('A','F','C',' '), - AV_CODEC_ID_ADPCM_IMA_OKI = MKBETAG('O','K','I',' '), - AV_CODEC_ID_ADPCM_DTK = MKBETAG('D','T','K',' '), - AV_CODEC_ID_ADPCM_IMA_RAD = MKBETAG('R','A','D',' '), - - /* 
AMR */ - AV_CODEC_ID_AMR_NB = 0x12000, - AV_CODEC_ID_AMR_WB, - - /* RealAudio codecs*/ - AV_CODEC_ID_RA_144 = 0x13000, - AV_CODEC_ID_RA_288, - - /* various DPCM codecs */ - AV_CODEC_ID_ROQ_DPCM = 0x14000, - AV_CODEC_ID_INTERPLAY_DPCM, - AV_CODEC_ID_XAN_DPCM, - AV_CODEC_ID_SOL_DPCM, - - /* audio codecs */ - AV_CODEC_ID_MP2 = 0x15000, - AV_CODEC_ID_MP3, ///< preferred ID for decoding MPEG audio layer 1, 2 or 3 - AV_CODEC_ID_AAC, - AV_CODEC_ID_AC3, - AV_CODEC_ID_DTS, - AV_CODEC_ID_VORBIS, - AV_CODEC_ID_DVAUDIO, - AV_CODEC_ID_WMAV1, - AV_CODEC_ID_WMAV2, - AV_CODEC_ID_MACE3, - AV_CODEC_ID_MACE6, - AV_CODEC_ID_VMDAUDIO, - AV_CODEC_ID_FLAC, - AV_CODEC_ID_MP3ADU, - AV_CODEC_ID_MP3ON4, - AV_CODEC_ID_SHORTEN, - AV_CODEC_ID_ALAC, - AV_CODEC_ID_WESTWOOD_SND1, - AV_CODEC_ID_GSM, ///< as in Berlin toast format - AV_CODEC_ID_QDM2, - AV_CODEC_ID_COOK, - AV_CODEC_ID_TRUESPEECH, - AV_CODEC_ID_TTA, - AV_CODEC_ID_SMACKAUDIO, - AV_CODEC_ID_QCELP, - AV_CODEC_ID_WAVPACK, - AV_CODEC_ID_DSICINAUDIO, - AV_CODEC_ID_IMC, - AV_CODEC_ID_MUSEPACK7, - AV_CODEC_ID_MLP, - AV_CODEC_ID_GSM_MS, /* as found in WAV */ - AV_CODEC_ID_ATRAC3, - AV_CODEC_ID_VOXWARE, - AV_CODEC_ID_APE, - AV_CODEC_ID_NELLYMOSER, - AV_CODEC_ID_MUSEPACK8, - AV_CODEC_ID_SPEEX, - AV_CODEC_ID_WMAVOICE, - AV_CODEC_ID_WMAPRO, - AV_CODEC_ID_WMALOSSLESS, - AV_CODEC_ID_ATRAC3P, - AV_CODEC_ID_EAC3, - AV_CODEC_ID_SIPR, - AV_CODEC_ID_MP1, - AV_CODEC_ID_TWINVQ, - AV_CODEC_ID_TRUEHD, - AV_CODEC_ID_MP4ALS, - AV_CODEC_ID_ATRAC1, - AV_CODEC_ID_BINKAUDIO_RDFT, - AV_CODEC_ID_BINKAUDIO_DCT, - AV_CODEC_ID_AAC_LATM, - AV_CODEC_ID_QDMC, - AV_CODEC_ID_CELT, - AV_CODEC_ID_G723_1, - AV_CODEC_ID_G729, - AV_CODEC_ID_8SVX_EXP, - AV_CODEC_ID_8SVX_FIB, - AV_CODEC_ID_BMV_AUDIO, - AV_CODEC_ID_RALF, - AV_CODEC_ID_IAC, - AV_CODEC_ID_ILBC, - AV_CODEC_ID_OPUS_DEPRECATED, - AV_CODEC_ID_COMFORT_NOISE, - AV_CODEC_ID_TAK_DEPRECATED, - AV_CODEC_ID_FFWAVESYNTH = MKBETAG('F','F','W','S'), - AV_CODEC_ID_SONIC = MKBETAG('S','O','N','C'), - AV_CODEC_ID_SONIC_LS = MKBETAG('S','O','N','L'), - AV_CODEC_ID_PAF_AUDIO = MKBETAG('P','A','F','A'), - AV_CODEC_ID_OPUS = MKBETAG('O','P','U','S'), - AV_CODEC_ID_TAK = MKBETAG('t','B','a','K'), - AV_CODEC_ID_EVRC = MKBETAG('s','e','v','c'), - AV_CODEC_ID_SMV = MKBETAG('s','s','m','v'), - - /* subtitle codecs */ - AV_CODEC_ID_FIRST_SUBTITLE = 0x17000, ///< A dummy ID pointing at the start of subtitle codecs. - AV_CODEC_ID_DVD_SUBTITLE = 0x17000, - AV_CODEC_ID_DVB_SUBTITLE, - AV_CODEC_ID_TEXT, ///< raw UTF-8 text - AV_CODEC_ID_XSUB, - AV_CODEC_ID_SSA, - AV_CODEC_ID_MOV_TEXT, - AV_CODEC_ID_HDMV_PGS_SUBTITLE, - AV_CODEC_ID_DVB_TELETEXT, - AV_CODEC_ID_SRT, - AV_CODEC_ID_MICRODVD = MKBETAG('m','D','V','D'), - AV_CODEC_ID_EIA_608 = MKBETAG('c','6','0','8'), - AV_CODEC_ID_JACOSUB = MKBETAG('J','S','U','B'), - AV_CODEC_ID_SAMI = MKBETAG('S','A','M','I'), - AV_CODEC_ID_REALTEXT = MKBETAG('R','T','X','T'), - AV_CODEC_ID_SUBVIEWER1 = MKBETAG('S','b','V','1'), - AV_CODEC_ID_SUBVIEWER = MKBETAG('S','u','b','V'), - AV_CODEC_ID_SUBRIP = MKBETAG('S','R','i','p'), - AV_CODEC_ID_WEBVTT = MKBETAG('W','V','T','T'), - AV_CODEC_ID_MPL2 = MKBETAG('M','P','L','2'), - AV_CODEC_ID_VPLAYER = MKBETAG('V','P','l','r'), - AV_CODEC_ID_PJS = MKBETAG('P','h','J','S'), - AV_CODEC_ID_ASS = MKBETAG('A','S','S',' '), ///< ASS as defined in Matroska - - /* other specific kind of codecs (generally used for attachments) */ - AV_CODEC_ID_FIRST_UNKNOWN = 0x18000, ///< A dummy ID pointing at the start of various fake codecs. 
- AV_CODEC_ID_TTF = 0x18000, - AV_CODEC_ID_BINTEXT = MKBETAG('B','T','X','T'), - AV_CODEC_ID_XBIN = MKBETAG('X','B','I','N'), - AV_CODEC_ID_IDF = MKBETAG( 0 ,'I','D','F'), - AV_CODEC_ID_OTF = MKBETAG( 0 ,'O','T','F'), - AV_CODEC_ID_SMPTE_KLV = MKBETAG('K','L','V','A'), - AV_CODEC_ID_DVD_NAV = MKBETAG('D','N','A','V'), - - - AV_CODEC_ID_PROBE = 0x19000, ///< codec_id is not known (like AV_CODEC_ID_NONE) but lavf should attempt to identify it - - AV_CODEC_ID_MPEG2TS = 0x20000, /**< _FAKE_ codec to indicate a raw MPEG-2 TS - * stream (only used by libavformat) */ - AV_CODEC_ID_MPEG4SYSTEMS = 0x20001, /**< _FAKE_ codec to indicate a MPEG-4 Systems - * stream (only used by libavformat) */ - AV_CODEC_ID_FFMETADATA = 0x21000, ///< Dummy codec for streams containing only metadata information. - -#if FF_API_CODEC_ID -#include "old_codec_ids.h" -#endif -}; - -/** - * This struct describes the properties of a single codec described by an - * AVCodecID. - * @see avcodec_get_descriptor() - */ -typedef struct AVCodecDescriptor { - enum AVCodecID id; - enum AVMediaType type; - /** - * Name of the codec described by this descriptor. It is non-empty and - * unique for each codec descriptor. It should contain alphanumeric - * characters and '_' only. - */ - const char *name; - /** - * A more descriptive name for this codec. May be NULL. - */ - const char *long_name; - /** - * Codec properties, a combination of AV_CODEC_PROP_* flags. - */ - int props; -} AVCodecDescriptor; - -/** - * Codec uses only intra compression. - * Video codecs only. - */ -#define AV_CODEC_PROP_INTRA_ONLY (1 << 0) -/** - * Codec supports lossy compression. Audio and video codecs only. - * @note a codec may support both lossy and lossless - * compression modes - */ -#define AV_CODEC_PROP_LOSSY (1 << 1) -/** - * Codec supports lossless compression. Audio and video codecs only. - */ -#define AV_CODEC_PROP_LOSSLESS (1 << 2) -/** - * Subtitle codec is bitmap based - * Decoded AVSubtitle data can be read from the AVSubtitleRect->pict field. - */ -#define AV_CODEC_PROP_BITMAP_SUB (1 << 16) -/** - * Subtitle codec is text based. - * Decoded AVSubtitle data can be read from the AVSubtitleRect->ass field. - */ -#define AV_CODEC_PROP_TEXT_SUB (1 << 17) - -/** - * @ingroup lavc_decoding - * Required number of additionally allocated bytes at the end of the input bitstream for decoding. - * This is mainly needed because some optimized bitstream readers read - * 32 or 64 bit at once and could read over the end.
- * Note: If the first 23 bits of the additional bytes are not 0, then damaged - * MPEG bitstreams could cause overread and segfault. - */ -#define FF_INPUT_BUFFER_PADDING_SIZE 16 - -/** - * @ingroup lavc_encoding - * minimum encoding buffer size - * Used to avoid some checks during header writing. - */ -#define FF_MIN_BUFFER_SIZE 16384 - - -/** - * @ingroup lavc_encoding - * motion estimation type. - */ -enum Motion_Est_ID { - ME_ZERO = 1, ///< no search, that is use 0,0 vector whenever one is needed - ME_FULL, - ME_LOG, - ME_PHODS, - ME_EPZS, ///< enhanced predictive zonal search - ME_X1, ///< reserved for experiments - ME_HEX, ///< hexagon based search - ME_UMH, ///< uneven multi-hexagon search - ME_TESA, ///< transformed exhaustive search algorithm - ME_ITER=50, ///< iterative search -}; - -/** - * @ingroup lavc_decoding - */ -enum AVDiscard{ - /* We leave some space between them for extensions (drop some - * keyframes for intra-only or drop just some bidir frames). */ - AVDISCARD_NONE =-16, ///< discard nothing - AVDISCARD_DEFAULT = 0, ///< discard useless packets like 0 size packets in avi - AVDISCARD_NONREF = 8, ///< discard all non reference - AVDISCARD_BIDIR = 16, ///< discard all bidirectional frames - AVDISCARD_NONKEY = 32, ///< discard all frames except keyframes - AVDISCARD_ALL = 48, ///< discard all -}; - -enum AVColorPrimaries{ - AVCOL_PRI_BT709 = 1, ///< also ITU-R BT1361 / IEC 61966-2-4 / SMPTE RP177 Annex B - AVCOL_PRI_UNSPECIFIED = 2, - AVCOL_PRI_BT470M = 4, - AVCOL_PRI_BT470BG = 5, ///< also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM - AVCOL_PRI_SMPTE170M = 6, ///< also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC - AVCOL_PRI_SMPTE240M = 7, ///< functionally identical to above - AVCOL_PRI_FILM = 8, - AVCOL_PRI_NB , ///< Not part of ABI -}; - -enum AVColorTransferCharacteristic{ - AVCOL_TRC_BT709 = 1, ///< also ITU-R BT1361 - AVCOL_TRC_UNSPECIFIED = 2, - AVCOL_TRC_GAMMA22 = 4, ///< also ITU-R BT470M / ITU-R BT1700 625 PAL & SECAM - AVCOL_TRC_GAMMA28 = 5, ///< also ITU-R BT470BG - AVCOL_TRC_SMPTE240M = 7, - AVCOL_TRC_NB , ///< Not part of ABI -}; - -enum AVColorSpace{ - AVCOL_SPC_RGB = 0, - AVCOL_SPC_BT709 = 1, ///< also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / SMPTE RP177 Annex B - AVCOL_SPC_UNSPECIFIED = 2, - AVCOL_SPC_FCC = 4, - AVCOL_SPC_BT470BG = 5, ///< also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601 - AVCOL_SPC_SMPTE170M = 6, ///< also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC / functionally identical to above - AVCOL_SPC_SMPTE240M = 7, - AVCOL_SPC_YCOCG = 8, ///< Used by Dirac / VC-2 and H.264 FRext, see ITU-T SG16 - AVCOL_SPC_NB , ///< Not part of ABI -}; -#define AVCOL_SPC_YCGCO AVCOL_SPC_YCOCG - -enum AVColorRange{ - AVCOL_RANGE_UNSPECIFIED = 0, - AVCOL_RANGE_MPEG = 1, ///< the normal 219*2^(n-8) "MPEG" YUV ranges - AVCOL_RANGE_JPEG = 2, ///< the normal 2^n-1 "JPEG" YUV ranges - AVCOL_RANGE_NB , ///< Not part of ABI -}; - -/** - * X X 3 4 X X are luma samples, - * 1 2 1-6 are possible chroma positions - * X X 5 6 X 0 is undefined/unknown position - */ -enum AVChromaLocation{ - AVCHROMA_LOC_UNSPECIFIED = 0, - AVCHROMA_LOC_LEFT = 1, ///< mpeg2/4, h264 default - AVCHROMA_LOC_CENTER = 2, ///< mpeg1, jpeg, h263 - AVCHROMA_LOC_TOPLEFT = 3, ///< DV - AVCHROMA_LOC_TOP = 4, - AVCHROMA_LOC_BOTTOMLEFT = 5, - AVCHROMA_LOC_BOTTOM = 6, - AVCHROMA_LOC_NB , ///< Not part of ABI -}; - -enum AVAudioServiceType { - AV_AUDIO_SERVICE_TYPE_MAIN = 0, - AV_AUDIO_SERVICE_TYPE_EFFECTS = 
1, - AV_AUDIO_SERVICE_TYPE_VISUALLY_IMPAIRED = 2, - AV_AUDIO_SERVICE_TYPE_HEARING_IMPAIRED = 3, - AV_AUDIO_SERVICE_TYPE_DIALOGUE = 4, - AV_AUDIO_SERVICE_TYPE_COMMENTARY = 5, - AV_AUDIO_SERVICE_TYPE_EMERGENCY = 6, - AV_AUDIO_SERVICE_TYPE_VOICE_OVER = 7, - AV_AUDIO_SERVICE_TYPE_KARAOKE = 8, - AV_AUDIO_SERVICE_TYPE_NB , ///< Not part of ABI -}; - -/** - * @ingroup lavc_encoding - */ -typedef struct RcOverride{ - int start_frame; - int end_frame; - int qscale; // If this is 0 then quality_factor will be used instead. - float quality_factor; -} RcOverride; - -#define FF_MAX_B_FRAMES 16 - -/* encoding support - These flags can be passed in AVCodecContext.flags before initialization. - Note: Not everything is supported yet. -*/ - -/** - * Allow decoders to produce frames with data planes that are not aligned - * to CPU requirements (e.g. due to cropping). - */ -#define CODEC_FLAG_UNALIGNED 0x0001 -#define CODEC_FLAG_QSCALE 0x0002 ///< Use fixed qscale. -#define CODEC_FLAG_4MV 0x0004 ///< 4 MV per MB allowed / advanced prediction for H.263. -#define CODEC_FLAG_QPEL 0x0010 ///< Use qpel MC. -#define CODEC_FLAG_GMC 0x0020 ///< Use GMC. -#define CODEC_FLAG_MV0 0x0040 ///< Always try a MB with MV=<0,0>. -/** - * The parent program guarantees that the input for B-frames containing - * streams is not written to for at least s->max_b_frames+1 frames, if - * this is not set the input will be copied. - */ -#define CODEC_FLAG_INPUT_PRESERVED 0x0100 -#define CODEC_FLAG_PASS1 0x0200 ///< Use internal 2pass ratecontrol in first pass mode. -#define CODEC_FLAG_PASS2 0x0400 ///< Use internal 2pass ratecontrol in second pass mode. -#define CODEC_FLAG_GRAY 0x2000 ///< Only decode/encode grayscale. -#define CODEC_FLAG_EMU_EDGE 0x4000 ///< Don't draw edges. -#define CODEC_FLAG_PSNR 0x8000 ///< error[?] variables will be set during encoding. -#define CODEC_FLAG_TRUNCATED 0x00010000 /** Input bitstream might be truncated at a random - location instead of only at frame boundaries. */ -#define CODEC_FLAG_NORMALIZE_AQP 0x00020000 ///< Normalize adaptive quantization. -#define CODEC_FLAG_INTERLACED_DCT 0x00040000 ///< Use interlaced DCT. -#define CODEC_FLAG_LOW_DELAY 0x00080000 ///< Force low delay. -#define CODEC_FLAG_GLOBAL_HEADER 0x00400000 ///< Place global headers in extradata instead of every keyframe. -#define CODEC_FLAG_BITEXACT 0x00800000 ///< Use only bitexact stuff (except (I)DCT). -/* Fx : Flag for h263+ extra options */ -#define CODEC_FLAG_AC_PRED 0x01000000 ///< H.263 advanced intra coding / MPEG-4 AC prediction -#define CODEC_FLAG_LOOP_FILTER 0x00000800 ///< loop filter -#define CODEC_FLAG_INTERLACED_ME 0x20000000 ///< interlaced motion estimation -#define CODEC_FLAG_CLOSED_GOP 0x80000000 -#define CODEC_FLAG2_FAST 0x00000001 ///< Allow non spec compliant speedup tricks. -#define CODEC_FLAG2_NO_OUTPUT 0x00000004 ///< Skip bitstream encoding. -#define CODEC_FLAG2_LOCAL_HEADER 0x00000008 ///< Place global headers at every keyframe instead of in extradata. -#define CODEC_FLAG2_DROP_FRAME_TIMECODE 0x00002000 ///< timecode is in drop frame format. DEPRECATED!!!! -#define CODEC_FLAG2_IGNORE_CROP 0x00010000 ///< Discard cropping information from SPS. - -#define CODEC_FLAG2_CHUNKS 0x00008000 ///< Input bitstream might be truncated at a packet boundaries instead of only at frame boundaries. 
-#define CODEC_FLAG2_SHOW_ALL 0x00400000 ///< Show all frames before the first keyframe - -/* Unsupported options : - * Syntax Arithmetic coding (SAC) - * Reference Picture Selection - * Independent Segment Decoding */ -/* /Fx */ -/* codec capabilities */ - -#define CODEC_CAP_DRAW_HORIZ_BAND 0x0001 ///< Decoder can use draw_horiz_band callback. -/** - * Codec uses get_buffer() for allocating buffers and supports custom allocators. - * If not set, it might not use get_buffer() at all or use operations that - * assume the buffer was allocated by avcodec_default_get_buffer. - */ -#define CODEC_CAP_DR1 0x0002 -#define CODEC_CAP_TRUNCATED 0x0008 -/* Codec can export data for HW decoding (XvMC). */ -#define CODEC_CAP_HWACCEL 0x0010 -/** - * Encoder or decoder requires flushing with NULL input at the end in order to - * give the complete and correct output. - * - * NOTE: If this flag is not set, the codec is guaranteed to never be fed with - * with NULL data. The user can still send NULL data to the public encode - * or decode function, but libavcodec will not pass it along to the codec - * unless this flag is set. - * - * Decoders: - * The decoder has a non-zero delay and needs to be fed with avpkt->data=NULL, - * avpkt->size=0 at the end to get the delayed data until the decoder no longer - * returns frames. - * - * Encoders: - * The encoder needs to be fed with NULL data at the end of encoding until the - * encoder no longer returns data. - * - * NOTE: For encoders implementing the AVCodec.encode2() function, setting this - * flag also means that the encoder must set the pts and duration for - * each output packet. If this flag is not set, the pts and duration will - * be determined by libavcodec from the input frame. - */ -#define CODEC_CAP_DELAY 0x0020 -/** - * Codec can be fed a final frame with a smaller size. - * This can be used to prevent truncation of the last audio samples. - */ -#define CODEC_CAP_SMALL_LAST_FRAME 0x0040 -/** - * Codec can export data for HW decoding (VDPAU). - */ -#define CODEC_CAP_HWACCEL_VDPAU 0x0080 -/** - * Codec can output multiple frames per AVPacket - * Normally demuxers return one frame at a time, demuxers which do not do - * are connected to a parser to split what they return into proper frames. - * This flag is reserved to the very rare category of codecs which have a - * bitstream that cannot be split into frames without timeconsuming - * operations like full decoding. Demuxers carring such bitstreams thus - * may return multiple frames in a packet. This has many disadvantages like - * prohibiting stream copy in many cases thus it should only be considered - * as a last resort. - */ -#define CODEC_CAP_SUBFRAMES 0x0100 -/** - * Codec is experimental and is thus avoided in favor of non experimental - * encoders - */ -#define CODEC_CAP_EXPERIMENTAL 0x0200 -/** - * Codec should fill in channel configuration and samplerate instead of container - */ -#define CODEC_CAP_CHANNEL_CONF 0x0400 - -/** - * Codec is able to deal with negative linesizes - */ -#define CODEC_CAP_NEG_LINESIZES 0x0800 - -/** - * Codec supports frame-level multithreading. - */ -#define CODEC_CAP_FRAME_THREADS 0x1000 -/** - * Codec supports slice-based (or partition-based) multithreading. - */ -#define CODEC_CAP_SLICE_THREADS 0x2000 -/** - * Codec supports changed parameters at any point. - */ -#define CODEC_CAP_PARAM_CHANGE 0x4000 -/** - * Codec supports avctx->thread_count == 0 (auto). 
- */ -#define CODEC_CAP_AUTO_THREADS 0x8000 -/** - * Audio encoder supports receiving a different number of samples in each call. - */ -#define CODEC_CAP_VARIABLE_FRAME_SIZE 0x10000 -/** - * Codec is intra only. - */ -#define CODEC_CAP_INTRA_ONLY 0x40000000 -/** - * Codec is lossless. - */ -#define CODEC_CAP_LOSSLESS 0x80000000 - -//The following defines may change, don't expect compatibility if you use them. -#define MB_TYPE_INTRA4x4 0x0001 -#define MB_TYPE_INTRA16x16 0x0002 //FIXME H.264-specific -#define MB_TYPE_INTRA_PCM 0x0004 //FIXME H.264-specific -#define MB_TYPE_16x16 0x0008 -#define MB_TYPE_16x8 0x0010 -#define MB_TYPE_8x16 0x0020 -#define MB_TYPE_8x8 0x0040 -#define MB_TYPE_INTERLACED 0x0080 -#define MB_TYPE_DIRECT2 0x0100 //FIXME -#define MB_TYPE_ACPRED 0x0200 -#define MB_TYPE_GMC 0x0400 -#define MB_TYPE_SKIP 0x0800 -#define MB_TYPE_P0L0 0x1000 -#define MB_TYPE_P1L0 0x2000 -#define MB_TYPE_P0L1 0x4000 -#define MB_TYPE_P1L1 0x8000 -#define MB_TYPE_L0 (MB_TYPE_P0L0 | MB_TYPE_P1L0) -#define MB_TYPE_L1 (MB_TYPE_P0L1 | MB_TYPE_P1L1) -#define MB_TYPE_L0L1 (MB_TYPE_L0 | MB_TYPE_L1) -#define MB_TYPE_QUANT 0x00010000 -#define MB_TYPE_CBP 0x00020000 -//Note bits 24-31 are reserved for codec specific use (h264 ref0, mpeg1 0mv, ...) - -/** - * Pan Scan area. - * This specifies the area which should be displayed. - * Note there may be multiple such areas for one frame. - */ -typedef struct AVPanScan{ - /** - * id - * - encoding: Set by user. - * - decoding: Set by libavcodec. - */ - int id; - - /** - * width and height in 1/16 pel - * - encoding: Set by user. - * - decoding: Set by libavcodec. - */ - int width; - int height; - - /** - * position of the top left corner in 1/16 pel for up to 3 fields/frames - * - encoding: Set by user. - * - decoding: Set by libavcodec. - */ - int16_t position[3][2]; -}AVPanScan; - -#define FF_QSCALE_TYPE_MPEG1 0 -#define FF_QSCALE_TYPE_MPEG2 1 -#define FF_QSCALE_TYPE_H264 2 -#define FF_QSCALE_TYPE_VP56 3 - -#if FF_API_GET_BUFFER -#define FF_BUFFER_TYPE_INTERNAL 1 -#define FF_BUFFER_TYPE_USER 2 ///< direct rendering buffers (image is (de)allocated by user) -#define FF_BUFFER_TYPE_SHARED 4 ///< Buffer from somewhere else; don't deallocate image (data/base), all other tables are not shared. -#define FF_BUFFER_TYPE_COPY 8 ///< Just a (modified) copy of some other buffer, don't deallocate anything. - -#define FF_BUFFER_HINTS_VALID 0x01 // Buffer hints value is meaningful (if 0 ignore). -#define FF_BUFFER_HINTS_READABLE 0x02 // Codec will read from buffer. -#define FF_BUFFER_HINTS_PRESERVE 0x04 // User must not alter buffer content. -#define FF_BUFFER_HINTS_REUSABLE 0x08 // Codec will reuse the buffer (update). -#endif - -/** - * The decoder will keep a reference to the frame and may reuse it later. - */ -#define AV_GET_BUFFER_FLAG_REF (1 << 0) - -/** - * @defgroup lavc_packet AVPacket - * - * Types and functions for working with AVPacket. 
- * @{ - */ -enum AVPacketSideDataType { - AV_PKT_DATA_PALETTE, - AV_PKT_DATA_NEW_EXTRADATA, - - /** - * An AV_PKT_DATA_PARAM_CHANGE side data packet is laid out as follows: - * @code - * u32le param_flags - * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT) - * s32le channel_count - * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT) - * u64le channel_layout - * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE) - * s32le sample_rate - * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS) - * s32le width - * s32le height - * @endcode - */ - AV_PKT_DATA_PARAM_CHANGE, - - /** - * An AV_PKT_DATA_H263_MB_INFO side data packet contains a number of - * structures with info about macroblocks relevant to splitting the - * packet into smaller packets on macroblock edges (e.g. as for RFC 2190). - * That is, it does not necessarily contain info about all macroblocks, - * as long as the distance between macroblocks in the info is smaller - * than the target payload size. - * Each MB info structure is 12 bytes, and is laid out as follows: - * @code - * u32le bit offset from the start of the packet - * u8 current quantizer at the start of the macroblock - * u8 GOB number - * u16le macroblock address within the GOB - * u8 horizontal MV predictor - * u8 vertical MV predictor - * u8 horizontal MV predictor for block number 3 - * u8 vertical MV predictor for block number 3 - * @endcode - */ - AV_PKT_DATA_H263_MB_INFO, - - /** - * Recommmends skipping the specified number of samples - * @code - * u32le number of samples to skip from start of this packet - * u32le number of samples to skip from end of this packet - * u8 reason for start skip - * u8 reason for end skip (0=padding silence, 1=convergence) - * @endcode - */ - AV_PKT_DATA_SKIP_SAMPLES=70, - - /** - * An AV_PKT_DATA_JP_DUALMONO side data packet indicates that - * the packet may contain "dual mono" audio specific to Japanese DTV - * and if it is true, recommends only the selected channel to be used. - * @code - * u8 selected channels (0=mail/left, 1=sub/right, 2=both) - * @endcode - */ - AV_PKT_DATA_JP_DUALMONO, - - /** - * A list of zero terminated key/value strings. There is no end marker for - * the list, so it is required to rely on the side data size to stop. - */ - AV_PKT_DATA_STRINGS_METADATA, - - /** - * Subtitle event position - * @code - * u32le x1 - * u32le y1 - * u32le x2 - * u32le y2 - * @endcode - */ - AV_PKT_DATA_SUBTITLE_POSITION, - - /** - * Data found in BlockAdditional element of matroska container. There is - * no end marker for the data, so it is required to rely on the side data - * size to recognize the end. 8 byte id (as found in BlockAddId) followed - * by data. - */ - AV_PKT_DATA_MATROSKA_BLOCKADDITIONAL, - - /** - * The optional first identifier line of a WebVTT cue. - */ - AV_PKT_DATA_WEBVTT_IDENTIFIER, - - /** - * The optional settings (rendering instructions) that immediately - * follow the timestamp specifier of a WebVTT cue. - */ - AV_PKT_DATA_WEBVTT_SETTINGS, -}; - -/** - * This structure stores compressed data. It is typically exported by demuxers - * and then passed as input to decoders, or received as output from encoders and - * then passed to muxers. - * - * For video, it should typically contain one compressed frame. For audio it may - * contain several compressed frames. - * - * AVPacket is one of the few structs in FFmpeg, whose size is a part of public - * ABI. 
Thus it may be allocated on stack and no new fields can be added to it - * without libavcodec and libavformat major bump. - * - * The semantics of data ownership depends on the buf or destruct (deprecated) - * fields. If either is set, the packet data is dynamically allocated and is - * valid indefinitely until av_free_packet() is called (which in turn calls - * av_buffer_unref()/the destruct callback to free the data). If neither is set, - * the packet data is typically backed by some static buffer somewhere and is - * only valid for a limited time (e.g. until the next read call when demuxing). - * - * The side data is always allocated with av_malloc() and is freed in - * av_free_packet(). - */ -typedef struct AVPacket { - /** - * A reference to the reference-counted buffer where the packet data is - * stored. - * May be NULL, then the packet data is not reference-counted. - */ - AVBufferRef *buf; - /** - * Presentation timestamp in AVStream->time_base units; the time at which - * the decompressed packet will be presented to the user. - * Can be AV_NOPTS_VALUE if it is not stored in the file. - * pts MUST be larger or equal to dts as presentation cannot happen before - * decompression, unless one wants to view hex dumps. Some formats misuse - * the terms dts and pts/cts to mean something different. Such timestamps - * must be converted to true pts/dts before they are stored in AVPacket. - */ - int64_t pts; - /** - * Decompression timestamp in AVStream->time_base units; the time at which - * the packet is decompressed. - * Can be AV_NOPTS_VALUE if it is not stored in the file. - */ - int64_t dts; - uint8_t *data; - int size; - int stream_index; - /** - * A combination of AV_PKT_FLAG values - */ - int flags; - /** - * Additional packet data that can be provided by the container. - * Packet can contain several types of side information. - */ - struct { - uint8_t *data; - int size; - enum AVPacketSideDataType type; - } *side_data; - int side_data_elems; - - /** - * Duration of this packet in AVStream->time_base units, 0 if unknown. - * Equals next_pts - this_pts in presentation order. - */ - int duration; -#if FF_API_DESTRUCT_PACKET - attribute_deprecated - void (*destruct)(struct AVPacket *); - attribute_deprecated - void *priv; -#endif - int64_t pos; ///< byte position in stream, -1 if unknown - - /** - * Time difference in AVStream->time_base units from the pts of this - * packet to the point at which the output from the decoder has converged - * independent from the availability of previous frames. That is, the - * frames are virtually identical no matter if decoding started from - * the very first frame or from this keyframe. - * Is AV_NOPTS_VALUE if unknown. - * This field is not the display duration of the current packet. - * This field has no meaning if the packet does not have AV_PKT_FLAG_KEY - * set. - * - * The purpose of this field is to allow seeking in streams that have no - * keyframes in the conventional sense. It corresponds to the - * recovery point SEI in H.264 and match_time_delta in NUT. It is also - * essential for some types of subtitle streams to ensure that all - * subtitles are correctly displayed after seeking. 
- */ - int64_t convergence_duration; -} AVPacket; -#define AV_PKT_FLAG_KEY 0x0001 ///< The packet contains a keyframe -#define AV_PKT_FLAG_CORRUPT 0x0002 ///< The packet content is corrupted - -enum AVSideDataParamChangeFlags { - AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT = 0x0001, - AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT = 0x0002, - AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE = 0x0004, - AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS = 0x0008, -}; -/** - * @} - */ - -struct AVCodecInternal; - -enum AVFieldOrder { - AV_FIELD_UNKNOWN, - AV_FIELD_PROGRESSIVE, - AV_FIELD_TT, //< Top coded_first, top displayed first - AV_FIELD_BB, //< Bottom coded first, bottom displayed first - AV_FIELD_TB, //< Top coded first, bottom displayed first - AV_FIELD_BT, //< Bottom coded first, top displayed first -}; - -/** - * main external API structure. - * New fields can be added to the end with minor version bumps. - * Removal, reordering and changes to existing fields require a major - * version bump. - * Please use AVOptions (av_opt* / av_set/get*()) to access these fields from user - * applications. - * sizeof(AVCodecContext) must not be used outside libav*. - */ -typedef struct AVCodecContext { - /** - * information on struct for av_log - * - set by avcodec_alloc_context3 - */ - const AVClass *av_class; - int log_level_offset; - - enum AVMediaType codec_type; /* see AVMEDIA_TYPE_xxx */ - const struct AVCodec *codec; - char codec_name[32]; - enum AVCodecID codec_id; /* see AV_CODEC_ID_xxx */ - - /** - * fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A'). - * This is used to work around some encoder bugs. - * A demuxer should set this to what is stored in the field used to identify the codec. - * If there are multiple such fields in a container then the demuxer should choose the one - * which maximizes the information about the used codec. - * If the codec tag field in a container is larger than 32 bits then the demuxer should - * remap the longer ID to 32 bits with a table or other structure. Alternatively a new - * extra_codec_tag + size could be added but for this a clear advantage must be demonstrated - * first. - * - encoding: Set by user, if not then the default based on codec_id will be used. - * - decoding: Set by user, will be converted to uppercase by libavcodec during init. - */ - unsigned int codec_tag; - - /** - * fourcc from the AVI stream header (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A'). - * This is used to work around some encoder bugs. - * - encoding: unused - * - decoding: Set by user, will be converted to uppercase by libavcodec during init. - */ - unsigned int stream_codec_tag; - - void *priv_data; - - /** - * Private context used for internal data. - * - * Unlike priv_data, this is not codec-specific. It is used in general - * libavcodec functions. - */ - struct AVCodecInternal *internal; - - /** - * Private data of the user, can be used to carry app specific stuff. - * - encoding: Set by user. - * - decoding: Set by user. - */ - void *opaque; - - /** - * the average bitrate - * - encoding: Set by user; unused for constant quantizer encoding. - * - decoding: Set by libavcodec. 0 or some bitrate if this info is available in the stream. - */ - int bit_rate; - - /** - * number of bits the bitstream is allowed to diverge from the reference. - * the reference can be CBR (for CBR pass1) or VBR (for pass2) - * - encoding: Set by user; unused for constant quantizer encoding. 
- * - decoding: unused - */ - int bit_rate_tolerance; - - /** - * Global quality for codecs which cannot change it per frame. - * This should be proportional to MPEG-1/2/4 qscale. - * - encoding: Set by user. - * - decoding: unused - */ - int global_quality; - - /** - * - encoding: Set by user. - * - decoding: unused - */ - int compression_level; -#define FF_COMPRESSION_DEFAULT -1 - - /** - * CODEC_FLAG_*. - * - encoding: Set by user. - * - decoding: Set by user. - */ - int flags; - - /** - * CODEC_FLAG2_* - * - encoding: Set by user. - * - decoding: Set by user. - */ - int flags2; - - /** - * some codecs need / can use extradata like Huffman tables. - * mjpeg: Huffman tables - * rv10: additional flags - * mpeg4: global headers (they can be in the bitstream or here) - * The allocated memory should be FF_INPUT_BUFFER_PADDING_SIZE bytes larger - * than extradata_size to avoid problems if it is read with the bitstream reader. - * The bytewise contents of extradata must not depend on the architecture or CPU endianness. - * - encoding: Set/allocated/freed by libavcodec. - * - decoding: Set/allocated/freed by user. - */ - uint8_t *extradata; - int extradata_size; - - /** - * This is the fundamental unit of time (in seconds) in terms - * of which frame timestamps are represented. For fixed-fps content, - * timebase should be 1/framerate and timestamp increments should be - * identically 1. - * - encoding: MUST be set by user. - * - decoding: Set by libavcodec. - */ - AVRational time_base; - - /** - * For some codecs, the time base is closer to the field rate than the frame rate. - * Most notably, H.264 and MPEG-2 specify time_base as half of frame duration - * if no telecine is used ... - * - * Set to time_base ticks per frame. Default 1, e.g., H.264/MPEG-2 set it to 2. - */ - int ticks_per_frame; - - /** - * Codec delay. - * - * Encoding: Number of frames delay there will be from the encoder input to - * the decoder output. (we assume the decoder matches the spec) - * Decoding: Number of frames delay in addition to what a standard decoder - * as specified in the spec would produce. - * - * Video: - * Number of frames the decoded output will be delayed relative to the - * encoded input. - * - * Audio: - * For encoding, this is the number of "priming" samples added to the - * beginning of the stream. The decoded output will be delayed by this - * many samples relative to the input to the encoder. Note that this - * field is purely informational and does not directly affect the pts - * output by the encoder, which should always be based on the actual - * presentation time, including any delay. - * For decoding, this is the number of samples the decoder needs to - * output before the decoder's output is valid. When seeking, you should - * start decoding this many samples prior to your desired seek point. - * - * - encoding: Set by libavcodec. - * - decoding: Set by libavcodec. - */ - int delay; - - - /* video only */ - /** - * picture width / height. - * - encoding: MUST be set by user. - * - decoding: May be set by the user before opening the decoder if known e.g. - * from the container. Some decoders will require the dimensions - * to be set by the caller. During decoding, the decoder may - * overwrite those values as required. - */ - int width, height; - - /** - * Bitstream width / height, may be different from width/height e.g. when - * the decoded frame is cropped before being output or lowres is enabled. 
- * - encoding: unused - * - decoding: May be set by the user before opening the decoder if known - * e.g. from the container. During decoding, the decoder may - * overwrite those values as required. - */ - int coded_width, coded_height; - -#define FF_ASPECT_EXTENDED 15 - - /** - * the number of pictures in a group of pictures, or 0 for intra_only - * - encoding: Set by user. - * - decoding: unused - */ - int gop_size; - - /** - * Pixel format, see AV_PIX_FMT_xxx. - * May be set by the demuxer if known from headers. - * May be overridden by the decoder if it knows better. - * - encoding: Set by user. - * - decoding: Set by user if known, overridden by libavcodec if known - */ - enum AVPixelFormat pix_fmt; - - /** - * Motion estimation algorithm used for video coding. - * 1 (zero), 2 (full), 3 (log), 4 (phods), 5 (epzs), 6 (x1), 7 (hex), - * 8 (umh), 9 (iter), 10 (tesa) [7, 8, 10 are x264 specific, 9 is snow specific] - * - encoding: MUST be set by user. - * - decoding: unused - */ - int me_method; - - /** - * If non NULL, 'draw_horiz_band' is called by the libavcodec - * decoder to draw a horizontal band. It improves cache usage. Not - * all codecs can do that. You must check the codec capabilities - * beforehand. - * When multithreading is used, it may be called from multiple threads - * at the same time; threads might draw different parts of the same AVFrame, - * or multiple AVFrames, and there is no guarantee that slices will be drawn - * in order. - * The function is also used by hardware acceleration APIs. - * It is called at least once during frame decoding to pass - * the data needed for hardware render. - * In that mode instead of pixel data, AVFrame points to - * a structure specific to the acceleration API. The application - * reads the structure and can change some fields to indicate progress - * or mark state. - * - encoding: unused - * - decoding: Set by user. - * @param height the height of the slice - * @param y the y position of the slice - * @param type 1->top field, 2->bottom field, 3->frame - * @param offset offset into the AVFrame.data from which the slice should be read - */ - void (*draw_horiz_band)(struct AVCodecContext *s, - const AVFrame *src, int offset[AV_NUM_DATA_POINTERS], - int y, int type, int height); - - /** - * callback to negotiate the pixelFormat - * @param fmt is the list of formats which are supported by the codec, - * it is terminated by -1 as 0 is a valid format, the formats are ordered by quality. - * The first is always the native one. - * @return the chosen format - * - encoding: unused - * - decoding: Set by user, if not set the native format will be chosen. - */ - enum AVPixelFormat (*get_format)(struct AVCodecContext *s, const enum AVPixelFormat * fmt); - - /** - * maximum number of B-frames between non-B-frames - * Note: The output will be delayed by max_b_frames+1 relative to the input. - * - encoding: Set by user. - * - decoding: unused - */ - int max_b_frames; - - /** - * qscale factor between IP and B-frames - * If > 0 then the last P-frame quantizer will be used (q= lastp_q*factor+offset). - * If < 0 then normal ratecontrol will be done (q= -normal_q*factor+offset). - * - encoding: Set by user. - * - decoding: unused - */ - float b_quant_factor; - - /** obsolete FIXME remove */ - int rc_strategy; -#define FF_RC_STRATEGY_XVID 1 - - int b_frame_strategy; - - /** - * qscale offset between IP and B-frames - * - encoding: Set by user. 
- * - decoding: unused - */ - float b_quant_offset; - - /** - * Size of the frame reordering buffer in the decoder. - * For MPEG-2 it is 1 IPB or 0 low delay IP. - * - encoding: Set by libavcodec. - * - decoding: Set by libavcodec. - */ - int has_b_frames; - - /** - * 0-> h263 quant 1-> mpeg quant - * - encoding: Set by user. - * - decoding: unused - */ - int mpeg_quant; - - /** - * qscale factor between P and I-frames - * If > 0 then the last p frame quantizer will be used (q= lastp_q*factor+offset). - * If < 0 then normal ratecontrol will be done (q= -normal_q*factor+offset). - * - encoding: Set by user. - * - decoding: unused - */ - float i_quant_factor; - - /** - * qscale offset between P and I-frames - * - encoding: Set by user. - * - decoding: unused - */ - float i_quant_offset; - - /** - * luminance masking (0-> disabled) - * - encoding: Set by user. - * - decoding: unused - */ - float lumi_masking; - - /** - * temporary complexity masking (0-> disabled) - * - encoding: Set by user. - * - decoding: unused - */ - float temporal_cplx_masking; - - /** - * spatial complexity masking (0-> disabled) - * - encoding: Set by user. - * - decoding: unused - */ - float spatial_cplx_masking; - - /** - * p block masking (0-> disabled) - * - encoding: Set by user. - * - decoding: unused - */ - float p_masking; - - /** - * darkness masking (0-> disabled) - * - encoding: Set by user. - * - decoding: unused - */ - float dark_masking; - - /** - * slice count - * - encoding: Set by libavcodec. - * - decoding: Set by user (or 0). - */ - int slice_count; - /** - * prediction method (needed for huffyuv) - * - encoding: Set by user. - * - decoding: unused - */ - int prediction_method; -#define FF_PRED_LEFT 0 -#define FF_PRED_PLANE 1 -#define FF_PRED_MEDIAN 2 - - /** - * slice offsets in the frame in bytes - * - encoding: Set/allocated by libavcodec. - * - decoding: Set/allocated by user (or NULL). - */ - int *slice_offset; - - /** - * sample aspect ratio (0 if unknown) - * That is the width of a pixel divided by the height of the pixel. - * Numerator and denominator must be relatively prime and smaller than 256 for some video standards. - * - encoding: Set by user. - * - decoding: Set by libavcodec. - */ - AVRational sample_aspect_ratio; - - /** - * motion estimation comparison function - * - encoding: Set by user. - * - decoding: unused - */ - int me_cmp; - /** - * subpixel motion estimation comparison function - * - encoding: Set by user. - * - decoding: unused - */ - int me_sub_cmp; - /** - * macroblock comparison function (not supported yet) - * - encoding: Set by user. - * - decoding: unused - */ - int mb_cmp; - /** - * interlaced DCT comparison function - * - encoding: Set by user. - * - decoding: unused - */ - int ildct_cmp; -#define FF_CMP_SAD 0 -#define FF_CMP_SSE 1 -#define FF_CMP_SATD 2 -#define FF_CMP_DCT 3 -#define FF_CMP_PSNR 4 -#define FF_CMP_BIT 5 -#define FF_CMP_RD 6 -#define FF_CMP_ZERO 7 -#define FF_CMP_VSAD 8 -#define FF_CMP_VSSE 9 -#define FF_CMP_NSSE 10 -#define FF_CMP_W53 11 -#define FF_CMP_W97 12 -#define FF_CMP_DCTMAX 13 -#define FF_CMP_DCT264 14 -#define FF_CMP_CHROMA 256 - - /** - * ME diamond size & shape - * - encoding: Set by user. - * - decoding: unused - */ - int dia_size; - - /** - * amount of previous MV predictors (2a+1 x 2a+1 square) - * - encoding: Set by user. - * - decoding: unused - */ - int last_predictor_count; - - /** - * prepass for motion estimation - * - encoding: Set by user. 
- * - decoding: unused - */ - int pre_me; - - /** - * motion estimation prepass comparison function - * - encoding: Set by user. - * - decoding: unused - */ - int me_pre_cmp; - - /** - * ME prepass diamond size & shape - * - encoding: Set by user. - * - decoding: unused - */ - int pre_dia_size; - - /** - * subpel ME quality - * - encoding: Set by user. - * - decoding: unused - */ - int me_subpel_quality; - - /** - * DTG active format information (additional aspect ratio - * information only used in DVB MPEG-2 transport streams) - * 0 if not set. - * - * - encoding: unused - * - decoding: Set by decoder. - */ - int dtg_active_format; -#define FF_DTG_AFD_SAME 8 -#define FF_DTG_AFD_4_3 9 -#define FF_DTG_AFD_16_9 10 -#define FF_DTG_AFD_14_9 11 -#define FF_DTG_AFD_4_3_SP_14_9 13 -#define FF_DTG_AFD_16_9_SP_14_9 14 -#define FF_DTG_AFD_SP_4_3 15 - - /** - * maximum motion estimation search range in subpel units - * If 0 then no limit. - * - * - encoding: Set by user. - * - decoding: unused - */ - int me_range; - - /** - * intra quantizer bias - * - encoding: Set by user. - * - decoding: unused - */ - int intra_quant_bias; -#define FF_DEFAULT_QUANT_BIAS 999999 - - /** - * inter quantizer bias - * - encoding: Set by user. - * - decoding: unused - */ - int inter_quant_bias; - - /** - * slice flags - * - encoding: unused - * - decoding: Set by user. - */ - int slice_flags; -#define SLICE_FLAG_CODED_ORDER 0x0001 ///< draw_horiz_band() is called in coded order instead of display -#define SLICE_FLAG_ALLOW_FIELD 0x0002 ///< allow draw_horiz_band() with field slices (MPEG2 field pics) -#define SLICE_FLAG_ALLOW_PLANE 0x0004 ///< allow draw_horiz_band() with 1 component at a time (SVQ1) - - /** - * XVideo Motion Acceleration - * - encoding: forbidden - * - decoding: set by decoder - */ - int xvmc_acceleration; - - /** - * macroblock decision mode - * - encoding: Set by user. - * - decoding: unused - */ - int mb_decision; -#define FF_MB_DECISION_SIMPLE 0 ///< uses mb_cmp -#define FF_MB_DECISION_BITS 1 ///< chooses the one which needs the fewest bits -#define FF_MB_DECISION_RD 2 ///< rate distortion - - /** - * custom intra quantization matrix - * - encoding: Set by user, can be NULL. - * - decoding: Set by libavcodec. - */ - uint16_t *intra_matrix; - - /** - * custom inter quantization matrix - * - encoding: Set by user, can be NULL. - * - decoding: Set by libavcodec. - */ - uint16_t *inter_matrix; - - /** - * scene change detection threshold - * 0 is default, larger means fewer detected scene changes. - * - encoding: Set by user. - * - decoding: unused - */ - int scenechange_threshold; - - /** - * noise reduction strength - * - encoding: Set by user. - * - decoding: unused - */ - int noise_reduction; - - /** - * Motion estimation threshold below which no motion estimation is - * performed, but instead the user specified motion vectors are used. - * - * - encoding: Set by user. - * - decoding: unused - */ - int me_threshold; - - /** - * Macroblock threshold below which the user specified macroblock types will be used. - * - encoding: Set by user. - * - decoding: unused - */ - int mb_threshold; - - /** - * precision of the intra DC coefficient - 8 - * - encoding: Set by user. - * - decoding: unused - */ - int intra_dc_precision; - - /** - * Number of macroblock rows at the top which are skipped. - * - encoding: unused - * - decoding: Set by user. - */ - int skip_top; - - /** - * Number of macroblock rows at the bottom which are skipped. - * - encoding: unused - * - decoding: Set by user. 
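/* A hedged configuration sketch for the encoder-side fields above; the values
 * are illustrative rather than recommendations, and tune_mb_decision is an
 * invented helper name. */
static void tune_mb_decision(AVCodecContext *avctx)
{
    avctx->mb_decision           = FF_MB_DECISION_RD;  /* rate-distortion MB mode choice */
    avctx->scenechange_threshold = 40;                 /* larger -> fewer detected cuts  */
    avctx->noise_reduction       = 0;                  /* 0 disables noise reduction     */
    avctx->intra_dc_precision    = 0;                  /* DC precision = 8 + this value  */
}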
- */ - int skip_bottom; - - /** - * Border processing masking, raises the quantizer for mbs on the borders - * of the picture. - * - encoding: Set by user. - * - decoding: unused - */ - float border_masking; - - /** - * minimum MB lagrange multipler - * - encoding: Set by user. - * - decoding: unused - */ - int mb_lmin; - - /** - * maximum MB lagrange multipler - * - encoding: Set by user. - * - decoding: unused - */ - int mb_lmax; - - /** - * - * - encoding: Set by user. - * - decoding: unused - */ - int me_penalty_compensation; - - /** - * - * - encoding: Set by user. - * - decoding: unused - */ - int bidir_refine; - - /** - * - * - encoding: Set by user. - * - decoding: unused - */ - int brd_scale; - - /** - * minimum GOP size - * - encoding: Set by user. - * - decoding: unused - */ - int keyint_min; - - /** - * number of reference frames - * - encoding: Set by user. - * - decoding: Set by lavc. - */ - int refs; - - /** - * chroma qp offset from luma - * - encoding: Set by user. - * - decoding: unused - */ - int chromaoffset; - - /** - * Multiplied by qscale for each frame and added to scene_change_score. - * - encoding: Set by user. - * - decoding: unused - */ - int scenechange_factor; - - /** - * - * Note: Value depends upon the compare function used for fullpel ME. - * - encoding: Set by user. - * - decoding: unused - */ - int mv0_threshold; - - /** - * Adjust sensitivity of b_frame_strategy 1. - * - encoding: Set by user. - * - decoding: unused - */ - int b_sensitivity; - - /** - * Chromaticity coordinates of the source primaries. - * - encoding: Set by user - * - decoding: Set by libavcodec - */ - enum AVColorPrimaries color_primaries; - - /** - * Color Transfer Characteristic. - * - encoding: Set by user - * - decoding: Set by libavcodec - */ - enum AVColorTransferCharacteristic color_trc; - - /** - * YUV colorspace type. - * - encoding: Set by user - * - decoding: Set by libavcodec - */ - enum AVColorSpace colorspace; - - /** - * MPEG vs JPEG YUV range. - * - encoding: Set by user - * - decoding: Set by libavcodec - */ - enum AVColorRange color_range; - - /** - * This defines the location of chroma samples. - * - encoding: Set by user - * - decoding: Set by libavcodec - */ - enum AVChromaLocation chroma_sample_location; - - /** - * Number of slices. - * Indicates number of picture subdivisions. Used for parallelized - * decoding. - * - encoding: Set by user - * - decoding: unused - */ - int slices; - - /** Field order - * - encoding: set by libavcodec - * - decoding: Set by user. - */ - enum AVFieldOrder field_order; - - /* audio only */ - int sample_rate; ///< samples per second - int channels; ///< number of audio channels - - /** - * audio sample format - * - encoding: Set by user. - * - decoding: Set by libavcodec. - */ - enum AVSampleFormat sample_fmt; ///< sample format - - /* The following data should not be initialized. */ - /** - * Number of samples per channel in an audio frame. - * - * - encoding: set by libavcodec in avcodec_open2(). Each submitted frame - * except the last must contain exactly frame_size samples per channel. - * May be 0 when the codec has CODEC_CAP_VARIABLE_FRAME_SIZE set, then the - * frame size is not restricted. - * - decoding: may be set by some decoders to indicate constant frame size - */ - int frame_size; - - /** - * Frame counter, set by libavcodec. - * - * - decoding: total number of frames returned from the decoder so far. - * - encoding: total number of frames passed to the encoder so far. 
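/* A sketch of minimal audio-encoder setup with the audio fields above; the
 * helper name and constants are illustrative, error handling trimmed. */
static int open_audio_encoder(AVCodecContext *avctx, AVCodec *codec)
{
    avctx->sample_rate = 44100;
    avctx->channels    = 2;
    avctx->sample_fmt  = codec->sample_fmts ? codec->sample_fmts[0]
                                            : AV_SAMPLE_FMT_S16;
    if (avcodec_open2(avctx, codec, NULL) < 0)
        return -1;
    /* After opening, avctx->frame_size gives the samples-per-channel each
     * submitted frame must carry (0 means the size is not restricted). */
    return 0;
}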
- * - * @note the counter is not incremented if encoding/decoding resulted in - * an error. - */ - int frame_number; - - /** - * number of bytes per packet if constant and known or 0 - * Used by some WAV based audio codecs. - */ - int block_align; - - /** - * Audio cutoff bandwidth (0 means "automatic") - * - encoding: Set by user. - * - decoding: unused - */ - int cutoff; - -#if FF_API_REQUEST_CHANNELS - /** - * Decoder should decode to this many channels if it can (0 for default) - * - encoding: unused - * - decoding: Set by user. - * @deprecated Deprecated in favor of request_channel_layout. - */ - int request_channels; -#endif - - /** - * Audio channel layout. - * - encoding: set by user. - * - decoding: set by user, may be overwritten by libavcodec. - */ - uint64_t channel_layout; - - /** - * Request decoder to use this channel layout if it can (0 for default) - * - encoding: unused - * - decoding: Set by user. - */ - uint64_t request_channel_layout; - - /** - * Type of service that the audio stream conveys. - * - encoding: Set by user. - * - decoding: Set by libavcodec. - */ - enum AVAudioServiceType audio_service_type; - - /** - * desired sample format - * - encoding: Not used. - * - decoding: Set by user. - * Decoder will decode to this format if it can. - */ - enum AVSampleFormat request_sample_fmt; - -#if FF_API_GET_BUFFER - /** - * Called at the beginning of each frame to get a buffer for it. - * - * The function will set AVFrame.data[], AVFrame.linesize[]. - * AVFrame.extended_data[] must also be set, but it should be the same as - * AVFrame.data[] except for planar audio with more channels than can fit - * in AVFrame.data[]. In that case, AVFrame.data[] shall still contain as - * many data pointers as it can hold. - * - * if CODEC_CAP_DR1 is not set then get_buffer() must call - * avcodec_default_get_buffer() instead of providing buffers allocated by - * some other means. - * - * AVFrame.data[] should be 32- or 16-byte-aligned unless the CPU doesn't - * need it. avcodec_default_get_buffer() aligns the output buffer properly, - * but if get_buffer() is overridden then alignment considerations should - * be taken into account. - * - * @see avcodec_default_get_buffer() - * - * Video: - * - * If pic.reference is set then the frame will be read later by libavcodec. - * avcodec_align_dimensions2() should be used to find the required width and - * height, as they normally need to be rounded up to the next multiple of 16. - * - * If frame multithreading is used and thread_safe_callbacks is set, - * it may be called from a different thread, but not from more than one at - * once. Does not need to be reentrant. - * - * @see release_buffer(), reget_buffer() - * @see avcodec_align_dimensions2() - * - * Audio: - * - * Decoders request a buffer of a particular size by setting - * AVFrame.nb_samples prior to calling get_buffer(). The decoder may, - * however, utilize only part of the buffer by setting AVFrame.nb_samples - * to a smaller value in the output frame. - * - * Decoders cannot use the buffer after returning from - * avcodec_decode_audio4(), so they will not call release_buffer(), as it - * is assumed to be released immediately upon return. In some rare cases, - * a decoder may need to call get_buffer() more than once in a single - * call to avcodec_decode_audio4(). In that case, when get_buffer() is - * called again after it has already been called once, the previously - * acquired buffer is assumed to be released at that time and may not be - * reused by the decoder. 
- * - * As a convenience, av_samples_get_buffer_size() and - * av_samples_fill_arrays() in libavutil may be used by custom get_buffer() - * functions to find the required data size and to fill data pointers and - * linesize. In AVFrame.linesize, only linesize[0] may be set for audio - * since all planes must be the same size. - * - * @see av_samples_get_buffer_size(), av_samples_fill_arrays() - * - * - encoding: unused - * - decoding: Set by libavcodec, user can override. - * - * @deprecated use get_buffer2() - */ - attribute_deprecated - int (*get_buffer)(struct AVCodecContext *c, AVFrame *pic); - - /** - * Called to release buffers which were allocated with get_buffer. - * A released buffer can be reused in get_buffer(). - * pic.data[*] must be set to NULL. - * May be called from a different thread if frame multithreading is used, - * but not by more than one thread at once, so does not need to be reentrant. - * - encoding: unused - * - decoding: Set by libavcodec, user can override. - * - * @deprecated custom freeing callbacks should be set from get_buffer2() - */ - attribute_deprecated - void (*release_buffer)(struct AVCodecContext *c, AVFrame *pic); - - /** - * Called at the beginning of a frame to get cr buffer for it. - * Buffer type (size, hints) must be the same. libavcodec won't check it. - * libavcodec will pass previous buffer in pic, function should return - * same buffer or new buffer with old frame "painted" into it. - * If pic.data[0] == NULL must behave like get_buffer(). - * if CODEC_CAP_DR1 is not set then reget_buffer() must call - * avcodec_default_reget_buffer() instead of providing buffers allocated by - * some other means. - * - encoding: unused - * - decoding: Set by libavcodec, user can override. - */ - attribute_deprecated - int (*reget_buffer)(struct AVCodecContext *c, AVFrame *pic); -#endif - - /** - * This callback is called at the beginning of each frame to get data - * buffer(s) for it. There may be one contiguous buffer for all the data or - * there may be a buffer per each data plane or anything in between. What - * this means is, you may set however many entries in buf[] you feel necessary. - * Each buffer must be reference-counted using the AVBuffer API (see description - * of buf[] below). - * - * The following fields will be set in the frame before this callback is - * called: - * - format - * - width, height (video only) - * - sample_rate, channel_layout, nb_samples (audio only) - * Their values may differ from the corresponding values in - * AVCodecContext. This callback must use the frame values, not the codec - * context values, to calculate the required buffer size. - * - * This callback must fill the following fields in the frame: - * - data[] - * - linesize[] - * - extended_data: - * * if the data is planar audio with more than 8 channels, then this - * callback must allocate and fill extended_data to contain all pointers - * to all data planes. data[] must hold as many pointers as it can. - * extended_data must be allocated with av_malloc() and will be freed in - * av_frame_unref(). - * * otherwise exended_data must point to data - * - buf[] must contain one or more pointers to AVBufferRef structures. Each of - * the frame's data and extended_data pointers must be contained in these. That - * is, one AVBufferRef for each allocated chunk of memory, not necessarily one - * AVBufferRef per data[] entry. See: av_buffer_create(), av_buffer_alloc(), - * and av_buffer_ref(). 
- * - extended_buf and nb_extended_buf must be allocated with av_malloc() by - * this callback and filled with the extra buffers if there are more - * buffers than buf[] can hold. extended_buf will be freed in - * av_frame_unref(). - * - * If CODEC_CAP_DR1 is not set then get_buffer2() must call - * avcodec_default_get_buffer2() instead of providing buffers allocated by - * some other means. - * - * Each data plane must be aligned to the maximum required by the target - * CPU. - * - * @see avcodec_default_get_buffer2() - * - * Video: - * - * If AV_GET_BUFFER_FLAG_REF is set in flags then the frame may be reused - * (read and/or written to if it is writable) later by libavcodec. - * - * If CODEC_FLAG_EMU_EDGE is not set in s->flags, the buffer must contain an - * edge of the size returned by avcodec_get_edge_width() on all sides. - * - * avcodec_align_dimensions2() should be used to find the required width and - * height, as they normally need to be rounded up to the next multiple of 16. - * - * If frame multithreading is used and thread_safe_callbacks is set, - * this callback may be called from a different thread, but not from more - * than one at once. Does not need to be reentrant. - * - * @see avcodec_align_dimensions2() - * - * Audio: - * - * Decoders request a buffer of a particular size by setting - * AVFrame.nb_samples prior to calling get_buffer2(). The decoder may, - * however, utilize only part of the buffer by setting AVFrame.nb_samples - * to a smaller value in the output frame. - * - * As a convenience, av_samples_get_buffer_size() and - * av_samples_fill_arrays() in libavutil may be used by custom get_buffer2() - * functions to find the required data size and to fill data pointers and - * linesize. In AVFrame.linesize, only linesize[0] may be set for audio - * since all planes must be the same size. - * - * @see av_samples_get_buffer_size(), av_samples_fill_arrays() - * - * - encoding: unused - * - decoding: Set by libavcodec, user can override. - */ - int (*get_buffer2)(struct AVCodecContext *s, AVFrame *frame, int flags); - - /** - * If non-zero, the decoded audio and video frames returned from - * avcodec_decode_video2() and avcodec_decode_audio4() are reference-counted - * and are valid indefinitely. The caller must free them with - * av_frame_unref() when they are not needed anymore. - * Otherwise, the decoded frames must not be freed by the caller and are - * only valid until the next decode call. - * - * - encoding: unused - * - decoding: set by the caller before avcodec_open2(). - */ - int refcounted_frames; - - /* - encoding parameters */ - float qcompress; ///< amount of qscale change between easy & hard scenes (0.0-1.0) - float qblur; ///< amount of qscale smoothing over time (0.0-1.0) - - /** - * minimum quantizer - * - encoding: Set by user. - * - decoding: unused - */ - int qmin; - - /** - * maximum quantizer - * - encoding: Set by user. - * - decoding: unused - */ - int qmax; - - /** - * maximum quantizer difference between frames - * - encoding: Set by user. - * - decoding: unused - */ - int max_qdiff; - - /** - * ratecontrol qmin qmax limiting method - * 0-> clipping, 1-> use a nice continuous function to limit qscale wthin qmin/qmax. - * - encoding: Set by user. - * - decoding: unused - */ - float rc_qsquish; - - float rc_qmod_amp; - int rc_qmod_freq; - - /** - * decoder bitstream buffer size - * - encoding: Set by user. 
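/* A sketch of a get_buffer2() override as described above: it only logs the
 * requested geometry and then delegates to the default allocator, which is
 * the required fallback when CODEC_CAP_DR1 is not set. my_get_buffer2 is an
 * invented name. */
static int my_get_buffer2(AVCodecContext *s, AVFrame *frame, int flags)
{
    av_log(s, AV_LOG_DEBUG, "get_buffer2: %dx%d video / %d audio samples\n",
           frame->width, frame->height, frame->nb_samples);
    return avcodec_default_get_buffer2(s, frame, flags);
}
/* Typically installed before avcodec_open2():
 *     avctx->get_buffer2       = my_get_buffer2;
 *     avctx->refcounted_frames = 1;   // keep returned frames valid until av_frame_unref()
 */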
- * - decoding: unused - */ - int rc_buffer_size; - - /** - * ratecontrol override, see RcOverride - * - encoding: Allocated/set/freed by user. - * - decoding: unused - */ - int rc_override_count; - RcOverride *rc_override; - - /** - * rate control equation - * - encoding: Set by user - * - decoding: unused - */ - const char *rc_eq; - - /** - * maximum bitrate - * - encoding: Set by user. - * - decoding: unused - */ - int rc_max_rate; - - /** - * minimum bitrate - * - encoding: Set by user. - * - decoding: unused - */ - int rc_min_rate; - - float rc_buffer_aggressivity; - - /** - * initial complexity for pass1 ratecontrol - * - encoding: Set by user. - * - decoding: unused - */ - float rc_initial_cplx; - - /** - * Ratecontrol attempt to use, at maximum, of what can be used without an underflow. - * - encoding: Set by user. - * - decoding: unused. - */ - float rc_max_available_vbv_use; - - /** - * Ratecontrol attempt to use, at least, times the amount needed to prevent a vbv overflow. - * - encoding: Set by user. - * - decoding: unused. - */ - float rc_min_vbv_overflow_use; - - /** - * Number of bits which should be loaded into the rc buffer before decoding starts. - * - encoding: Set by user. - * - decoding: unused - */ - int rc_initial_buffer_occupancy; - -#define FF_CODER_TYPE_VLC 0 -#define FF_CODER_TYPE_AC 1 -#define FF_CODER_TYPE_RAW 2 -#define FF_CODER_TYPE_RLE 3 -#define FF_CODER_TYPE_DEFLATE 4 - /** - * coder type - * - encoding: Set by user. - * - decoding: unused - */ - int coder_type; - - /** - * context model - * - encoding: Set by user. - * - decoding: unused - */ - int context_model; - - /** - * minimum Lagrange multipler - * - encoding: Set by user. - * - decoding: unused - */ - int lmin; - - /** - * maximum Lagrange multipler - * - encoding: Set by user. - * - decoding: unused - */ - int lmax; - - /** - * frame skip threshold - * - encoding: Set by user. - * - decoding: unused - */ - int frame_skip_threshold; - - /** - * frame skip factor - * - encoding: Set by user. - * - decoding: unused - */ - int frame_skip_factor; - - /** - * frame skip exponent - * - encoding: Set by user. - * - decoding: unused - */ - int frame_skip_exp; - - /** - * frame skip comparison function - * - encoding: Set by user. - * - decoding: unused - */ - int frame_skip_cmp; - - /** - * trellis RD quantization - * - encoding: Set by user. - * - decoding: unused - */ - int trellis; - - /** - * - encoding: Set by user. - * - decoding: unused - */ - int min_prediction_order; - - /** - * - encoding: Set by user. - * - decoding: unused - */ - int max_prediction_order; - - /** - * GOP timecode frame start number - * - encoding: Set by user, in non drop frame format - * - decoding: Set by libavcodec (timecode in the 25 bits format, -1 if unset) - */ - int64_t timecode_frame_start; - - /* The RTP callback: This function is called */ - /* every time the encoder has a packet to send. */ - /* It depends on the encoder if the data starts */ - /* with a Start Code (it should). H.263 does. */ - /* mb_nb contains the number of macroblocks */ - /* encoded in the RTP payload. */ - void (*rtp_callback)(struct AVCodecContext *avctx, void *data, int size, int mb_nb); - - int rtp_payload_size; /* The size of the RTP payload: the coder will */ - /* do its best to deliver a chunk with size */ - /* below rtp_payload_size, the chunk will start */ - /* with a start code on some codecs like H.263. */ - /* This doesn't take account of any particular */ - /* headers inside the transmitted RTP payload. 
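/* A hedged sketch of constrained-bitrate (VBV-style) setup using the rate
 * control fields above; the numbers are placeholders and set_cbr_like is an
 * invented helper. bit_rate is declared earlier in this struct. */
static void set_cbr_like(AVCodecContext *avctx, int bitrate)
{
    avctx->bit_rate       = bitrate;
    avctx->rc_max_rate    = bitrate;          /* maximum bitrate               */
    avctx->rc_min_rate    = bitrate;          /* minimum bitrate               */
    avctx->rc_buffer_size = 2 * bitrate;      /* decoder bitstream buffer size */
    avctx->rc_initial_buffer_occupancy = avctx->rc_buffer_size * 3 / 4;
}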
*/ - - /* statistics, used for 2-pass encoding */ - int mv_bits; - int header_bits; - int i_tex_bits; - int p_tex_bits; - int i_count; - int p_count; - int skip_count; - int misc_bits; - - /** - * number of bits used for the previously encoded frame - * - encoding: Set by libavcodec. - * - decoding: unused - */ - int frame_bits; - - /** - * pass1 encoding statistics output buffer - * - encoding: Set by libavcodec. - * - decoding: unused - */ - char *stats_out; - - /** - * pass2 encoding statistics input buffer - * Concatenated stuff from stats_out of pass1 should be placed here. - * - encoding: Allocated/set/freed by user. - * - decoding: unused - */ - char *stats_in; - - /** - * Work around bugs in encoders which sometimes cannot be detected automatically. - * - encoding: Set by user - * - decoding: Set by user - */ - int workaround_bugs; -#define FF_BUG_AUTODETECT 1 ///< autodetection -#define FF_BUG_OLD_MSMPEG4 2 -#define FF_BUG_XVID_ILACE 4 -#define FF_BUG_UMP4 8 -#define FF_BUG_NO_PADDING 16 -#define FF_BUG_AMV 32 -#define FF_BUG_AC_VLC 0 ///< Will be removed, libavcodec can now handle these non-compliant files by default. -#define FF_BUG_QPEL_CHROMA 64 -#define FF_BUG_STD_QPEL 128 -#define FF_BUG_QPEL_CHROMA2 256 -#define FF_BUG_DIRECT_BLOCKSIZE 512 -#define FF_BUG_EDGE 1024 -#define FF_BUG_HPEL_CHROMA 2048 -#define FF_BUG_DC_CLIP 4096 -#define FF_BUG_MS 8192 ///< Work around various bugs in Microsoft's broken decoders. -#define FF_BUG_TRUNCATED 16384 - - /** - * strictly follow the standard (MPEG4, ...). - * - encoding: Set by user. - * - decoding: Set by user. - * Setting this to STRICT or higher means the encoder and decoder will - * generally do stupid things, whereas setting it to unofficial or lower - * will mean the encoder might produce output that is not supported by all - * spec-compliant decoders. Decoders don't differentiate between normal, - * unofficial and experimental (that is, they always try to decode things - * when they can) unless they are explicitly asked to behave stupidly - * (=strictly conform to the specs) - */ - int strict_std_compliance; -#define FF_COMPLIANCE_VERY_STRICT 2 ///< Strictly conform to an older more strict version of the spec or reference software. -#define FF_COMPLIANCE_STRICT 1 ///< Strictly conform to all the things in the spec no matter what consequences. -#define FF_COMPLIANCE_NORMAL 0 -#define FF_COMPLIANCE_UNOFFICIAL -1 ///< Allow unofficial extensions -#define FF_COMPLIANCE_EXPERIMENTAL -2 ///< Allow nonstandardized experimental things. - - /** - * error concealment flags - * - encoding: unused - * - decoding: Set by user. - */ - int error_concealment; -#define FF_EC_GUESS_MVS 1 -#define FF_EC_DEBLOCK 2 - - /** - * debug - * - encoding: Set by user. - * - decoding: Set by user. - */ - int debug; -#define FF_DEBUG_PICT_INFO 1 -#define FF_DEBUG_RC 2 -#define FF_DEBUG_BITSTREAM 4 -#define FF_DEBUG_MB_TYPE 8 -#define FF_DEBUG_QP 16 -#define FF_DEBUG_MV 32 -#define FF_DEBUG_DCT_COEFF 0x00000040 -#define FF_DEBUG_SKIP 0x00000080 -#define FF_DEBUG_STARTCODE 0x00000100 -#define FF_DEBUG_PTS 0x00000200 -#define FF_DEBUG_ER 0x00000400 -#define FF_DEBUG_MMCO 0x00000800 -#define FF_DEBUG_BUGS 0x00001000 -#define FF_DEBUG_VIS_QP 0x00002000 -#define FF_DEBUG_VIS_MB_TYPE 0x00004000 -#define FF_DEBUG_BUFFERS 0x00008000 -#define FF_DEBUG_THREADS 0x00010000 - - /** - * debug - * - encoding: Set by user. - * - decoding: Set by user. 
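/* An illustrative sketch of the compliance / error-resilience knobs above;
 * relax_compliance is an invented helper and the chosen flags are examples,
 * not recommendations. */
static void relax_compliance(AVCodecContext *avctx)
{
    avctx->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;    /* allow experimental features */
    avctx->error_concealment     = FF_EC_GUESS_MVS | FF_EC_DEBLOCK;
    avctx->workaround_bugs       = FF_BUG_AUTODETECT;             /* autodetect broken encoders  */
    avctx->debug                 = FF_DEBUG_PICT_INFO;            /* verbose picture information */
}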
- */ - int debug_mv; -#define FF_DEBUG_VIS_MV_P_FOR 0x00000001 //visualize forward predicted MVs of P frames -#define FF_DEBUG_VIS_MV_B_FOR 0x00000002 //visualize forward predicted MVs of B frames -#define FF_DEBUG_VIS_MV_B_BACK 0x00000004 //visualize backward predicted MVs of B frames - - /** - * Error recognition; may misdetect some more or less valid parts as errors. - * - encoding: unused - * - decoding: Set by user. - */ - int err_recognition; -#define AV_EF_CRCCHECK (1<<0) -#define AV_EF_BITSTREAM (1<<1) -#define AV_EF_BUFFER (1<<2) -#define AV_EF_EXPLODE (1<<3) - -#define AV_EF_CAREFUL (1<<16) -#define AV_EF_COMPLIANT (1<<17) -#define AV_EF_AGGRESSIVE (1<<18) - - - /** - * opaque 64bit number (generally a PTS) that will be reordered and - * output in AVFrame.reordered_opaque - * @deprecated in favor of pkt_pts - * - encoding: unused - * - decoding: Set by user. - */ - int64_t reordered_opaque; - - /** - * Hardware accelerator in use - * - encoding: unused. - * - decoding: Set by libavcodec - */ - struct AVHWAccel *hwaccel; - - /** - * Hardware accelerator context. - * For some hardware accelerators, a global context needs to be - * provided by the user. In that case, this holds display-dependent - * data FFmpeg cannot instantiate itself. Please refer to the - * FFmpeg HW accelerator documentation to know how to fill this - * is. e.g. for VA API, this is a struct vaapi_context. - * - encoding: unused - * - decoding: Set by user - */ - void *hwaccel_context; - - /** - * error - * - encoding: Set by libavcodec if flags&CODEC_FLAG_PSNR. - * - decoding: unused - */ - uint64_t error[AV_NUM_DATA_POINTERS]; - - /** - * DCT algorithm, see FF_DCT_* below - * - encoding: Set by user. - * - decoding: unused - */ - int dct_algo; -#define FF_DCT_AUTO 0 -#define FF_DCT_FASTINT 1 -#define FF_DCT_INT 2 -#define FF_DCT_MMX 3 -#define FF_DCT_ALTIVEC 5 -#define FF_DCT_FAAN 6 - - /** - * IDCT algorithm, see FF_IDCT_* below. - * - encoding: Set by user. - * - decoding: Set by user. - */ - int idct_algo; -#define FF_IDCT_AUTO 0 -#define FF_IDCT_INT 1 -#define FF_IDCT_SIMPLE 2 -#define FF_IDCT_SIMPLEMMX 3 -#define FF_IDCT_ARM 7 -#define FF_IDCT_ALTIVEC 8 -#define FF_IDCT_SH4 9 -#define FF_IDCT_SIMPLEARM 10 -#define FF_IDCT_IPP 13 -#define FF_IDCT_XVIDMMX 14 -#define FF_IDCT_SIMPLEARMV5TE 16 -#define FF_IDCT_SIMPLEARMV6 17 -#define FF_IDCT_SIMPLEVIS 18 -#define FF_IDCT_FAAN 20 -#define FF_IDCT_SIMPLENEON 22 -#define FF_IDCT_SIMPLEALPHA 23 - - /** - * bits per sample/pixel from the demuxer (needed for huffyuv). - * - encoding: Set by libavcodec. - * - decoding: Set by user. - */ - int bits_per_coded_sample; - - /** - * Bits per sample/pixel of internal libavcodec pixel/sample format. - * - encoding: set by user. - * - decoding: set by libavcodec. - */ - int bits_per_raw_sample; - -#if FF_API_LOWRES - /** - * low resolution decoding, 1-> 1/2 size, 2->1/4 size - * - encoding: unused - * - decoding: Set by user. - * Code outside libavcodec should access this field using: - * av_codec_{get,set}_lowres(avctx) - */ - int lowres; -#endif - - /** - * the picture in the bitstream - * - encoding: Set by libavcodec. - * - decoding: Set by libavcodec. - */ - AVFrame *coded_frame; - - /** - * thread count - * is used to decide how many independent tasks should be passed to execute() - * - encoding: Set by user. - * - decoding: Set by user. - */ - int thread_count; - - /** - * Which multithreading methods to use. 
- * Use of FF_THREAD_FRAME will increase decoding delay by one frame per thread, - * so clients which cannot provide future frames should not use it. - * - * - encoding: Set by user, otherwise the default is used. - * - decoding: Set by user, otherwise the default is used. - */ - int thread_type; -#define FF_THREAD_FRAME 1 ///< Decode more than one frame at once -#define FF_THREAD_SLICE 2 ///< Decode more than one part of a single frame at once - - /** - * Which multithreading methods are in use by the codec. - * - encoding: Set by libavcodec. - * - decoding: Set by libavcodec. - */ - int active_thread_type; - - /** - * Set by the client if its custom get_buffer() callback can be called - * synchronously from another thread, which allows faster multithreaded decoding. - * draw_horiz_band() will be called from other threads regardless of this setting. - * Ignored if the default get_buffer() is used. - * - encoding: Set by user. - * - decoding: Set by user. - */ - int thread_safe_callbacks; - - /** - * The codec may call this to execute several independent things. - * It will return only after finishing all tasks. - * The user may replace this with some multithreaded implementation, - * the default implementation will execute the parts serially. - * @param count the number of things to execute - * - encoding: Set by libavcodec, user can override. - * - decoding: Set by libavcodec, user can override. - */ - int (*execute)(struct AVCodecContext *c, int (*func)(struct AVCodecContext *c2, void *arg), void *arg2, int *ret, int count, int size); - - /** - * The codec may call this to execute several independent things. - * It will return only after finishing all tasks. - * The user may replace this with some multithreaded implementation, - * the default implementation will execute the parts serially. - * Also see avcodec_thread_init and e.g. the --enable-pthread configure option. - * @param c context passed also to func - * @param count the number of things to execute - * @param arg2 argument passed unchanged to func - * @param ret return values of executed functions, must have space for "count" values. May be NULL. - * @param func function that will be called count times, with jobnr from 0 to count-1. - * threadnr will be in the range 0 to c->thread_count-1 < MAX_THREADS and so that no - * two instances of func executing at the same time will have the same threadnr. - * @return always 0 currently, but code should handle a future improvement where when any call to func - * returns < 0 no further calls to func may be done and < 0 is returned. - * - encoding: Set by libavcodec, user can override. - * - decoding: Set by libavcodec, user can override. - */ - int (*execute2)(struct AVCodecContext *c, int (*func)(struct AVCodecContext *c2, void *arg, int jobnr, int threadnr), void *arg2, int *ret, int count); - - /** - * thread opaque - * Can be used by execute() to store some per AVCodecContext stuff. - * - encoding: set by execute() - * - decoding: set by execute() - */ - void *thread_opaque; - - /** - * noise vs. sse weight for the nsse comparsion function - * - encoding: Set by user. - * - decoding: unused - */ - int nsse_weight; - - /** - * profile - * - encoding: Set by user. - * - decoding: Set by libavcodec. 
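/* A small sketch of requesting multithreaded decoding with the fields above:
 * ask for both frame and slice threading and let libavcodec report what the
 * codec actually supports in active_thread_type after avcodec_open2().
 * enable_threads is an invented name. */
static void enable_threads(AVCodecContext *avctx, int n)
{
    avctx->thread_count = n;                                /* 0 requests autodetection */
    avctx->thread_type  = FF_THREAD_FRAME | FF_THREAD_SLICE;
}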
- */ - int profile; -#define FF_PROFILE_UNKNOWN -99 -#define FF_PROFILE_RESERVED -100 - -#define FF_PROFILE_AAC_MAIN 0 -#define FF_PROFILE_AAC_LOW 1 -#define FF_PROFILE_AAC_SSR 2 -#define FF_PROFILE_AAC_LTP 3 -#define FF_PROFILE_AAC_HE 4 -#define FF_PROFILE_AAC_HE_V2 28 -#define FF_PROFILE_AAC_LD 22 -#define FF_PROFILE_AAC_ELD 38 -#define FF_PROFILE_MPEG2_AAC_LOW 128 -#define FF_PROFILE_MPEG2_AAC_HE 131 - -#define FF_PROFILE_DTS 20 -#define FF_PROFILE_DTS_ES 30 -#define FF_PROFILE_DTS_96_24 40 -#define FF_PROFILE_DTS_HD_HRA 50 -#define FF_PROFILE_DTS_HD_MA 60 - -#define FF_PROFILE_MPEG2_422 0 -#define FF_PROFILE_MPEG2_HIGH 1 -#define FF_PROFILE_MPEG2_SS 2 -#define FF_PROFILE_MPEG2_SNR_SCALABLE 3 -#define FF_PROFILE_MPEG2_MAIN 4 -#define FF_PROFILE_MPEG2_SIMPLE 5 - -#define FF_PROFILE_H264_CONSTRAINED (1<<9) // 8+1; constraint_set1_flag -#define FF_PROFILE_H264_INTRA (1<<11) // 8+3; constraint_set3_flag - -#define FF_PROFILE_H264_BASELINE 66 -#define FF_PROFILE_H264_CONSTRAINED_BASELINE (66|FF_PROFILE_H264_CONSTRAINED) -#define FF_PROFILE_H264_MAIN 77 -#define FF_PROFILE_H264_EXTENDED 88 -#define FF_PROFILE_H264_HIGH 100 -#define FF_PROFILE_H264_HIGH_10 110 -#define FF_PROFILE_H264_HIGH_10_INTRA (110|FF_PROFILE_H264_INTRA) -#define FF_PROFILE_H264_HIGH_422 122 -#define FF_PROFILE_H264_HIGH_422_INTRA (122|FF_PROFILE_H264_INTRA) -#define FF_PROFILE_H264_HIGH_444 144 -#define FF_PROFILE_H264_HIGH_444_PREDICTIVE 244 -#define FF_PROFILE_H264_HIGH_444_INTRA (244|FF_PROFILE_H264_INTRA) -#define FF_PROFILE_H264_CAVLC_444 44 - -#define FF_PROFILE_VC1_SIMPLE 0 -#define FF_PROFILE_VC1_MAIN 1 -#define FF_PROFILE_VC1_COMPLEX 2 -#define FF_PROFILE_VC1_ADVANCED 3 - -#define FF_PROFILE_MPEG4_SIMPLE 0 -#define FF_PROFILE_MPEG4_SIMPLE_SCALABLE 1 -#define FF_PROFILE_MPEG4_CORE 2 -#define FF_PROFILE_MPEG4_MAIN 3 -#define FF_PROFILE_MPEG4_N_BIT 4 -#define FF_PROFILE_MPEG4_SCALABLE_TEXTURE 5 -#define FF_PROFILE_MPEG4_SIMPLE_FACE_ANIMATION 6 -#define FF_PROFILE_MPEG4_BASIC_ANIMATED_TEXTURE 7 -#define FF_PROFILE_MPEG4_HYBRID 8 -#define FF_PROFILE_MPEG4_ADVANCED_REAL_TIME 9 -#define FF_PROFILE_MPEG4_CORE_SCALABLE 10 -#define FF_PROFILE_MPEG4_ADVANCED_CODING 11 -#define FF_PROFILE_MPEG4_ADVANCED_CORE 12 -#define FF_PROFILE_MPEG4_ADVANCED_SCALABLE_TEXTURE 13 -#define FF_PROFILE_MPEG4_SIMPLE_STUDIO 14 -#define FF_PROFILE_MPEG4_ADVANCED_SIMPLE 15 - -#define FF_PROFILE_JPEG2000_CSTREAM_RESTRICTION_0 0 -#define FF_PROFILE_JPEG2000_CSTREAM_RESTRICTION_1 1 -#define FF_PROFILE_JPEG2000_CSTREAM_NO_RESTRICTION 2 -#define FF_PROFILE_JPEG2000_DCINEMA_2K 3 -#define FF_PROFILE_JPEG2000_DCINEMA_4K 4 - - /** - * level - * - encoding: Set by user. - * - decoding: Set by libavcodec. - */ - int level; -#define FF_LEVEL_UNKNOWN -99 - - /** - * Skip loop filtering for selected frames. - * - encoding: unused - * - decoding: Set by user. - */ - enum AVDiscard skip_loop_filter; - - /** - * Skip IDCT/dequantization for selected frames. - * - encoding: unused - * - decoding: Set by user. - */ - enum AVDiscard skip_idct; - - /** - * Skip decoding for selected frames. - * - encoding: unused - * - decoding: Set by user. - */ - enum AVDiscard skip_frame; - - /** - * Header containing style information for text subtitles. - * For SUBTITLE_ASS subtitle type, it should contain the whole ASS - * [Script Info] and [V4+ Styles] section, plus the [Events] line and - * the Format line following. It shouldn't include any Dialogue line. 
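/* A sketch of a fast-preview/seek configuration using the AVDiscard fields
 * above (the AVDiscard values are defined elsewhere in this header);
 * enable_fast_preview is an invented helper name. */
static void enable_fast_preview(AVCodecContext *avctx)
{
    avctx->skip_loop_filter = AVDISCARD_NONREF;   /* skip deblocking of non-reference frames */
    avctx->skip_idct        = AVDISCARD_NONREF;   /* skip IDCT/dequant for the same frames   */
    avctx->skip_frame       = AVDISCARD_NONKEY;   /* decode keyframes only                   */
}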
- * - encoding: Set/allocated/freed by user (before avcodec_open2()) - * - decoding: Set/allocated/freed by libavcodec (by avcodec_open2()) - */ - uint8_t *subtitle_header; - int subtitle_header_size; - - /** - * Simulates errors in the bitstream to test error concealment. - * - encoding: Set by user. - * - decoding: unused - */ - int error_rate; - - /** - * Current packet as passed into the decoder, to avoid having - * to pass the packet into every function. Currently only valid - * inside lavc and get/release_buffer callbacks. - * - decoding: set by avcodec_decode_*, read by get_buffer() for setting pkt_pts - * - encoding: unused - */ - AVPacket *pkt; - - /** - * VBV delay coded in the last frame (in periods of a 27 MHz clock). - * Used for compliant TS muxing. - * - encoding: Set by libavcodec. - * - decoding: unused. - */ - uint64_t vbv_delay; - - /** - * Timebase in which pkt_dts/pts and AVPacket.dts/pts are. - * Code outside libavcodec should access this field using: - * av_codec_{get,set}_pkt_timebase(avctx) - * - encoding unused. - * - decoding set by user. - */ - AVRational pkt_timebase; - - /** - * AVCodecDescriptor - * Code outside libavcodec should access this field using: - * av_codec_{get,set}_codec_descriptor(avctx) - * - encoding: unused. - * - decoding: set by libavcodec. - */ - const AVCodecDescriptor *codec_descriptor; - -#if !FF_API_LOWRES - /** - * low resolution decoding, 1-> 1/2 size, 2->1/4 size - * - encoding: unused - * - decoding: Set by user. - * Code outside libavcodec should access this field using: - * av_codec_{get,set}_lowres(avctx) - */ - int lowres; -#endif - - /** - * Current statistics for PTS correction. - * - decoding: maintained and used by libavcodec, not intended to be used by user apps - * - encoding: unused - */ - int64_t pts_correction_num_faulty_pts; /// Number of incorrect PTS values so far - int64_t pts_correction_num_faulty_dts; /// Number of incorrect DTS values so far - int64_t pts_correction_last_pts; /// PTS of the last frame - int64_t pts_correction_last_dts; /// DTS of the last frame - - /** - * Character encoding of the input subtitles file. - * - decoding: set by user - * - encoding: unused - */ - char *sub_charenc; - - /** - * Subtitles character encoding mode. Formats or codecs might be adjusting - * this setting (if they are doing the conversion themselves for instance). - * - decoding: set by libavcodec - * - encoding: unused - */ - int sub_charenc_mode; -#define FF_SUB_CHARENC_MODE_DO_NOTHING -1 ///< do nothing (demuxer outputs a stream supposed to be already in UTF-8, or the codec is bitmap for instance) -#define FF_SUB_CHARENC_MODE_AUTOMATIC 0 ///< libavcodec will select the mode itself -#define FF_SUB_CHARENC_MODE_PRE_DECODER 1 ///< the AVPacket data needs to be recoded to UTF-8 before being fed to the decoder, requires iconv - -} AVCodecContext; - -AVRational av_codec_get_pkt_timebase (const AVCodecContext *avctx); -void av_codec_set_pkt_timebase (AVCodecContext *avctx, AVRational val); - -const AVCodecDescriptor *av_codec_get_codec_descriptor(const AVCodecContext *avctx); -void av_codec_set_codec_descriptor(AVCodecContext *avctx, const AVCodecDescriptor *desc); - -int av_codec_get_lowres(const AVCodecContext *avctx); -void av_codec_set_lowres(AVCodecContext *avctx, int val); - -/** - * AVProfile. - */ -typedef struct AVProfile { - int profile; - const char *name; ///< short name for the profile -} AVProfile; - -typedef struct AVCodecDefault AVCodecDefault; - -struct AVSubtitle; - -/** - * AVCodec. 
- */ -typedef struct AVCodec { - /** - * Name of the codec implementation. - * The name is globally unique among encoders and among decoders (but an - * encoder and a decoder can share the same name). - * This is the primary way to find a codec from the user perspective. - */ - const char *name; - /** - * Descriptive name for the codec, meant to be more human readable than name. - * You should use the NULL_IF_CONFIG_SMALL() macro to define it. - */ - const char *long_name; - enum AVMediaType type; - enum AVCodecID id; - /** - * Codec capabilities. - * see CODEC_CAP_* - */ - int capabilities; - const AVRational *supported_framerates; ///< array of supported framerates, or NULL if any, array is terminated by {0,0} - const enum AVPixelFormat *pix_fmts; ///< array of supported pixel formats, or NULL if unknown, array is terminated by -1 - const int *supported_samplerates; ///< array of supported audio samplerates, or NULL if unknown, array is terminated by 0 - const enum AVSampleFormat *sample_fmts; ///< array of supported sample formats, or NULL if unknown, array is terminated by -1 - const uint64_t *channel_layouts; ///< array of support channel layouts, or NULL if unknown. array is terminated by 0 - uint8_t max_lowres; ///< maximum value for lowres supported by the decoder - const AVClass *priv_class; ///< AVClass for the private context - const AVProfile *profiles; ///< array of recognized profiles, or NULL if unknown, array is terminated by {FF_PROFILE_UNKNOWN} - - /***************************************************************** - * No fields below this line are part of the public API. They - * may not be used outside of libavcodec and can be changed and - * removed at will. - * New public fields should be added right above. - ***************************************************************** - */ - int priv_data_size; - struct AVCodec *next; - /** - * @name Frame-level threading support functions - * @{ - */ - /** - * If defined, called on thread contexts when they are created. - * If the codec allocates writable tables in init(), re-allocate them here. - * priv_data will be set to a copy of the original. - */ - int (*init_thread_copy)(AVCodecContext *); - /** - * Copy necessary context variables from a previous thread context to the current one. - * If not defined, the next thread will start automatically; otherwise, the codec - * must call ff_thread_finish_setup(). - * - * dst and src will (rarely) point to the same context, in which case memcpy should be skipped. - */ - int (*update_thread_context)(AVCodecContext *dst, const AVCodecContext *src); - /** @} */ - - /** - * Private codec-specific defaults. - */ - const AVCodecDefault *defaults; - - /** - * Initialize codec static data, called from avcodec_register(). - */ - void (*init_static_data)(struct AVCodec *codec); - - int (*init)(AVCodecContext *); - int (*encode_sub)(AVCodecContext *, uint8_t *buf, int buf_size, - const struct AVSubtitle *sub); - /** - * Encode data to an AVPacket. - * - * @param avctx codec context - * @param avpkt output AVPacket (may contain a user-provided buffer) - * @param[in] frame AVFrame containing the raw data to be encoded - * @param[out] got_packet_ptr encoder sets to 0 or 1 to indicate that a - * non-empty packet was returned in avpkt. 
- * @return 0 on success, negative error code on failure - */ - int (*encode2)(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame, - int *got_packet_ptr); - int (*decode)(AVCodecContext *, void *outdata, int *outdata_size, AVPacket *avpkt); - int (*close)(AVCodecContext *); - /** - * Flush buffers. - * Will be called when seeking - */ - void (*flush)(AVCodecContext *); -} AVCodec; - -/** - * AVHWAccel. - */ -typedef struct AVHWAccel { - /** - * Name of the hardware accelerated codec. - * The name is globally unique among encoders and among decoders (but an - * encoder and a decoder can share the same name). - */ - const char *name; - - /** - * Type of codec implemented by the hardware accelerator. - * - * See AVMEDIA_TYPE_xxx - */ - enum AVMediaType type; - - /** - * Codec implemented by the hardware accelerator. - * - * See AV_CODEC_ID_xxx - */ - enum AVCodecID id; - - /** - * Supported pixel format. - * - * Only hardware accelerated formats are supported here. - */ - enum AVPixelFormat pix_fmt; - - /** - * Hardware accelerated codec capabilities. - * see FF_HWACCEL_CODEC_CAP_* - */ - int capabilities; - - struct AVHWAccel *next; - - /** - * Called at the beginning of each frame or field picture. - * - * Meaningful frame information (codec specific) is guaranteed to - * be parsed at this point. This function is mandatory. - * - * Note that buf can be NULL along with buf_size set to 0. - * Otherwise, this means the whole frame is available at this point. - * - * @param avctx the codec context - * @param buf the frame data buffer base - * @param buf_size the size of the frame in bytes - * @return zero if successful, a negative value otherwise - */ - int (*start_frame)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size); - - /** - * Callback for each slice. - * - * Meaningful slice information (codec specific) is guaranteed to - * be parsed at this point. This function is mandatory. - * - * @param avctx the codec context - * @param buf the slice data buffer base - * @param buf_size the size of the slice in bytes - * @return zero if successful, a negative value otherwise - */ - int (*decode_slice)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size); - - /** - * Called at the end of each frame or field picture. - * - * The whole picture is parsed at this point and can now be sent - * to the hardware accelerator. This function is mandatory. - * - * @param avctx the codec context - * @return zero if successful, a negative value otherwise - */ - int (*end_frame)(AVCodecContext *avctx); - - /** - * Size of HW accelerator private data. - * - * Private data is allocated with av_mallocz() before - * AVCodecContext.get_buffer() and deallocated after - * AVCodecContext.release_buffer(). - */ - int priv_data_size; -} AVHWAccel; - -/** - * @defgroup lavc_picture AVPicture - * - * Functions for working with AVPicture - * @{ - */ - -/** - * four components are given, that's all. - * the last component is alpha - */ -typedef struct AVPicture { - uint8_t *data[AV_NUM_DATA_POINTERS]; - int linesize[AV_NUM_DATA_POINTERS]; ///< number of bytes per line -} AVPicture; - -/** - * @} - */ - -enum AVSubtitleType { - SUBTITLE_NONE, - - SUBTITLE_BITMAP, ///< A bitmap, pict will be set - - /** - * Plain text, the text field must be set by the decoder and is - * authoritative. ass and pict fields may contain approximations. - */ - SUBTITLE_TEXT, - - /** - * Formatted text, the ass field must be set by the decoder and is - * authoritative. pict and text fields may contain approximations. 
- */ - SUBTITLE_ASS, -}; - -#define AV_SUBTITLE_FLAG_FORCED 0x00000001 - -typedef struct AVSubtitleRect { - int x; ///< top left corner of pict, undefined when pict is not set - int y; ///< top left corner of pict, undefined when pict is not set - int w; ///< width of pict, undefined when pict is not set - int h; ///< height of pict, undefined when pict is not set - int nb_colors; ///< number of colors in pict, undefined when pict is not set - - /** - * data+linesize for the bitmap of this subtitle. - * can be set for text/ass as well once they where rendered - */ - AVPicture pict; - enum AVSubtitleType type; - - char *text; ///< 0 terminated plain UTF-8 text - - /** - * 0 terminated ASS/SSA compatible event line. - * The presentation of this is unaffected by the other values in this - * struct. - */ - char *ass; - - int flags; -} AVSubtitleRect; - -typedef struct AVSubtitle { - uint16_t format; /* 0 = graphics */ - uint32_t start_display_time; /* relative to packet pts, in ms */ - uint32_t end_display_time; /* relative to packet pts, in ms */ - unsigned num_rects; - AVSubtitleRect **rects; - int64_t pts; ///< Same as packet pts, in AV_TIME_BASE -} AVSubtitle; - -/** - * If c is NULL, returns the first registered codec, - * if c is non-NULL, returns the next registered codec after c, - * or NULL if c is the last one. - */ -AVCodec *av_codec_next(const AVCodec *c); - -/** - * Return the LIBAVCODEC_VERSION_INT constant. - */ -unsigned avcodec_version(void); - -/** - * Return the libavcodec build-time configuration. - */ -const char *avcodec_configuration(void); - -/** - * Return the libavcodec license. - */ -const char *avcodec_license(void); - -/** - * Register the codec codec and initialize libavcodec. - * - * @warning either this function or avcodec_register_all() must be called - * before any other libavcodec functions. - * - * @see avcodec_register_all() - */ -void avcodec_register(AVCodec *codec); - -/** - * Register all the codecs, parsers and bitstream filters which were enabled at - * configuration time. If you do not call this function you can select exactly - * which formats you want to support, by using the individual registration - * functions. - * - * @see avcodec_register - * @see av_register_codec_parser - * @see av_register_bitstream_filter - */ -void avcodec_register_all(void); - - -#if FF_API_ALLOC_CONTEXT -/** - * Allocate an AVCodecContext and set its fields to default values. The - * resulting struct can be deallocated by simply calling av_free(). - * - * @return An AVCodecContext filled with default values or NULL on failure. - * @see avcodec_get_context_defaults - * - * @deprecated use avcodec_alloc_context3() - */ -attribute_deprecated -AVCodecContext *avcodec_alloc_context(void); - -/** THIS FUNCTION IS NOT YET PART OF THE PUBLIC API! - * we WILL change its arguments and name a few times! */ -attribute_deprecated -AVCodecContext *avcodec_alloc_context2(enum AVMediaType); - -/** - * Set the fields of the given AVCodecContext to default values. - * - * @param s The AVCodecContext of which the fields should be set to default values. - * @deprecated use avcodec_get_context_defaults3 - */ -attribute_deprecated -void avcodec_get_context_defaults(AVCodecContext *s); - -/** THIS FUNCTION IS NOT YET PART OF THE PUBLIC API! - * we WILL change its arguments and name a few times! */ -attribute_deprecated -void avcodec_get_context_defaults2(AVCodecContext *s, enum AVMediaType); -#endif - -/** - * Allocate an AVCodecContext and set its fields to default values. 
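/* A usage sketch for av_codec_next() declared above: enumerate every
 * registered decoder. Assumes avcodec_register_all() has been called and
 * <stdio.h> is included; list_decoders is an invented name. */
static void list_decoders(void)
{
    AVCodec *c = NULL;
    while ((c = av_codec_next(c)))
        if (av_codec_is_decoder(c))
            printf("%s  (%s)\n", c->name, c->long_name ? c->long_name : "");
}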
The - * resulting struct can be deallocated by calling avcodec_close() on it followed - * by av_free(). - * - * @param codec if non-NULL, allocate private data and initialize defaults - * for the given codec. It is illegal to then call avcodec_open2() - * with a different codec. - * If NULL, then the codec-specific defaults won't be initialized, - * which may result in suboptimal default settings (this is - * important mainly for encoders, e.g. libx264). - * - * @return An AVCodecContext filled with default values or NULL on failure. - * @see avcodec_get_context_defaults - */ -AVCodecContext *avcodec_alloc_context3(const AVCodec *codec); - -/** - * Set the fields of the given AVCodecContext to default values corresponding - * to the given codec (defaults may be codec-dependent). - * - * Do not call this function if a non-NULL codec has been passed - * to avcodec_alloc_context3() that allocated this AVCodecContext. - * If codec is non-NULL, it is illegal to call avcodec_open2() with a - * different codec on this AVCodecContext. - */ -int avcodec_get_context_defaults3(AVCodecContext *s, const AVCodec *codec); - -/** - * Get the AVClass for AVCodecContext. It can be used in combination with - * AV_OPT_SEARCH_FAKE_OBJ for examining options. - * - * @see av_opt_find(). - */ -const AVClass *avcodec_get_class(void); - -/** - * Get the AVClass for AVFrame. It can be used in combination with - * AV_OPT_SEARCH_FAKE_OBJ for examining options. - * - * @see av_opt_find(). - */ -const AVClass *avcodec_get_frame_class(void); - -/** - * Get the AVClass for AVSubtitleRect. It can be used in combination with - * AV_OPT_SEARCH_FAKE_OBJ for examining options. - * - * @see av_opt_find(). - */ -const AVClass *avcodec_get_subtitle_rect_class(void); - -/** - * Copy the settings of the source AVCodecContext into the destination - * AVCodecContext. The resulting destination codec context will be - * unopened, i.e. you are required to call avcodec_open2() before you - * can use this AVCodecContext to decode/encode video/audio data. - * - * @param dest target codec context, should be initialized with - * avcodec_alloc_context3(), but otherwise uninitialized - * @param src source codec context - * @return AVERROR() on error (e.g. memory allocation error), 0 on success - */ -int avcodec_copy_context(AVCodecContext *dest, const AVCodecContext *src); - -/** - * Allocate an AVFrame and set its fields to default values. The resulting - * struct must be freed using avcodec_free_frame(). - * - * @return An AVFrame filled with default values or NULL on failure. - * @see avcodec_get_frame_defaults - */ -AVFrame *avcodec_alloc_frame(void); - -/** - * Set the fields of the given AVFrame to default values. - * - * @param frame The AVFrame of which the fields should be set to default values. - */ -void avcodec_get_frame_defaults(AVFrame *frame); - -/** - * Free the frame and any dynamically allocated objects in it, - * e.g. extended_data. - * - * @param frame frame to be freed. The pointer will be set to NULL. - * - * @warning this function does NOT free the data buffers themselves - * (it does not know how, since they might have been allocated with - * a custom get_buffer()). - */ -void avcodec_free_frame(AVFrame **frame); - -#if FF_API_AVCODEC_OPEN -/** - * Initialize the AVCodecContext to use the given AVCodec. Prior to using this - * function the context has to be allocated. 
- * - * The functions avcodec_find_decoder_by_name(), avcodec_find_encoder_by_name(), - * avcodec_find_decoder() and avcodec_find_encoder() provide an easy way for - * retrieving a codec. - * - * @warning This function is not thread safe! - * - * @code - * avcodec_register_all(); - * codec = avcodec_find_decoder(AV_CODEC_ID_H264); - * if (!codec) - * exit(1); - * - * context = avcodec_alloc_context3(codec); - * - * if (avcodec_open(context, codec) < 0) - * exit(1); - * @endcode - * - * @param avctx The context which will be set up to use the given codec. - * @param codec The codec to use within the context. - * @return zero on success, a negative value on error - * @see avcodec_alloc_context3, avcodec_find_decoder, avcodec_find_encoder, avcodec_close - * - * @deprecated use avcodec_open2 - */ -attribute_deprecated -int avcodec_open(AVCodecContext *avctx, AVCodec *codec); -#endif - -/** - * Initialize the AVCodecContext to use the given AVCodec. Prior to using this - * function the context has to be allocated with avcodec_alloc_context3(). - * - * The functions avcodec_find_decoder_by_name(), avcodec_find_encoder_by_name(), - * avcodec_find_decoder() and avcodec_find_encoder() provide an easy way for - * retrieving a codec. - * - * @warning This function is not thread safe! - * - * @code - * avcodec_register_all(); - * av_dict_set(&opts, "b", "2.5M", 0); - * codec = avcodec_find_decoder(AV_CODEC_ID_H264); - * if (!codec) - * exit(1); - * - * context = avcodec_alloc_context3(codec); - * - * if (avcodec_open2(context, codec, opts) < 0) - * exit(1); - * @endcode - * - * @param avctx The context to initialize. - * @param codec The codec to open this context for. If a non-NULL codec has been - * previously passed to avcodec_alloc_context3() or - * avcodec_get_context_defaults3() for this context, then this - * parameter MUST be either NULL or equal to the previously passed - * codec. - * @param options A dictionary filled with AVCodecContext and codec-private options. - * On return this object will be filled with options that were not found. - * - * @return zero on success, a negative value on error - * @see avcodec_alloc_context3(), avcodec_find_decoder(), avcodec_find_encoder(), - * av_dict_set(), av_opt_find(). - */ -int avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options); - -/** - * Close a given AVCodecContext and free all the data associated with it - * (but not the AVCodecContext itself). - * - * Calling this function on an AVCodecContext that hasn't been opened will free - * the codec-specific data allocated in avcodec_alloc_context3() / - * avcodec_get_context_defaults3() with a non-NULL codec. Subsequent calls will - * do nothing. - */ -int avcodec_close(AVCodecContext *avctx); - -/** - * Free all allocated data in the given subtitle struct. - * - * @param sub AVSubtitle to free. - */ -void avsubtitle_free(AVSubtitle *sub); - -/** - * @} - */ - -/** - * @addtogroup lavc_packet - * @{ - */ - -#if FF_API_DESTRUCT_PACKET -/** - * Default packet destructor. - * @deprecated use the AVBuffer API instead - */ -attribute_deprecated -void av_destruct_packet(AVPacket *pkt); -#endif - -/** - * Initialize optional fields of a packet with default values. - * - * Note, this does not touch the data and size members, which have to be - * initialized separately. - * - * @param pkt packet - */ -void av_init_packet(AVPacket *pkt); - -/** - * Allocate the payload of a packet and initialize its fields with - * default values. 
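/* A lifecycle sketch following the calling sequence documented above: find a
 * codec, allocate a context for it, open it, and (later) close and free it.
 * open_h264_decoder is an invented name and error paths are trimmed. */
static int open_h264_decoder(AVCodecContext **out)
{
    AVCodec *codec;
    AVCodecContext *ctx;

    avcodec_register_all();
    codec = avcodec_find_decoder(AV_CODEC_ID_H264);
    if (!codec)
        return -1;
    ctx = avcodec_alloc_context3(codec);
    if (!ctx)
        return -1;
    if (avcodec_open2(ctx, codec, NULL) < 0) {
        av_free(ctx);
        return -1;
    }
    *out = ctx;
    return 0;   /* when done:  avcodec_close(ctx);  av_free(ctx); */
}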
- * - * @param pkt packet - * @param size wanted payload size - * @return 0 if OK, AVERROR_xxx otherwise - */ -int av_new_packet(AVPacket *pkt, int size); - -/** - * Reduce packet size, correctly zeroing padding - * - * @param pkt packet - * @param size new size - */ -void av_shrink_packet(AVPacket *pkt, int size); - -/** - * Increase packet size, correctly zeroing padding - * - * @param pkt packet - * @param grow_by number of bytes by which to increase the size of the packet - */ -int av_grow_packet(AVPacket *pkt, int grow_by); - -/** - * Initialize a reference-counted packet from av_malloc()ed data. - * - * @param pkt packet to be initialized. This function will set the data, size, - * buf and destruct fields, all others are left untouched. - * @param data Data allocated by av_malloc() to be used as packet data. If this - * function returns successfully, the data is owned by the underlying AVBuffer. - * The caller may not access the data through other means. - * @param size size of data in bytes, without the padding. I.e. the full buffer - * size is assumed to be size + FF_INPUT_BUFFER_PADDING_SIZE. - * - * @return 0 on success, a negative AVERROR on error - */ -int av_packet_from_data(AVPacket *pkt, uint8_t *data, int size); - -/** - * @warning This is a hack - the packet memory allocation stuff is broken. The - * packet is allocated if it was not really allocated. - */ -int av_dup_packet(AVPacket *pkt); - -/** - * Copy packet, including contents - * - * @return 0 on success, negative AVERROR on fail - */ -int av_copy_packet(AVPacket *dst, AVPacket *src); - -/** - * Copy packet side data - * - * @return 0 on success, negative AVERROR on fail - */ -int av_copy_packet_side_data(AVPacket *dst, AVPacket *src); - -/** - * Free a packet. - * - * @param pkt packet to free - */ -void av_free_packet(AVPacket *pkt); - -/** - * Allocate new information of a packet. - * - * @param pkt packet - * @param type side information type - * @param size side information size - * @return pointer to fresh allocated data or NULL otherwise - */ -uint8_t* av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type, - int size); - -/** - * Shrink the already allocated side data buffer - * - * @param pkt packet - * @param type side information type - * @param size new side information size - * @return 0 on success, < 0 on failure - */ -int av_packet_shrink_side_data(AVPacket *pkt, enum AVPacketSideDataType type, - int size); - -/** - * Get side information from packet. - * - * @param pkt packet - * @param type desired side information type - * @param size pointer for side information size to store (optional) - * @return pointer to data if present or NULL otherwise - */ -uint8_t* av_packet_get_side_data(AVPacket *pkt, enum AVPacketSideDataType type, - int *size); - -int av_packet_merge_side_data(AVPacket *pkt); - -int av_packet_split_side_data(AVPacket *pkt); - - -/** - * @} - */ - -/** - * @addtogroup lavc_decoding - * @{ - */ - -/** - * Find a registered decoder with a matching codec ID. - * - * @param id AVCodecID of the requested decoder - * @return A decoder if one was found, NULL otherwise. - */ -AVCodec *avcodec_find_decoder(enum AVCodecID id); - -/** - * Find a registered decoder with the specified name. - * - * @param name name of the requested decoder - * @return A decoder if one was found, NULL otherwise. 
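/* A sketch of av_packet_from_data() as documented above: the payload must be
 * av_malloc()ed and carry FF_INPUT_BUFFER_PADDING_SIZE zeroed bytes at the
 * end. Assumes <string.h>; wrap_buffer is an invented helper name. */
static int wrap_buffer(AVPacket *pkt, const uint8_t *src, int size)
{
    uint8_t *buf = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
    if (!buf)
        return -1;
    memcpy(buf, src, size);
    memset(buf + size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
    av_init_packet(pkt);
    if (av_packet_from_data(pkt, buf, size) < 0) {
        av_free(buf);
        return -1;
    }
    return 0;   /* pkt now owns buf; release with av_free_packet(pkt) */
}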
- */ -AVCodec *avcodec_find_decoder_by_name(const char *name); - -#if FF_API_GET_BUFFER -attribute_deprecated int avcodec_default_get_buffer(AVCodecContext *s, AVFrame *pic); -attribute_deprecated void avcodec_default_release_buffer(AVCodecContext *s, AVFrame *pic); -attribute_deprecated int avcodec_default_reget_buffer(AVCodecContext *s, AVFrame *pic); -#endif - -/** - * The default callback for AVCodecContext.get_buffer2(). It is made public so - * it can be called by custom get_buffer2() implementations for decoders without - * CODEC_CAP_DR1 set. - */ -int avcodec_default_get_buffer2(AVCodecContext *s, AVFrame *frame, int flags); - -/** - * Return the amount of padding in pixels which the get_buffer callback must - * provide around the edge of the image for codecs which do not have the - * CODEC_FLAG_EMU_EDGE flag. - * - * @return Required padding in pixels. - */ -unsigned avcodec_get_edge_width(void); - -/** - * Modify width and height values so that they will result in a memory - * buffer that is acceptable for the codec if you do not use any horizontal - * padding. - * - * May only be used if a codec with CODEC_CAP_DR1 has been opened. - * If CODEC_FLAG_EMU_EDGE is not set, the dimensions must have been increased - * according to avcodec_get_edge_width() before. - */ -void avcodec_align_dimensions(AVCodecContext *s, int *width, int *height); - -/** - * Modify width and height values so that they will result in a memory - * buffer that is acceptable for the codec if you also ensure that all - * line sizes are a multiple of the respective linesize_align[i]. - * - * May only be used if a codec with CODEC_CAP_DR1 has been opened. - * If CODEC_FLAG_EMU_EDGE is not set, the dimensions must have been increased - * according to avcodec_get_edge_width() before. - */ -void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height, - int linesize_align[AV_NUM_DATA_POINTERS]); - -#if FF_API_OLD_DECODE_AUDIO -/** - * Wrapper function which calls avcodec_decode_audio4. - * - * @deprecated Use avcodec_decode_audio4 instead. - * - * Decode the audio frame of size avpkt->size from avpkt->data into samples. - * Some decoders may support multiple frames in a single AVPacket, such - * decoders would then just decode the first frame. In this case, - * avcodec_decode_audio3 has to be called again with an AVPacket that contains - * the remaining data in order to decode the second frame etc. - * If no frame - * could be outputted, frame_size_ptr is zero. Otherwise, it is the - * decompressed frame size in bytes. - * - * @warning You must set frame_size_ptr to the allocated size of the - * output buffer before calling avcodec_decode_audio3(). - * - * @warning The input buffer must be FF_INPUT_BUFFER_PADDING_SIZE larger than - * the actual read bytes because some optimized bitstream readers read 32 or 64 - * bits at once and could read over the end. - * - * @warning The end of the input buffer avpkt->data should be set to 0 to ensure that - * no overreading happens for damaged MPEG streams. - * - * @warning You must not provide a custom get_buffer() when using - * avcodec_decode_audio3(). Doing so will override it with - * avcodec_default_get_buffer. Use avcodec_decode_audio4() instead, - * which does allow the application to provide a custom get_buffer(). - * - * @note You might have to align the input buffer avpkt->data and output buffer - * samples. 
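A sketch of the order of operations implied above for a custom get_buffer2() when CODEC_FLAG_EMU_EDGE is not set; padding each dimension by 2 * edge (one border per side) is an assumption about how the edge width is meant to be applied:

    #include <libavcodec/avcodec.h>

    static void padded_frame_size(AVCodecContext *ctx, int *w, int *h)
    {
        if (!(ctx->flags & CODEC_FLAG_EMU_EDGE)) {
            unsigned edge = avcodec_get_edge_width();
            *w += 2 * edge;                  /* assumed: one edge border on each side */
            *h += 2 * edge;
        }
        avcodec_align_dimensions(ctx, w, h); /* round up to codec-acceptable values */
    }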
The alignment requirements depend on the CPU: On some CPUs it isn't - * necessary at all, on others it won't work at all if not aligned and on others - * it will work but it will have an impact on performance. - * - * In practice, avpkt->data should have 4 byte alignment at minimum and - * samples should be 16 byte aligned unless the CPU doesn't need it - * (AltiVec and SSE do). - * - * @note Codecs which have the CODEC_CAP_DELAY capability set have a delay - * between input and output, these need to be fed with avpkt->data=NULL, - * avpkt->size=0 at the end to return the remaining frames. - * - * @param avctx the codec context - * @param[out] samples the output buffer, sample type in avctx->sample_fmt - * If the sample format is planar, each channel plane will - * be the same size, with no padding between channels. - * @param[in,out] frame_size_ptr the output buffer size in bytes - * @param[in] avpkt The input AVPacket containing the input buffer. - * You can create such packet with av_init_packet() and by then setting - * data and size, some decoders might in addition need other fields. - * All decoders are designed to use the least fields possible though. - * @return On error a negative value is returned, otherwise the number of bytes - * used or zero if no frame data was decompressed (used) from the input AVPacket. - */ -attribute_deprecated int avcodec_decode_audio3(AVCodecContext *avctx, int16_t *samples, - int *frame_size_ptr, - AVPacket *avpkt); -#endif - -/** - * Decode the audio frame of size avpkt->size from avpkt->data into frame. - * - * Some decoders may support multiple frames in a single AVPacket. Such - * decoders would then just decode the first frame. In this case, - * avcodec_decode_audio4 has to be called again with an AVPacket containing - * the remaining data in order to decode the second frame, etc... - * Even if no frames are returned, the packet needs to be fed to the decoder - * with remaining data until it is completely consumed or an error occurs. - * - * @warning The input buffer, avpkt->data must be FF_INPUT_BUFFER_PADDING_SIZE - * larger than the actual read bytes because some optimized bitstream - * readers read 32 or 64 bits at once and could read over the end. - * - * @note You might have to align the input buffer. The alignment requirements - * depend on the CPU and the decoder. - * - * @param avctx the codec context - * @param[out] frame The AVFrame in which to store decoded audio samples. - * The decoder will allocate a buffer for the decoded frame by - * calling the AVCodecContext.get_buffer2() callback. - * When AVCodecContext.refcounted_frames is set to 1, the frame is - * reference counted and the returned reference belongs to the - * caller. The caller must release the frame using av_frame_unref() - * when the frame is no longer needed. The caller may safely write - * to the frame if av_frame_is_writable() returns 1. - * When AVCodecContext.refcounted_frames is set to 0, the returned - * reference belongs to the decoder and is valid only until the - * next call to this function or until closing the decoder. - * The caller may not write to it. - * @param[out] got_frame_ptr Zero if no frame could be decoded, otherwise it is - * non-zero. - * @param[in] avpkt The input AVPacket containing the input buffer. - * At least avpkt->data and avpkt->size should be set. Some - * decoders might also require additional fields to be set. 
- * @return A negative error code is returned if an error occurred during - * decoding, otherwise the number of bytes consumed from the input - * AVPacket is returned. - */ -int avcodec_decode_audio4(AVCodecContext *avctx, AVFrame *frame, - int *got_frame_ptr, const AVPacket *avpkt); - -/** - * Decode the video frame of size avpkt->size from avpkt->data into picture. - * Some decoders may support multiple frames in a single AVPacket, such - * decoders would then just decode the first frame. - * - * @warning The input buffer must be FF_INPUT_BUFFER_PADDING_SIZE larger than - * the actual read bytes because some optimized bitstream readers read 32 or 64 - * bits at once and could read over the end. - * - * @warning The end of the input buffer buf should be set to 0 to ensure that - * no overreading happens for damaged MPEG streams. - * - * @note You might have to align the input buffer avpkt->data. - * The alignment requirements depend on the CPU: on some CPUs it isn't - * necessary at all, on others it won't work at all if not aligned and on others - * it will work but it will have an impact on performance. - * - * In practice, avpkt->data should have 4 byte alignment at minimum. - * - * @note Codecs which have the CODEC_CAP_DELAY capability set have a delay - * between input and output, these need to be fed with avpkt->data=NULL, - * avpkt->size=0 at the end to return the remaining frames. - * - * @param avctx the codec context - * @param[out] picture The AVFrame in which the decoded video frame will be stored. - * Use av_frame_alloc() to get an AVFrame. The codec will - * allocate memory for the actual bitmap by calling the - * AVCodecContext.get_buffer2() callback. - * When AVCodecContext.refcounted_frames is set to 1, the frame is - * reference counted and the returned reference belongs to the - * caller. The caller must release the frame using av_frame_unref() - * when the frame is no longer needed. The caller may safely write - * to the frame if av_frame_is_writable() returns 1. - * When AVCodecContext.refcounted_frames is set to 0, the returned - * reference belongs to the decoder and is valid only until the - * next call to this function or until closing the decoder. The - * caller may not write to it. - * - * @param[in] avpkt The input AVpacket containing the input buffer. - * You can create such packet with av_init_packet() and by then setting - * data and size, some decoders might in addition need other fields like - * flags&AV_PKT_FLAG_KEY. All decoders are designed to use the least - * fields possible. - * @param[in,out] got_picture_ptr Zero if no frame could be decompressed, otherwise, it is nonzero. - * @return On error a negative value is returned, otherwise the number of bytes - * used or zero if no frame could be decompressed. - */ -int avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture, - int *got_picture_ptr, - const AVPacket *avpkt); - -/** - * Decode a subtitle message. - * Return a negative value on error, otherwise return the number of bytes used. - * If no subtitle could be decompressed, got_sub_ptr is zero. - * Otherwise, the subtitle is stored in *sub. - * Note that CODEC_CAP_DR1 is not available for subtitle codecs. This is for - * simplicity, because the performance difference is expect to be negligible - * and reusing a get_buffer written for video codecs would probably perform badly - * due to a potentially very different allocation pattern. 
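The documentation above for avcodec_decode_audio4()/avcodec_decode_video2() translates into the usual got_frame loop; a minimal video sketch with handle() as a placeholder callback:

    #include <libavcodec/avcodec.h>
    #include <libavutil/frame.h>

    static int decode_packet(AVCodecContext *ctx, AVPacket *pkt,
                             void (*handle)(AVFrame *))
    {
        AVFrame *frm = av_frame_alloc();
        int got = 0, used;

        if (!frm)
            return -1;
        used = avcodec_decode_video2(ctx, frm, &got, pkt);
        if (used >= 0 && got)
            handle(frm);      /* ownership follows the refcounted_frames rules above */
        av_frame_free(&frm);
        return used;
    }
    /* Codecs with CODEC_CAP_DELAY are drained at EOF by calling this repeatedly
     * with a packet whose data is NULL and size is 0, until no more frames appear. */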
- * - * @param avctx the codec context - * @param[out] sub The AVSubtitle in which the decoded subtitle will be stored, must be - freed with avsubtitle_free if *got_sub_ptr is set. - * @param[in,out] got_sub_ptr Zero if no subtitle could be decompressed, otherwise, it is nonzero. - * @param[in] avpkt The input AVPacket containing the input buffer. - */ -int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub, - int *got_sub_ptr, - AVPacket *avpkt); - -/** - * @defgroup lavc_parsing Frame parsing - * @{ - */ - -enum AVPictureStructure { - AV_PICTURE_STRUCTURE_UNKNOWN, //< unknown - AV_PICTURE_STRUCTURE_TOP_FIELD, //< coded as top field - AV_PICTURE_STRUCTURE_BOTTOM_FIELD, //< coded as bottom field - AV_PICTURE_STRUCTURE_FRAME, //< coded as frame -}; - -typedef struct AVCodecParserContext { - void *priv_data; - struct AVCodecParser *parser; - int64_t frame_offset; /* offset of the current frame */ - int64_t cur_offset; /* current offset - (incremented by each av_parser_parse()) */ - int64_t next_frame_offset; /* offset of the next frame */ - /* video info */ - int pict_type; /* XXX: Put it back in AVCodecContext. */ - /** - * This field is used for proper frame duration computation in lavf. - * It signals, how much longer the frame duration of the current frame - * is compared to normal frame duration. - * - * frame_duration = (1 + repeat_pict) * time_base - * - * It is used by codecs like H.264 to display telecined material. - */ - int repeat_pict; /* XXX: Put it back in AVCodecContext. */ - int64_t pts; /* pts of the current frame */ - int64_t dts; /* dts of the current frame */ - - /* private data */ - int64_t last_pts; - int64_t last_dts; - int fetch_timestamp; - -#define AV_PARSER_PTS_NB 4 - int cur_frame_start_index; - int64_t cur_frame_offset[AV_PARSER_PTS_NB]; - int64_t cur_frame_pts[AV_PARSER_PTS_NB]; - int64_t cur_frame_dts[AV_PARSER_PTS_NB]; - - int flags; -#define PARSER_FLAG_COMPLETE_FRAMES 0x0001 -#define PARSER_FLAG_ONCE 0x0002 -/// Set if the parser has a valid file offset -#define PARSER_FLAG_FETCHED_OFFSET 0x0004 -#define PARSER_FLAG_USE_CODEC_TS 0x1000 - - int64_t offset; ///< byte offset from starting packet start - int64_t cur_frame_end[AV_PARSER_PTS_NB]; - - /** - * Set by parser to 1 for key frames and 0 for non-key frames. - * It is initialized to -1, so if the parser doesn't set this flag, - * old-style fallback using AV_PICTURE_TYPE_I picture type as key frames - * will be used. - */ - int key_frame; - - /** - * Time difference in stream time base units from the pts of this - * packet to the point at which the output from the decoder has converged - * independent from the availability of previous frames. That is, the - * frames are virtually identical no matter if decoding started from - * the very first frame or from this keyframe. - * Is AV_NOPTS_VALUE if unknown. - * This field is not the display duration of the current frame. - * This field has no meaning if the packet does not have AV_PKT_FLAG_KEY - * set. - * - * The purpose of this field is to allow seeking in streams that have no - * keyframes in the conventional sense. It corresponds to the - * recovery point SEI in H.264 and match_time_delta in NUT. It is also - * essential for some types of subtitle streams to ensure that all - * subtitles are correctly displayed after seeking. - */ - int64_t convergence_duration; - - // Timestamp generation support: - /** - * Synchronization point for start of timestamp generation. 
- * - * Set to >0 for sync point, 0 for no sync point and <0 for undefined - * (default). - * - * For example, this corresponds to presence of H.264 buffering period - * SEI message. - */ - int dts_sync_point; - - /** - * Offset of the current timestamp against last timestamp sync point in - * units of AVCodecContext.time_base. - * - * Set to INT_MIN when dts_sync_point unused. Otherwise, it must - * contain a valid timestamp offset. - * - * Note that the timestamp of sync point has usually a nonzero - * dts_ref_dts_delta, which refers to the previous sync point. Offset of - * the next frame after timestamp sync point will be usually 1. - * - * For example, this corresponds to H.264 cpb_removal_delay. - */ - int dts_ref_dts_delta; - - /** - * Presentation delay of current frame in units of AVCodecContext.time_base. - * - * Set to INT_MIN when dts_sync_point unused. Otherwise, it must - * contain valid non-negative timestamp delta (presentation time of a frame - * must not lie in the past). - * - * This delay represents the difference between decoding and presentation - * time of the frame. - * - * For example, this corresponds to H.264 dpb_output_delay. - */ - int pts_dts_delta; - - /** - * Position of the packet in file. - * - * Analogous to cur_frame_pts/dts - */ - int64_t cur_frame_pos[AV_PARSER_PTS_NB]; - - /** - * Byte position of currently parsed frame in stream. - */ - int64_t pos; - - /** - * Previous frame byte position. - */ - int64_t last_pos; - - /** - * Duration of the current frame. - * For audio, this is in units of 1 / AVCodecContext.sample_rate. - * For all other types, this is in units of AVCodecContext.time_base. - */ - int duration; - - enum AVFieldOrder field_order; - - /** - * Indicate whether a picture is coded as a frame, top field or bottom field. - * - * For example, H.264 field_pic_flag equal to 0 corresponds to - * AV_PICTURE_STRUCTURE_FRAME. An H.264 picture with field_pic_flag - * equal to 1 and bottom_field_flag equal to 0 corresponds to - * AV_PICTURE_STRUCTURE_TOP_FIELD. - */ - enum AVPictureStructure picture_structure; -} AVCodecParserContext; - -typedef struct AVCodecParser { - int codec_ids[5]; /* several codec IDs are permitted */ - int priv_data_size; - int (*parser_init)(AVCodecParserContext *s); - int (*parser_parse)(AVCodecParserContext *s, - AVCodecContext *avctx, - const uint8_t **poutbuf, int *poutbuf_size, - const uint8_t *buf, int buf_size); - void (*parser_close)(AVCodecParserContext *s); - int (*split)(AVCodecContext *avctx, const uint8_t *buf, int buf_size); - struct AVCodecParser *next; -} AVCodecParser; - -AVCodecParser *av_parser_next(AVCodecParser *c); - -void av_register_codec_parser(AVCodecParser *parser); -AVCodecParserContext *av_parser_init(int codec_id); - -/** - * Parse a packet. - * - * @param s parser context. - * @param avctx codec context. - * @param poutbuf set to pointer to parsed buffer or NULL if not yet finished. - * @param poutbuf_size set to size of parsed buffer or zero if not yet finished. - * @param buf input buffer. - * @param buf_size input length, to signal EOF, this should be 0 (so that the last frame can be output). - * @param pts input presentation timestamp. - * @param dts input decoding timestamp. - * @param pos input byte position in stream. - * @return the number of bytes of the input bitstream used. 
- * - * Example: - * @code - * while(in_len){ - * len = av_parser_parse2(myparser, AVCodecContext, &data, &size, - * in_data, in_len, - * pts, dts, pos); - * in_data += len; - * in_len -= len; - * - * if(size) - * decode_frame(data, size); - * } - * @endcode - */ -int av_parser_parse2(AVCodecParserContext *s, - AVCodecContext *avctx, - uint8_t **poutbuf, int *poutbuf_size, - const uint8_t *buf, int buf_size, - int64_t pts, int64_t dts, - int64_t pos); - -/** - * @return 0 if the output buffer is a subset of the input, 1 if it is allocated and must be freed - * @deprecated use AVBitStreamFilter - */ -int av_parser_change(AVCodecParserContext *s, - AVCodecContext *avctx, - uint8_t **poutbuf, int *poutbuf_size, - const uint8_t *buf, int buf_size, int keyframe); -void av_parser_close(AVCodecParserContext *s); - -/** - * @} - * @} - */ - -/** - * @addtogroup lavc_encoding - * @{ - */ - -/** - * Find a registered encoder with a matching codec ID. - * - * @param id AVCodecID of the requested encoder - * @return An encoder if one was found, NULL otherwise. - */ -AVCodec *avcodec_find_encoder(enum AVCodecID id); - -/** - * Find a registered encoder with the specified name. - * - * @param name name of the requested encoder - * @return An encoder if one was found, NULL otherwise. - */ -AVCodec *avcodec_find_encoder_by_name(const char *name); - -#if FF_API_OLD_ENCODE_AUDIO -/** - * Encode an audio frame from samples into buf. - * - * @deprecated Use avcodec_encode_audio2 instead. - * - * @note The output buffer should be at least FF_MIN_BUFFER_SIZE bytes large. - * However, for codecs with avctx->frame_size equal to 0 (e.g. PCM) the user - * will know how much space is needed because it depends on the value passed - * in buf_size as described below. In that case a lower value can be used. - * - * @param avctx the codec context - * @param[out] buf the output buffer - * @param[in] buf_size the output buffer size - * @param[in] samples the input buffer containing the samples - * The number of samples read from this buffer is frame_size*channels, - * both of which are defined in avctx. - * For codecs which have avctx->frame_size equal to 0 (e.g. PCM) the number of - * samples read from samples is equal to: - * buf_size * 8 / (avctx->channels * av_get_bits_per_sample(avctx->codec_id)) - * This also implies that av_get_bits_per_sample() must not return 0 for these - * codecs. - * @return On error a negative value is returned, on success zero or the number - * of bytes used to encode the data read from the input buffer. - */ -int attribute_deprecated avcodec_encode_audio(AVCodecContext *avctx, - uint8_t *buf, int buf_size, - const short *samples); -#endif - -/** - * Encode a frame of audio. - * - * Takes input samples from frame and writes the next output packet, if - * available, to avpkt. The output packet does not necessarily contain data for - * the most recent frame, as encoders can delay, split, and combine input frames - * internally as needed. - * - * @param avctx codec context - * @param avpkt output AVPacket. - * The user can supply an output buffer by setting - * avpkt->data and avpkt->size prior to calling the - * function, but if the size of the user-provided data is not - * large enough, encoding will fail. If avpkt->data and - * avpkt->size are set, avpkt->destruct must also be set. All - * other AVPacket fields will be reset by the encoder using - * av_init_packet(). If avpkt->data is NULL, the encoder will - * allocate it. 
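Beyond the @code fragment above, a complete pass over a raw byte stream also needs the init/close calls and a final zero-size flush; a sketch with decode_frame() as a placeholder callback:

    #include <stdint.h>
    #include <libavcodec/avcodec.h>

    static void parse_stream(AVCodecContext *ctx, const uint8_t *in, int in_len,
                             void (*decode_frame)(const uint8_t *, int))
    {
        AVCodecParserContext *pc = av_parser_init(ctx->codec_id);
        uint8_t *data;
        int size, used;

        if (!pc)
            return;
        while (in_len > 0) {
            used = av_parser_parse2(pc, ctx, &data, &size, in, in_len,
                                    AV_NOPTS_VALUE, AV_NOPTS_VALUE, 0);
            if (used < 0)
                break;
            in     += used;
            in_len -= used;
            if (size)
                decode_frame(data, size);
        }
        av_parser_parse2(pc, ctx, &data, &size, NULL, 0,   /* buf_size 0 signals EOF */
                         AV_NOPTS_VALUE, AV_NOPTS_VALUE, 0);
        if (size)
            decode_frame(data, size);
        av_parser_close(pc);
    }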
The encoder will set avpkt->size to the size - * of the output packet. - * - * If this function fails or produces no output, avpkt will be - * freed using av_free_packet() (i.e. avpkt->destruct will be - * called to free the user supplied buffer). - * @param[in] frame AVFrame containing the raw audio data to be encoded. - * May be NULL when flushing an encoder that has the - * CODEC_CAP_DELAY capability set. - * If CODEC_CAP_VARIABLE_FRAME_SIZE is set, then each frame - * can have any number of samples. - * If it is not set, frame->nb_samples must be equal to - * avctx->frame_size for all frames except the last. - * The final frame may be smaller than avctx->frame_size. - * @param[out] got_packet_ptr This field is set to 1 by libavcodec if the - * output packet is non-empty, and to 0 if it is - * empty. If the function returns an error, the - * packet can be assumed to be invalid, and the - * value of got_packet_ptr is undefined and should - * not be used. - * @return 0 on success, negative error code on failure - */ -int avcodec_encode_audio2(AVCodecContext *avctx, AVPacket *avpkt, - const AVFrame *frame, int *got_packet_ptr); - -#if FF_API_OLD_ENCODE_VIDEO -/** - * @deprecated use avcodec_encode_video2() instead. - * - * Encode a video frame from pict into buf. - * The input picture should be - * stored using a specific format, namely avctx.pix_fmt. - * - * @param avctx the codec context - * @param[out] buf the output buffer for the bitstream of encoded frame - * @param[in] buf_size the size of the output buffer in bytes - * @param[in] pict the input picture to encode - * @return On error a negative value is returned, on success zero or the number - * of bytes used from the output buffer. - */ -attribute_deprecated -int avcodec_encode_video(AVCodecContext *avctx, uint8_t *buf, int buf_size, - const AVFrame *pict); -#endif - -/** - * Encode a frame of video. - * - * Takes input raw video data from frame and writes the next output packet, if - * available, to avpkt. The output packet does not necessarily contain data for - * the most recent frame, as encoders can delay and reorder input frames - * internally as needed. - * - * @param avctx codec context - * @param avpkt output AVPacket. - * The user can supply an output buffer by setting - * avpkt->data and avpkt->size prior to calling the - * function, but if the size of the user-provided data is not - * large enough, encoding will fail. All other AVPacket fields - * will be reset by the encoder using av_init_packet(). If - * avpkt->data is NULL, the encoder will allocate it. - * The encoder will set avpkt->size to the size of the - * output packet. The returned data (if any) belongs to the - * caller, he is responsible for freeing it. - * - * If this function fails or produces no output, avpkt will be - * freed using av_free_packet() (i.e. avpkt->destruct will be - * called to free the user supplied buffer). - * @param[in] frame AVFrame containing the raw video data to be encoded. - * May be NULL when flushing an encoder that has the - * CODEC_CAP_DELAY capability set. - * @param[out] got_packet_ptr This field is set to 1 by libavcodec if the - * output packet is non-empty, and to 0 if it is - * empty. If the function returns an error, the - * packet can be assumed to be invalid, and the - * value of got_packet_ptr is undefined and should - * not be used. 
- * @return 0 on success, negative error code on failure - */ -int avcodec_encode_video2(AVCodecContext *avctx, AVPacket *avpkt, - const AVFrame *frame, int *got_packet_ptr); - -int avcodec_encode_subtitle(AVCodecContext *avctx, uint8_t *buf, int buf_size, - const AVSubtitle *sub); - - -/** - * @} - */ - -#if FF_API_AVCODEC_RESAMPLE -/** - * @defgroup lavc_resample Audio resampling - * @ingroup libavc - * @deprecated use libswresample instead - * - * @{ - */ -struct ReSampleContext; -struct AVResampleContext; - -typedef struct ReSampleContext ReSampleContext; - -/** - * Initialize audio resampling context. - * - * @param output_channels number of output channels - * @param input_channels number of input channels - * @param output_rate output sample rate - * @param input_rate input sample rate - * @param sample_fmt_out requested output sample format - * @param sample_fmt_in input sample format - * @param filter_length length of each FIR filter in the filterbank relative to the cutoff frequency - * @param log2_phase_count log2 of the number of entries in the polyphase filterbank - * @param linear if 1 then the used FIR filter will be linearly interpolated - between the 2 closest, if 0 the closest will be used - * @param cutoff cutoff frequency, 1.0 corresponds to half the output sampling rate - * @return allocated ReSampleContext, NULL if error occurred - */ -attribute_deprecated -ReSampleContext *av_audio_resample_init(int output_channels, int input_channels, - int output_rate, int input_rate, - enum AVSampleFormat sample_fmt_out, - enum AVSampleFormat sample_fmt_in, - int filter_length, int log2_phase_count, - int linear, double cutoff); - -attribute_deprecated -int audio_resample(ReSampleContext *s, short *output, short *input, int nb_samples); - -/** - * Free resample context. - * - * @param s a non-NULL pointer to a resample context previously - * created with av_audio_resample_init() - */ -attribute_deprecated -void audio_resample_close(ReSampleContext *s); - - -/** - * Initialize an audio resampler. - * Note, if either rate is not an integer then simply scale both rates up so they are. - * @param filter_length length of each FIR filter in the filterbank relative to the cutoff freq - * @param log2_phase_count log2 of the number of entries in the polyphase filterbank - * @param linear If 1 then the used FIR filter will be linearly interpolated - between the 2 closest, if 0 the closest will be used - * @param cutoff cutoff frequency, 1.0 corresponds to half the output sampling rate - */ -attribute_deprecated -struct AVResampleContext *av_resample_init(int out_rate, int in_rate, int filter_length, int log2_phase_count, int linear, double cutoff); - -/** - * Resample an array of samples using a previously configured context. - * @param src an array of unconsumed samples - * @param consumed the number of samples of src which have been consumed are returned here - * @param src_size the number of unconsumed samples available - * @param dst_size the amount of space in samples available in dst - * @param update_ctx If this is 0 then the context will not be modified, that way several channels can be resampled with the same context. - * @return the number of samples written in dst or -1 if an error occurred - */ -attribute_deprecated -int av_resample(struct AVResampleContext *c, short *dst, short *src, int *consumed, int src_size, int dst_size, int update_ctx); - - -/** - * Compensate samplerate/timestamp drift. 
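The avcodec_encode_audio2()/avcodec_encode_video2() contract above leads to a short send-and-collect helper; a sketch for video (audio is analogous), with write_packet() as a placeholder sink:

    #include <libavcodec/avcodec.h>

    static int encode_frame(AVCodecContext *ctx, const AVFrame *frame,
                            void (*write_packet)(AVPacket *))
    {
        AVPacket pkt;
        int got = 0, ret;

        av_init_packet(&pkt);
        pkt.data = NULL;      /* let the encoder allocate the payload */
        pkt.size = 0;
        ret = avcodec_encode_video2(ctx, &pkt, frame, &got);
        if (ret < 0)
            return ret;       /* on failure the packet has already been freed, per above */
        if (got) {
            write_packet(&pkt);
            av_free_packet(&pkt);
        }
        return got;           /* flush a CODEC_CAP_DELAY encoder by passing frame == NULL
                               * until this returns 0 */
    }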
The compensation is done by changing - * the resampler parameters, so no audible clicks or similar distortions occur - * @param compensation_distance distance in output samples over which the compensation should be performed - * @param sample_delta number of output samples which should be output less - * - * example: av_resample_compensate(c, 10, 500) - * here instead of 510 samples only 500 samples would be output - * - * note, due to rounding the actual compensation might be slightly different, - * especially if the compensation_distance is large and the in_rate used during init is small - */ -attribute_deprecated -void av_resample_compensate(struct AVResampleContext *c, int sample_delta, int compensation_distance); -attribute_deprecated -void av_resample_close(struct AVResampleContext *c); - -/** - * @} - */ -#endif - -/** - * @addtogroup lavc_picture - * @{ - */ - -/** - * Allocate memory for a picture. Call avpicture_free() to free it. - * - * @see avpicture_fill() - * - * @param picture the picture to be filled in - * @param pix_fmt the format of the picture - * @param width the width of the picture - * @param height the height of the picture - * @return zero if successful, a negative value if not - */ -int avpicture_alloc(AVPicture *picture, enum AVPixelFormat pix_fmt, int width, int height); - -/** - * Free a picture previously allocated by avpicture_alloc(). - * The data buffer used by the AVPicture is freed, but the AVPicture structure - * itself is not. - * - * @param picture the AVPicture to be freed - */ -void avpicture_free(AVPicture *picture); - -/** - * Fill in the AVPicture fields, always assume a linesize alignment of - * 1. - * - * @see av_image_fill_arrays() - */ -int avpicture_fill(AVPicture *picture, const uint8_t *ptr, - enum AVPixelFormat pix_fmt, int width, int height); - -/** - * Copy pixel data from an AVPicture into a buffer, always assume a - * linesize alignment of 1. - * - * @see av_image_copy_to_buffer() - */ -int avpicture_layout(const AVPicture* src, enum AVPixelFormat pix_fmt, - int width, int height, - unsigned char *dest, int dest_size); - -/** - * Calculate the size in bytes that a picture of the given width and height - * would occupy if stored in the given picture format. - * Always assume a linesize alignment of 1. - * - * @see av_image_get_buffer_size(). - */ -int avpicture_get_size(enum AVPixelFormat pix_fmt, int width, int height); - -#if FF_API_DEINTERLACE -/** - * deinterlace - if not supported return -1 - * - * @deprecated - use yadif (in libavfilter) instead - */ -attribute_deprecated -int avpicture_deinterlace(AVPicture *dst, const AVPicture *src, - enum AVPixelFormat pix_fmt, int width, int height); -#endif -/** - * Copy image src to dst. Wraps av_image_copy(). - */ -void av_picture_copy(AVPicture *dst, const AVPicture *src, - enum AVPixelFormat pix_fmt, int width, int height); - -/** - * Crop image top and left side. - */ -int av_picture_crop(AVPicture *dst, const AVPicture *src, - enum AVPixelFormat pix_fmt, int top_band, int left_band); - -/** - * Pad image. - */ -int av_picture_pad(AVPicture *dst, const AVPicture *src, int height, int width, enum AVPixelFormat pix_fmt, - int padtop, int padbottom, int padleft, int padright, int *color); - -/** - * @} - */ - -/** - * @defgroup lavc_misc Utility functions - * @ingroup libavc - * - * Miscellaneous utility functions related to both encoding and decoding - * (or neither). - * @{ - */ - -/** - * @defgroup lavc_misc_pixfmt Pixel formats - * - * Functions for working with pixel formats. 
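A short sketch of the AVPicture helpers above: wrap an existing RGB24 buffer, then copy it into a freshly allocated picture (the caller later releases dst with avpicture_free()):

    #include <stdint.h>
    #include <libavcodec/avcodec.h>

    static int copy_into_new_picture(AVPicture *dst, const uint8_t *src_buf,
                                     int width, int height)
    {
        AVPicture src;

        if (avpicture_fill(&src, src_buf, AV_PIX_FMT_RGB24, width, height) < 0)
            return -1;        /* fills data[]/linesize[] assuming alignment 1 */
        if (avpicture_alloc(dst, AV_PIX_FMT_RGB24, width, height) < 0)
            return -1;
        av_picture_copy(dst, &src, AV_PIX_FMT_RGB24, width, height);
        return 0;
    }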
- * @{ - */ - -/** - * Utility function to access log2_chroma_w log2_chroma_h from - * the pixel format AVPixFmtDescriptor. - * - * This function asserts that pix_fmt is valid. See av_pix_fmt_get_chroma_sub_sample - * for one that returns a failure code and continues in case of invalid - * pix_fmts. - * - * @param[in] pix_fmt the pixel format - * @param[out] h_shift store log2_chroma_w - * @param[out] v_shift store log2_chroma_h - * - * @see av_pix_fmt_get_chroma_sub_sample - */ - -void avcodec_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift); - -/** - * Return a value representing the fourCC code associated to the - * pixel format pix_fmt, or 0 if no associated fourCC code can be - * found. - */ -unsigned int avcodec_pix_fmt_to_codec_tag(enum AVPixelFormat pix_fmt); - -#define FF_LOSS_RESOLUTION 0x0001 /**< loss due to resolution change */ -#define FF_LOSS_DEPTH 0x0002 /**< loss due to color depth change */ -#define FF_LOSS_COLORSPACE 0x0004 /**< loss due to color space conversion */ -#define FF_LOSS_ALPHA 0x0008 /**< loss of alpha bits */ -#define FF_LOSS_COLORQUANT 0x0010 /**< loss due to color quantization */ -#define FF_LOSS_CHROMA 0x0020 /**< loss of chroma (e.g. RGB to gray conversion) */ - -/** - * Compute what kind of losses will occur when converting from one specific - * pixel format to another. - * When converting from one pixel format to another, information loss may occur. - * For example, when converting from RGB24 to GRAY, the color information will - * be lost. Similarly, other losses occur when converting from some formats to - * other formats. These losses can involve loss of chroma, but also loss of - * resolution, loss of color depth, loss due to the color space conversion, loss - * of the alpha bits or loss due to color quantization. - * avcodec_get_fix_fmt_loss() informs you about the various types of losses - * which will occur when converting from one pixel format to another. - * - * @param[in] dst_pix_fmt destination pixel format - * @param[in] src_pix_fmt source pixel format - * @param[in] has_alpha Whether the source pixel format alpha channel is used. - * @return Combination of flags informing you what kind of losses will occur - * (maximum loss for an invalid dst_pix_fmt). - */ -int avcodec_get_pix_fmt_loss(enum AVPixelFormat dst_pix_fmt, enum AVPixelFormat src_pix_fmt, - int has_alpha); - -/** - * Find the best pixel format to convert to given a certain source pixel - * format. When converting from one pixel format to another, information loss - * may occur. For example, when converting from RGB24 to GRAY, the color - * information will be lost. Similarly, other losses occur when converting from - * some formats to other formats. avcodec_find_best_pix_fmt_of_2() searches which of - * the given pixel formats should be used to suffer the least amount of loss. - * The pixel formats from which it chooses one, are determined by the - * pix_fmt_list parameter. - * - * - * @param[in] pix_fmt_list AV_PIX_FMT_NONE terminated array of pixel formats to choose from - * @param[in] src_pix_fmt source pixel format - * @param[in] has_alpha Whether the source pixel format alpha channel is used. - * @param[out] loss_ptr Combination of flags informing you what kind of losses will occur. - * @return The best pixel format to convert to or -1 if none was found. 
- */ -enum AVPixelFormat avcodec_find_best_pix_fmt_of_list(enum AVPixelFormat *pix_fmt_list, - enum AVPixelFormat src_pix_fmt, - int has_alpha, int *loss_ptr); - -/** - * Find the best pixel format to convert to given a certain source pixel - * format and a selection of two destination pixel formats. When converting from - * one pixel format to another, information loss may occur. For example, when converting - * from RGB24 to GRAY, the color information will be lost. Similarly, other losses occur when - * converting from some formats to other formats. avcodec_find_best_pix_fmt_of_2() selects which of - * the given pixel formats should be used to suffer the least amount of loss. - * - * If one of the destination formats is AV_PIX_FMT_NONE the other pixel format (if valid) will be - * returned. - * - * @code - * src_pix_fmt = AV_PIX_FMT_YUV420P; - * dst_pix_fmt1= AV_PIX_FMT_RGB24; - * dst_pix_fmt2= AV_PIX_FMT_GRAY8; - * dst_pix_fmt3= AV_PIX_FMT_RGB8; - * loss= FF_LOSS_CHROMA; // don't care about chroma loss, so chroma loss will be ignored. - * dst_pix_fmt = avcodec_find_best_pix_fmt_of_2(dst_pix_fmt1, dst_pix_fmt2, src_pix_fmt, alpha, &loss); - * dst_pix_fmt = avcodec_find_best_pix_fmt_of_2(dst_pix_fmt, dst_pix_fmt3, src_pix_fmt, alpha, &loss); - * @endcode - * - * @param[in] dst_pix_fmt1 One of the two destination pixel formats to choose from - * @param[in] dst_pix_fmt2 The other of the two destination pixel formats to choose from - * @param[in] src_pix_fmt Source pixel format - * @param[in] has_alpha Whether the source pixel format alpha channel is used. - * @param[in, out] loss_ptr Combination of loss flags. In: selects which of the losses to ignore, i.e. - * NULL or value of zero means we care about all losses. Out: the loss - * that occurs when converting from src to selected dst pixel format. - * @return The best pixel format to convert to or -1 if none was found. - */ -enum AVPixelFormat avcodec_find_best_pix_fmt_of_2(enum AVPixelFormat dst_pix_fmt1, enum AVPixelFormat dst_pix_fmt2, - enum AVPixelFormat src_pix_fmt, int has_alpha, int *loss_ptr); - -attribute_deprecated -#if AV_HAVE_INCOMPATIBLE_LIBAV_ABI -enum AVPixelFormat avcodec_find_best_pix_fmt2(enum AVPixelFormat *pix_fmt_list, - enum AVPixelFormat src_pix_fmt, - int has_alpha, int *loss_ptr); -#else -enum AVPixelFormat avcodec_find_best_pix_fmt2(enum AVPixelFormat dst_pix_fmt1, enum AVPixelFormat dst_pix_fmt2, - enum AVPixelFormat src_pix_fmt, int has_alpha, int *loss_ptr); -#endif - - -enum AVPixelFormat avcodec_default_get_format(struct AVCodecContext *s, const enum AVPixelFormat * fmt); - -/** - * @} - */ - -void avcodec_set_dimensions(AVCodecContext *s, int width, int height); - -/** - * Put a string representing the codec tag codec_tag in buf. - * - * @param buf_size size in bytes of buf - * @return the length of the string that would have been generated if - * enough space had been available, excluding the trailing null - */ -size_t av_get_codec_tag_string(char *buf, size_t buf_size, unsigned int codec_tag); - -void avcodec_string(char *buf, int buf_size, AVCodecContext *enc, int encode); - -/** - * Return a name for the specified profile, if available. - * - * @param codec the codec that is searched for the given profile - * @param profile the profile value for which a name is requested - * @return A name for the profile if found, NULL otherwise. 
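A minimal sketch of the list-based variant above, choosing a conversion target for a YUV420P source; the candidate set is arbitrary and only illustrates the AV_PIX_FMT_NONE termination:

    #include <libavcodec/avcodec.h>

    static enum AVPixelFormat pick_target_format(int *loss)
    {
        enum AVPixelFormat candidates[] = {
            AV_PIX_FMT_RGB24, AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE   /* NONE-terminated */
        };
        enum AVPixelFormat best;

        *loss = 0;            /* initialise; filled with FF_LOSS_* flags on return */
        best = avcodec_find_best_pix_fmt_of_list(candidates, AV_PIX_FMT_YUV420P,
                                                 0 /* has_alpha */, loss);
        return best;
    }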
- */ -const char *av_get_profile_name(const AVCodec *codec, int profile); - -int avcodec_default_execute(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2),void *arg, int *ret, int count, int size); -int avcodec_default_execute2(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2, int, int),void *arg, int *ret, int count); -//FIXME func typedef - -/** - * Fill AVFrame audio data and linesize pointers. - * - * The buffer buf must be a preallocated buffer with a size big enough - * to contain the specified samples amount. The filled AVFrame data - * pointers will point to this buffer. - * - * AVFrame extended_data channel pointers are allocated if necessary for - * planar audio. - * - * @param frame the AVFrame - * frame->nb_samples must be set prior to calling the - * function. This function fills in frame->data, - * frame->extended_data, frame->linesize[0]. - * @param nb_channels channel count - * @param sample_fmt sample format - * @param buf buffer to use for frame data - * @param buf_size size of buffer - * @param align plane size sample alignment (0 = default) - * @return >=0 on success, negative error code on failure - * @todo return the size in bytes required to store the samples in - * case of success, at the next libavutil bump - */ -int avcodec_fill_audio_frame(AVFrame *frame, int nb_channels, - enum AVSampleFormat sample_fmt, const uint8_t *buf, - int buf_size, int align); - -/** - * Flush buffers, should be called when seeking or when switching to a different stream. - */ -void avcodec_flush_buffers(AVCodecContext *avctx); - -/** - * Return codec bits per sample. - * - * @param[in] codec_id the codec - * @return Number of bits per sample or zero if unknown for the given codec. - */ -int av_get_bits_per_sample(enum AVCodecID codec_id); - -/** - * Return the PCM codec associated with a sample format. - * @param be endianness, 0 for little, 1 for big, - * -1 (or anything else) for native - * @return AV_CODEC_ID_PCM_* or AV_CODEC_ID_NONE - */ -enum AVCodecID av_get_pcm_codec(enum AVSampleFormat fmt, int be); - -/** - * Return codec bits per sample. - * Only return non-zero if the bits per sample is exactly correct, not an - * approximation. - * - * @param[in] codec_id the codec - * @return Number of bits per sample or zero if unknown for the given codec. - */ -int av_get_exact_bits_per_sample(enum AVCodecID codec_id); - -/** - * Return audio frame duration. - * - * @param avctx codec context - * @param frame_bytes size of the frame, or 0 if unknown - * @return frame duration, in samples, if known. 0 if not able to - * determine. - */ -int av_get_audio_frame_duration(AVCodecContext *avctx, int frame_bytes); - - -typedef struct AVBitStreamFilterContext { - void *priv_data; - struct AVBitStreamFilter *filter; - AVCodecParserContext *parser; - struct AVBitStreamFilterContext *next; -} AVBitStreamFilterContext; - - -typedef struct AVBitStreamFilter { - const char *name; - int priv_data_size; - int (*filter)(AVBitStreamFilterContext *bsfc, - AVCodecContext *avctx, const char *args, - uint8_t **poutbuf, int *poutbuf_size, - const uint8_t *buf, int buf_size, int keyframe); - void (*close)(AVBitStreamFilterContext *bsfc); - struct AVBitStreamFilter *next; -} AVBitStreamFilter; - -/** - * Register a bitstream filter. - * - * The filter will be accessible to the application code through - * av_bitstream_filter_next() or can be directly initialized with - * av_bitstream_filter_init(). 
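avcodec_fill_audio_frame() above only wraps an existing buffer; a minimal sketch for interleaved 16-bit samples (nb_samples must be set first, as noted):

    #include <stdint.h>
    #include <libavcodec/avcodec.h>
    #include <libavutil/frame.h>

    static int wrap_samples(AVFrame *frame, uint8_t *buf, int buf_size,
                            int nb_channels, int nb_samples)
    {
        frame->nb_samples = nb_samples;   /* required before the call */
        return avcodec_fill_audio_frame(frame, nb_channels, AV_SAMPLE_FMT_S16,
                                        buf, buf_size, 0 /* default alignment */);
    }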
- * - * @see avcodec_register_all() - */ -void av_register_bitstream_filter(AVBitStreamFilter *bsf); - -/** - * Create and initialize a bitstream filter context given a bitstream - * filter name. - * - * The returned context must be freed with av_bitstream_filter_close(). - * - * @param name the name of the bitstream filter - * @return a bitstream filter context if a matching filter was found - * and successfully initialized, NULL otherwise - */ -AVBitStreamFilterContext *av_bitstream_filter_init(const char *name); - -/** - * Filter bitstream. - * - * This function filters the buffer buf with size buf_size, and places the - * filtered buffer in the buffer pointed to by poutbuf. - * - * The output buffer must be freed by the caller. - * - * @param bsfc bitstream filter context created by av_bitstream_filter_init() - * @param avctx AVCodecContext accessed by the filter, may be NULL. - * If specified, this must point to the encoder context of the - * output stream the packet is sent to. - * @param args arguments which specify the filter configuration, may be NULL - * @param poutbuf pointer which is updated to point to the filtered buffer - * @param poutbuf_size pointer which is updated to the filtered buffer size in bytes - * @param buf buffer containing the data to filter - * @param buf_size size in bytes of buf - * @param keyframe set to non-zero if the buffer to filter corresponds to a key-frame packet data - * @return >= 0 in case of success, or a negative error code in case of failure - * - * If the return value is positive, an output buffer is allocated and - * is availble in *poutbuf, and is distinct from the input buffer. - * - * If the return value is 0, the output output buffer is not allocated - * and the output buffer should be considered identical to the input - * buffer, or in case *poutbuf was set it points to the input buffer - * (not necessarily to its starting address). - */ -int av_bitstream_filter_filter(AVBitStreamFilterContext *bsfc, - AVCodecContext *avctx, const char *args, - uint8_t **poutbuf, int *poutbuf_size, - const uint8_t *buf, int buf_size, int keyframe); - -/** - * Release bitstream filter context. - * - * @param bsf the bitstream filter context created with - * av_bitstream_filter_init(), can be NULL - */ -void av_bitstream_filter_close(AVBitStreamFilterContext *bsf); - -/** - * If f is NULL, return the first registered bitstream filter, - * if f is non-NULL, return the next registered bitstream filter - * after f, or NULL if f is the last one. - * - * This function can be used to iterate over all registered bitstream - * filters. - */ -AVBitStreamFilter *av_bitstream_filter_next(AVBitStreamFilter *f); - -/* memory */ - -/** - * Reallocate the given block if it is not large enough, otherwise do nothing. - * - * @see av_realloc - */ -void *av_fast_realloc(void *ptr, unsigned int *size, size_t min_size); - -/** - * Allocate a buffer, reusing the given one if large enough. - * - * Contrary to av_fast_realloc the current buffer contents might not be - * preserved and on error the old buffer is freed, thus no special - * handling to avoid memleaks is necessary. - * - * @param ptr pointer to pointer to already allocated buffer, overwritten with pointer to new buffer - * @param size size of the buffer *ptr points to - * @param min_size minimum size of *ptr buffer after returning, *ptr will be NULL and - * *size 0 if an error occurred. 
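A sketch of the bitstream-filter call sequence described above; "h264_mp4toannexb" is just a commonly used filter name, and sink() is a placeholder:

    #include <stdint.h>
    #include <libavcodec/avcodec.h>

    static int filter_packet(AVCodecContext *ctx, const uint8_t *buf, int buf_size,
                             int keyframe, void (*sink)(const uint8_t *, int))
    {
        AVBitStreamFilterContext *bsf = av_bitstream_filter_init("h264_mp4toannexb");
        uint8_t *out = NULL;
        int out_size = 0, ret;

        if (!bsf)
            return -1;
        ret = av_bitstream_filter_filter(bsf, ctx, NULL, &out, &out_size,
                                         buf, buf_size, keyframe);
        if (ret >= 0)
            sink(out, out_size);
        if (ret > 0)          /* a positive return means a new buffer was allocated for us */
            av_free(out);
        av_bitstream_filter_close(bsf);
        return ret;
    }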
- */ -void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size); - -/** - * Same behaviour av_fast_malloc but the buffer has additional - * FF_INPUT_BUFFER_PADDING_SIZE at the end which will will always be 0. - * - * In addition the whole buffer will initially and after resizes - * be 0-initialized so that no uninitialized data will ever appear. - */ -void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size); - -/** - * Same behaviour av_fast_padded_malloc except that buffer will always - * be 0-initialized after call. - */ -void av_fast_padded_mallocz(void *ptr, unsigned int *size, size_t min_size); - -/** - * Encode extradata length to a buffer. Used by xiph codecs. - * - * @param s buffer to write to; must be at least (v/255+1) bytes long - * @param v size of extradata in bytes - * @return number of bytes written to the buffer. - */ -unsigned int av_xiphlacing(unsigned char *s, unsigned int v); - -#if FF_API_MISSING_SAMPLE -/** - * Log a generic warning message about a missing feature. This function is - * intended to be used internally by FFmpeg (libavcodec, libavformat, etc.) - * only, and would normally not be used by applications. - * @param[in] avc a pointer to an arbitrary struct of which the first field is - * a pointer to an AVClass struct - * @param[in] feature string containing the name of the missing feature - * @param[in] want_sample indicates if samples are wanted which exhibit this feature. - * If want_sample is non-zero, additional verbage will be added to the log - * message which tells the user how to report samples to the development - * mailing list. - * @deprecated Use avpriv_report_missing_feature() instead. - */ -attribute_deprecated -void av_log_missing_feature(void *avc, const char *feature, int want_sample); - -/** - * Log a generic warning message asking for a sample. This function is - * intended to be used internally by FFmpeg (libavcodec, libavformat, etc.) - * only, and would normally not be used by applications. - * @param[in] avc a pointer to an arbitrary struct of which the first field is - * a pointer to an AVClass struct - * @param[in] msg string containing an optional message, or NULL if no message - * @deprecated Use avpriv_request_sample() instead. - */ -attribute_deprecated -void av_log_ask_for_sample(void *avc, const char *msg, ...) av_printf_format(2, 3); -#endif /* FF_API_MISSING_SAMPLE */ - -/** - * Register the hardware accelerator hwaccel. - */ -void av_register_hwaccel(AVHWAccel *hwaccel); - -/** - * If hwaccel is NULL, returns the first registered hardware accelerator, - * if hwaccel is non-NULL, returns the next registered hardware accelerator - * after hwaccel, or NULL if hwaccel is the last one. - */ -AVHWAccel *av_hwaccel_next(AVHWAccel *hwaccel); - - -/** - * Lock operation used by lockmgr - */ -enum AVLockOp { - AV_LOCK_CREATE, ///< Create a mutex - AV_LOCK_OBTAIN, ///< Lock the mutex - AV_LOCK_RELEASE, ///< Unlock the mutex - AV_LOCK_DESTROY, ///< Free mutex resources -}; - -/** - * Register a user provided lock manager supporting the operations - * specified by AVLockOp. mutex points to a (void *) where the - * lockmgr should store/get a pointer to a user allocated mutex. It's - * NULL upon AV_LOCK_CREATE and != NULL for all other ops. - * - * @param cb User defined callback. Note: FFmpeg may invoke calls to this - * callback during the call to av_lockmgr_register(). - * Thus, the application must be prepared to handle that. - * If cb is set to NULL the lockmgr will be unregistered. 
- * Also note that during unregistration the previously registered - * lockmgr callback may also be invoked. - */ -int av_lockmgr_register(int (*cb)(void **mutex, enum AVLockOp op)); - -/** - * Get the type of the given codec. - */ -enum AVMediaType avcodec_get_type(enum AVCodecID codec_id); - -/** - * Get the name of a codec. - * @return a static string identifying the codec; never NULL - */ -const char *avcodec_get_name(enum AVCodecID id); - -/** - * @return a positive value if s is open (i.e. avcodec_open2() was called on it - * with no corresponding avcodec_close()), 0 otherwise. - */ -int avcodec_is_open(AVCodecContext *s); - -/** - * @return a non-zero number if codec is an encoder, zero otherwise - */ -int av_codec_is_encoder(const AVCodec *codec); - -/** - * @return a non-zero number if codec is a decoder, zero otherwise - */ -int av_codec_is_decoder(const AVCodec *codec); - -/** - * @return descriptor for given codec ID or NULL if no descriptor exists. - */ -const AVCodecDescriptor *avcodec_descriptor_get(enum AVCodecID id); - -/** - * Iterate over all codec descriptors known to libavcodec. - * - * @param prev previous descriptor. NULL to get the first descriptor. - * - * @return next descriptor or NULL after the last descriptor - */ -const AVCodecDescriptor *avcodec_descriptor_next(const AVCodecDescriptor *prev); - -/** - * @return codec descriptor with the given name or NULL if no such descriptor - * exists. - */ -const AVCodecDescriptor *avcodec_descriptor_get_by_name(const char *name); - -/** - * @} - */ - -#endif /* AVCODEC_AVCODEC_H */ diff --git a/3rdparty/include/ffmpeg_/libavcodec/avfft.h b/3rdparty/include/ffmpeg_/libavcodec/avfft.h deleted file mode 100644 index 2d20a45f87..0000000000 --- a/3rdparty/include/ffmpeg_/libavcodec/avfft.h +++ /dev/null @@ -1,116 +0,0 @@ -/* - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#ifndef AVCODEC_AVFFT_H -#define AVCODEC_AVFFT_H - -/** - * @file - * @ingroup lavc_fft - * FFT functions - */ - -/** - * @defgroup lavc_fft FFT functions - * @ingroup lavc_misc - * - * @{ - */ - -typedef float FFTSample; - -typedef struct FFTComplex { - FFTSample re, im; -} FFTComplex; - -typedef struct FFTContext FFTContext; - -/** - * Set up a complex FFT. - * @param nbits log2 of the length of the input array - * @param inverse if 0 perform the forward transform, if 1 perform the inverse - */ -FFTContext *av_fft_init(int nbits, int inverse); - -/** - * Do the permutation needed BEFORE calling ff_fft_calc(). - */ -void av_fft_permute(FFTContext *s, FFTComplex *z); - -/** - * Do a complex FFT with the parameters defined in av_fft_init(). The - * input data must be permuted before. No 1.0/sqrt(n) normalization is done. 
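One way to satisfy the lockmgr contract above is a pthread-based callback; this is only a sketch, not something the header itself provides:

    #include <stdlib.h>
    #include <pthread.h>
    #include <libavcodec/avcodec.h>

    static int lockmgr_cb(void **mutex, enum AVLockOp op)
    {
        pthread_mutex_t *m = *mutex;

        switch (op) {
        case AV_LOCK_CREATE:              /* *mutex is NULL here, per the docs */
            m = malloc(sizeof(*m));
            if (!m)
                return 1;
            if (pthread_mutex_init(m, NULL)) {
                free(m);
                return 1;
            }
            *mutex = m;
            return 0;
        case AV_LOCK_OBTAIN:  return !!pthread_mutex_lock(m);
        case AV_LOCK_RELEASE: return !!pthread_mutex_unlock(m);
        case AV_LOCK_DESTROY:
            if (m) {
                pthread_mutex_destroy(m);
                free(m);
            }
            *mutex = NULL;
            return 0;
        }
        return 1;
    }
    /* registered once at startup: av_lockmgr_register(lockmgr_cb); */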
- */ -void av_fft_calc(FFTContext *s, FFTComplex *z); - -void av_fft_end(FFTContext *s); - -FFTContext *av_mdct_init(int nbits, int inverse, double scale); -void av_imdct_calc(FFTContext *s, FFTSample *output, const FFTSample *input); -void av_imdct_half(FFTContext *s, FFTSample *output, const FFTSample *input); -void av_mdct_calc(FFTContext *s, FFTSample *output, const FFTSample *input); -void av_mdct_end(FFTContext *s); - -/* Real Discrete Fourier Transform */ - -enum RDFTransformType { - DFT_R2C, - IDFT_C2R, - IDFT_R2C, - DFT_C2R, -}; - -typedef struct RDFTContext RDFTContext; - -/** - * Set up a real FFT. - * @param nbits log2 of the length of the input array - * @param trans the type of transform - */ -RDFTContext *av_rdft_init(int nbits, enum RDFTransformType trans); -void av_rdft_calc(RDFTContext *s, FFTSample *data); -void av_rdft_end(RDFTContext *s); - -/* Discrete Cosine Transform */ - -typedef struct DCTContext DCTContext; - -enum DCTTransformType { - DCT_II = 0, - DCT_III, - DCT_I, - DST_I, -}; - -/** - * Set up DCT. - * @param nbits size of the input array: - * (1 << nbits) for DCT-II, DCT-III and DST-I - * (1 << nbits) + 1 for DCT-I - * - * @note the first element of the input of DST-I is ignored - */ -DCTContext *av_dct_init(int nbits, enum DCTTransformType type); -void av_dct_calc(DCTContext *s, FFTSample *data); -void av_dct_end (DCTContext *s); - -/** - * @} - */ - -#endif /* AVCODEC_AVFFT_H */ diff --git a/3rdparty/include/ffmpeg_/libavcodec/dxva2.h b/3rdparty/include/ffmpeg_/libavcodec/dxva2.h deleted file mode 100644 index ac39e06917..0000000000 --- a/3rdparty/include/ffmpeg_/libavcodec/dxva2.h +++ /dev/null @@ -1,95 +0,0 @@ -/* - * DXVA2 HW acceleration - * - * copyright (c) 2009 Laurent Aimar - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#ifndef AVCODEC_DXVA_H -#define AVCODEC_DXVA_H - -/** - * @file - * @ingroup lavc_codec_hwaccel_dxva2 - * Public libavcodec DXVA2 header. - */ - -#if defined(_WIN32_WINNT) && _WIN32_WINNT < 0x0600 -#undef _WIN32_WINNT -#endif - -#if !defined(_WIN32_WINNT) -#define _WIN32_WINNT 0x0600 -#endif - -#include -#include -#include - -/** - * @defgroup lavc_codec_hwaccel_dxva2 DXVA2 - * @ingroup lavc_codec_hwaccel - * - * @{ - */ - -#define FF_DXVA2_WORKAROUND_SCALING_LIST_ZIGZAG 1 ///< Work around for DXVA2 and old UVD/UVD+ ATI video cards - -/** - * This structure is used to provides the necessary configurations and data - * to the DXVA2 FFmpeg HWAccel implementation. - * - * The application must make it available as AVCodecContext.hwaccel_context. 
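The complex-FFT calls above are used in a fixed order; a minimal in-place forward transform of 1 << nbits samples:

    #include <libavcodec/avfft.h>

    static int fft_forward(FFTComplex *z, int nbits)
    {
        FFTContext *fft = av_fft_init(nbits, 0 /* 0 = forward, 1 = inverse */);

        if (!fft)
            return -1;
        av_fft_permute(fft, z);   /* required reordering before the transform */
        av_fft_calc(fft, z);      /* no 1/sqrt(n) normalization is applied */
        av_fft_end(fft);
        return 0;
    }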
- */ -struct dxva_context { - /** - * DXVA2 decoder object - */ - IDirectXVideoDecoder *decoder; - - /** - * DXVA2 configuration used to create the decoder - */ - const DXVA2_ConfigPictureDecode *cfg; - - /** - * The number of surface in the surface array - */ - unsigned surface_count; - - /** - * The array of Direct3D surfaces used to create the decoder - */ - LPDIRECT3DSURFACE9 *surface; - - /** - * A bit field configuring the workarounds needed for using the decoder - */ - uint64_t workaround; - - /** - * Private to the FFmpeg AVHWAccel implementation - */ - unsigned report_id; -}; - -/** - * @} - */ - -#endif /* AVCODEC_DXVA_H */ diff --git a/3rdparty/include/ffmpeg_/libavcodec/old_codec_ids.h b/3rdparty/include/ffmpeg_/libavcodec/old_codec_ids.h deleted file mode 100644 index d8a8f746d9..0000000000 --- a/3rdparty/include/ffmpeg_/libavcodec/old_codec_ids.h +++ /dev/null @@ -1,397 +0,0 @@ -/* - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#ifndef AVCODEC_OLD_CODEC_IDS_H -#define AVCODEC_OLD_CODEC_IDS_H - -#include "libavutil/common.h" - -/* - * This header exists to prevent new codec IDs from being accidentally added to - * the deprecated list. - * Do not include it directly. It will be removed on next major bump - * - * Do not add new items to this list. Use the AVCodecID enum instead. 
- */ - - CODEC_ID_NONE = AV_CODEC_ID_NONE, - - /* video codecs */ - CODEC_ID_MPEG1VIDEO, - CODEC_ID_MPEG2VIDEO, ///< preferred ID for MPEG-1/2 video decoding - CODEC_ID_MPEG2VIDEO_XVMC, - CODEC_ID_H261, - CODEC_ID_H263, - CODEC_ID_RV10, - CODEC_ID_RV20, - CODEC_ID_MJPEG, - CODEC_ID_MJPEGB, - CODEC_ID_LJPEG, - CODEC_ID_SP5X, - CODEC_ID_JPEGLS, - CODEC_ID_MPEG4, - CODEC_ID_RAWVIDEO, - CODEC_ID_MSMPEG4V1, - CODEC_ID_MSMPEG4V2, - CODEC_ID_MSMPEG4V3, - CODEC_ID_WMV1, - CODEC_ID_WMV2, - CODEC_ID_H263P, - CODEC_ID_H263I, - CODEC_ID_FLV1, - CODEC_ID_SVQ1, - CODEC_ID_SVQ3, - CODEC_ID_DVVIDEO, - CODEC_ID_HUFFYUV, - CODEC_ID_CYUV, - CODEC_ID_H264, - CODEC_ID_INDEO3, - CODEC_ID_VP3, - CODEC_ID_THEORA, - CODEC_ID_ASV1, - CODEC_ID_ASV2, - CODEC_ID_FFV1, - CODEC_ID_4XM, - CODEC_ID_VCR1, - CODEC_ID_CLJR, - CODEC_ID_MDEC, - CODEC_ID_ROQ, - CODEC_ID_INTERPLAY_VIDEO, - CODEC_ID_XAN_WC3, - CODEC_ID_XAN_WC4, - CODEC_ID_RPZA, - CODEC_ID_CINEPAK, - CODEC_ID_WS_VQA, - CODEC_ID_MSRLE, - CODEC_ID_MSVIDEO1, - CODEC_ID_IDCIN, - CODEC_ID_8BPS, - CODEC_ID_SMC, - CODEC_ID_FLIC, - CODEC_ID_TRUEMOTION1, - CODEC_ID_VMDVIDEO, - CODEC_ID_MSZH, - CODEC_ID_ZLIB, - CODEC_ID_QTRLE, - CODEC_ID_TSCC, - CODEC_ID_ULTI, - CODEC_ID_QDRAW, - CODEC_ID_VIXL, - CODEC_ID_QPEG, - CODEC_ID_PNG, - CODEC_ID_PPM, - CODEC_ID_PBM, - CODEC_ID_PGM, - CODEC_ID_PGMYUV, - CODEC_ID_PAM, - CODEC_ID_FFVHUFF, - CODEC_ID_RV30, - CODEC_ID_RV40, - CODEC_ID_VC1, - CODEC_ID_WMV3, - CODEC_ID_LOCO, - CODEC_ID_WNV1, - CODEC_ID_AASC, - CODEC_ID_INDEO2, - CODEC_ID_FRAPS, - CODEC_ID_TRUEMOTION2, - CODEC_ID_BMP, - CODEC_ID_CSCD, - CODEC_ID_MMVIDEO, - CODEC_ID_ZMBV, - CODEC_ID_AVS, - CODEC_ID_SMACKVIDEO, - CODEC_ID_NUV, - CODEC_ID_KMVC, - CODEC_ID_FLASHSV, - CODEC_ID_CAVS, - CODEC_ID_JPEG2000, - CODEC_ID_VMNC, - CODEC_ID_VP5, - CODEC_ID_VP6, - CODEC_ID_VP6F, - CODEC_ID_TARGA, - CODEC_ID_DSICINVIDEO, - CODEC_ID_TIERTEXSEQVIDEO, - CODEC_ID_TIFF, - CODEC_ID_GIF, - CODEC_ID_DXA, - CODEC_ID_DNXHD, - CODEC_ID_THP, - CODEC_ID_SGI, - CODEC_ID_C93, - CODEC_ID_BETHSOFTVID, - CODEC_ID_PTX, - CODEC_ID_TXD, - CODEC_ID_VP6A, - CODEC_ID_AMV, - CODEC_ID_VB, - CODEC_ID_PCX, - CODEC_ID_SUNRAST, - CODEC_ID_INDEO4, - CODEC_ID_INDEO5, - CODEC_ID_MIMIC, - CODEC_ID_RL2, - CODEC_ID_ESCAPE124, - CODEC_ID_DIRAC, - CODEC_ID_BFI, - CODEC_ID_CMV, - CODEC_ID_MOTIONPIXELS, - CODEC_ID_TGV, - CODEC_ID_TGQ, - CODEC_ID_TQI, - CODEC_ID_AURA, - CODEC_ID_AURA2, - CODEC_ID_V210X, - CODEC_ID_TMV, - CODEC_ID_V210, - CODEC_ID_DPX, - CODEC_ID_MAD, - CODEC_ID_FRWU, - CODEC_ID_FLASHSV2, - CODEC_ID_CDGRAPHICS, - CODEC_ID_R210, - CODEC_ID_ANM, - CODEC_ID_BINKVIDEO, - CODEC_ID_IFF_ILBM, - CODEC_ID_IFF_BYTERUN1, - CODEC_ID_KGV1, - CODEC_ID_YOP, - CODEC_ID_VP8, - CODEC_ID_PICTOR, - CODEC_ID_ANSI, - CODEC_ID_A64_MULTI, - CODEC_ID_A64_MULTI5, - CODEC_ID_R10K, - CODEC_ID_MXPEG, - CODEC_ID_LAGARITH, - CODEC_ID_PRORES, - CODEC_ID_JV, - CODEC_ID_DFA, - CODEC_ID_WMV3IMAGE, - CODEC_ID_VC1IMAGE, - CODEC_ID_UTVIDEO, - CODEC_ID_BMV_VIDEO, - CODEC_ID_VBLE, - CODEC_ID_DXTORY, - CODEC_ID_V410, - CODEC_ID_XWD, - CODEC_ID_CDXL, - CODEC_ID_XBM, - CODEC_ID_ZEROCODEC, - CODEC_ID_MSS1, - CODEC_ID_MSA1, - CODEC_ID_TSCC2, - CODEC_ID_MTS2, - CODEC_ID_CLLC, - CODEC_ID_Y41P = MKBETAG('Y','4','1','P'), - CODEC_ID_ESCAPE130 = MKBETAG('E','1','3','0'), - CODEC_ID_EXR = MKBETAG('0','E','X','R'), - CODEC_ID_AVRP = MKBETAG('A','V','R','P'), - - CODEC_ID_G2M = MKBETAG( 0 ,'G','2','M'), - CODEC_ID_AVUI = MKBETAG('A','V','U','I'), - CODEC_ID_AYUV = MKBETAG('A','Y','U','V'), - CODEC_ID_V308 = MKBETAG('V','3','0','8'), - CODEC_ID_V408 = 
MKBETAG('V','4','0','8'), - CODEC_ID_YUV4 = MKBETAG('Y','U','V','4'), - CODEC_ID_SANM = MKBETAG('S','A','N','M'), - CODEC_ID_PAF_VIDEO = MKBETAG('P','A','F','V'), - CODEC_ID_SNOW = AV_CODEC_ID_SNOW, - - /* various PCM "codecs" */ - CODEC_ID_FIRST_AUDIO = 0x10000, ///< A dummy id pointing at the start of audio codecs - CODEC_ID_PCM_S16LE = 0x10000, - CODEC_ID_PCM_S16BE, - CODEC_ID_PCM_U16LE, - CODEC_ID_PCM_U16BE, - CODEC_ID_PCM_S8, - CODEC_ID_PCM_U8, - CODEC_ID_PCM_MULAW, - CODEC_ID_PCM_ALAW, - CODEC_ID_PCM_S32LE, - CODEC_ID_PCM_S32BE, - CODEC_ID_PCM_U32LE, - CODEC_ID_PCM_U32BE, - CODEC_ID_PCM_S24LE, - CODEC_ID_PCM_S24BE, - CODEC_ID_PCM_U24LE, - CODEC_ID_PCM_U24BE, - CODEC_ID_PCM_S24DAUD, - CODEC_ID_PCM_ZORK, - CODEC_ID_PCM_S16LE_PLANAR, - CODEC_ID_PCM_DVD, - CODEC_ID_PCM_F32BE, - CODEC_ID_PCM_F32LE, - CODEC_ID_PCM_F64BE, - CODEC_ID_PCM_F64LE, - CODEC_ID_PCM_BLURAY, - CODEC_ID_PCM_LXF, - CODEC_ID_S302M, - CODEC_ID_PCM_S8_PLANAR, - - /* various ADPCM codecs */ - CODEC_ID_ADPCM_IMA_QT = 0x11000, - CODEC_ID_ADPCM_IMA_WAV, - CODEC_ID_ADPCM_IMA_DK3, - CODEC_ID_ADPCM_IMA_DK4, - CODEC_ID_ADPCM_IMA_WS, - CODEC_ID_ADPCM_IMA_SMJPEG, - CODEC_ID_ADPCM_MS, - CODEC_ID_ADPCM_4XM, - CODEC_ID_ADPCM_XA, - CODEC_ID_ADPCM_ADX, - CODEC_ID_ADPCM_EA, - CODEC_ID_ADPCM_G726, - CODEC_ID_ADPCM_CT, - CODEC_ID_ADPCM_SWF, - CODEC_ID_ADPCM_YAMAHA, - CODEC_ID_ADPCM_SBPRO_4, - CODEC_ID_ADPCM_SBPRO_3, - CODEC_ID_ADPCM_SBPRO_2, - CODEC_ID_ADPCM_THP, - CODEC_ID_ADPCM_IMA_AMV, - CODEC_ID_ADPCM_EA_R1, - CODEC_ID_ADPCM_EA_R3, - CODEC_ID_ADPCM_EA_R2, - CODEC_ID_ADPCM_IMA_EA_SEAD, - CODEC_ID_ADPCM_IMA_EA_EACS, - CODEC_ID_ADPCM_EA_XAS, - CODEC_ID_ADPCM_EA_MAXIS_XA, - CODEC_ID_ADPCM_IMA_ISS, - CODEC_ID_ADPCM_G722, - CODEC_ID_ADPCM_IMA_APC, - CODEC_ID_VIMA = MKBETAG('V','I','M','A'), - - /* AMR */ - CODEC_ID_AMR_NB = 0x12000, - CODEC_ID_AMR_WB, - - /* RealAudio codecs*/ - CODEC_ID_RA_144 = 0x13000, - CODEC_ID_RA_288, - - /* various DPCM codecs */ - CODEC_ID_ROQ_DPCM = 0x14000, - CODEC_ID_INTERPLAY_DPCM, - CODEC_ID_XAN_DPCM, - CODEC_ID_SOL_DPCM, - - /* audio codecs */ - CODEC_ID_MP2 = 0x15000, - CODEC_ID_MP3, ///< preferred ID for decoding MPEG audio layer 1, 2 or 3 - CODEC_ID_AAC, - CODEC_ID_AC3, - CODEC_ID_DTS, - CODEC_ID_VORBIS, - CODEC_ID_DVAUDIO, - CODEC_ID_WMAV1, - CODEC_ID_WMAV2, - CODEC_ID_MACE3, - CODEC_ID_MACE6, - CODEC_ID_VMDAUDIO, - CODEC_ID_FLAC, - CODEC_ID_MP3ADU, - CODEC_ID_MP3ON4, - CODEC_ID_SHORTEN, - CODEC_ID_ALAC, - CODEC_ID_WESTWOOD_SND1, - CODEC_ID_GSM, ///< as in Berlin toast format - CODEC_ID_QDM2, - CODEC_ID_COOK, - CODEC_ID_TRUESPEECH, - CODEC_ID_TTA, - CODEC_ID_SMACKAUDIO, - CODEC_ID_QCELP, - CODEC_ID_WAVPACK, - CODEC_ID_DSICINAUDIO, - CODEC_ID_IMC, - CODEC_ID_MUSEPACK7, - CODEC_ID_MLP, - CODEC_ID_GSM_MS, /* as found in WAV */ - CODEC_ID_ATRAC3, - CODEC_ID_VOXWARE, - CODEC_ID_APE, - CODEC_ID_NELLYMOSER, - CODEC_ID_MUSEPACK8, - CODEC_ID_SPEEX, - CODEC_ID_WMAVOICE, - CODEC_ID_WMAPRO, - CODEC_ID_WMALOSSLESS, - CODEC_ID_ATRAC3P, - CODEC_ID_EAC3, - CODEC_ID_SIPR, - CODEC_ID_MP1, - CODEC_ID_TWINVQ, - CODEC_ID_TRUEHD, - CODEC_ID_MP4ALS, - CODEC_ID_ATRAC1, - CODEC_ID_BINKAUDIO_RDFT, - CODEC_ID_BINKAUDIO_DCT, - CODEC_ID_AAC_LATM, - CODEC_ID_QDMC, - CODEC_ID_CELT, - CODEC_ID_G723_1, - CODEC_ID_G729, - CODEC_ID_8SVX_EXP, - CODEC_ID_8SVX_FIB, - CODEC_ID_BMV_AUDIO, - CODEC_ID_RALF, - CODEC_ID_IAC, - CODEC_ID_ILBC, - CODEC_ID_FFWAVESYNTH = MKBETAG('F','F','W','S'), - CODEC_ID_SONIC = MKBETAG('S','O','N','C'), - CODEC_ID_SONIC_LS = MKBETAG('S','O','N','L'), - CODEC_ID_PAF_AUDIO = MKBETAG('P','A','F','A'), - CODEC_ID_OPUS = 
MKBETAG('O','P','U','S'), - - /* subtitle codecs */ - CODEC_ID_FIRST_SUBTITLE = 0x17000, ///< A dummy ID pointing at the start of subtitle codecs. - CODEC_ID_DVD_SUBTITLE = 0x17000, - CODEC_ID_DVB_SUBTITLE, - CODEC_ID_TEXT, ///< raw UTF-8 text - CODEC_ID_XSUB, - CODEC_ID_SSA, - CODEC_ID_MOV_TEXT, - CODEC_ID_HDMV_PGS_SUBTITLE, - CODEC_ID_DVB_TELETEXT, - CODEC_ID_SRT, - CODEC_ID_MICRODVD = MKBETAG('m','D','V','D'), - CODEC_ID_EIA_608 = MKBETAG('c','6','0','8'), - CODEC_ID_JACOSUB = MKBETAG('J','S','U','B'), - CODEC_ID_SAMI = MKBETAG('S','A','M','I'), - CODEC_ID_REALTEXT = MKBETAG('R','T','X','T'), - CODEC_ID_SUBVIEWER = MKBETAG('S','u','b','V'), - - /* other specific kind of codecs (generally used for attachments) */ - CODEC_ID_FIRST_UNKNOWN = 0x18000, ///< A dummy ID pointing at the start of various fake codecs. - CODEC_ID_TTF = 0x18000, - CODEC_ID_BINTEXT = MKBETAG('B','T','X','T'), - CODEC_ID_XBIN = MKBETAG('X','B','I','N'), - CODEC_ID_IDF = MKBETAG( 0 ,'I','D','F'), - CODEC_ID_OTF = MKBETAG( 0 ,'O','T','F'), - - CODEC_ID_PROBE = 0x19000, ///< codec_id is not known (like CODEC_ID_NONE) but lavf should attempt to identify it - - CODEC_ID_MPEG2TS = 0x20000, /**< _FAKE_ codec to indicate a raw MPEG-2 TS - * stream (only used by libavformat) */ - CODEC_ID_MPEG4SYSTEMS = 0x20001, /**< _FAKE_ codec to indicate a MPEG-4 Systems - * stream (only used by libavformat) */ - CODEC_ID_FFMETADATA = 0x21000, ///< Dummy codec for streams containing only metadata information. - -#endif /* AVCODEC_OLD_CODEC_IDS_H */ diff --git a/3rdparty/include/ffmpeg_/libavcodec/vaapi.h b/3rdparty/include/ffmpeg_/libavcodec/vaapi.h deleted file mode 100644 index 815a27e226..0000000000 --- a/3rdparty/include/ffmpeg_/libavcodec/vaapi.h +++ /dev/null @@ -1,173 +0,0 @@ -/* - * Video Acceleration API (shared data between FFmpeg and the video player) - * HW decode acceleration for MPEG-2, MPEG-4, H.264 and VC-1 - * - * Copyright (C) 2008-2009 Splitted-Desktop Systems - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#ifndef AVCODEC_VAAPI_H -#define AVCODEC_VAAPI_H - -/** - * @file - * @ingroup lavc_codec_hwaccel_vaapi - * Public libavcodec VA API header. - */ - -#include - -/** - * @defgroup lavc_codec_hwaccel_vaapi VA API Decoding - * @ingroup lavc_codec_hwaccel - * @{ - */ - -/** - * This structure is used to share data between the FFmpeg library and - * the client video application. - * This shall be zero-allocated and available as - * AVCodecContext.hwaccel_context. All user members can be set once - * during initialization or through each AVCodecContext.get_buffer() - * function call. In any case, they must be valid prior to calling - * decoding functions. 
- */ -struct vaapi_context { - /** - * Window system dependent data - * - * - encoding: unused - * - decoding: Set by user - */ - void *display; - - /** - * Configuration ID - * - * - encoding: unused - * - decoding: Set by user - */ - uint32_t config_id; - - /** - * Context ID (video decode pipeline) - * - * - encoding: unused - * - decoding: Set by user - */ - uint32_t context_id; - - /** - * VAPictureParameterBuffer ID - * - * - encoding: unused - * - decoding: Set by libavcodec - */ - uint32_t pic_param_buf_id; - - /** - * VAIQMatrixBuffer ID - * - * - encoding: unused - * - decoding: Set by libavcodec - */ - uint32_t iq_matrix_buf_id; - - /** - * VABitPlaneBuffer ID (for VC-1 decoding) - * - * - encoding: unused - * - decoding: Set by libavcodec - */ - uint32_t bitplane_buf_id; - - /** - * Slice parameter/data buffer IDs - * - * - encoding: unused - * - decoding: Set by libavcodec - */ - uint32_t *slice_buf_ids; - - /** - * Number of effective slice buffer IDs to send to the HW - * - * - encoding: unused - * - decoding: Set by libavcodec - */ - unsigned int n_slice_buf_ids; - - /** - * Size of pre-allocated slice_buf_ids - * - * - encoding: unused - * - decoding: Set by libavcodec - */ - unsigned int slice_buf_ids_alloc; - - /** - * Pointer to VASliceParameterBuffers - * - * - encoding: unused - * - decoding: Set by libavcodec - */ - void *slice_params; - - /** - * Size of a VASliceParameterBuffer element - * - * - encoding: unused - * - decoding: Set by libavcodec - */ - unsigned int slice_param_size; - - /** - * Size of pre-allocated slice_params - * - * - encoding: unused - * - decoding: Set by libavcodec - */ - unsigned int slice_params_alloc; - - /** - * Number of slices currently filled in - * - * - encoding: unused - * - decoding: Set by libavcodec - */ - unsigned int slice_count; - - /** - * Pointer to slice data buffer base - * - encoding: unused - * - decoding: Set by libavcodec - */ - const uint8_t *slice_data; - - /** - * Current size of slice data - * - * - encoding: unused - * - decoding: Set by libavcodec - */ - uint32_t slice_data_size; -}; - -/* @} */ - -#endif /* AVCODEC_VAAPI_H */ diff --git a/3rdparty/include/ffmpeg_/libavcodec/vda.h b/3rdparty/include/ffmpeg_/libavcodec/vda.h deleted file mode 100644 index b3d6399a65..0000000000 --- a/3rdparty/include/ffmpeg_/libavcodec/vda.h +++ /dev/null @@ -1,162 +0,0 @@ -/* - * VDA HW acceleration - * - * copyright (c) 2011 Sebastien Zwickert - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#ifndef AVCODEC_VDA_H -#define AVCODEC_VDA_H - -/** - * @file - * @ingroup lavc_codec_hwaccel_vda - * Public libavcodec VDA header. 
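/*
 * [Editor's sketch, not part of the deleted header] Filling in the user-owned
 * fields of struct vaapi_context above, following its documentation: the
 * structure is zero-allocated, the fields marked "Set by user" (display,
 * config_id, context_id) are filled from the application's own VA-API setup,
 * and the result is attached to the codec context. The my_* parameters are
 * hypothetical placeholders.
 */
#include <stdint.h>
#include "libavcodec/avcodec.h"
#include "libavcodec/vaapi.h"
#include "libavutil/mem.h"

static int my_attach_vaapi(AVCodecContext *avctx, void *my_va_display,
                           uint32_t my_config_id, uint32_t my_context_id)
{
    struct vaapi_context *ctx = av_mallocz(sizeof(*ctx)); /* "shall be zero-allocated" */
    if (!ctx)
        return AVERROR(ENOMEM);
    ctx->display    = my_va_display;   /* window system dependent data */
    ctx->config_id  = my_config_id;
    ctx->context_id = my_context_id;   /* video decode pipeline        */
    avctx->hwaccel_context = ctx;      /* remaining fields are filled by libavcodec */
    return 0;
}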
- */ - -#include - -// emmintrin.h is unable to compile with -std=c99 -Werror=missing-prototypes -// http://openradar.appspot.com/8026390 -#undef __GNUC_STDC_INLINE__ - -#define Picture QuickdrawPicture -#include -#undef Picture - -#include "libavcodec/version.h" - -/** - * @defgroup lavc_codec_hwaccel_vda VDA - * @ingroup lavc_codec_hwaccel - * - * @{ - */ - -/** - * This structure is used to provide the necessary configurations and data - * to the VDA FFmpeg HWAccel implementation. - * - * The application must make it available as AVCodecContext.hwaccel_context. - */ -struct vda_context { - /** - * VDA decoder object. - * - * - encoding: unused - * - decoding: Set/Unset by libavcodec. - */ - VDADecoder decoder; - - /** - * The Core Video pixel buffer that contains the current image data. - * - * encoding: unused - * decoding: Set by libavcodec. Unset by user. - */ - CVPixelBufferRef cv_buffer; - - /** - * Use the hardware decoder in synchronous mode. - * - * encoding: unused - * decoding: Set by user. - */ - int use_sync_decoding; - - /** - * The frame width. - * - * - encoding: unused - * - decoding: Set/Unset by user. - */ - int width; - - /** - * The frame height. - * - * - encoding: unused - * - decoding: Set/Unset by user. - */ - int height; - - /** - * The frame format. - * - * - encoding: unused - * - decoding: Set/Unset by user. - */ - int format; - - /** - * The pixel format for output image buffers. - * - * - encoding: unused - * - decoding: Set/Unset by user. - */ - OSType cv_pix_fmt_type; - - /** - * The current bitstream buffer. - * - * - encoding: unused - * - decoding: Set/Unset by libavcodec. - */ - uint8_t *priv_bitstream; - - /** - * The current size of the bitstream. - * - * - encoding: unused - * - decoding: Set/Unset by libavcodec. - */ - int priv_bitstream_size; - - /** - * The reference size used for fast reallocation. - * - * - encoding: unused - * - decoding: Set/Unset by libavcodec. - */ - int priv_allocated_size; - - /** - * Use av_buffer to manage buffer. - * When the flag is set, the CVPixelBuffers returned by the decoder will - * be released automatically, so you have to retain them if necessary. - * Not setting this flag may cause memory leak. - * - * encoding: unused - * decoding: Set by user. - */ - int use_ref_buffer; -}; - -/** Create the video decoder. */ -int ff_vda_create_decoder(struct vda_context *vda_ctx, - uint8_t *extradata, - int extradata_size); - -/** Destroy the video decoder. */ -int ff_vda_destroy_decoder(struct vda_context *vda_ctx); - -/** - * @} - */ - -#endif /* AVCODEC_VDA_H */ diff --git a/3rdparty/include/ffmpeg_/libavcodec/vdpau.h b/3rdparty/include/ffmpeg_/libavcodec/vdpau.h deleted file mode 100644 index a8d708cd3b..0000000000 --- a/3rdparty/include/ffmpeg_/libavcodec/vdpau.h +++ /dev/null @@ -1,159 +0,0 @@ -/* - * The Video Decode and Presentation API for UNIX (VDPAU) is used for - * hardware-accelerated decoding of MPEG-1/2, H.264 and VC-1. - * - * Copyright (C) 2008 NVIDIA - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. 
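/*
 * [Editor's sketch, not part of the deleted header] Initialising the
 * vda_context declared above and creating the decoder with
 * ff_vda_create_decoder(). It assumes an H.264 stream whose dimensions and
 * extradata (the avcC record) are already known from the codec context; the
 * 'avc1' format tag and the chosen output pixel format are typical values,
 * not requirements of the API.
 */
#include <string.h>
#include "libavcodec/avcodec.h"
#include "libavcodec/vda.h"

static int my_init_vda(AVCodecContext *avctx, struct vda_context *vda_ctx)
{
    memset(vda_ctx, 0, sizeof(*vda_ctx));
    vda_ctx->width           = avctx->width;   /* "Set/Unset by user" fields   */
    vda_ctx->height          = avctx->height;
    vda_ctx->format          = 'avc1';         /* H.264 sample description tag */
    vda_ctx->cv_pix_fmt_type = kCVPixelFormatType_422YpCbCr8;
    avctx->hwaccel_context   = vda_ctx;
    return ff_vda_create_decoder(vda_ctx, avctx->extradata, avctx->extradata_size);
}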
- * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#ifndef AVCODEC_VDPAU_H -#define AVCODEC_VDPAU_H - -/** - * @file - * @ingroup lavc_codec_hwaccel_vdpau - * Public libavcodec VDPAU header. - */ - - -/** - * @defgroup lavc_codec_hwaccel_vdpau VDPAU Decoder and Renderer - * @ingroup lavc_codec_hwaccel - * - * VDPAU hardware acceleration has two modules - * - VDPAU decoding - * - VDPAU presentation - * - * The VDPAU decoding module parses all headers using FFmpeg - * parsing mechanisms and uses VDPAU for the actual decoding. - * - * As per the current implementation, the actual decoding - * and rendering (API calls) are done as part of the VDPAU - * presentation (vo_vdpau.c) module. - * - * @{ - */ - -#include -#include -#include "libavutil/avconfig.h" - -union FFVdpPictureInfo { - VdpPictureInfoH264 h264; - VdpPictureInfoMPEG1Or2 mpeg; - VdpPictureInfoVC1 vc1; - VdpPictureInfoMPEG4Part2 mpeg4; -}; - -/** - * This structure is used to share data between the libavcodec library and - * the client video application. - * The user shall zero-allocate the structure and make it available as - * AVCodecContext.hwaccel_context. Members can be set by the user once - * during initialization or through each AVCodecContext.get_buffer() - * function call. In any case, they must be valid prior to calling - * decoding functions. - */ -typedef struct AVVDPAUContext { - /** - * VDPAU decoder handle - * - * Set by user. - */ - VdpDecoder decoder; - - /** - * VDPAU decoder render callback - * - * Set by the user. - */ - VdpDecoderRender *render; - - /** - * VDPAU picture information - * - * Set by libavcodec. - */ - union FFVdpPictureInfo info; - - /** - * Allocated size of the bitstream_buffers table. - * - * Set by libavcodec. - */ - int bitstream_buffers_allocated; - - /** - * Useful bitstream buffers in the bitstream buffers table. - * - * Set by libavcodec. - */ - int bitstream_buffers_used; - - /** - * Table of bitstream buffers. - * The user is responsible for freeing this buffer using av_freep(). - * - * Set by libavcodec. - */ - VdpBitstreamBuffer *bitstream_buffers; -} AVVDPAUContext; - - -/** @brief The videoSurface is used for rendering. */ -#define FF_VDPAU_STATE_USED_FOR_RENDER 1 - -/** - * @brief The videoSurface is needed for reference/prediction. - * The codec manipulates this. - */ -#define FF_VDPAU_STATE_USED_FOR_REFERENCE 2 - -/** - * @brief This structure is used as a callback between the FFmpeg - * decoder (vd_) and presentation (vo_) module. - * This is used for defining a video frame containing surface, - * picture parameter, bitstream information etc which are passed - * between the FFmpeg decoder and its clients. - */ -struct vdpau_render_state { - VdpVideoSurface surface; ///< Used as rendered surface, never changed. - - int state; ///< Holds FF_VDPAU_STATE_* values. - -#if AV_HAVE_INCOMPATIBLE_LIBAV_ABI - /** picture parameter information for all supported codecs */ - union FFVdpPictureInfo info; -#endif - - /** Describe size/location of the compressed video data. - Set to 0 when freeing bitstream_buffers. */ - int bitstream_buffers_allocated; - int bitstream_buffers_used; - /** The user is responsible for freeing this buffer using av_freep(). 
*/ - VdpBitstreamBuffer *bitstream_buffers; - -#if !AV_HAVE_INCOMPATIBLE_LIBAV_ABI - /** picture parameter information for all supported codecs */ - union FFVdpPictureInfo info; -#endif -}; - -/* @}*/ - -#endif /* AVCODEC_VDPAU_H */ diff --git a/3rdparty/include/ffmpeg_/libavcodec/version.h b/3rdparty/include/ffmpeg_/libavcodec/version.h deleted file mode 100644 index 32834460b5..0000000000 --- a/3rdparty/include/ffmpeg_/libavcodec/version.h +++ /dev/null @@ -1,95 +0,0 @@ -/* - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#ifndef AVCODEC_VERSION_H -#define AVCODEC_VERSION_H - -/** - * @file - * @ingroup libavc - * Libavcodec version macros. - */ - -#include "libavutil/avutil.h" - -#define LIBAVCODEC_VERSION_MAJOR 55 -#define LIBAVCODEC_VERSION_MINOR 18 -#define LIBAVCODEC_VERSION_MICRO 102 - -#define LIBAVCODEC_VERSION_INT AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \ - LIBAVCODEC_VERSION_MINOR, \ - LIBAVCODEC_VERSION_MICRO) -#define LIBAVCODEC_VERSION AV_VERSION(LIBAVCODEC_VERSION_MAJOR, \ - LIBAVCODEC_VERSION_MINOR, \ - LIBAVCODEC_VERSION_MICRO) -#define LIBAVCODEC_BUILD LIBAVCODEC_VERSION_INT - -#define LIBAVCODEC_IDENT "Lavc" AV_STRINGIFY(LIBAVCODEC_VERSION) - -/** - * FF_API_* defines may be placed below to indicate public API that will be - * dropped at a future version bump. The defines themselves are not part of - * the public API and may change, break or disappear at any time. 
- */ - -#ifndef FF_API_REQUEST_CHANNELS -#define FF_API_REQUEST_CHANNELS (LIBAVCODEC_VERSION_MAJOR < 56) -#endif -#ifndef FF_API_ALLOC_CONTEXT -#define FF_API_ALLOC_CONTEXT (LIBAVCODEC_VERSION_MAJOR < 55) -#endif -#ifndef FF_API_AVCODEC_OPEN -#define FF_API_AVCODEC_OPEN (LIBAVCODEC_VERSION_MAJOR < 55) -#endif -#ifndef FF_API_OLD_DECODE_AUDIO -#define FF_API_OLD_DECODE_AUDIO (LIBAVCODEC_VERSION_MAJOR < 56) -#endif -#ifndef FF_API_OLD_TIMECODE -#define FF_API_OLD_TIMECODE (LIBAVCODEC_VERSION_MAJOR < 55) -#endif - -#ifndef FF_API_OLD_ENCODE_AUDIO -#define FF_API_OLD_ENCODE_AUDIO (LIBAVCODEC_VERSION_MAJOR < 56) -#endif -#ifndef FF_API_OLD_ENCODE_VIDEO -#define FF_API_OLD_ENCODE_VIDEO (LIBAVCODEC_VERSION_MAJOR < 56) -#endif -#ifndef FF_API_CODEC_ID -#define FF_API_CODEC_ID (LIBAVCODEC_VERSION_MAJOR < 56) -#endif -#ifndef FF_API_AVCODEC_RESAMPLE -#define FF_API_AVCODEC_RESAMPLE (LIBAVCODEC_VERSION_MAJOR < 56) -#endif -#ifndef FF_API_DEINTERLACE -#define FF_API_DEINTERLACE (LIBAVCODEC_VERSION_MAJOR < 56) -#endif -#ifndef FF_API_DESTRUCT_PACKET -#define FF_API_DESTRUCT_PACKET (LIBAVCODEC_VERSION_MAJOR < 56) -#endif -#ifndef FF_API_GET_BUFFER -#define FF_API_GET_BUFFER (LIBAVCODEC_VERSION_MAJOR < 56) -#endif -#ifndef FF_API_MISSING_SAMPLE -#define FF_API_MISSING_SAMPLE (LIBAVCODEC_VERSION_MAJOR < 56) -#endif -#ifndef FF_API_LOWRES -#define FF_API_LOWRES (LIBAVCODEC_VERSION_MAJOR < 56) -#endif - -#endif /* AVCODEC_VERSION_H */ diff --git a/3rdparty/include/ffmpeg_/libavcodec/xvmc.h b/3rdparty/include/ffmpeg_/libavcodec/xvmc.h deleted file mode 100644 index b2bf518d0c..0000000000 --- a/3rdparty/include/ffmpeg_/libavcodec/xvmc.h +++ /dev/null @@ -1,168 +0,0 @@ -/* - * Copyright (C) 2003 Ivan Kalvachev - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#ifndef AVCODEC_XVMC_H -#define AVCODEC_XVMC_H - -/** - * @file - * @ingroup lavc_codec_hwaccel_xvmc - * Public libavcodec XvMC header. - */ - -#include - -#include "avcodec.h" - -/** - * @defgroup lavc_codec_hwaccel_xvmc XvMC - * @ingroup lavc_codec_hwaccel - * - * @{ - */ - -#define AV_XVMC_ID 0x1DC711C0 /**< special value to ensure that regular pixel routines haven't corrupted the struct - the number is 1337 speak for the letters IDCT MCo (motion compensation) */ - -struct xvmc_pix_fmt { - /** The field contains the special constant value AV_XVMC_ID. - It is used as a test that the application correctly uses the API, - and that there is no corruption caused by pixel routines. - - application - set during initialization - - libavcodec - unchanged - */ - int xvmc_id; - - /** Pointer to the block array allocated by XvMCCreateBlocks(). - The array has to be freed by XvMCDestroyBlocks(). - Each group of 64 values represents one data block of differential - pixel information (in MoCo mode) or coefficients for IDCT. 
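/*
 * [Editor's sketch, not part of the deleted header] Typical compile-time use
 * of the version macros and FF_API_* guards from libavcodec/version.h above;
 * the version numbers simply mirror the ones defined in that header.
 */
#include "libavcodec/avcodec.h"
#include "libavutil/avutil.h"

#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(55, 18, 102)
#  error "this code expects the bundled libavcodec 55.18.102 or newer"
#endif

#if FF_API_CODEC_ID
/* The legacy CODEC_ID_* names from old_codec_ids.h above are still valid
 * enumerators of enum AVCodecID here; new code should use AV_CODEC_ID_*. */
#endif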
- - application - set the pointer during initialization - - libavcodec - fills coefficients/pixel data into the array - */ - short* data_blocks; - - /** Pointer to the macroblock description array allocated by - XvMCCreateMacroBlocks() and freed by XvMCDestroyMacroBlocks(). - - application - set the pointer during initialization - - libavcodec - fills description data into the array - */ - XvMCMacroBlock* mv_blocks; - - /** Number of macroblock descriptions that can be stored in the mv_blocks - array. - - application - set during initialization - - libavcodec - unchanged - */ - int allocated_mv_blocks; - - /** Number of blocks that can be stored at once in the data_blocks array. - - application - set during initialization - - libavcodec - unchanged - */ - int allocated_data_blocks; - - /** Indicate that the hardware would interpret data_blocks as IDCT - coefficients and perform IDCT on them. - - application - set during initialization - - libavcodec - unchanged - */ - int idct; - - /** In MoCo mode it indicates that intra macroblocks are assumed to be in - unsigned format; same as the XVMC_INTRA_UNSIGNED flag. - - application - set during initialization - - libavcodec - unchanged - */ - int unsigned_intra; - - /** Pointer to the surface allocated by XvMCCreateSurface(). - It has to be freed by XvMCDestroySurface() on application exit. - It identifies the frame and its state on the video hardware. - - application - set during initialization - - libavcodec - unchanged - */ - XvMCSurface* p_surface; - -/** Set by the decoder before calling ff_draw_horiz_band(), - needed by the XvMCRenderSurface function. */ -//@{ - /** Pointer to the surface used as past reference - - application - unchanged - - libavcodec - set - */ - XvMCSurface* p_past_surface; - - /** Pointer to the surface used as future reference - - application - unchanged - - libavcodec - set - */ - XvMCSurface* p_future_surface; - - /** top/bottom field or frame - - application - unchanged - - libavcodec - set - */ - unsigned int picture_structure; - - /** XVMC_SECOND_FIELD - 1st or 2nd field in the sequence - - application - unchanged - - libavcodec - set - */ - unsigned int flags; -//}@ - - /** Number of macroblock descriptions in the mv_blocks array - that have already been passed to the hardware. - - application - zeroes it on get_buffer(). - A successful ff_draw_horiz_band() may increment it - with filled_mb_block_num or zero both. - - libavcodec - unchanged - */ - int start_mv_blocks_num; - - /** Number of new macroblock descriptions in the mv_blocks array (after - start_mv_blocks_num) that are filled by libavcodec and have to be - passed to the hardware. - - application - zeroes it on get_buffer() or after successful - ff_draw_horiz_band(). - - libavcodec - increment with one of each stored MB - */ - int filled_mv_blocks_num; - - /** Number of the next free data block; one data block consists of - 64 short values in the data_blocks array. - All blocks before this one have already been claimed by placing their - position into the corresponding block description structure field, - that are part of the mv_blocks array. - - application - zeroes it on get_buffer(). - A successful ff_draw_horiz_band() may zero it together - with start_mb_blocks_num. - - libavcodec - each decoded macroblock increases it by the number - of coded blocks it contains. 
- */ - int next_free_data_block_num; -}; - -/** - * @} - */ - -#endif /* AVCODEC_XVMC_H */ diff --git a/3rdparty/include/ffmpeg_/libavdevice/avdevice.h b/3rdparty/include/ffmpeg_/libavdevice/avdevice.h deleted file mode 100644 index 93a044f270..0000000000 --- a/3rdparty/include/ffmpeg_/libavdevice/avdevice.h +++ /dev/null @@ -1,69 +0,0 @@ -/* - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#ifndef AVDEVICE_AVDEVICE_H -#define AVDEVICE_AVDEVICE_H - -#include "version.h" - -/** - * @file - * @ingroup lavd - * Main libavdevice API header - */ - -/** - * @defgroup lavd Special devices muxing/demuxing library - * @{ - * Libavdevice is a complementary library to @ref libavf "libavformat". It - * provides various "special" platform-specific muxers and demuxers, e.g. for - * grabbing devices, audio capture and playback etc. As a consequence, the - * (de)muxers in libavdevice are of the AVFMT_NOFILE type (they use their own - * I/O functions). The filename passed to avformat_open_input() often does not - * refer to an actually existing file, but has some special device-specific - * meaning - e.g. for x11grab it is the display name. - * - * To use libavdevice, simply call avdevice_register_all() to register all - * compiled muxers and demuxers. They all use standard libavformat API. - * @} - */ - -#include "libavformat/avformat.h" - -/** - * Return the LIBAVDEVICE_VERSION_INT constant. - */ -unsigned avdevice_version(void); - -/** - * Return the libavdevice build-time configuration. - */ -const char *avdevice_configuration(void); - -/** - * Return the libavdevice license. - */ -const char *avdevice_license(void); - -/** - * Initialize libavdevice and register all the input and output devices. - * @warning This function is not thread safe. - */ -void avdevice_register_all(void); - -#endif /* AVDEVICE_AVDEVICE_H */ diff --git a/3rdparty/include/ffmpeg_/libavdevice/version.h b/3rdparty/include/ffmpeg_/libavdevice/version.h deleted file mode 100644 index 1e18f51d4a..0000000000 --- a/3rdparty/include/ffmpeg_/libavdevice/version.h +++ /dev/null @@ -1,50 +0,0 @@ -/* - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. 
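/*
 * [Editor's sketch, not part of the deleted header] Using libavdevice as
 * described in avdevice.h above: register the device (de)muxers, then open
 * one through the ordinary libavformat API. The x11grab demuxer comes from
 * the documentation above; the ":0.0" display name is a hypothetical example,
 * and error handling is reduced to the bare minimum.
 */
#include "libavdevice/avdevice.h"
#include "libavformat/avformat.h"

static int my_open_x11grab(AVFormatContext **out)
{
    AVInputFormat *grab;

    av_register_all();        /* libavformat muxers, demuxers and protocols */
    avdevice_register_all();  /* adds the "special" device (de)muxers       */

    grab = av_find_input_format("x11grab");
    if (!grab)
        return AVERROR_DEMUXER_NOT_FOUND;
    /* for x11grab the "filename" is a display name, not an existing file */
    return avformat_open_input(out, ":0.0", grab, NULL);
}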
- * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#ifndef AVDEVICE_VERSION_H -#define AVDEVICE_VERSION_H - -/** - * @file - * @ingroup lavd - * Libavdevice version macros - */ - -#include "libavutil/avutil.h" - -#define LIBAVDEVICE_VERSION_MAJOR 55 -#define LIBAVDEVICE_VERSION_MINOR 3 -#define LIBAVDEVICE_VERSION_MICRO 100 - -#define LIBAVDEVICE_VERSION_INT AV_VERSION_INT(LIBAVDEVICE_VERSION_MAJOR, \ - LIBAVDEVICE_VERSION_MINOR, \ - LIBAVDEVICE_VERSION_MICRO) -#define LIBAVDEVICE_VERSION AV_VERSION(LIBAVDEVICE_VERSION_MAJOR, \ - LIBAVDEVICE_VERSION_MINOR, \ - LIBAVDEVICE_VERSION_MICRO) -#define LIBAVDEVICE_BUILD LIBAVDEVICE_VERSION_INT - -#define LIBAVDEVICE_IDENT "Lavd" AV_STRINGIFY(LIBAVDEVICE_VERSION) - -/** - * FF_API_* defines may be placed below to indicate public API that will be - * dropped at a future version bump. The defines themselves are not part of - * the public API and may change, break or disappear at any time. - */ - -#endif /* AVDEVICE_VERSION_H */ diff --git a/3rdparty/include/ffmpeg_/libavformat/avformat.h b/3rdparty/include/ffmpeg_/libavformat/avformat.h deleted file mode 100644 index 04fad94219..0000000000 --- a/3rdparty/include/ffmpeg_/libavformat/avformat.h +++ /dev/null @@ -1,2181 +0,0 @@ -/* - * copyright (c) 2001 Fabrice Bellard - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#ifndef AVFORMAT_AVFORMAT_H -#define AVFORMAT_AVFORMAT_H - -/** - * @file - * @ingroup libavf - * Main libavformat public API header - */ - -/** - * @defgroup libavf I/O and Muxing/Demuxing Library - * @{ - * - * Libavformat (lavf) is a library for dealing with various media container - * formats. Its main two purposes are demuxing - i.e. splitting a media file - * into component streams, and the reverse process of muxing - writing supplied - * data in a specified container format. It also has an @ref lavf_io - * "I/O module" which supports a number of protocols for accessing the data (e.g. - * file, tcp, http and others). Before using lavf, you need to call - * av_register_all() to register all compiled muxers, demuxers and protocols. - * Unless you are absolutely sure you won't use libavformat's network - * capabilities, you should also call avformat_network_init(). - * - * A supported input format is described by an AVInputFormat struct, conversely - * an output format is described by AVOutputFormat. You can iterate over all - * registered input/output formats using the av_iformat_next() / - * av_oformat_next() functions. The protocols layer is not part of the public - * API, so you can only get the names of supported protocols with the - * avio_enum_protocols() function. 
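/*
 * [Editor's sketch, not part of the deleted header] Iterating over the
 * registered (de)muxers with av_iformat_next()/av_oformat_next(), as the
 * overview above describes; av_register_all() must be called first.
 */
#include <stdio.h>
#include "libavformat/avformat.h"

static void my_list_formats(void)
{
    AVInputFormat  *ifmt = NULL;
    AVOutputFormat *ofmt = NULL;

    av_register_all();
    while ((ifmt = av_iformat_next(ifmt)))
        printf("demuxer: %s\n", ifmt->name);
    while ((ofmt = av_oformat_next(ofmt)))
        printf("muxer:   %s\n", ofmt->name);
}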
- * - * Main lavf structure used for both muxing and demuxing is AVFormatContext, - * which exports all information about the file being read or written. As with - * most Libavformat structures, its size is not part of public ABI, so it cannot be - * allocated on stack or directly with av_malloc(). To create an - * AVFormatContext, use avformat_alloc_context() (some functions, like - * avformat_open_input() might do that for you). - * - * Most importantly an AVFormatContext contains: - * @li the @ref AVFormatContext.iformat "input" or @ref AVFormatContext.oformat - * "output" format. It is either autodetected or set by user for input; - * always set by user for output. - * @li an @ref AVFormatContext.streams "array" of AVStreams, which describe all - * elementary streams stored in the file. AVStreams are typically referred to - * using their index in this array. - * @li an @ref AVFormatContext.pb "I/O context". It is either opened by lavf or - * set by user for input, always set by user for output (unless you are dealing - * with an AVFMT_NOFILE format). - * - * @section lavf_options Passing options to (de)muxers - * Lavf allows to configure muxers and demuxers using the @ref avoptions - * mechanism. Generic (format-independent) libavformat options are provided by - * AVFormatContext, they can be examined from a user program by calling - * av_opt_next() / av_opt_find() on an allocated AVFormatContext (or its AVClass - * from avformat_get_class()). Private (format-specific) options are provided by - * AVFormatContext.priv_data if and only if AVInputFormat.priv_class / - * AVOutputFormat.priv_class of the corresponding format struct is non-NULL. - * Further options may be provided by the @ref AVFormatContext.pb "I/O context", - * if its AVClass is non-NULL, and the protocols layer. See the discussion on - * nesting in @ref avoptions documentation to learn how to access those. - * - * @defgroup lavf_decoding Demuxing - * @{ - * Demuxers read a media file and split it into chunks of data (@em packets). A - * @ref AVPacket "packet" contains one or more encoded frames which belongs to a - * single elementary stream. In the lavf API this process is represented by the - * avformat_open_input() function for opening a file, av_read_frame() for - * reading a single packet and finally avformat_close_input(), which does the - * cleanup. - * - * @section lavf_decoding_open Opening a media file - * The minimum information required to open a file is its URL or filename, which - * is passed to avformat_open_input(), as in the following code: - * @code - * const char *url = "in.mp3"; - * AVFormatContext *s = NULL; - * int ret = avformat_open_input(&s, url, NULL, NULL); - * if (ret < 0) - * abort(); - * @endcode - * The above code attempts to allocate an AVFormatContext, open the - * specified file (autodetecting the format) and read the header, exporting the - * information stored there into s. Some formats do not have a header or do not - * store enough information there, so it is recommended that you call the - * avformat_find_stream_info() function which tries to read and decode a few - * frames to find missing information. - * - * In some cases you might want to preallocate an AVFormatContext yourself with - * avformat_alloc_context() and do some tweaking on it before passing it to - * avformat_open_input(). One such case is when you want to use custom functions - * for reading input data instead of lavf internal I/O layer. 
- * To do that, create your own AVIOContext with avio_alloc_context(), passing - * your reading callbacks to it. Then set the @em pb field of your - * AVFormatContext to newly created AVIOContext. - * - * Since the format of the opened file is in general not known until after - * avformat_open_input() has returned, it is not possible to set demuxer private - * options on a preallocated context. Instead, the options should be passed to - * avformat_open_input() wrapped in an AVDictionary: - * @code - * AVDictionary *options = NULL; - * av_dict_set(&options, "video_size", "640x480", 0); - * av_dict_set(&options, "pixel_format", "rgb24", 0); - * - * if (avformat_open_input(&s, url, NULL, &options) < 0) - * abort(); - * av_dict_free(&options); - * @endcode - * This code passes the private options 'video_size' and 'pixel_format' to the - * demuxer. They would be necessary for e.g. the rawvideo demuxer, since it - * cannot know how to interpret raw video data otherwise. If the format turns - * out to be something different than raw video, those options will not be - * recognized by the demuxer and therefore will not be applied. Such unrecognized - * options are then returned in the options dictionary (recognized options are - * consumed). The calling program can handle such unrecognized options as it - * wishes, e.g. - * @code - * AVDictionaryEntry *e; - * if (e = av_dict_get(options, "", NULL, AV_DICT_IGNORE_SUFFIX)) { - * fprintf(stderr, "Option %s not recognized by the demuxer.\n", e->key); - * abort(); - * } - * @endcode - * - * After you have finished reading the file, you must close it with - * avformat_close_input(). It will free everything associated with the file. - * - * @section lavf_decoding_read Reading from an opened file - * Reading data from an opened AVFormatContext is done by repeatedly calling - * av_read_frame() on it. Each call, if successful, will return an AVPacket - * containing encoded data for one AVStream, identified by - * AVPacket.stream_index. This packet may be passed straight into the libavcodec - * decoding functions avcodec_decode_video2(), avcodec_decode_audio4() or - * avcodec_decode_subtitle2() if the caller wishes to decode the data. - * - * AVPacket.pts, AVPacket.dts and AVPacket.duration timing information will be - * set if known. They may also be unset (i.e. AV_NOPTS_VALUE for - * pts/dts, 0 for duration) if the stream does not provide them. The timing - * information will be in AVStream.time_base units, i.e. it has to be - * multiplied by the timebase to convert them to seconds. - * - * If AVPacket.buf is set on the returned packet, then the packet is - * allocated dynamically and the user may keep it indefinitely. - * Otherwise, if AVPacket.buf is NULL, the packet data is backed by a - * static storage somewhere inside the demuxer and the packet is only valid - * until the next av_read_frame() call or closing the file. If the caller - * requires a longer lifetime, av_dup_packet() will make an av_malloc()ed copy - * of it. - * In both cases, the packet must be freed with av_free_packet() when it is no - * longer needed. 
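/*
 * [Editor's sketch, not part of the deleted header] The complete
 * open / read / free / close cycle described above, combining the
 * avformat_open_input() and av_read_frame() usage from this documentation
 * into one routine.
 */
#include "libavformat/avformat.h"

static int my_dump_packets(const char *url)
{
    AVFormatContext *s = NULL;
    AVPacket pkt;
    int ret;

    av_register_all();
    if ((ret = avformat_open_input(&s, url, NULL, NULL)) < 0)
        return ret;
    if ((ret = avformat_find_stream_info(s, NULL)) < 0) {  /* fill in missing stream info */
        avformat_close_input(&s);
        return ret;
    }
    while (av_read_frame(s, &pkt) >= 0) {
        /* pkt.stream_index identifies the AVStream this packet belongs to */
        av_free_packet(&pkt);   /* required whether or not pkt.buf is set  */
    }
    avformat_close_input(&s);   /* frees everything associated with the file */
    return 0;
}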
- * - * @section lavf_decoding_seek Seeking - * @} - * - * @defgroup lavf_encoding Muxing - * @{ - * @} - * - * @defgroup lavf_io I/O Read/Write - * @{ - * @} - * - * @defgroup lavf_codec Demuxers - * @{ - * @defgroup lavf_codec_native Native Demuxers - * @{ - * @} - * @defgroup lavf_codec_wrappers External library wrappers - * @{ - * @} - * @} - * @defgroup lavf_protos I/O Protocols - * @{ - * @} - * @defgroup lavf_internal Internal - * @{ - * @} - * @} - * - */ - -#include -#include /* FILE */ -#include "libavcodec/avcodec.h" -#include "libavutil/dict.h" -#include "libavutil/log.h" - -#include "avio.h" -#include "libavformat/version.h" - -struct AVFormatContext; - - -/** - * @defgroup metadata_api Public Metadata API - * @{ - * @ingroup libavf - * The metadata API allows libavformat to export metadata tags to a client - * application when demuxing. Conversely it allows a client application to - * set metadata when muxing. - * - * Metadata is exported or set as pairs of key/value strings in the 'metadata' - * fields of the AVFormatContext, AVStream, AVChapter and AVProgram structs - * using the @ref lavu_dict "AVDictionary" API. Like all strings in FFmpeg, - * metadata is assumed to be UTF-8 encoded Unicode. Note that metadata - * exported by demuxers isn't checked to be valid UTF-8 in most cases. - * - * Important concepts to keep in mind: - * - Keys are unique; there can never be 2 tags with the same key. This is - * also meant semantically, i.e., a demuxer should not knowingly produce - * several keys that are literally different but semantically identical. - * E.g., key=Author5, key=Author6. In this example, all authors must be - * placed in the same tag. - * - Metadata is flat, not hierarchical; there are no subtags. If you - * want to store, e.g., the email address of the child of producer Alice - * and actor Bob, that could have key=alice_and_bobs_childs_email_address. - * - Several modifiers can be applied to the tag name. This is done by - * appending a dash character ('-') and the modifier name in the order - * they appear in the list below -- e.g. foo-eng-sort, not foo-sort-eng. - * - language -- a tag whose value is localized for a particular language - * is appended with the ISO 639-2/B 3-letter language code. - * For example: Author-ger=Michael, Author-eng=Mike - * The original/default language is in the unqualified "Author" tag. - * A demuxer should set a default if it sets any translated tag. - * - sorting -- a modified version of a tag that should be used for - * sorting will have '-sort' appended. E.g. artist="The Beatles", - * artist-sort="Beatles, The". - * - * - Demuxers attempt to export metadata in a generic format, however tags - * with no generic equivalents are left as they are stored in the container. - * Follows a list of generic tag names: - * - @verbatim - album -- name of the set this work belongs to - album_artist -- main creator of the set/album, if different from artist. - e.g. "Various Artists" for compilation albums. - artist -- main creator of the work - comment -- any additional description of the file. - composer -- who composed the work, if different from artist. - copyright -- name of copyright holder. - creation_time-- date when the file was created, preferably in ISO 8601. - date -- date when the work was created, preferably in ISO 8601. - disc -- number of a subset, e.g. disc in a multi-disc collection. - encoder -- name/settings of the software/hardware that produced the file. - encoded_by -- person/group who created the file. 
- filename -- original name of the file. - genre -- . - language -- main language in which the work is performed, preferably - in ISO 639-2 format. Multiple languages can be specified by - separating them with commas. - performer -- artist who performed the work, if different from artist. - E.g for "Also sprach Zarathustra", artist would be "Richard - Strauss" and performer "London Philharmonic Orchestra". - publisher -- name of the label/publisher. - service_name -- name of the service in broadcasting (channel name). - service_provider -- name of the service provider in broadcasting. - title -- name of the work. - track -- number of this work in the set, can be in form current/total. - variant_bitrate -- the total bitrate of the bitrate variant that the current stream is part of - @endverbatim - * - * Look in the examples section for an application example how to use the Metadata API. - * - * @} - */ - -/* packet functions */ - - -/** - * Allocate and read the payload of a packet and initialize its - * fields with default values. - * - * @param pkt packet - * @param size desired payload size - * @return >0 (read size) if OK, AVERROR_xxx otherwise - */ -int av_get_packet(AVIOContext *s, AVPacket *pkt, int size); - - -/** - * Read data and append it to the current content of the AVPacket. - * If pkt->size is 0 this is identical to av_get_packet. - * Note that this uses av_grow_packet and thus involves a realloc - * which is inefficient. Thus this function should only be used - * when there is no reasonable way to know (an upper bound of) - * the final size. - * - * @param pkt packet - * @param size amount of data to read - * @return >0 (read size) if OK, AVERROR_xxx otherwise, previous data - * will not be lost even if an error occurs. - */ -int av_append_packet(AVIOContext *s, AVPacket *pkt, int size); - -/*************************************************/ -/* fractional numbers for exact pts handling */ - -/** - * The exact value of the fractional number is: 'val + num / den'. - * num is assumed to be 0 <= num < den. - */ -typedef struct AVFrac { - int64_t val, num, den; -} AVFrac; - -/*************************************************/ -/* input/output formats */ - -struct AVCodecTag; - -/** - * This structure contains the data a format has to probe a file. - */ -typedef struct AVProbeData { - const char *filename; - unsigned char *buf; /**< Buffer must have AVPROBE_PADDING_SIZE of extra allocated bytes filled with zero. */ - int buf_size; /**< Size of buf except extra allocated bytes */ -} AVProbeData; - -#define AVPROBE_SCORE_RETRY (AVPROBE_SCORE_MAX/4) -#define AVPROBE_SCORE_EXTENSION 50 ///< score for file extension -#define AVPROBE_SCORE_MAX 100 ///< maximum score - -#define AVPROBE_PADDING_SIZE 32 ///< extra allocated bytes at the end of the probe buffer - -/// Demuxer will use avio_open, no opened file should be provided by the caller. -#define AVFMT_NOFILE 0x0001 -#define AVFMT_NEEDNUMBER 0x0002 /**< Needs '%d' in filename. */ -#define AVFMT_SHOW_IDS 0x0008 /**< Show format stream IDs numbers. */ -#define AVFMT_RAWPICTURE 0x0020 /**< Format wants AVPicture structure for - raw picture data. */ -#define AVFMT_GLOBALHEADER 0x0040 /**< Format wants global header. */ -#define AVFMT_NOTIMESTAMPS 0x0080 /**< Format does not need / have any timestamps. */ -#define AVFMT_GENERIC_INDEX 0x0100 /**< Use generic index building code. */ -#define AVFMT_TS_DISCONT 0x0200 /**< Format allows timestamp discontinuities. 
Note, muxers always require valid (monotone) timestamps */ -#define AVFMT_VARIABLE_FPS 0x0400 /**< Format allows variable fps. */ -#define AVFMT_NODIMENSIONS 0x0800 /**< Format does not need width/height */ -#define AVFMT_NOSTREAMS 0x1000 /**< Format does not require any streams */ -#define AVFMT_NOBINSEARCH 0x2000 /**< Format does not allow to fall back on binary search via read_timestamp */ -#define AVFMT_NOGENSEARCH 0x4000 /**< Format does not allow to fall back on generic search */ -#define AVFMT_NO_BYTE_SEEK 0x8000 /**< Format does not allow seeking by bytes */ -#define AVFMT_ALLOW_FLUSH 0x10000 /**< Format allows flushing. If not set, the muxer will not receive a NULL packet in the write_packet function. */ -#if LIBAVFORMAT_VERSION_MAJOR <= 54 -#define AVFMT_TS_NONSTRICT 0x8020000 //we try to be compatible to the ABIs of ffmpeg and major forks -#else -#define AVFMT_TS_NONSTRICT 0x20000 -#endif - /**< Format does not require strictly - increasing timestamps, but they must - still be monotonic */ -#define AVFMT_TS_NEGATIVE 0x40000 /**< Format allows muxing negative - timestamps. If not set the timestamp - will be shifted in av_write_frame and - av_interleaved_write_frame so they - start from 0. - The user or muxer can override this through - AVFormatContext.avoid_negative_ts - */ - -#define AVFMT_SEEK_TO_PTS 0x4000000 /**< Seeking is based on PTS */ - -/** - * @addtogroup lavf_encoding - * @{ - */ -typedef struct AVOutputFormat { - const char *name; - /** - * Descriptive name for the format, meant to be more human-readable - * than name. You should use the NULL_IF_CONFIG_SMALL() macro - * to define it. - */ - const char *long_name; - const char *mime_type; - const char *extensions; /**< comma-separated filename extensions */ - /* output support */ - enum AVCodecID audio_codec; /**< default audio codec */ - enum AVCodecID video_codec; /**< default video codec */ - enum AVCodecID subtitle_codec; /**< default subtitle codec */ - /** - * can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_RAWPICTURE, - * AVFMT_GLOBALHEADER, AVFMT_NOTIMESTAMPS, AVFMT_VARIABLE_FPS, - * AVFMT_NODIMENSIONS, AVFMT_NOSTREAMS, AVFMT_ALLOW_FLUSH, - * AVFMT_TS_NONSTRICT - */ - int flags; - - /** - * List of supported codec_id-codec_tag pairs, ordered by "better - * choice first". The arrays are all terminated by AV_CODEC_ID_NONE. - */ - const struct AVCodecTag * const *codec_tag; - - - const AVClass *priv_class; ///< AVClass for the private context - - /***************************************************************** - * No fields below this line are part of the public API. They - * may not be used outside of libavformat and can be changed and - * removed at will. - * New public fields should be added right above. - ***************************************************************** - */ - struct AVOutputFormat *next; - /** - * size of private data so that it can be allocated in the wrapper - */ - int priv_data_size; - - int (*write_header)(struct AVFormatContext *); - /** - * Write a packet. If AVFMT_ALLOW_FLUSH is set in flags, - * pkt can be NULL in order to flush data buffered in the muxer. - * When flushing, return 0 if there still is more data to flush, - * or 1 if everything was flushed and there is no more buffered - * data. - */ - int (*write_packet)(struct AVFormatContext *, AVPacket *pkt); - int (*write_trailer)(struct AVFormatContext *); - /** - * Currently only used to set pixel format if not YUV420P. 
- */ - int (*interleave_packet)(struct AVFormatContext *, AVPacket *out, - AVPacket *in, int flush); - /** - * Test if the given codec can be stored in this container. - * - * @return 1 if the codec is supported, 0 if it is not. - * A negative number if unknown. - * MKTAG('A', 'P', 'I', 'C') if the codec is only supported as AV_DISPOSITION_ATTACHED_PIC - */ - int (*query_codec)(enum AVCodecID id, int std_compliance); - - void (*get_output_timestamp)(struct AVFormatContext *s, int stream, - int64_t *dts, int64_t *wall); -} AVOutputFormat; -/** - * @} - */ - -/** - * @addtogroup lavf_decoding - * @{ - */ -typedef struct AVInputFormat { - /** - * A comma separated list of short names for the format. New names - * may be appended with a minor bump. - */ - const char *name; - - /** - * Descriptive name for the format, meant to be more human-readable - * than name. You should use the NULL_IF_CONFIG_SMALL() macro - * to define it. - */ - const char *long_name; - - /** - * Can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_SHOW_IDS, - * AVFMT_GENERIC_INDEX, AVFMT_TS_DISCONT, AVFMT_NOBINSEARCH, - * AVFMT_NOGENSEARCH, AVFMT_NO_BYTE_SEEK, AVFMT_SEEK_TO_PTS. - */ - int flags; - - /** - * If extensions are defined, then no probe is done. You should - * usually not use extension format guessing because it is not - * reliable enough - */ - const char *extensions; - - const struct AVCodecTag * const *codec_tag; - - const AVClass *priv_class; ///< AVClass for the private context - - /***************************************************************** - * No fields below this line are part of the public API. They - * may not be used outside of libavformat and can be changed and - * removed at will. - * New public fields should be added right above. - ***************************************************************** - */ - struct AVInputFormat *next; - - /** - * Raw demuxers store their codec ID here. - */ - int raw_codec_id; - - /** - * Size of private data so that it can be allocated in the wrapper. - */ - int priv_data_size; - - /** - * Tell if a given file has a chance of being parsed as this format. - * The buffer provided is guaranteed to be AVPROBE_PADDING_SIZE bytes - * big so you do not have to check for that unless you need more. - */ - int (*read_probe)(AVProbeData *); - - /** - * Read the format header and initialize the AVFormatContext - * structure. Return 0 if OK. Only used in raw format right - * now. 'avformat_new_stream' should be called to create new streams. - */ - int (*read_header)(struct AVFormatContext *); - - /** - * Read one packet and put it in 'pkt'. pts and flags are also - * set. 'avformat_new_stream' can be called only if the flag - * AVFMTCTX_NOHEADER is used and only in the calling thread (not in a - * background thread). - * @return 0 on success, < 0 on error. - * When returning an error, pkt must not have been allocated - * or must be freed before returning - */ - int (*read_packet)(struct AVFormatContext *, AVPacket *pkt); - - /** - * Close the stream. The AVFormatContext and AVStreams are not - * freed by this function - */ - int (*read_close)(struct AVFormatContext *); - - /** - * Seek to a given timestamp relative to the frames in - * stream component stream_index. - * @param stream_index Must not be -1. - * @param flags Selects which direction should be preferred if no exact - * match is available. 
- * @return >= 0 on success (but not necessarily the new offset) - */ - int (*read_seek)(struct AVFormatContext *, - int stream_index, int64_t timestamp, int flags); - - /** - * Get the next timestamp in stream[stream_index].time_base units. - * @return the timestamp or AV_NOPTS_VALUE if an error occurred - */ - int64_t (*read_timestamp)(struct AVFormatContext *s, int stream_index, - int64_t *pos, int64_t pos_limit); - - /** - * Start/resume playing - only meaningful if using a network-based format - * (RTSP). - */ - int (*read_play)(struct AVFormatContext *); - - /** - * Pause playing - only meaningful if using a network-based format - * (RTSP). - */ - int (*read_pause)(struct AVFormatContext *); - - /** - * Seek to timestamp ts. - * Seeking will be done so that the point from which all active streams - * can be presented successfully will be closest to ts and within min/max_ts. - * Active streams are all streams that have AVStream.discard < AVDISCARD_ALL. - */ - int (*read_seek2)(struct AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags); -} AVInputFormat; -/** - * @} - */ - -enum AVStreamParseType { - AVSTREAM_PARSE_NONE, - AVSTREAM_PARSE_FULL, /**< full parsing and repack */ - AVSTREAM_PARSE_HEADERS, /**< Only parse headers, do not repack. */ - AVSTREAM_PARSE_TIMESTAMPS, /**< full parsing and interpolation of timestamps for frames not starting on a packet boundary */ - AVSTREAM_PARSE_FULL_ONCE, /**< full parsing and repack of the first frame only, only implemented for H.264 currently */ - AVSTREAM_PARSE_FULL_RAW=MKTAG(0,'R','A','W'), /**< full parsing and repack with timestamp and position generation by parser for raw - this assumes that each packet in the file contains no demuxer level headers and - just codec level data, otherwise position generation would fail */ -}; - -typedef struct AVIndexEntry { - int64_t pos; - int64_t timestamp; /**< - * Timestamp in AVStream.time_base units, preferably the time from which on correctly decoded frames are available - * when seeking to this entry. That means preferable PTS on keyframe based formats. - * But demuxers can choose to store a different timestamp, if it is more convenient for the implementation or nothing better - * is known - */ -#define AVINDEX_KEYFRAME 0x0001 - int flags:2; - int size:30; //Yeah, trying to keep the size of this small to reduce memory requirements (it is 24 vs. 32 bytes due to possible 8-byte alignment). - int min_distance; /**< Minimum distance between this and the previous keyframe, used to avoid unneeded searching. */ -} AVIndexEntry; - -#define AV_DISPOSITION_DEFAULT 0x0001 -#define AV_DISPOSITION_DUB 0x0002 -#define AV_DISPOSITION_ORIGINAL 0x0004 -#define AV_DISPOSITION_COMMENT 0x0008 -#define AV_DISPOSITION_LYRICS 0x0010 -#define AV_DISPOSITION_KARAOKE 0x0020 - -/** - * Track should be used during playback by default. - * Useful for subtitle track that should be displayed - * even when user did not explicitly ask for subtitles. - */ -#define AV_DISPOSITION_FORCED 0x0040 -#define AV_DISPOSITION_HEARING_IMPAIRED 0x0080 /**< stream for hearing impaired audiences */ -#define AV_DISPOSITION_VISUAL_IMPAIRED 0x0100 /**< stream for visual impaired audiences */ -#define AV_DISPOSITION_CLEAN_EFFECTS 0x0200 /**< stream without voice */ -/** - * The stream is stored in the file as an attached picture/"cover art" (e.g. - * APIC frame in ID3v2). The single packet associated with it will be returned - * among the first few packets read from the file unless seeking takes place. 
- * It can also be accessed at any time in AVStream.attached_pic. - */ -#define AV_DISPOSITION_ATTACHED_PIC 0x0400 - -/** - * To specify text track kind (different from subtitles default). - */ -#define AV_DISPOSITION_CAPTIONS 0x10000 -#define AV_DISPOSITION_DESCRIPTIONS 0x20000 -#define AV_DISPOSITION_METADATA 0x40000 - -/** - * Options for behavior on timestamp wrap detection. - */ -#define AV_PTS_WRAP_IGNORE 0 ///< ignore the wrap -#define AV_PTS_WRAP_ADD_OFFSET 1 ///< add the format specific offset on wrap detection -#define AV_PTS_WRAP_SUB_OFFSET -1 ///< subtract the format specific offset on wrap detection - -/** - * Stream structure. - * New fields can be added to the end with minor version bumps. - * Removal, reordering and changes to existing fields require a major - * version bump. - * sizeof(AVStream) must not be used outside libav*. - */ -typedef struct AVStream { - int index; /**< stream index in AVFormatContext */ - /** - * Format-specific stream ID. - * decoding: set by libavformat - * encoding: set by the user, replaced by libavformat if left unset - */ - int id; - /** - * Codec context associated with this stream. Allocated and freed by - * libavformat. - * - * - decoding: The demuxer exports codec information stored in the headers - * here. - * - encoding: The user sets codec information, the muxer writes it to the - * output. Mandatory fields as specified in AVCodecContext - * documentation must be set even if this AVCodecContext is - * not actually used for encoding. - */ - AVCodecContext *codec; - void *priv_data; - - /** - * encoding: pts generation when outputting stream - */ - struct AVFrac pts; - - /** - * This is the fundamental unit of time (in seconds) in terms - * of which frame timestamps are represented. - * - * decoding: set by libavformat - * encoding: set by libavformat in avformat_write_header. The muxer may use the - * user-provided value of @ref AVCodecContext.time_base "codec->time_base" - * as a hint. - */ - AVRational time_base; - - /** - * Decoding: pts of the first frame of the stream in presentation order, in stream time base. - * Only set this if you are absolutely 100% sure that the value you set - * it to really is the pts of the first frame. - * This may be undefined (AV_NOPTS_VALUE). - * @note The ASF header does NOT contain a correct start_time the ASF - * demuxer must NOT set this. - */ - int64_t start_time; - - /** - * Decoding: duration of the stream, in stream time base. - * If a source file does not specify a duration, but does specify - * a bitrate, this value will be estimated from bitrate and file size. - */ - int64_t duration; - - int64_t nb_frames; ///< number of frames in this stream if known or 0 - - int disposition; /**< AV_DISPOSITION_* bit field */ - - enum AVDiscard discard; ///< Selects which packets can be discarded at will and do not need to be demuxed. - - /** - * sample aspect ratio (0 if unknown) - * - encoding: Set by user. - * - decoding: Set by libavformat. - */ - AVRational sample_aspect_ratio; - - AVDictionary *metadata; - - /** - * Average framerate - */ - AVRational avg_frame_rate; - - /** - * For streams with AV_DISPOSITION_ATTACHED_PIC disposition, this packet - * will contain the attached picture. - * - * decoding: set by libavformat, must not be modified by the caller. - * encoding: unused - */ - AVPacket attached_pic; - - /***************************************************************** - * All fields below this line are not part of the public API. 
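[Editor's sketch] The public AVStream fields documented above (time_base, duration, disposition, attached_pic) are the ones callers normally read. A minimal sketch, assuming a stream obtained from an already-opened AVFormatContext; the function name and printf output are illustrative only:

    #include <stdio.h>
    #include <libavformat/avformat.h>

    /* Report the stream duration in seconds and detect embedded cover art,
       using only the public AVStream fields documented above. */
    static void inspect_stream(const AVStream *st)
    {
        if (st->duration != AV_NOPTS_VALUE) {
            double seconds = st->duration * av_q2d(st->time_base);
            printf("stream %d: %.3f s, %lld frames\n",
                   st->index, seconds, (long long)st->nb_frames);
        }
        if (st->disposition & AV_DISPOSITION_ATTACHED_PIC)
            printf("stream %d carries an attached picture, %d bytes\n",
                   st->index, st->attached_pic.size);
    }
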
They - * may not be used outside of libavformat and can be changed and - * removed at will. - * New public fields should be added right above. - ***************************************************************** - */ - - /** - * Stream information used internally by av_find_stream_info() - */ -#define MAX_STD_TIMEBASES (60*12+6) - struct { - int64_t last_dts; - int64_t duration_gcd; - int duration_count; - double (*duration_error)[2][MAX_STD_TIMEBASES]; - int64_t codec_info_duration; - int64_t codec_info_duration_fields; - int found_decoder; - - int64_t last_duration; - - /** - * Those are used for average framerate estimation. - */ - int64_t fps_first_dts; - int fps_first_dts_idx; - int64_t fps_last_dts; - int fps_last_dts_idx; - - } *info; - - int pts_wrap_bits; /**< number of bits in pts (used for wrapping control) */ - - // Timestamp generation support: - /** - * Timestamp corresponding to the last dts sync point. - * - * Initialized when AVCodecParserContext.dts_sync_point >= 0 and - * a DTS is received from the underlying container. Otherwise set to - * AV_NOPTS_VALUE by default. - */ - int64_t reference_dts; - int64_t first_dts; - int64_t cur_dts; - int64_t last_IP_pts; - int last_IP_duration; - - /** - * Number of packets to buffer for codec probing - */ -#define MAX_PROBE_PACKETS 2500 - int probe_packets; - - /** - * Number of frames that have been demuxed during av_find_stream_info() - */ - int codec_info_nb_frames; - - /* av_read_frame() support */ - enum AVStreamParseType need_parsing; - struct AVCodecParserContext *parser; - - /** - * last packet in packet_buffer for this stream when muxing. - */ - struct AVPacketList *last_in_packet_buffer; - AVProbeData probe_data; -#define MAX_REORDER_DELAY 16 - int64_t pts_buffer[MAX_REORDER_DELAY+1]; - - AVIndexEntry *index_entries; /**< Only used if the format does not - support seeking natively. */ - int nb_index_entries; - unsigned int index_entries_allocated_size; - - /** - * Real base framerate of the stream. - * This is the lowest framerate with which all timestamps can be - * represented accurately (it is the least common multiple of all - * framerates in the stream). Note, this value is just a guess! - * For example, if the time base is 1/90000 and all frames have either - * approximately 3600 or 1800 timer ticks, then r_frame_rate will be 50/1. - * - * Code outside avformat should access this field using: - * av_stream_get/set_r_frame_rate(stream) - */ - AVRational r_frame_rate; - - /** - * Stream Identifier - * This is the MPEG-TS stream identifier +1 - * 0 means unknown - */ - int stream_identifier; - - int64_t interleaver_chunk_size; - int64_t interleaver_chunk_duration; - - /** - * stream probing state - * -1 -> probing finished - * 0 -> no probing requested - * rest -> perform probing with request_probe being the minimum score to accept. - * NOT PART OF PUBLIC API - */ - int request_probe; - /** - * Indicates that everything up to the next keyframe - * should be discarded. - */ - int skip_to_keyframe; - - /** - * Number of samples to skip at the start of the frame decoded from the next packet. - */ - int skip_samples; - - /** - * Number of internally decoded frames, used internally in libavformat, do not access - * its lifetime differs from info which is why it is not in that structure. 
- */ - int nb_decoded_frames; - - /** - * Timestamp offset added to timestamps before muxing - * NOT PART OF PUBLIC API - */ - int64_t mux_ts_offset; - - /** - * Internal data to check for wrapping of the time stamp - */ - int64_t pts_wrap_reference; - - /** - * Options for behavior, when a wrap is detected. - * - * Defined by AV_PTS_WRAP_ values. - * - * If correction is enabled, there are two possibilities: - * If the first time stamp is near the wrap point, the wrap offset - * will be subtracted, which will create negative time stamps. - * Otherwise the offset will be added. - */ - int pts_wrap_behavior; - -} AVStream; - -AVRational av_stream_get_r_frame_rate(const AVStream *s); -void av_stream_set_r_frame_rate(AVStream *s, AVRational r); - -#define AV_PROGRAM_RUNNING 1 - -/** - * New fields can be added to the end with minor version bumps. - * Removal, reordering and changes to existing fields require a major - * version bump. - * sizeof(AVProgram) must not be used outside libav*. - */ -typedef struct AVProgram { - int id; - int flags; - enum AVDiscard discard; ///< selects which program to discard and which to feed to the caller - unsigned int *stream_index; - unsigned int nb_stream_indexes; - AVDictionary *metadata; - - int program_num; - int pmt_pid; - int pcr_pid; - - /***************************************************************** - * All fields below this line are not part of the public API. They - * may not be used outside of libavformat and can be changed and - * removed at will. - * New public fields should be added right above. - ***************************************************************** - */ - int64_t start_time; - int64_t end_time; - - int64_t pts_wrap_reference; ///< reference dts for wrap detection - int pts_wrap_behavior; ///< behavior on wrap detection -} AVProgram; - -#define AVFMTCTX_NOHEADER 0x0001 /**< signal that no header is present - (streams are added dynamically) */ - -typedef struct AVChapter { - int id; ///< unique ID to identify the chapter - AVRational time_base; ///< time base in which the start/end timestamps are specified - int64_t start, end; ///< chapter start/end time in time_base units - AVDictionary *metadata; -} AVChapter; - - -/** - * The duration of a video can be estimated through various ways, and this enum can be used - * to know how the duration was estimated. - */ -enum AVDurationEstimationMethod { - AVFMT_DURATION_FROM_PTS, ///< Duration accurately estimated from PTSes - AVFMT_DURATION_FROM_STREAM, ///< Duration estimated from a stream with a known duration - AVFMT_DURATION_FROM_BITRATE ///< Duration estimated from bitrate (less accurate) -}; - -/** - * Format I/O context. - * New fields can be added to the end with minor version bumps. - * Removal, reordering and changes to existing fields require a major - * version bump. - * sizeof(AVFormatContext) must not be used outside libav*, use - * avformat_alloc_context() to create an AVFormatContext. - */ -typedef struct AVFormatContext { - /** - * A class for logging and AVOptions. Set by avformat_alloc_context(). - * Exports (de)muxer private options if they exist. - */ - const AVClass *av_class; - - /** - * Can only be iformat or oformat, not both at the same time. - * - * decoding: set by avformat_open_input(). - * encoding: set by the user. - */ - struct AVInputFormat *iformat; - struct AVOutputFormat *oformat; - - /** - * Format private data. This is an AVOptions-enabled struct - * if and only if iformat/oformat.priv_class is not NULL. 
- */ - void *priv_data; - - /** - * I/O context. - * - * decoding: either set by the user before avformat_open_input() (then - * the user must close it manually) or set by avformat_open_input(). - * encoding: set by the user. - * - * Do NOT set this field if AVFMT_NOFILE flag is set in - * iformat/oformat.flags. In such a case, the (de)muxer will handle - * I/O in some other way and this field will be NULL. - */ - AVIOContext *pb; - - /* stream info */ - int ctx_flags; /**< Format-specific flags, see AVFMTCTX_xx */ - - /** - * A list of all streams in the file. New streams are created with - * avformat_new_stream(). - * - * decoding: streams are created by libavformat in avformat_open_input(). - * If AVFMTCTX_NOHEADER is set in ctx_flags, then new streams may also - * appear in av_read_frame(). - * encoding: streams are created by the user before avformat_write_header(). - */ - unsigned int nb_streams; - AVStream **streams; - - char filename[1024]; /**< input or output filename */ - - /** - * Decoding: position of the first frame of the component, in - * AV_TIME_BASE fractional seconds. NEVER set this value directly: - * It is deduced from the AVStream values. - */ - int64_t start_time; - - /** - * Decoding: duration of the stream, in AV_TIME_BASE fractional - * seconds. Only set this value if you know none of the individual stream - * durations and also do not set any of them. This is deduced from the - * AVStream values if not set. - */ - int64_t duration; - - /** - * Decoding: total stream bitrate in bit/s, 0 if not - * available. Never set it directly if the file_size and the - * duration are known as FFmpeg can compute it automatically. - */ - int bit_rate; - - unsigned int packet_size; - int max_delay; - - int flags; -#define AVFMT_FLAG_GENPTS 0x0001 ///< Generate missing pts even if it requires parsing future frames. -#define AVFMT_FLAG_IGNIDX 0x0002 ///< Ignore index. -#define AVFMT_FLAG_NONBLOCK 0x0004 ///< Do not block when reading packets from input. -#define AVFMT_FLAG_IGNDTS 0x0008 ///< Ignore DTS on frames that contain both DTS & PTS -#define AVFMT_FLAG_NOFILLIN 0x0010 ///< Do not infer any values from other values, just return what is stored in the container -#define AVFMT_FLAG_NOPARSE 0x0020 ///< Do not use AVParsers, you also must set AVFMT_FLAG_NOFILLIN as the fillin code works on frames and no parsing -> no frames. Also seeking to frames can not work if parsing to find frame boundaries has been disabled -#define AVFMT_FLAG_NOBUFFER 0x0040 ///< Do not buffer frames when possible -#define AVFMT_FLAG_CUSTOM_IO 0x0080 ///< The caller has supplied a custom AVIOContext, don't avio_close() it. -#define AVFMT_FLAG_DISCARD_CORRUPT 0x0100 ///< Discard frames marked corrupted -#define AVFMT_FLAG_MP4A_LATM 0x8000 ///< Enable RTP MP4A-LATM payload -#define AVFMT_FLAG_SORT_DTS 0x10000 ///< try to interleave outputted packets by dts (using this flag can slow demuxing down) -#define AVFMT_FLAG_PRIV_OPT 0x20000 ///< Enable use of private options by delaying codec open (this could be made default once all code is converted) -#define AVFMT_FLAG_KEEP_SIDE_DATA 0x40000 ///< Don't merge side data but keep it separate. - - /** - * decoding: size of data to probe; encoding: unused. - */ - unsigned int probesize; - - /** - * decoding: maximum time (in AV_TIME_BASE units) during which the input should - * be analyzed in avformat_find_stream_info(). 
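[Editor's sketch] The AVFMT_FLAG_* values above are OR-ed into AVFormatContext.flags before the file is opened. A minimal sketch under that assumption; the helper name and flag choice are examples, not a recommendation:

    #include <libavformat/avformat.h>

    static AVFormatContext *open_with_genpts(const char *path)
    {
        AVFormatContext *ic = avformat_alloc_context();
        if (!ic)
            return NULL;

        /* Set demuxing flags before avformat_open_input(). */
        ic->flags |= AVFMT_FLAG_GENPTS | AVFMT_FLAG_DISCARD_CORRUPT;

        if (avformat_open_input(&ic, path, NULL, NULL) < 0)
            return NULL;   /* the user-supplied context is freed on failure */
        return ic;
    }
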
- */ - int max_analyze_duration; - - const uint8_t *key; - int keylen; - - unsigned int nb_programs; - AVProgram **programs; - - /** - * Forced video codec_id. - * Demuxing: Set by user. - */ - enum AVCodecID video_codec_id; - - /** - * Forced audio codec_id. - * Demuxing: Set by user. - */ - enum AVCodecID audio_codec_id; - - /** - * Forced subtitle codec_id. - * Demuxing: Set by user. - */ - enum AVCodecID subtitle_codec_id; - - /** - * Maximum amount of memory in bytes to use for the index of each stream. - * If the index exceeds this size, entries will be discarded as - * needed to maintain a smaller size. This can lead to slower or less - * accurate seeking (depends on demuxer). - * Demuxers for which a full in-memory index is mandatory will ignore - * this. - * muxing : unused - * demuxing: set by user - */ - unsigned int max_index_size; - - /** - * Maximum amount of memory in bytes to use for buffering frames - * obtained from realtime capture devices. - */ - unsigned int max_picture_buffer; - - unsigned int nb_chapters; - AVChapter **chapters; - - AVDictionary *metadata; - - /** - * Start time of the stream in real world time, in microseconds - * since the unix epoch (00:00 1st January 1970). That is, pts=0 - * in the stream was captured at this real world time. - * - encoding: Set by user. - * - decoding: Unused. - */ - int64_t start_time_realtime; - - /** - * decoding: number of frames used to probe fps - */ - int fps_probe_size; - - /** - * Error recognition; higher values will detect more errors but may - * misdetect some more or less valid parts as errors. - * - encoding: unused - * - decoding: Set by user. - */ - int error_recognition; - - /** - * Custom interrupt callbacks for the I/O layer. - * - * decoding: set by the user before avformat_open_input(). - * encoding: set by the user before avformat_write_header() - * (mainly useful for AVFMT_NOFILE formats). The callback - * should also be passed to avio_open2() if it's used to - * open the file. - */ - AVIOInterruptCB interrupt_callback; - - /** - * Flags to enable debugging. - */ - int debug; -#define FF_FDEBUG_TS 0x0001 - - /** - * Transport stream id. - * This will be moved into demuxer private options. Thus no API/ABI compatibility - */ - int ts_id; - - /** - * Audio preload in microseconds. - * Note, not all formats support this and unpredictable things may happen if it is used when not supported. - * - encoding: Set by user via AVOptions (NO direct access) - * - decoding: unused - */ - int audio_preload; - - /** - * Max chunk time in microseconds. - * Note, not all formats support this and unpredictable things may happen if it is used when not supported. - * - encoding: Set by user via AVOptions (NO direct access) - * - decoding: unused - */ - int max_chunk_duration; - - /** - * Max chunk size in bytes - * Note, not all formats support this and unpredictable things may happen if it is used when not supported. - * - encoding: Set by user via AVOptions (NO direct access) - * - decoding: unused - */ - int max_chunk_size; - - /** - * forces the use of wallclock timestamps as pts/dts of packets - * This has undefined results in the presence of B frames. - * - encoding: unused - * - decoding: Set by user via AVOptions (NO direct access) - */ - int use_wallclock_as_timestamps; - - /** - * Avoid negative timestamps during muxing. - * 0 -> allow negative timestamps - * 1 -> avoid negative timestamps - * -1 -> choose automatically (default) - * Note, this only works when interleave_packet_per_dts is in use. 
- * - encoding: Set by user via AVOptions (NO direct access) - * - decoding: unused - */ - int avoid_negative_ts; - - /** - * avio flags, used to force AVIO_FLAG_DIRECT. - * - encoding: unused - * - decoding: Set by user via AVOptions (NO direct access) - */ - int avio_flags; - - /** - * The duration field can be estimated through various ways, and this field can be used - * to know how the duration was estimated. - * - encoding: unused - * - decoding: Read by user via AVOptions (NO direct access) - */ - enum AVDurationEstimationMethod duration_estimation_method; - - /** - * Skip initial bytes when opening stream - * - encoding: unused - * - decoding: Set by user via AVOptions (NO direct access) - */ - unsigned int skip_initial_bytes; - - /** - * Correct single timestamp overflows - * - encoding: unused - * - decoding: Set by user via AVOPtions (NO direct access) - */ - unsigned int correct_ts_overflow; - - /** - * Force seeking to any (also non key) frames. - * - encoding: unused - * - decoding: Set by user via AVOPtions (NO direct access) - */ - int seek2any; - - /** - * Flush the I/O context after each packet. - * - encoding: Set by user via AVOptions (NO direct access) - * - decoding: unused - */ - int flush_packets; - - /***************************************************************** - * All fields below this line are not part of the public API. They - * may not be used outside of libavformat and can be changed and - * removed at will. - * New public fields should be added right above. - ***************************************************************** - */ - - /** - * This buffer is only needed when packets were already buffered but - * not decoded, for example to get the codec parameters in MPEG - * streams. - */ - struct AVPacketList *packet_buffer; - struct AVPacketList *packet_buffer_end; - - /* av_seek_frame() support */ - int64_t data_offset; /**< offset of the first packet */ - - /** - * Raw packets from the demuxer, prior to parsing and decoding. - * This buffer is used for buffering packets until the codec can - * be identified, as parsing cannot be done without knowing the - * codec. - */ - struct AVPacketList *raw_packet_buffer; - struct AVPacketList *raw_packet_buffer_end; - /** - * Packets split by the parser get queued here. - */ - struct AVPacketList *parse_queue; - struct AVPacketList *parse_queue_end; - /** - * Remaining size available for raw_packet_buffer, in bytes. - */ -#define RAW_PACKET_BUFFER_SIZE 2500000 - int raw_packet_buffer_remaining_size; - - /** - * Offset to remap timestamps to be non-negative. - * Expressed in timebase units. - * @see AVStream.mux_ts_offset - */ - int64_t offset; - - /** - * Timebase for the timestamp offset. - */ - AVRational offset_timebase; - - /** - * IO repositioned flag. - * This is set by avformat when the underlaying IO context read pointer - * is repositioned, for example when doing byte based seeking. - * Demuxers can use the flag to detect such changes. - */ - int io_repositioned; -} AVFormatContext; - -/** - * Returns the method used to set ctx->duration. - * - * @return AVFMT_DURATION_FROM_PTS, AVFMT_DURATION_FROM_STREAM, or AVFMT_DURATION_FROM_BITRATE. - */ -enum AVDurationEstimationMethod av_fmt_ctx_get_duration_estimation_method(const AVFormatContext* ctx); - -typedef struct AVPacketList { - AVPacket pkt; - struct AVPacketList *next; -} AVPacketList; - - -/** - * @defgroup lavf_core Core functions - * @ingroup libavf - * - * Functions for querying libavformat capabilities, allocating core structures, - * etc. 
- * @{ - */ - -/** - * Return the LIBAVFORMAT_VERSION_INT constant. - */ -unsigned avformat_version(void); - -/** - * Return the libavformat build-time configuration. - */ -const char *avformat_configuration(void); - -/** - * Return the libavformat license. - */ -const char *avformat_license(void); - -/** - * Initialize libavformat and register all the muxers, demuxers and - * protocols. If you do not call this function, then you can select - * exactly which formats you want to support. - * - * @see av_register_input_format() - * @see av_register_output_format() - */ -void av_register_all(void); - -void av_register_input_format(AVInputFormat *format); -void av_register_output_format(AVOutputFormat *format); - -/** - * Do global initialization of network components. This is optional, - * but recommended, since it avoids the overhead of implicitly - * doing the setup for each session. - * - * Calling this function will become mandatory if using network - * protocols at some major version bump. - */ -int avformat_network_init(void); - -/** - * Undo the initialization done by avformat_network_init. - */ -int avformat_network_deinit(void); - -/** - * If f is NULL, returns the first registered input format, - * if f is non-NULL, returns the next registered input format after f - * or NULL if f is the last one. - */ -AVInputFormat *av_iformat_next(AVInputFormat *f); - -/** - * If f is NULL, returns the first registered output format, - * if f is non-NULL, returns the next registered output format after f - * or NULL if f is the last one. - */ -AVOutputFormat *av_oformat_next(AVOutputFormat *f); - -/** - * Allocate an AVFormatContext. - * avformat_free_context() can be used to free the context and everything - * allocated by the framework within it. - */ -AVFormatContext *avformat_alloc_context(void); - -/** - * Free an AVFormatContext and all its streams. - * @param s context to free - */ -void avformat_free_context(AVFormatContext *s); - -/** - * Get the AVClass for AVFormatContext. It can be used in combination with - * AV_OPT_SEARCH_FAKE_OBJ for examining options. - * - * @see av_opt_find(). - */ -const AVClass *avformat_get_class(void); - -/** - * Add a new stream to a media file. - * - * When demuxing, it is called by the demuxer in read_header(). If the - * flag AVFMTCTX_NOHEADER is set in s.ctx_flags, then it may also - * be called in read_packet(). - * - * When muxing, should be called by the user before avformat_write_header(). - * - * @param c If non-NULL, the AVCodecContext corresponding to the new stream - * will be initialized to use this codec. This is needed for e.g. codec-specific - * defaults to be set, so codec should be provided if it is known. - * - * @return newly created stream or NULL on error. - */ -AVStream *avformat_new_stream(AVFormatContext *s, const AVCodec *c); - -AVProgram *av_new_program(AVFormatContext *s, int id); - -/** - * @} - */ - - -#if FF_API_PKT_DUMP -attribute_deprecated void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload); -attribute_deprecated void av_pkt_dump_log(void *avcl, int level, AVPacket *pkt, - int dump_payload); -#endif - -#if FF_API_ALLOC_OUTPUT_CONTEXT -/** - * @deprecated deprecated in favor of avformat_alloc_output_context2() - */ -attribute_deprecated -AVFormatContext *avformat_alloc_output_context(const char *format, - AVOutputFormat *oformat, - const char *filename); -#endif - -/** - * Allocate an AVFormatContext for an output format. 
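[Editor's sketch] The core functions above (av_register_all, avformat_network_init, av_iformat_next) are typically the first calls an application makes. A small self-contained sketch that lists every registered demuxer; nothing in it is specific to this project:

    #include <stdio.h>
    #include <libavformat/avformat.h>

    int main(void)
    {
        AVInputFormat *ifmt = NULL;

        av_register_all();          /* register all muxers/demuxers/protocols */
        avformat_network_init();    /* optional, avoids per-session setup */

        while ((ifmt = av_iformat_next(ifmt)) != NULL)
            printf("%s: %s\n", ifmt->name, ifmt->long_name);

        avformat_network_deinit();
        return 0;
    }
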
- * avformat_free_context() can be used to free the context and - * everything allocated by the framework within it. - * - * @param *ctx is set to the created format context, or to NULL in - * case of failure - * @param oformat format to use for allocating the context, if NULL - * format_name and filename are used instead - * @param format_name the name of output format to use for allocating the - * context, if NULL filename is used instead - * @param filename the name of the filename to use for allocating the - * context, may be NULL - * @return >= 0 in case of success, a negative AVERROR code in case of - * failure - */ -int avformat_alloc_output_context2(AVFormatContext **ctx, AVOutputFormat *oformat, - const char *format_name, const char *filename); - -/** - * @addtogroup lavf_decoding - * @{ - */ - -/** - * Find AVInputFormat based on the short name of the input format. - */ -AVInputFormat *av_find_input_format(const char *short_name); - -/** - * Guess the file format. - * - * @param is_opened Whether the file is already opened; determines whether - * demuxers with or without AVFMT_NOFILE are probed. - */ -AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened); - -/** - * Guess the file format. - * - * @param is_opened Whether the file is already opened; determines whether - * demuxers with or without AVFMT_NOFILE are probed. - * @param score_max A probe score larger that this is required to accept a - * detection, the variable is set to the actual detection - * score afterwards. - * If the score is <= AVPROBE_SCORE_MAX / 4 it is recommended - * to retry with a larger probe buffer. - */ -AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max); - -/** - * Guess the file format. - * - * @param is_opened Whether the file is already opened; determines whether - * demuxers with or without AVFMT_NOFILE are probed. - * @param score_ret The score of the best detection. - */ -AVInputFormat *av_probe_input_format3(AVProbeData *pd, int is_opened, int *score_ret); - -/** - * Probe a bytestream to determine the input format. Each time a probe returns - * with a score that is too low, the probe buffer size is increased and another - * attempt is made. When the maximum probe size is reached, the input format - * with the highest score is returned. - * - * @param pb the bytestream to probe - * @param fmt the input format is put here - * @param filename the filename of the stream - * @param logctx the log context - * @param offset the offset within the bytestream to probe from - * @param max_probe_size the maximum probe buffer size (zero for default) - * @return 0 in case of success, a negative value corresponding to an - * AVERROR code otherwise - */ -int av_probe_input_buffer(AVIOContext *pb, AVInputFormat **fmt, - const char *filename, void *logctx, - unsigned int offset, unsigned int max_probe_size); - -/** - * Open an input stream and read the header. The codecs are not opened. - * The stream must be closed with av_close_input_file(). - * - * @param ps Pointer to user-supplied AVFormatContext (allocated by avformat_alloc_context). - * May be a pointer to NULL, in which case an AVFormatContext is allocated by this - * function and written into ps. - * Note that a user-supplied AVFormatContext will be freed on failure. - * @param filename Name of the stream to open. - * @param fmt If non-NULL, this parameter forces a specific input format. - * Otherwise the format is autodetected. 
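[Editor's sketch] avformat_open_input() normally autodetects the container through the probing functions above; passing the result of av_find_input_format() bypasses probing. A hedged sketch, where the short name "mpegts" and the helper name are examples only:

    #include <libavformat/avformat.h>

    static AVFormatContext *open_as_mpegts(const char *path)
    {
        AVFormatContext *ic = NULL;
        AVInputFormat *fmt = av_find_input_format("mpegts"); /* force format, skip probing */

        if (!fmt || avformat_open_input(&ic, path, fmt, NULL) < 0)
            return NULL;
        return ic;
    }
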
- * @param options A dictionary filled with AVFormatContext and demuxer-private options. - * On return this parameter will be destroyed and replaced with a dict containing - * options that were not found. May be NULL. - * - * @return 0 on success, a negative AVERROR on failure. - * - * @note If you want to use custom IO, preallocate the format context and set its pb field. - */ -int avformat_open_input(AVFormatContext **ps, const char *filename, AVInputFormat *fmt, AVDictionary **options); - -attribute_deprecated -int av_demuxer_open(AVFormatContext *ic); - -#if FF_API_FORMAT_PARAMETERS -/** - * Read packets of a media file to get stream information. This - * is useful for file formats with no headers such as MPEG. This - * function also computes the real framerate in case of MPEG-2 repeat - * frame mode. - * The logical file position is not changed by this function; - * examined packets may be buffered for later processing. - * - * @param ic media file handle - * @return >=0 if OK, AVERROR_xxx on error - * @todo Let the user decide somehow what information is needed so that - * we do not waste time getting stuff the user does not need. - * - * @deprecated use avformat_find_stream_info. - */ -attribute_deprecated -int av_find_stream_info(AVFormatContext *ic); -#endif - -/** - * Read packets of a media file to get stream information. This - * is useful for file formats with no headers such as MPEG. This - * function also computes the real framerate in case of MPEG-2 repeat - * frame mode. - * The logical file position is not changed by this function; - * examined packets may be buffered for later processing. - * - * @param ic media file handle - * @param options If non-NULL, an ic.nb_streams long array of pointers to - * dictionaries, where i-th member contains options for - * codec corresponding to i-th stream. - * On return each dictionary will be filled with options that were not found. - * @return >=0 if OK, AVERROR_xxx on error - * - * @note this function isn't guaranteed to open all the codecs, so - * options being non-empty at return is a perfectly normal behavior. - * - * @todo Let the user decide somehow what information is needed so that - * we do not waste time getting stuff the user does not need. - */ -int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options); - -/** - * Find the programs which belong to a given stream. - * - * @param ic media file handle - * @param last the last found program, the search will start after this - * program, or from the beginning if it is NULL - * @param s stream index - * @return the next program which belongs to s, NULL if no program is found or - * the last program is not among the programs of ic. - */ -AVProgram *av_find_program_from_stream(AVFormatContext *ic, AVProgram *last, int s); - -/** - * Find the "best" stream in the file. - * The best stream is determined according to various heuristics as the most - * likely to be what the user expects. - * If the decoder parameter is non-NULL, av_find_best_stream will find the - * default decoder for the stream's codec; streams for which no decoder can - * be found are ignored. - * - * @param ic media file handle - * @param type stream type: video, audio, subtitles, etc. - * @param wanted_stream_nb user-requested stream number, - * or -1 for automatic selection - * @param related_stream try to find a stream related (eg. 
in the same - * program) to this one, or -1 if none - * @param decoder_ret if non-NULL, returns the decoder for the - * selected stream - * @param flags flags; none are currently defined - * @return the non-negative stream number in case of success, - * AVERROR_STREAM_NOT_FOUND if no stream with the requested type - * could be found, - * AVERROR_DECODER_NOT_FOUND if streams were found but no decoder - * @note If av_find_best_stream returns successfully and decoder_ret is not - * NULL, then *decoder_ret is guaranteed to be set to a valid AVCodec. - */ -int av_find_best_stream(AVFormatContext *ic, - enum AVMediaType type, - int wanted_stream_nb, - int related_stream, - AVCodec **decoder_ret, - int flags); - -#if FF_API_READ_PACKET -/** - * @deprecated use AVFMT_FLAG_NOFILLIN | AVFMT_FLAG_NOPARSE to read raw - * unprocessed packets - * - * Read a transport packet from a media file. - * - * This function is obsolete and should never be used. - * Use av_read_frame() instead. - * - * @param s media file handle - * @param pkt is filled - * @return 0 if OK, AVERROR_xxx on error - */ -attribute_deprecated -int av_read_packet(AVFormatContext *s, AVPacket *pkt); -#endif - -/** - * Return the next frame of a stream. - * This function returns what is stored in the file, and does not validate - * that what is there are valid frames for the decoder. It will split what is - * stored in the file into frames and return one for each call. It will not - * omit invalid data between valid frames so as to give the decoder the maximum - * information possible for decoding. - * - * If pkt->buf is NULL, then the packet is valid until the next - * av_read_frame() or until av_close_input_file(). Otherwise the packet is valid - * indefinitely. In both cases the packet must be freed with - * av_free_packet when it is no longer needed. For video, the packet contains - * exactly one frame. For audio, it contains an integer number of frames if each - * frame has a known fixed size (e.g. PCM or ADPCM data). If the audio frames - * have a variable size (e.g. MPEG audio), then it contains one frame. - * - * pkt->pts, pkt->dts and pkt->duration are always set to correct - * values in AVStream.time_base units (and guessed if the format cannot - * provide them). pkt->pts can be AV_NOPTS_VALUE if the video format - * has B-frames, so it is better to rely on pkt->dts if you do not - * decompress the payload. - * - * @return 0 if OK, < 0 on error or end of file - */ -int av_read_frame(AVFormatContext *s, AVPacket *pkt); - -/** - * Seek to the keyframe at timestamp. - * 'timestamp' in 'stream_index'. - * @param stream_index If stream_index is (-1), a default - * stream is selected, and timestamp is automatically converted - * from AV_TIME_BASE units to the stream specific time_base. - * @param timestamp Timestamp in AVStream.time_base units - * or, if no stream is specified, in AV_TIME_BASE units. - * @param flags flags which select direction and seeking mode - * @return >= 0 on success - */ -int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, - int flags); - -/** - * Seek to timestamp ts. - * Seeking will be done so that the point from which all active streams - * can be presented successfully will be closest to ts and within min/max_ts. - * Active streams are all streams that have AVStream.discard < AVDISCARD_ALL. - * - * If flags contain AVSEEK_FLAG_BYTE, then all timestamps are in bytes and - * are the file position (this may not be supported by all demuxers). 
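[Editor's sketch] Taken together, avformat_open_input(), avformat_find_stream_info(), av_find_best_stream() and av_read_frame() form the usual demuxing sequence. A minimal sketch of that sequence using the then-current av_free_packet() to release packets; error handling is abbreviated and the function name is illustrative:

    #include <libavformat/avformat.h>

    static int count_video_packets(const char *path)
    {
        AVFormatContext *ic = NULL;
        AVPacket pkt;
        int video_idx, n = 0;

        av_register_all();
        if (avformat_open_input(&ic, path, NULL, NULL) < 0)
            return -1;
        if (avformat_find_stream_info(ic, NULL) < 0)
            goto fail;

        video_idx = av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
        if (video_idx < 0)
            goto fail;

        while (av_read_frame(ic, &pkt) >= 0) {
            if (pkt.stream_index == video_idx)
                n++;
            av_free_packet(&pkt);   /* every returned packet must be freed */
        }

        avformat_close_input(&ic);
        return n;
    fail:
        avformat_close_input(&ic);
        return -1;
    }
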
- * If flags contain AVSEEK_FLAG_FRAME, then all timestamps are in frames - * in the stream with stream_index (this may not be supported by all demuxers). - * Otherwise all timestamps are in units of the stream selected by stream_index - * or if stream_index is -1, in AV_TIME_BASE units. - * If flags contain AVSEEK_FLAG_ANY, then non-keyframes are treated as - * keyframes (this may not be supported by all demuxers). - * If flags contain AVSEEK_FLAG_BACKWARD, it is ignored. - * - * @param stream_index index of the stream which is used as time base reference - * @param min_ts smallest acceptable timestamp - * @param ts target timestamp - * @param max_ts largest acceptable timestamp - * @param flags flags - * @return >=0 on success, error code otherwise - * - * @note This is part of the new seek API which is still under construction. - * Thus do not use this yet. It may change at any time, do not expect - * ABI compatibility yet! - */ -int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags); - -/** - * Start playing a network-based stream (e.g. RTSP stream) at the - * current position. - */ -int av_read_play(AVFormatContext *s); - -/** - * Pause a network-based stream (e.g. RTSP stream). - * - * Use av_read_play() to resume it. - */ -int av_read_pause(AVFormatContext *s); - -#if FF_API_CLOSE_INPUT_FILE -/** - * @deprecated use avformat_close_input() - * Close a media file (but not its codecs). - * - * @param s media file handle - */ -attribute_deprecated -void av_close_input_file(AVFormatContext *s); -#endif - -/** - * Close an opened input AVFormatContext. Free it and all its contents - * and set *s to NULL. - */ -void avformat_close_input(AVFormatContext **s); -/** - * @} - */ - -#if FF_API_NEW_STREAM -/** - * Add a new stream to a media file. - * - * Can only be called in the read_header() function. If the flag - * AVFMTCTX_NOHEADER is in the format context, then new streams - * can be added in read_packet too. - * - * @param s media file handle - * @param id file-format-dependent stream ID - */ -attribute_deprecated -AVStream *av_new_stream(AVFormatContext *s, int id); -#endif - -#if FF_API_SET_PTS_INFO -/** - * @deprecated this function is not supposed to be called outside of lavf - */ -attribute_deprecated -void av_set_pts_info(AVStream *s, int pts_wrap_bits, - unsigned int pts_num, unsigned int pts_den); -#endif - -#define AVSEEK_FLAG_BACKWARD 1 ///< seek backward -#define AVSEEK_FLAG_BYTE 2 ///< seeking based on position in bytes -#define AVSEEK_FLAG_ANY 4 ///< seek to any frame, even non-keyframes -#define AVSEEK_FLAG_FRAME 8 ///< seeking based on frame number - -/** - * @addtogroup lavf_encoding - * @{ - */ -/** - * Allocate the stream private data and write the stream header to - * an output media file. - * - * @param s Media file handle, must be allocated with avformat_alloc_context(). - * Its oformat field must be set to the desired output format; - * Its pb field must be set to an already opened AVIOContext. - * @param options An AVDictionary filled with AVFormatContext and muxer-private options. - * On return this parameter will be destroyed and replaced with a dict containing - * options that were not found. May be NULL. - * - * @return 0 on success, negative AVERROR on failure. - * - * @see av_opt_find, av_dict_set, avio_open, av_oformat_next. - */ -int avformat_write_header(AVFormatContext *s, AVDictionary **options); - -/** - * Write a packet to an output media file. 
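[Editor's sketch] For the seeking API above, av_seek_frame() with AVSEEK_FLAG_BACKWARD is the common case; the target first has to be rescaled from seconds into the stream's time_base. A minimal sketch assuming av_rescale_q() from libavutil; the helper name is illustrative:

    #include <libavformat/avformat.h>
    #include <libavutil/mathematics.h>

    /* Seek stream_index of ic to roughly `seconds`, landing on a keyframe
       at or before the target (AVSEEK_FLAG_BACKWARD). */
    static int seek_to_seconds(AVFormatContext *ic, int stream_index, double seconds)
    {
        AVRational time_base = ic->streams[stream_index]->time_base;
        int64_t ts = av_rescale_q((int64_t)(seconds * AV_TIME_BASE),
                                  AV_TIME_BASE_Q, time_base);
        return av_seek_frame(ic, stream_index, ts, AVSEEK_FLAG_BACKWARD);
    }
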
- * - * The packet shall contain one audio or video frame. - * The packet must be correctly interleaved according to the container - * specification, if not then av_interleaved_write_frame must be used. - * - * @param s media file handle - * @param pkt The packet, which contains the stream_index, buf/buf_size, - * dts/pts, ... - * This can be NULL (at any time, not just at the end), in - * order to immediately flush data buffered within the muxer, - * for muxers that buffer up data internally before writing it - * to the output. - * @return < 0 on error, = 0 if OK, 1 if flushed and there is no more data to flush - */ -int av_write_frame(AVFormatContext *s, AVPacket *pkt); - -/** - * Write a packet to an output media file ensuring correct interleaving. - * - * The packet must contain one audio or video frame. - * If the packets are already correctly interleaved, the application should - * call av_write_frame() instead as it is slightly faster. It is also important - * to keep in mind that completely non-interleaved input will need huge amounts - * of memory to interleave with this, so it is preferable to interleave at the - * demuxer level. - * - * @param s media file handle - * @param pkt The packet containing the data to be written. pkt->buf must be set - * to a valid AVBufferRef describing the packet data. Libavformat takes - * ownership of this reference and will unref it when it sees fit. The caller - * must not access the data through this reference after this function returns. - * This can be NULL (at any time, not just at the end), to flush the - * interleaving queues. - * Packet's @ref AVPacket.stream_index "stream_index" field must be set to the - * index of the corresponding stream in @ref AVFormatContext.streams - * "s.streams". - * It is very strongly recommended that timing information (@ref AVPacket.pts - * "pts", @ref AVPacket.dts "dts" @ref AVPacket.duration "duration") is set to - * correct values. - * - * @return 0 on success, a negative AVERROR on error. - */ -int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt); - -/** - * Write the stream trailer to an output media file and free the - * file private data. - * - * May only be called after a successful call to avformat_write_header. - * - * @param s media file handle - * @return 0 if OK, AVERROR_xxx on error - */ -int av_write_trailer(AVFormatContext *s); - -/** - * Return the output format in the list of registered output formats - * which best matches the provided parameters, or return NULL if - * there is no match. - * - * @param short_name if non-NULL checks if short_name matches with the - * names of the registered formats - * @param filename if non-NULL checks if filename terminates with the - * extensions of the registered formats - * @param mime_type if non-NULL checks if mime_type matches with the - * MIME type of the registered formats - */ -AVOutputFormat *av_guess_format(const char *short_name, - const char *filename, - const char *mime_type); - -/** - * Guess the codec ID based upon muxer and filename. - */ -enum AVCodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name, - const char *filename, const char *mime_type, - enum AVMediaType type); - -/** - * Get timing information for the data currently output. - * The exact meaning of "currently output" depends on the format. - * It is mostly relevant for devices that have an internal buffer and/or - * work in real time. 
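[Editor's sketch] For the muxing side documented above (avformat_write_header / av_interleaved_write_frame / av_write_trailer), the usual skeleton looks roughly as follows. Stream parameters and packet production are elided because they depend on the encoder; avio_open()/avio_close() are the plain file helpers declared further down in avio.h:

    #include <libavformat/avformat.h>

    static int mux_skeleton(const char *out_name)
    {
        AVFormatContext *oc = NULL;
        AVStream *st;
        int ret;

        if (avformat_alloc_output_context2(&oc, NULL, NULL, out_name) < 0)
            return -1;

        st = avformat_new_stream(oc, NULL);   /* codec parameters omitted in this sketch */
        if (!st)
            return -1;

        if (!(oc->oformat->flags & AVFMT_NOFILE)) {
            if (avio_open(&oc->pb, out_name, AVIO_FLAG_WRITE) < 0)
                return -1;
        }

        ret = avformat_write_header(oc, NULL);
        /* ... fill AVPacket(s) and call av_interleaved_write_frame(oc, &pkt) ... */
        if (ret >= 0)
            av_write_trailer(oc);

        if (!(oc->oformat->flags & AVFMT_NOFILE))
            avio_close(oc->pb);
        avformat_free_context(oc);
        return ret < 0 ? -1 : 0;
    }
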
- * @param s media file handle - * @param stream stream in the media file - * @param[out] dts DTS of the last packet output for the stream, in stream - * time_base units - * @param[out] wall absolute time when that packet whas output, - * in microsecond - * @return 0 if OK, AVERROR(ENOSYS) if the format does not support it - * Note: some formats or devices may not allow to measure dts and wall - * atomically. - */ -int av_get_output_timestamp(struct AVFormatContext *s, int stream, - int64_t *dts, int64_t *wall); - - -/** - * @} - */ - - -/** - * @defgroup lavf_misc Utility functions - * @ingroup libavf - * @{ - * - * Miscellaneous utility functions related to both muxing and demuxing - * (or neither). - */ - -/** - * Send a nice hexadecimal dump of a buffer to the specified file stream. - * - * @param f The file stream pointer where the dump should be sent to. - * @param buf buffer - * @param size buffer size - * - * @see av_hex_dump_log, av_pkt_dump2, av_pkt_dump_log2 - */ -void av_hex_dump(FILE *f, const uint8_t *buf, int size); - -/** - * Send a nice hexadecimal dump of a buffer to the log. - * - * @param avcl A pointer to an arbitrary struct of which the first field is a - * pointer to an AVClass struct. - * @param level The importance level of the message, lower values signifying - * higher importance. - * @param buf buffer - * @param size buffer size - * - * @see av_hex_dump, av_pkt_dump2, av_pkt_dump_log2 - */ -void av_hex_dump_log(void *avcl, int level, const uint8_t *buf, int size); - -/** - * Send a nice dump of a packet to the specified file stream. - * - * @param f The file stream pointer where the dump should be sent to. - * @param pkt packet to dump - * @param dump_payload True if the payload must be displayed, too. - * @param st AVStream that the packet belongs to - */ -void av_pkt_dump2(FILE *f, AVPacket *pkt, int dump_payload, AVStream *st); - - -/** - * Send a nice dump of a packet to the log. - * - * @param avcl A pointer to an arbitrary struct of which the first field is a - * pointer to an AVClass struct. - * @param level The importance level of the message, lower values signifying - * higher importance. - * @param pkt packet to dump - * @param dump_payload True if the payload must be displayed, too. - * @param st AVStream that the packet belongs to - */ -void av_pkt_dump_log2(void *avcl, int level, AVPacket *pkt, int dump_payload, - AVStream *st); - -/** - * Get the AVCodecID for the given codec tag tag. - * If no codec id is found returns AV_CODEC_ID_NONE. - * - * @param tags list of supported codec_id-codec_tag pairs, as stored - * in AVInputFormat.codec_tag and AVOutputFormat.codec_tag - */ -enum AVCodecID av_codec_get_id(const struct AVCodecTag * const *tags, unsigned int tag); - -/** - * Get the codec tag for the given codec id id. - * If no codec tag is found returns 0. - * - * @param tags list of supported codec_id-codec_tag pairs, as stored - * in AVInputFormat.codec_tag and AVOutputFormat.codec_tag - */ -unsigned int av_codec_get_tag(const struct AVCodecTag * const *tags, enum AVCodecID id); - -/** - * Get the codec tag for the given codec id. 
- * - * @param tags list of supported codec_id - codec_tag pairs, as stored - * in AVInputFormat.codec_tag and AVOutputFormat.codec_tag - * @param id codec id that should be searched for in the list - * @param tag A pointer to the found tag - * @return 0 if id was not found in tags, > 0 if it was found - */ -int av_codec_get_tag2(const struct AVCodecTag * const *tags, enum AVCodecID id, - unsigned int *tag); - -int av_find_default_stream_index(AVFormatContext *s); - -/** - * Get the index for a specific timestamp. - * @param flags if AVSEEK_FLAG_BACKWARD then the returned index will correspond - * to the timestamp which is <= the requested one, if backward - * is 0, then it will be >= - * if AVSEEK_FLAG_ANY seek to any frame, only keyframes otherwise - * @return < 0 if no such timestamp could be found - */ -int av_index_search_timestamp(AVStream *st, int64_t timestamp, int flags); - -/** - * Add an index entry into a sorted list. Update the entry if the list - * already contains it. - * - * @param timestamp timestamp in the time base of the given stream - */ -int av_add_index_entry(AVStream *st, int64_t pos, int64_t timestamp, - int size, int distance, int flags); - - -/** - * Split a URL string into components. - * - * The pointers to buffers for storing individual components may be null, - * in order to ignore that component. Buffers for components not found are - * set to empty strings. If the port is not found, it is set to a negative - * value. - * - * @param proto the buffer for the protocol - * @param proto_size the size of the proto buffer - * @param authorization the buffer for the authorization - * @param authorization_size the size of the authorization buffer - * @param hostname the buffer for the host name - * @param hostname_size the size of the hostname buffer - * @param port_ptr a pointer to store the port number in - * @param path the buffer for the path - * @param path_size the size of the path buffer - * @param url the URL to split - */ -void av_url_split(char *proto, int proto_size, - char *authorization, int authorization_size, - char *hostname, int hostname_size, - int *port_ptr, - char *path, int path_size, - const char *url); - - -void av_dump_format(AVFormatContext *ic, - int index, - const char *url, - int is_output); - -/** - * Return in 'buf' the path with '%d' replaced by a number. - * - * Also handles the '%0nd' format where 'n' is the total number - * of digits and '%%'. - * - * @param buf destination buffer - * @param buf_size destination buffer size - * @param path numbered sequence string - * @param number frame number - * @return 0 if OK, -1 on format error - */ -int av_get_frame_filename(char *buf, int buf_size, - const char *path, int number); - -/** - * Check whether filename actually is a numbered sequence generator. - * - * @param filename possible numbered sequence string - * @return 1 if a valid numbered sequence string, 0 otherwise - */ -int av_filename_number_test(const char *filename); - -/** - * Generate an SDP for an RTP session. - * - * Note, this overwrites the id values of AVStreams in the muxer contexts - * for getting unique dynamic payload types. - * - * @param ac array of AVFormatContexts describing the RTP streams. If the - * array is composed by only one context, such context can contain - * multiple AVStreams (one AVStream per RTP stream). Otherwise, - * all the contexts in the array (an AVCodecContext per RTP stream) - * must contain only one AVStream. 
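[Editor's sketch] As a usage note for the numbered-sequence helpers above, a minimal sketch; the pattern string and buffer size are arbitrary:

    #include <libavformat/avformat.h>

    static void sequence_name_demo(void)
    {
        char name[1024];

        if (av_filename_number_test("frame%04d.png") &&
            av_get_frame_filename(name, sizeof(name), "frame%04d.png", 42) == 0) {
            /* name now holds "frame0042.png" */
        }
    }
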
- * @param n_files number of AVCodecContexts contained in ac - * @param buf buffer where the SDP will be stored (must be allocated by - * the caller) - * @param size the size of the buffer - * @return 0 if OK, AVERROR_xxx on error - */ -int av_sdp_create(AVFormatContext *ac[], int n_files, char *buf, int size); - -/** - * Return a positive value if the given filename has one of the given - * extensions, 0 otherwise. - * - * @param extensions a comma-separated list of filename extensions - */ -int av_match_ext(const char *filename, const char *extensions); - -/** - * Test if the given container can store a codec. - * - * @param std_compliance standards compliance level, one of FF_COMPLIANCE_* - * - * @return 1 if codec with ID codec_id can be stored in ofmt, 0 if it cannot. - * A negative number if this information is not available. - */ -int avformat_query_codec(AVOutputFormat *ofmt, enum AVCodecID codec_id, int std_compliance); - -/** - * @defgroup riff_fourcc RIFF FourCCs - * @{ - * Get the tables mapping RIFF FourCCs to libavcodec AVCodecIDs. The tables are - * meant to be passed to av_codec_get_id()/av_codec_get_tag() as in the - * following code: - * @code - * uint32_t tag = MKTAG('H', '2', '6', '4'); - * const struct AVCodecTag *table[] = { avformat_get_riff_video_tags(), 0 }; - * enum AVCodecID id = av_codec_get_id(table, tag); - * @endcode - */ -/** - * @return the table mapping RIFF FourCCs for video to libavcodec AVCodecID. - */ -const struct AVCodecTag *avformat_get_riff_video_tags(void); -/** - * @return the table mapping RIFF FourCCs for audio to AVCodecID. - */ -const struct AVCodecTag *avformat_get_riff_audio_tags(void); - -/** - * @} - */ - -/** - * Guess the sample aspect ratio of a frame, based on both the stream and the - * frame aspect ratio. - * - * Since the frame aspect ratio is set by the codec but the stream aspect ratio - * is set by the demuxer, these two may not be equal. This function tries to - * return the value that you should use if you would like to display the frame. - * - * Basic logic is to use the stream aspect ratio if it is set to something sane - * otherwise use the frame aspect ratio. This way a container setting, which is - * usually easy to modify can override the coded value in the frames. - * - * @param format the format context which the stream is part of - * @param stream the stream which the frame is part of - * @param frame the frame with the aspect ratio to be determined - * @return the guessed (valid) sample_aspect_ratio, 0/1 if no idea - */ -AVRational av_guess_sample_aspect_ratio(AVFormatContext *format, AVStream *stream, AVFrame *frame); - -/** - * Guess the frame rate, based on both the container and codec information. - * - * @param ctx the format context which the stream is part of - * @param stream the stream which the frame is part of - * @param frame the frame for which the frame rate should be determined, may be NULL - * @return the guessed (valid) frame rate, 0/1 if no idea - */ -AVRational av_guess_frame_rate(AVFormatContext *ctx, AVStream *stream, AVFrame *frame); - -/** - * Check if the stream st contained in s is matched by the stream specifier - * spec. - * - * See the "stream specifiers" chapter in the documentation for the syntax - * of spec. - * - * @return >0 if st is matched by spec; - * 0 if st is not matched by spec; - * AVERROR code if spec is invalid - * - * @note A stream specifier can match several streams in the format. 
- */ -int avformat_match_stream_specifier(AVFormatContext *s, AVStream *st, - const char *spec); - -int avformat_queue_attached_pictures(AVFormatContext *s); - - -/** - * @} - */ - -#endif /* AVFORMAT_AVFORMAT_H */ diff --git a/3rdparty/include/ffmpeg_/libavformat/avio.h b/3rdparty/include/ffmpeg_/libavformat/avio.h deleted file mode 100644 index 5bdbc62836..0000000000 --- a/3rdparty/include/ffmpeg_/libavformat/avio.h +++ /dev/null @@ -1,481 +0,0 @@ -/* - * copyright (c) 2001 Fabrice Bellard - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ -#ifndef AVFORMAT_AVIO_H -#define AVFORMAT_AVIO_H - -/** - * @file - * @ingroup lavf_io - * Buffered I/O operations - */ - -#include - -#include "libavutil/common.h" -#include "libavutil/dict.h" -#include "libavutil/log.h" - -#include "libavformat/version.h" - - -#define AVIO_SEEKABLE_NORMAL 0x0001 /**< Seeking works like for a local file */ - -/** - * Callback for checking whether to abort blocking functions. - * AVERROR_EXIT is returned in this case by the interrupted - * function. During blocking operations, callback is called with - * opaque as parameter. If the callback returns 1, the - * blocking operation will be aborted. - * - * No members can be added to this struct without a major bump, if - * new elements have been added after this struct in AVFormatContext - * or AVIOContext. - */ -typedef struct AVIOInterruptCB { - int (*callback)(void*); - void *opaque; -} AVIOInterruptCB; - -/** - * Bytestream IO Context. - * New fields can be added to the end with minor version bumps. - * Removal, reordering and changes to existing fields require a major - * version bump. - * sizeof(AVIOContext) must not be used outside libav*. - * - * @note None of the function pointers in AVIOContext should be called - * directly, they should only be set by the client application - * when implementing custom I/O. Normally these are set to the - * function pointers specified in avio_alloc_context() - */ -typedef struct AVIOContext { - /** - * A class for private options. - * - * If this AVIOContext is created by avio_open2(), av_class is set and - * passes the options down to protocols. - * - * If this AVIOContext is manually allocated, then av_class may be set by - * the caller. - * - * warning -- this field can be NULL, be sure to not pass this AVIOContext - * to any av_opt_* functions in that case. - */ - const AVClass *av_class; - unsigned char *buffer; /**< Start of the buffer. */ - int buffer_size; /**< Maximum buffer size */ - unsigned char *buf_ptr; /**< Current position in the buffer */ - unsigned char *buf_end; /**< End of the data, may be less than - buffer+buffer_size if the read function returned - less data than requested, e.g. for streams where - no more data has been received yet. 
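[Editor's sketch] AVIOInterruptCB ties in with the AVFormatContext.interrupt_callback field documented earlier in avformat.h: the callback is polled during blocking I/O, and a non-zero return aborts the operation with AVERROR_EXIT. A minimal sketch implementing a wall-clock timeout; the 5-second limit, struct and helper names are invented for illustration:

    #include <libavformat/avformat.h>
    #include <libavutil/time.h>

    struct timeout_ctx { int64_t deadline_us; };

    /* Return non-zero to abort the blocking operation, 0 to keep going. */
    static int interrupt_cb(void *opaque)
    {
        const struct timeout_ctx *t = opaque;
        return av_gettime() > t->deadline_us;
    }

    static int open_with_timeout(AVFormatContext **ic, const char *url)
    {
        struct timeout_ctx t = { av_gettime() + 5 * 1000000LL };
        int ret;

        *ic = avformat_alloc_context();
        if (!*ic)
            return AVERROR(ENOMEM);
        (*ic)->interrupt_callback.callback = interrupt_cb;
        (*ic)->interrupt_callback.opaque   = &t;

        ret = avformat_open_input(ic, url, NULL, NULL);

        /* t goes out of scope on return, so detach the callback again */
        if (ret >= 0)
            (*ic)->interrupt_callback = (AVIOInterruptCB){ NULL, NULL };
        return ret;
    }
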
*/ - void *opaque; /**< A private pointer, passed to the read/write/seek/... - functions. */ - int (*read_packet)(void *opaque, uint8_t *buf, int buf_size); - int (*write_packet)(void *opaque, uint8_t *buf, int buf_size); - int64_t (*seek)(void *opaque, int64_t offset, int whence); - int64_t pos; /**< position in the file of the current buffer */ - int must_flush; /**< true if the next seek should flush */ - int eof_reached; /**< true if eof reached */ - int write_flag; /**< true if open for writing */ - int max_packet_size; - unsigned long checksum; - unsigned char *checksum_ptr; - unsigned long (*update_checksum)(unsigned long checksum, const uint8_t *buf, unsigned int size); - int error; /**< contains the error code or 0 if no error happened */ - /** - * Pause or resume playback for network streaming protocols - e.g. MMS. - */ - int (*read_pause)(void *opaque, int pause); - /** - * Seek to a given timestamp in stream with the specified stream_index. - * Needed for some network streaming protocols which don't support seeking - * to byte position. - */ - int64_t (*read_seek)(void *opaque, int stream_index, - int64_t timestamp, int flags); - /** - * A combination of AVIO_SEEKABLE_ flags or 0 when the stream is not seekable. - */ - int seekable; - - /** - * max filesize, used to limit allocations - * This field is internal to libavformat and access from outside is not allowed. - */ - int64_t maxsize; - - /** - * avio_read and avio_write should if possible be satisfied directly - * instead of going through a buffer, and avio_seek will always - * call the underlying seek function directly. - */ - int direct; - - /** - * Bytes read statistic - * This field is internal to libavformat and access from outside is not allowed. - */ - int64_t bytes_read; - - /** - * seek statistic - * This field is internal to libavformat and access from outside is not allowed. - */ - int seek_count; - - /** - * writeout statistic - * This field is internal to libavformat and access from outside is not allowed. - */ - int writeout_count; -} AVIOContext; - -/* unbuffered I/O */ - -/** - * Return AVIO_FLAG_* access flags corresponding to the access permissions - * of the resource in url, or a negative value corresponding to an - * AVERROR code in case of failure. The returned access flags are - * masked by the value in flags. - * - * @note This function is intrinsically unsafe, in the sense that the - * checked resource may change its existence or permission status from - * one call to another. Thus you should not trust the returned value, - * unless you are sure that no other processes are accessing the - * checked resource. - */ -int avio_check(const char *url, int flags); - -/** - * Allocate and initialize an AVIOContext for buffered I/O. It must be later - * freed with av_free(). - * - * @param buffer Memory block for input/output operations via AVIOContext. - * The buffer must be allocated with av_malloc() and friends. - * @param buffer_size The buffer size is very important for performance. - * For protocols with fixed blocksize it should be set to this blocksize. - * For others a typical size is a cache page, e.g. 4kb. - * @param write_flag Set to 1 if the buffer should be writable, 0 otherwise. - * @param opaque An opaque pointer to user-specific data. - * @param read_packet A function for refilling the buffer, may be NULL. - * @param write_packet A function for writing the buffer contents, may be NULL. - * The function may not change the input buffers content. 
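avio_check() is easy to over-trust; as its note says, the answer is only a snapshot. An illustrative wrapper (looks_readable is an invented name):

    #include <libavformat/avio.h>

    static int looks_readable(const char *url)
    {
        int ret = avio_check(url, AVIO_FLAG_READ);
        if (ret < 0)
            return ret;                       /* AVERROR code */
        return (ret & AVIO_FLAG_READ) != 0;   /* 1 if readable right now */
    }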
- * @param seek A function for seeking to specified byte position, may be NULL. - * - * @return Allocated AVIOContext or NULL on failure. - */ -AVIOContext *avio_alloc_context( - unsigned char *buffer, - int buffer_size, - int write_flag, - void *opaque, - int (*read_packet)(void *opaque, uint8_t *buf, int buf_size), - int (*write_packet)(void *opaque, uint8_t *buf, int buf_size), - int64_t (*seek)(void *opaque, int64_t offset, int whence)); - -void avio_w8(AVIOContext *s, int b); -void avio_write(AVIOContext *s, const unsigned char *buf, int size); -void avio_wl64(AVIOContext *s, uint64_t val); -void avio_wb64(AVIOContext *s, uint64_t val); -void avio_wl32(AVIOContext *s, unsigned int val); -void avio_wb32(AVIOContext *s, unsigned int val); -void avio_wl24(AVIOContext *s, unsigned int val); -void avio_wb24(AVIOContext *s, unsigned int val); -void avio_wl16(AVIOContext *s, unsigned int val); -void avio_wb16(AVIOContext *s, unsigned int val); - -/** - * Write a NULL-terminated string. - * @return number of bytes written. - */ -int avio_put_str(AVIOContext *s, const char *str); - -/** - * Convert an UTF-8 string to UTF-16LE and write it. - * @return number of bytes written. - */ -int avio_put_str16le(AVIOContext *s, const char *str); - -/** - * Passing this as the "whence" parameter to a seek function causes it to - * return the filesize without seeking anywhere. Supporting this is optional. - * If it is not supported then the seek function will return <0. - */ -#define AVSEEK_SIZE 0x10000 - -/** - * Oring this flag as into the "whence" parameter to a seek function causes it to - * seek by any means (like reopening and linear reading) or other normally unreasonable - * means that can be extremely slow. - * This may be ignored by the seek code. - */ -#define AVSEEK_FORCE 0x20000 - -/** - * fseek() equivalent for AVIOContext. - * @return new position or AVERROR. - */ -int64_t avio_seek(AVIOContext *s, int64_t offset, int whence); - -/** - * Skip given number of bytes forward - * @return new position or AVERROR. - */ -int64_t avio_skip(AVIOContext *s, int64_t offset); - -/** - * ftell() equivalent for AVIOContext. - * @return position or AVERROR. - */ -static av_always_inline int64_t avio_tell(AVIOContext *s) -{ - return avio_seek(s, 0, SEEK_CUR); -} - -/** - * Get the filesize. - * @return filesize or AVERROR - */ -int64_t avio_size(AVIOContext *s); - -/** - * feof() equivalent for AVIOContext. - * @return non zero if and only if end of file - */ -int url_feof(AVIOContext *s); - -/** @warning currently size is limited */ -int avio_printf(AVIOContext *s, const char *fmt, ...) av_printf_format(2, 3); - -/** - * Force flushing of buffered data to the output s. - * - * Force the buffered data to be immediately written to the output, - * without to wait to fill the internal buffer. - */ -void avio_flush(AVIOContext *s); - -/** - * Read size bytes from AVIOContext into buf. 
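avio_alloc_context() is the hook for custom I/O, e.g. demuxing straight from a blob in memory. A sketch, assuming the caller keeps the blob alive for the lifetime of the context (mem_reader, mem_read and make_mem_avio are invented names):

    #include <string.h>
    #include <libavformat/avio.h>
    #include <libavutil/mem.h>

    struct mem_reader {
        const uint8_t *data;
        size_t         left;
    };

    static int mem_read(void *opaque, uint8_t *buf, int buf_size)
    {
        struct mem_reader *r = opaque;
        int n = (int)FFMIN((size_t)buf_size, r->left);
        if (!n)
            return AVERROR_EOF;
        memcpy(buf, r->data, n);
        r->data += n;
        r->left -= n;
        return n;
    }

    static AVIOContext *make_mem_avio(struct mem_reader *r)
    {
        int buf_size = 4096;
        unsigned char *buf = av_malloc(buf_size);   /* must come from av_malloc() */
        AVIOContext *ctx;
        if (!buf)
            return NULL;
        ctx = avio_alloc_context(buf, buf_size, 0 /* read-only */, r,
                                 mem_read, NULL, NULL);
        if (!ctx)
            av_free(buf);
        return ctx;
    }

The result is typically assigned to AVFormatContext.pb before avformat_open_input(); with headers of this vintage the context is released by freeing ctx->buffer and then the context itself with av_free(), as the allocation note above implies.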
- * @return number of bytes read or AVERROR - */ -int avio_read(AVIOContext *s, unsigned char *buf, int size); - -/** - * @name Functions for reading from AVIOContext - * @{ - * - * @note return 0 if EOF, so you cannot use it if EOF handling is - * necessary - */ -int avio_r8 (AVIOContext *s); -unsigned int avio_rl16(AVIOContext *s); -unsigned int avio_rl24(AVIOContext *s); -unsigned int avio_rl32(AVIOContext *s); -uint64_t avio_rl64(AVIOContext *s); -unsigned int avio_rb16(AVIOContext *s); -unsigned int avio_rb24(AVIOContext *s); -unsigned int avio_rb32(AVIOContext *s); -uint64_t avio_rb64(AVIOContext *s); -/** - * @} - */ - -/** - * Read a string from pb into buf. The reading will terminate when either - * a NULL character was encountered, maxlen bytes have been read, or nothing - * more can be read from pb. The result is guaranteed to be NULL-terminated, it - * will be truncated if buf is too small. - * Note that the string is not interpreted or validated in any way, it - * might get truncated in the middle of a sequence for multi-byte encodings. - * - * @return number of bytes read (is always <= maxlen). - * If reading ends on EOF or error, the return value will be one more than - * bytes actually read. - */ -int avio_get_str(AVIOContext *pb, int maxlen, char *buf, int buflen); - -/** - * Read a UTF-16 string from pb and convert it to UTF-8. - * The reading will terminate when either a null or invalid character was - * encountered or maxlen bytes have been read. - * @return number of bytes read (is always <= maxlen) - */ -int avio_get_str16le(AVIOContext *pb, int maxlen, char *buf, int buflen); -int avio_get_str16be(AVIOContext *pb, int maxlen, char *buf, int buflen); - - -/** - * @name URL open modes - * The flags argument to avio_open must be one of the following - * constants, optionally ORed with other flags. - * @{ - */ -#define AVIO_FLAG_READ 1 /**< read-only */ -#define AVIO_FLAG_WRITE 2 /**< write-only */ -#define AVIO_FLAG_READ_WRITE (AVIO_FLAG_READ|AVIO_FLAG_WRITE) /**< read-write pseudo flag */ -/** - * @} - */ - -/** - * Use non-blocking mode. - * If this flag is set, operations on the context will return - * AVERROR(EAGAIN) if they can not be performed immediately. - * If this flag is not set, operations on the context will never return - * AVERROR(EAGAIN). - * Note that this flag does not affect the opening/connecting of the - * context. Connecting a protocol will always block if necessary (e.g. on - * network protocols) but never hang (e.g. on busy devices). - * Warning: non-blocking protocols is work-in-progress; this flag may be - * silently ignored. - */ -#define AVIO_FLAG_NONBLOCK 8 - -/** - * Use direct mode. - * avio_read and avio_write should if possible be satisfied directly - * instead of going through a buffer, and avio_seek will always - * call the underlying seek function directly. - */ -#define AVIO_FLAG_DIRECT 0x8000 - -/** - * Create and initialize a AVIOContext for accessing the - * resource indicated by url. - * @note When the resource indicated by url has been opened in - * read+write mode, the AVIOContext can be used only for writing. - * - * @param s Used to return the pointer to the created AVIOContext. - * In case of failure the pointed to value is set to NULL. 
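The byte-order readers pair naturally with fixed-layout binary headers. A toy example (the 10-byte layout and read_toy_header are invented; because the readers return 0 at EOF, errors are checked through the context itself):

    #include <libavformat/avio.h>

    /* Layout: 4-byte big-endian magic, 2-byte LE version, 4-byte LE payload size. */
    static int read_toy_header(AVIOContext *pb, unsigned *version, unsigned *payload)
    {
        unsigned magic = avio_rb32(pb);
        *version = avio_rl16(pb);
        *payload = avio_rl32(pb);
        if (pb->error)
            return pb->error;
        if (url_feof(pb))
            return AVERROR_EOF;
        return magic == MKBETAG('T','O','Y','1') ? 0 : AVERROR_INVALIDDATA;
    }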
- * @param flags flags which control how the resource indicated by url - * is to be opened - * @return 0 in case of success, a negative value corresponding to an - * AVERROR code in case of failure - */ -int avio_open(AVIOContext **s, const char *url, int flags); - -/** - * Create and initialize a AVIOContext for accessing the - * resource indicated by url. - * @note When the resource indicated by url has been opened in - * read+write mode, the AVIOContext can be used only for writing. - * - * @param s Used to return the pointer to the created AVIOContext. - * In case of failure the pointed to value is set to NULL. - * @param flags flags which control how the resource indicated by url - * is to be opened - * @param int_cb an interrupt callback to be used at the protocols level - * @param options A dictionary filled with protocol-private options. On return - * this parameter will be destroyed and replaced with a dict containing options - * that were not found. May be NULL. - * @return 0 in case of success, a negative value corresponding to an - * AVERROR code in case of failure - */ -int avio_open2(AVIOContext **s, const char *url, int flags, - const AVIOInterruptCB *int_cb, AVDictionary **options); - -/** - * Close the resource accessed by the AVIOContext s and free it. - * This function can only be used if s was opened by avio_open(). - * - * The internal buffer is automatically flushed before closing the - * resource. - * - * @return 0 on success, an AVERROR < 0 on error. - * @see avio_closep - */ -int avio_close(AVIOContext *s); - -/** - * Close the resource accessed by the AVIOContext *s, free it - * and set the pointer pointing to it to NULL. - * This function can only be used if s was opened by avio_open(). - * - * The internal buffer is automatically flushed before closing the - * resource. - * - * @return 0 on success, an AVERROR < 0 on error. - * @see avio_close - */ -int avio_closep(AVIOContext **s); - - -/** - * Open a write only memory stream. - * - * @param s new IO context - * @return zero if no error. - */ -int avio_open_dyn_buf(AVIOContext **s); - -/** - * Return the written size and a pointer to the buffer. The buffer - * must be freed with av_free(). - * Padding of FF_INPUT_BUFFER_PADDING_SIZE is added to the buffer. - * - * @param s IO context - * @param pbuffer pointer to a byte buffer - * @return the length of the byte buffer - */ -int avio_close_dyn_buf(AVIOContext *s, uint8_t **pbuffer); - -/** - * Iterate through names of available protocols. - * - * @param opaque A private pointer representing current protocol. - * It must be a pointer to NULL on first iteration and will - * be updated by successive calls to avio_enum_protocols. - * @param output If set to 1, iterate over output protocols, - * otherwise over input protocols. - * - * @return A static string containing the name of current protocol or NULL - */ -const char *avio_enum_protocols(void **opaque, int output); - -/** - * Pause and resume playing - only meaningful if using a network streaming - * protocol (e.g. MMS). - * @param pause 1 for pause, 0 for resume - */ -int avio_pause(AVIOContext *h, int pause); - -/** - * Seek to a given timestamp relative to some component stream. - * Only meaningful if using a network streaming protocol (e.g. MMS.). - * @param stream_index The stream index that the timestamp is relative to. - * If stream_index is (-1) the timestamp should be in AV_TIME_BASE - * units from the beginning of the presentation. 
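The dynamic-buffer pair declared here lets a muxer-like component serialize into memory without sizing anything up front. A small sketch (build_blob is an invented name; per the comment above, the returned buffer carries FF_INPUT_BUFFER_PADDING_SIZE of padding and must be released with av_free()):

    #include <libavformat/avio.h>

    /* On success returns the number of payload bytes and stores the buffer in *out. */
    static int build_blob(uint8_t **out)
    {
        AVIOContext *dyn = NULL;
        int ret = avio_open_dyn_buf(&dyn);
        if (ret < 0)
            return ret;
        avio_wb32(dyn, 0x12345678);        /* big-endian field       */
        avio_put_str(dyn, "example");      /* string plus trailing 0 */
        return avio_close_dyn_buf(dyn, out);
    }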
- * If a stream_index >= 0 is used and the protocol does not support - * seeking based on component streams, the call will fail. - * @param timestamp timestamp in AVStream.time_base units - * or if there is no stream specified then in AV_TIME_BASE units. - * @param flags Optional combination of AVSEEK_FLAG_BACKWARD, AVSEEK_FLAG_BYTE - * and AVSEEK_FLAG_ANY. The protocol may silently ignore - * AVSEEK_FLAG_BACKWARD and AVSEEK_FLAG_ANY, but AVSEEK_FLAG_BYTE will - * fail if used and not supported. - * @return >= 0 on success - * @see AVInputFormat::read_seek - */ -int64_t avio_seek_time(AVIOContext *h, int stream_index, - int64_t timestamp, int flags); - -#endif /* AVFORMAT_AVIO_H */ diff --git a/3rdparty/include/ffmpeg_/libavformat/version.h b/3rdparty/include/ffmpeg_/libavformat/version.h deleted file mode 100644 index fd00994134..0000000000 --- a/3rdparty/include/ffmpeg_/libavformat/version.h +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Version macros. - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#ifndef AVFORMAT_VERSION_H -#define AVFORMAT_VERSION_H - -/** - * @file - * @ingroup libavf - * Libavformat version macros - */ - -#include "libavutil/avutil.h" - -#define LIBAVFORMAT_VERSION_MAJOR 55 -#define LIBAVFORMAT_VERSION_MINOR 12 -#define LIBAVFORMAT_VERSION_MICRO 100 - -#define LIBAVFORMAT_VERSION_INT AV_VERSION_INT(LIBAVFORMAT_VERSION_MAJOR, \ - LIBAVFORMAT_VERSION_MINOR, \ - LIBAVFORMAT_VERSION_MICRO) -#define LIBAVFORMAT_VERSION AV_VERSION(LIBAVFORMAT_VERSION_MAJOR, \ - LIBAVFORMAT_VERSION_MINOR, \ - LIBAVFORMAT_VERSION_MICRO) -#define LIBAVFORMAT_BUILD LIBAVFORMAT_VERSION_INT - -#define LIBAVFORMAT_IDENT "Lavf" AV_STRINGIFY(LIBAVFORMAT_VERSION) - -/** - * FF_API_* defines may be placed below to indicate public API that will be - * dropped at a future version bump. The defines themselves are not part of - * the public API and may change, break or disappear at any time. 
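The version macros are mostly consumed at compile time; for example, a project can gate a code path on the libavformat release these headers describe (HAVE_NEWISH_LAVF is a hypothetical switch):

    #include <libavformat/version.h>

    #if LIBAVFORMAT_VERSION_INT >= AV_VERSION_INT(55, 12, 100)
    #  define HAVE_NEWISH_LAVF 1
    #else
    #  define HAVE_NEWISH_LAVF 0
    #endif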
- */ - -#ifndef FF_API_OLD_AVIO -#define FF_API_OLD_AVIO (LIBAVFORMAT_VERSION_MAJOR < 55) -#endif -#ifndef FF_API_PKT_DUMP -#define FF_API_PKT_DUMP (LIBAVFORMAT_VERSION_MAJOR < 54) -#endif -#ifndef FF_API_ALLOC_OUTPUT_CONTEXT -#define FF_API_ALLOC_OUTPUT_CONTEXT (LIBAVFORMAT_VERSION_MAJOR < 56) -#endif -#ifndef FF_API_FORMAT_PARAMETERS -#define FF_API_FORMAT_PARAMETERS (LIBAVFORMAT_VERSION_MAJOR < 56) -#endif -#ifndef FF_API_NEW_STREAM -#define FF_API_NEW_STREAM (LIBAVFORMAT_VERSION_MAJOR < 56) -#endif -#ifndef FF_API_SET_PTS_INFO -#define FF_API_SET_PTS_INFO (LIBAVFORMAT_VERSION_MAJOR < 56) -#endif -#ifndef FF_API_CLOSE_INPUT_FILE -#define FF_API_CLOSE_INPUT_FILE (LIBAVFORMAT_VERSION_MAJOR < 56) -#endif -#ifndef FF_API_READ_PACKET -#define FF_API_READ_PACKET (LIBAVFORMAT_VERSION_MAJOR < 56) -#endif -#ifndef FF_API_ASS_SSA -#define FF_API_ASS_SSA (LIBAVFORMAT_VERSION_MAJOR < 56) -#endif -#ifndef FF_API_R_FRAME_RATE -#define FF_API_R_FRAME_RATE 1 -#endif -#endif /* AVFORMAT_VERSION_H */ diff --git a/3rdparty/include/ffmpeg_/libavutil/adler32.h b/3rdparty/include/ffmpeg_/libavutil/adler32.h deleted file mode 100644 index 8c08d2b882..0000000000 --- a/3rdparty/include/ffmpeg_/libavutil/adler32.h +++ /dev/null @@ -1,52 +0,0 @@ -/* - * copyright (c) 2006 Mans Rullgard - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#ifndef AVUTIL_ADLER32_H -#define AVUTIL_ADLER32_H - -#include -#include "attributes.h" - -/** - * @defgroup lavu_adler32 Adler32 - * @ingroup lavu_crypto - * @{ - */ - -/** - * Calculate the Adler32 checksum of a buffer. - * - * Passing the return value to a subsequent av_adler32_update() call - * allows the checksum of multiple buffers to be calculated as though - * they were concatenated. - * - * @param adler initial checksum value - * @param buf pointer to input buffer - * @param len size of input buffer - * @return updated checksum - */ -unsigned long av_adler32_update(unsigned long adler, const uint8_t *buf, - unsigned int len) av_pure; - -/** - * @} - */ - -#endif /* AVUTIL_ADLER32_H */ diff --git a/3rdparty/include/ffmpeg_/libavutil/aes.h b/3rdparty/include/ffmpeg_/libavutil/aes.h deleted file mode 100644 index 09efbda107..0000000000 --- a/3rdparty/include/ffmpeg_/libavutil/aes.h +++ /dev/null @@ -1,65 +0,0 @@ -/* - * copyright (c) 2007 Michael Niedermayer - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
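av_adler32_update() is written for incremental use. A sketch that checksums a buffer in two pieces (checksum_split is an invented name; seeding with 1 is the conventional Adler-32 starting value, which the header does not spell out):

    #include <stdint.h>
    #include <libavutil/adler32.h>

    static unsigned long checksum_split(const uint8_t *buf, unsigned int len)
    {
        unsigned int half = len / 2;
        unsigned long sum = av_adler32_update(1, buf, half);
        return av_adler32_update(sum, buf + half, len - half);
    }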
See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#ifndef AVUTIL_AES_H -#define AVUTIL_AES_H - -#include - -#include "attributes.h" -#include "version.h" - -/** - * @defgroup lavu_aes AES - * @ingroup lavu_crypto - * @{ - */ - -extern const int av_aes_size; - -struct AVAES; - -/** - * Allocate an AVAES context. - */ -struct AVAES *av_aes_alloc(void); - -/** - * Initialize an AVAES context. - * @param key_bits 128, 192 or 256 - * @param decrypt 0 for encryption, 1 for decryption - */ -int av_aes_init(struct AVAES *a, const uint8_t *key, int key_bits, int decrypt); - -/** - * Encrypt or decrypt a buffer using a previously initialized context. - * @param count number of 16 byte blocks - * @param dst destination array, can be equal to src - * @param src source array, can be equal to dst - * @param iv initialization vector for CBC mode, if NULL then ECB will be used - * @param decrypt 0 for encryption, 1 for decryption - */ -void av_aes_crypt(struct AVAES *a, uint8_t *dst, const uint8_t *src, int count, uint8_t *iv, int decrypt); - -/** - * @} - */ - -#endif /* AVUTIL_AES_H */ diff --git a/3rdparty/include/ffmpeg_/libavutil/attributes.h b/3rdparty/include/ffmpeg_/libavutil/attributes.h deleted file mode 100644 index 64b46f68f0..0000000000 --- a/3rdparty/include/ffmpeg_/libavutil/attributes.h +++ /dev/null @@ -1,154 +0,0 @@ -/* - * copyright (c) 2006 Michael Niedermayer - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. 
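Putting the three AES entry points together, CBC encryption of whole 16-byte blocks looks roughly like this (encrypt_cbc is an invented name; releasing the context with av_free() is an assumption, since only the allocator is documented above):

    #include <errno.h>
    #include <libavutil/aes.h>
    #include <libavutil/error.h>
    #include <libavutil/mem.h>

    /* key is 16 bytes (AES-128); iv is 16 bytes and is updated in place. */
    static int encrypt_cbc(uint8_t *dst, const uint8_t *src, int blocks,
                           const uint8_t *key, uint8_t *iv)
    {
        struct AVAES *aes = av_aes_alloc();
        if (!aes)
            return AVERROR(ENOMEM);
        av_aes_init(aes, key, 128, 0 /* encrypt */);
        av_aes_crypt(aes, dst, src, blocks, iv, 0);
        av_free(aes);      /* assumed: context comes from av_malloc() */
        return 0;
    }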
- * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -/** - * @file - * Macro definitions for various function/variable attributes - */ - -#ifndef AVUTIL_ATTRIBUTES_H -#define AVUTIL_ATTRIBUTES_H - -#ifdef __GNUC__ -# define AV_GCC_VERSION_AT_LEAST(x,y) (__GNUC__ > x || __GNUC__ == x && __GNUC_MINOR__ >= y) -#else -# define AV_GCC_VERSION_AT_LEAST(x,y) 0 -#endif - -#ifndef av_always_inline -#if AV_GCC_VERSION_AT_LEAST(3,1) -# define av_always_inline __attribute__((always_inline)) inline -#elif defined(_MSC_VER) -# define av_always_inline __forceinline -#else -# define av_always_inline inline -#endif -#endif - -#ifndef av_extern_inline -#if defined(__ICL) && __ICL >= 1210 || defined(__GNUC_STDC_INLINE__) -# define av_extern_inline extern inline -#else -# define av_extern_inline inline -#endif -#endif - -#if AV_GCC_VERSION_AT_LEAST(3,1) -# define av_noinline __attribute__((noinline)) -#else -# define av_noinline -#endif - -#if AV_GCC_VERSION_AT_LEAST(3,1) -# define av_pure __attribute__((pure)) -#else -# define av_pure -#endif - -#ifndef av_restrict -#define av_restrict restrict -#endif - -#if AV_GCC_VERSION_AT_LEAST(2,6) -# define av_const __attribute__((const)) -#else -# define av_const -#endif - -#if AV_GCC_VERSION_AT_LEAST(4,3) -# define av_cold __attribute__((cold)) -#else -# define av_cold -#endif - -#if AV_GCC_VERSION_AT_LEAST(4,1) -# define av_flatten __attribute__((flatten)) -#else -# define av_flatten -#endif - -#if AV_GCC_VERSION_AT_LEAST(3,1) -# define attribute_deprecated __attribute__((deprecated)) -#else -# define attribute_deprecated -#endif - -/** - * Disable warnings about deprecated features - * This is useful for sections of code kept for backward compatibility and - * scheduled for removal. - */ -#ifndef AV_NOWARN_DEPRECATED -#if AV_GCC_VERSION_AT_LEAST(4,6) -# define AV_NOWARN_DEPRECATED(code) \ - _Pragma("GCC diagnostic push") \ - _Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"") \ - code \ - _Pragma("GCC diagnostic pop") -#else -# define AV_NOWARN_DEPRECATED(code) code -#endif -#endif - - -#if defined(__GNUC__) -# define av_unused __attribute__((unused)) -#else -# define av_unused -#endif - -/** - * Mark a variable as used and prevent the compiler from optimizing it - * away. This is useful for variables accessed only from inline - * assembler without the compiler being aware. 
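These macros are meant to be attached to one's own declarations; a few representative placements (all function names here are invented):

    #include <libavutil/attributes.h>

    static av_always_inline int clip_uint8(int v)    /* force inlining in hot paths */
    {
        return v < 0 ? 0 : v > 255 ? 255 : v;
    }

    static av_cold void init_lookup_tables(void)     /* one-time init, kept out of hot cache */
    {
        /* fill tables ... */
    }

    static av_pure int sum3(const int *p)            /* result depends only on args/memory */
    {
        return p[0] + p[1] + p[2];
    }

    attribute_deprecated void old_entry_point(void); /* callers get a warning on GCC >= 3.1 */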
- */ -#if AV_GCC_VERSION_AT_LEAST(3,1) -# define av_used __attribute__((used)) -#else -# define av_used -#endif - -#if AV_GCC_VERSION_AT_LEAST(3,3) -# define av_alias __attribute__((may_alias)) -#else -# define av_alias -#endif - -#if defined(__GNUC__) && !defined(__INTEL_COMPILER) && !defined(__clang__) -# define av_uninit(x) x=x -#else -# define av_uninit(x) x -#endif - -#ifdef __GNUC__ -# define av_builtin_constant_p __builtin_constant_p -# define av_printf_format(fmtpos, attrpos) __attribute__((__format__(__printf__, fmtpos, attrpos))) -#else -# define av_builtin_constant_p(x) 0 -# define av_printf_format(fmtpos, attrpos) -#endif - -#if AV_GCC_VERSION_AT_LEAST(2,5) -# define av_noreturn __attribute__((noreturn)) -#else -# define av_noreturn -#endif - -#endif /* AVUTIL_ATTRIBUTES_H */ diff --git a/3rdparty/include/ffmpeg_/libavutil/audio_fifo.h b/3rdparty/include/ffmpeg_/libavutil/audio_fifo.h deleted file mode 100644 index 55a538e78f..0000000000 --- a/3rdparty/include/ffmpeg_/libavutil/audio_fifo.h +++ /dev/null @@ -1,149 +0,0 @@ -/* - * Audio FIFO - * Copyright (c) 2012 Justin Ruggles - * - * This file is part of Libav. - * - * Libav is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * Libav is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -/** - * @file - * Audio FIFO Buffer - */ - -#ifndef AVUTIL_AUDIO_FIFO_H -#define AVUTIL_AUDIO_FIFO_H - -#include "avutil.h" -#include "fifo.h" -#include "samplefmt.h" - -/** - * @addtogroup lavu_audio - * @{ - */ - -/** - * Context for an Audio FIFO Buffer. - * - * - Operates at the sample level rather than the byte level. - * - Supports multiple channels with either planar or packed sample format. - * - Automatic reallocation when writing to a full buffer. - */ -typedef struct AVAudioFifo AVAudioFifo; - -/** - * Free an AVAudioFifo. - * - * @param af AVAudioFifo to free - */ -void av_audio_fifo_free(AVAudioFifo *af); - -/** - * Allocate an AVAudioFifo. - * - * @param sample_fmt sample format - * @param channels number of channels - * @param nb_samples initial allocation size, in samples - * @return newly allocated AVAudioFifo, or NULL on error - */ -AVAudioFifo *av_audio_fifo_alloc(enum AVSampleFormat sample_fmt, int channels, - int nb_samples); - -/** - * Reallocate an AVAudioFifo. - * - * @param af AVAudioFifo to reallocate - * @param nb_samples new allocation size, in samples - * @return 0 if OK, or negative AVERROR code on failure - */ -int av_audio_fifo_realloc(AVAudioFifo *af, int nb_samples); - -/** - * Write data to an AVAudioFifo. - * - * The AVAudioFifo will be reallocated automatically if the available space - * is less than nb_samples. - * - * @see enum AVSampleFormat - * The documentation for AVSampleFormat describes the data layout. 
- * - * @param af AVAudioFifo to write to - * @param data audio data plane pointers - * @param nb_samples number of samples to write - * @return number of samples actually written, or negative AVERROR - * code on failure. If successful, the number of samples - * actually written will always be nb_samples. - */ -int av_audio_fifo_write(AVAudioFifo *af, void **data, int nb_samples); - -/** - * Read data from an AVAudioFifo. - * - * @see enum AVSampleFormat - * The documentation for AVSampleFormat describes the data layout. - * - * @param af AVAudioFifo to read from - * @param data audio data plane pointers - * @param nb_samples number of samples to read - * @return number of samples actually read, or negative AVERROR code - * on failure. The number of samples actually read will not - * be greater than nb_samples, and will only be less than - * nb_samples if av_audio_fifo_size is less than nb_samples. - */ -int av_audio_fifo_read(AVAudioFifo *af, void **data, int nb_samples); - -/** - * Drain data from an AVAudioFifo. - * - * Removes the data without reading it. - * - * @param af AVAudioFifo to drain - * @param nb_samples number of samples to drain - * @return 0 if OK, or negative AVERROR code on failure - */ -int av_audio_fifo_drain(AVAudioFifo *af, int nb_samples); - -/** - * Reset the AVAudioFifo buffer. - * - * This empties all data in the buffer. - * - * @param af AVAudioFifo to reset - */ -void av_audio_fifo_reset(AVAudioFifo *af); - -/** - * Get the current number of samples in the AVAudioFifo available for reading. - * - * @param af the AVAudioFifo to query - * @return number of samples available for reading - */ -int av_audio_fifo_size(AVAudioFifo *af); - -/** - * Get the current number of samples in the AVAudioFifo available for writing. - * - * @param af the AVAudioFifo to query - * @return number of samples available for writing - */ -int av_audio_fifo_space(AVAudioFifo *af); - -/** - * @} - */ - -#endif /* AVUTIL_AUDIO_FIFO_H */ diff --git a/3rdparty/include/ffmpeg_/libavutil/audioconvert.h b/3rdparty/include/ffmpeg_/libavutil/audioconvert.h deleted file mode 100644 index 300a67cd3d..0000000000 --- a/3rdparty/include/ffmpeg_/libavutil/audioconvert.h +++ /dev/null @@ -1,6 +0,0 @@ - -#include "version.h" - -#if FF_API_AUDIOCONVERT -#include "channel_layout.h" -#endif diff --git a/3rdparty/include/ffmpeg_/libavutil/avassert.h b/3rdparty/include/ffmpeg_/libavutil/avassert.h deleted file mode 100644 index 41f5e0eea7..0000000000 --- a/3rdparty/include/ffmpeg_/libavutil/avassert.h +++ /dev/null @@ -1,66 +0,0 @@ -/* - * copyright (c) 2010 Michael Niedermayer - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -/** - * @file - * simple assert() macros that are a bit more flexible than ISO C assert(). 
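A typical use of the audio FIFO is rebuffering arbitrarily sized input into fixed-size chunks. A sketch for packed (interleaved) stereo S16 (rebuffer and process_chunk are invented; note that packed formats use a single data plane):

    #include <stdint.h>
    #include <libavutil/audio_fifo.h>
    #include <libavutil/error.h>
    #include <libavutil/samplefmt.h>

    static int rebuffer(int16_t *in, int nb_in,
                        int (*process_chunk)(const int16_t *samples, int nb))
    {
        int16_t chunk[1024 * 2];
        void *in_planes[1]  = { in };
        void *out_planes[1] = { chunk };
        int ret;

        AVAudioFifo *fifo = av_audio_fifo_alloc(AV_SAMPLE_FMT_S16, 2, 1024);
        if (!fifo)
            return AVERROR(ENOMEM);

        if ((ret = av_audio_fifo_write(fifo, in_planes, nb_in)) < 0)
            goto end;
        while (av_audio_fifo_size(fifo) >= 1024) {
            if ((ret = av_audio_fifo_read(fifo, out_planes, 1024)) < 0 ||
                (ret = process_chunk(chunk, 1024)) < 0)
                goto end;
        }
        ret = 0;
    end:
        av_audio_fifo_free(fifo);
        return ret;
    }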
- * @author Michael Niedermayer - */ - -#ifndef AVUTIL_AVASSERT_H -#define AVUTIL_AVASSERT_H - -#include -#include "avutil.h" -#include "log.h" - -/** - * assert() equivalent, that is always enabled. - */ -#define av_assert0(cond) do { \ - if (!(cond)) { \ - av_log(NULL, AV_LOG_PANIC, "Assertion %s failed at %s:%d\n", \ - AV_STRINGIFY(cond), __FILE__, __LINE__); \ - abort(); \ - } \ -} while (0) - - -/** - * assert() equivalent, that does not lie in speed critical code. - * These asserts() thus can be enabled without fearing speedloss. - */ -#if defined(ASSERT_LEVEL) && ASSERT_LEVEL > 0 -#define av_assert1(cond) av_assert0(cond) -#else -#define av_assert1(cond) ((void)0) -#endif - - -/** - * assert() equivalent, that does lie in speed critical code. - */ -#if defined(ASSERT_LEVEL) && ASSERT_LEVEL > 1 -#define av_assert2(cond) av_assert0(cond) -#else -#define av_assert2(cond) ((void)0) -#endif - -#endif /* AVUTIL_AVASSERT_H */ diff --git a/3rdparty/include/ffmpeg_/libavutil/avconfig.h b/3rdparty/include/ffmpeg_/libavutil/avconfig.h deleted file mode 100644 index f6685b72c1..0000000000 --- a/3rdparty/include/ffmpeg_/libavutil/avconfig.h +++ /dev/null @@ -1,8 +0,0 @@ -/* Generated by ffconf */ -#ifndef AVUTIL_AVCONFIG_H -#define AVUTIL_AVCONFIG_H -#define AV_HAVE_BIGENDIAN 0 -#define AV_HAVE_FAST_UNALIGNED 1 -#define AV_HAVE_INCOMPATIBLE_LIBAV_ABI 0 -#define AV_HAVE_INCOMPATIBLE_FORK_ABI 0 -#endif /* AVUTIL_AVCONFIG_H */ diff --git a/3rdparty/include/ffmpeg_/libavutil/avstring.h b/3rdparty/include/ffmpeg_/libavutil/avstring.h deleted file mode 100644 index 438ef799eb..0000000000 --- a/3rdparty/include/ffmpeg_/libavutil/avstring.h +++ /dev/null @@ -1,302 +0,0 @@ -/* - * Copyright (c) 2007 Mans Rullgard - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#ifndef AVUTIL_AVSTRING_H -#define AVUTIL_AVSTRING_H - -#include -#include "attributes.h" - -/** - * @addtogroup lavu_string - * @{ - */ - -/** - * Return non-zero if pfx is a prefix of str. If it is, *ptr is set to - * the address of the first character in str after the prefix. - * - * @param str input string - * @param pfx prefix to test - * @param ptr updated if the prefix is matched inside str - * @return non-zero if the prefix matches, zero otherwise - */ -int av_strstart(const char *str, const char *pfx, const char **ptr); - -/** - * Return non-zero if pfx is a prefix of str independent of case. If - * it is, *ptr is set to the address of the first character in str - * after the prefix. 
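The three assert levels differ only in when they are compiled in. An illustrative placement (store_sample is invented; the -DASSERT_LEVEL values mirror the #if conditions above):

    #include <libavutil/avassert.h>

    static void store_sample(int *dst, int dst_size, int idx, int value)
    {
        av_assert0(dst);                                 /* always checked             */
        av_assert1(idx >= 0 && idx < dst_size);          /* cheap; -DASSERT_LEVEL=1    */
        av_assert2(value >= -32768 && value <= 32767);   /* hot path; -DASSERT_LEVEL=2 */
        dst[idx] = value;
    }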
- * - * @param str input string - * @param pfx prefix to test - * @param ptr updated if the prefix is matched inside str - * @return non-zero if the prefix matches, zero otherwise - */ -int av_stristart(const char *str, const char *pfx, const char **ptr); - -/** - * Locate the first case-independent occurrence in the string haystack - * of the string needle. A zero-length string needle is considered to - * match at the start of haystack. - * - * This function is a case-insensitive version of the standard strstr(). - * - * @param haystack string to search in - * @param needle string to search for - * @return pointer to the located match within haystack - * or a null pointer if no match - */ -char *av_stristr(const char *haystack, const char *needle); - -/** - * Locate the first occurrence of the string needle in the string haystack - * where not more than hay_length characters are searched. A zero-length - * string needle is considered to match at the start of haystack. - * - * This function is a length-limited version of the standard strstr(). - * - * @param haystack string to search in - * @param needle string to search for - * @param hay_length length of string to search in - * @return pointer to the located match within haystack - * or a null pointer if no match - */ -char *av_strnstr(const char *haystack, const char *needle, size_t hay_length); - -/** - * Copy the string src to dst, but no more than size - 1 bytes, and - * null-terminate dst. - * - * This function is the same as BSD strlcpy(). - * - * @param dst destination buffer - * @param src source string - * @param size size of destination buffer - * @return the length of src - * - * @warning since the return value is the length of src, src absolutely - * _must_ be a properly 0-terminated string, otherwise this will read beyond - * the end of the buffer and possibly crash. - */ -size_t av_strlcpy(char *dst, const char *src, size_t size); - -/** - * Append the string src to the string dst, but to a total length of - * no more than size - 1 bytes, and null-terminate dst. - * - * This function is similar to BSD strlcat(), but differs when - * size <= strlen(dst). - * - * @param dst destination buffer - * @param src source string - * @param size size of destination buffer - * @return the total length of src and dst - * - * @warning since the return value use the length of src and dst, these - * absolutely _must_ be a properly 0-terminated strings, otherwise this - * will read beyond the end of the buffer and possibly crash. - */ -size_t av_strlcat(char *dst, const char *src, size_t size); - -/** - * Append output to a string, according to a format. Never write out of - * the destination buffer, and always put a terminating 0 within - * the buffer. - * @param dst destination buffer (string to which the output is - * appended) - * @param size total size of the destination buffer - * @param fmt printf-compatible format string, specifying how the - * following parameters are used - * @return the length of the string that would have been generated - * if enough space had been available - */ -size_t av_strlcatf(char *dst, size_t size, const char *fmt, ...) av_printf_format(3, 4); - -/** - * Print arguments following specified format into a large enough auto - * allocated buffer. It is similar to GNU asprintf(). - * @param fmt printf-compatible format string, specifying how the - * following parameters are used. - * @return the allocated string - * @note You have to free the string yourself with av_free(). 
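The BSD-style copy/concat helpers make truncation detectable from the return value alone. A small path-joining sketch (join_path and the -1 failure code are our own choices):

    #include <libavutil/avstring.h>

    static int join_path(char *buf, size_t buf_size, const char *dir, const char *file)
    {
        size_t n;
        av_strlcpy(buf, dir, buf_size);
        av_strlcat(buf, "/", buf_size);
        n = av_strlcat(buf, file, buf_size);
        /* the return value is the length the string wanted to have, so
           n >= buf_size means the result no longer fits */
        return n < buf_size ? 0 : -1;
    }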
- */ -char *av_asprintf(const char *fmt, ...) av_printf_format(1, 2); - -/** - * Convert a number to a av_malloced string. - */ -char *av_d2str(double d); - -/** - * Unescape the given string until a non escaped terminating char, - * and return the token corresponding to the unescaped string. - * - * The normal \ and ' escaping is supported. Leading and trailing - * whitespaces are removed, unless they are escaped with '\' or are - * enclosed between ''. - * - * @param buf the buffer to parse, buf will be updated to point to the - * terminating char - * @param term a 0-terminated list of terminating chars - * @return the malloced unescaped string, which must be av_freed by - * the user, NULL in case of allocation failure - */ -char *av_get_token(const char **buf, const char *term); - -/** - * Split the string into several tokens which can be accessed by - * successive calls to av_strtok(). - * - * A token is defined as a sequence of characters not belonging to the - * set specified in delim. - * - * On the first call to av_strtok(), s should point to the string to - * parse, and the value of saveptr is ignored. In subsequent calls, s - * should be NULL, and saveptr should be unchanged since the previous - * call. - * - * This function is similar to strtok_r() defined in POSIX.1. - * - * @param s the string to parse, may be NULL - * @param delim 0-terminated list of token delimiters, must be non-NULL - * @param saveptr user-provided pointer which points to stored - * information necessary for av_strtok() to continue scanning the same - * string. saveptr is updated to point to the next character after the - * first delimiter found, or to NULL if the string was terminated - * @return the found token, or NULL when no token is found - */ -char *av_strtok(char *s, const char *delim, char **saveptr); - -/** - * Locale-independent conversion of ASCII isdigit. - */ -int av_isdigit(int c); - -/** - * Locale-independent conversion of ASCII isgraph. - */ -int av_isgraph(int c); - -/** - * Locale-independent conversion of ASCII isspace. - */ -int av_isspace(int c); - -/** - * Locale-independent conversion of ASCII characters to uppercase. - */ -static inline int av_toupper(int c) -{ - if (c >= 'a' && c <= 'z') - c ^= 0x20; - return c; -} - -/** - * Locale-independent conversion of ASCII characters to lowercase. - */ -static inline int av_tolower(int c) -{ - if (c >= 'A' && c <= 'Z') - c ^= 0x20; - return c; -} - -/** - * Locale-independent conversion of ASCII isxdigit. - */ -int av_isxdigit(int c); - -/** - * Locale-independent case-insensitive compare. - * @note This means only ASCII-range characters are case-insensitive - */ -int av_strcasecmp(const char *a, const char *b); - -/** - * Locale-independent case-insensitive compare. - * @note This means only ASCII-range characters are case-insensitive - */ -int av_strncasecmp(const char *a, const char *b, size_t n); - - -/** - * Thread safe basename. - * @param path the path, on DOS both \ and / are considered separators. - * @return pointer to the basename substring. - */ -const char *av_basename(const char *path); - -/** - * Thread safe dirname. - * @param path the path, on DOS both \ and / are considered separators. - * @return the path with the separator replaced by the string terminator or ".". - * @note the function may change the input string. - */ -const char *av_dirname(char *path); - -enum AVEscapeMode { - AV_ESCAPE_MODE_AUTO, ///< Use auto-selected escaping mode. - AV_ESCAPE_MODE_BACKSLASH, ///< Use backslash escaping. 
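av_strtok() mirrors strtok_r(), including the fact that it writes into its input, so work on a copy. A sketch that walks a comma-separated list (list_tokens is an invented name):

    #include <libavutil/avstring.h>
    #include <libavutil/log.h>
    #include <libavutil/mem.h>

    static void list_tokens(const char *csv)     /* e.g. "mp4,mov,mkv" */
    {
        char *saveptr = NULL, *tok;
        char *dup = av_strdup(csv);
        if (!dup)
            return;
        for (tok = av_strtok(dup, ",", &saveptr); tok;
             tok = av_strtok(NULL, ",", &saveptr))
            av_log(NULL, AV_LOG_INFO, "token: %s\n", tok);
        av_free(dup);
    }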
- AV_ESCAPE_MODE_QUOTE, ///< Use single-quote escaping. -}; - -/** - * Consider spaces special and escape them even in the middle of the - * string. - * - * This is equivalent to adding the whitespace characters to the special - * characters lists, except it is guaranteed to use the exact same list - * of whitespace characters as the rest of libavutil. - */ -#define AV_ESCAPE_FLAG_WHITESPACE 0x01 - -/** - * Escape only specified special characters. - * Without this flag, escape also any characters that may be considered - * special by av_get_token(), such as the single quote. - */ -#define AV_ESCAPE_FLAG_STRICT 0x02 - -/** - * Escape string in src, and put the escaped string in an allocated - * string in *dst, which must be freed with av_free(). - * - * @param dst pointer where an allocated string is put - * @param src string to escape, must be non-NULL - * @param special_chars string containing the special characters which - * need to be escaped, can be NULL - * @param mode escape mode to employ, see AV_ESCAPE_MODE_* macros. - * Any unknown value for mode will be considered equivalent to - * AV_ESCAPE_MODE_BACKSLASH, but this behaviour can change without - * notice. - * @param flags flags which control how to escape, see AV_ESCAPE_FLAG_ macros - * @return the length of the allocated string, or a negative error code in case of error - * @see av_bprint_escape() - */ -int av_escape(char **dst, const char *src, const char *special_chars, - enum AVEscapeMode mode, int flags); - -/** - * @} - */ - -#endif /* AVUTIL_AVSTRING_H */ diff --git a/3rdparty/include/ffmpeg_/libavutil/avutil.h b/3rdparty/include/ffmpeg_/libavutil/avutil.h deleted file mode 100644 index 4986f4f9ef..0000000000 --- a/3rdparty/include/ffmpeg_/libavutil/avutil.h +++ /dev/null @@ -1,314 +0,0 @@ -/* - * copyright (c) 2006 Michael Niedermayer - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#ifndef AVUTIL_AVUTIL_H -#define AVUTIL_AVUTIL_H - -/** - * @file - * external API header - */ - -/** - * @mainpage - * - * @section ffmpeg_intro Introduction - * - * This document describes the usage of the different libraries - * provided by FFmpeg. 
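av_escape() allocates its output, so the caller only frees it. One plausible combination of mode and flags (quote_arg is an invented name; AV_ESCAPE_MODE_BACKSLASH plus AV_ESCAPE_FLAG_WHITESPACE is just an example choice):

    #include <libavutil/avstring.h>
    #include <libavutil/mem.h>

    static char *quote_arg(const char *arg)
    {
        char *escaped = NULL;
        if (av_escape(&escaped, arg, NULL, AV_ESCAPE_MODE_BACKSLASH,
                      AV_ESCAPE_FLAG_WHITESPACE) < 0)
            return NULL;
        return escaped;   /* release with av_free() */
    }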
- * - * @li @ref libavc "libavcodec" encoding/decoding library - * @li @ref lavfi "libavfilter" graph-based frame editing library - * @li @ref libavf "libavformat" I/O and muxing/demuxing library - * @li @ref lavd "libavdevice" special devices muxing/demuxing library - * @li @ref lavu "libavutil" common utility library - * @li @ref lswr "libswresample" audio resampling, format conversion and mixing - * @li @ref lpp "libpostproc" post processing library - * @li @ref lsws "libswscale" color conversion and scaling library - * - * @section ffmpeg_versioning Versioning and compatibility - * - * Each of the FFmpeg libraries contains a version.h header, which defines a - * major, minor and micro version number with the - * LIBRARYNAME_VERSION_{MAJOR,MINOR,MICRO} macros. The major version - * number is incremented with backward incompatible changes - e.g. removing - * parts of the public API, reordering public struct members, etc. The minor - * version number is incremented for backward compatible API changes or major - * new features - e.g. adding a new public function or a new decoder. The micro - * version number is incremented for smaller changes that a calling program - * might still want to check for - e.g. changing behavior in a previously - * unspecified situation. - * - * FFmpeg guarantees backward API and ABI compatibility for each library as long - * as its major version number is unchanged. This means that no public symbols - * will be removed or renamed. Types and names of the public struct members and - * values of public macros and enums will remain the same (unless they were - * explicitly declared as not part of the public API). Documented behavior will - * not change. - * - * In other words, any correct program that works with a given FFmpeg snapshot - * should work just as well without any changes with any later snapshot with the - * same major versions. This applies to both rebuilding the program against new - * FFmpeg versions or to replacing the dynamic FFmpeg libraries that a program - * links against. - * - * However, new public symbols may be added and new members may be appended to - * public structs whose size is not part of public ABI (most public structs in - * FFmpeg). New macros and enum values may be added. Behavior in undocumented - * situations may change slightly (and be documented). All those are accompanied - * by an entry in doc/APIchanges and incrementing either the minor or micro - * version number. - */ - -/** - * @defgroup lavu Common utility functions - * - * @brief - * libavutil contains the code shared across all the other FFmpeg - * libraries - * - * @note In order to use the functions provided by avutil you must include - * the specific header. - * - * @{ - * - * @defgroup lavu_crypto Crypto and Hashing - * - * @{ - * @} - * - * @defgroup lavu_math Maths - * @{ - * - * @} - * - * @defgroup lavu_string String Manipulation - * - * @{ - * - * @} - * - * @defgroup lavu_mem Memory Management - * - * @{ - * - * @} - * - * @defgroup lavu_data Data Structures - * @{ - * - * @} - * - * @defgroup lavu_audio Audio related - * - * @{ - * - * @} - * - * @defgroup lavu_error Error Codes - * - * @{ - * - * @} - * - * @defgroup lavu_misc Other - * - * @{ - * - * @defgroup lavu_internal Internal - * - * Not exported functions, for internal usage only - * - * @{ - * - * @} - */ - - -/** - * @addtogroup lavu_ver - * @{ - */ - -/** - * Return the LIBAVUTIL_VERSION_INT constant. - */ -unsigned avutil_version(void); - -/** - * Return the libavutil build-time configuration. 
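Because compatibility is promised per major version, a program can sanity-check the library it actually loaded against the headers it was built with (check_lavu_version is a hypothetical helper; the shift follows the major<<16 | minor<<8 | micro packing of AV_VERSION_INT):

    #include <libavutil/avutil.h>
    #include <libavutil/log.h>

    static void check_lavu_version(void)
    {
        unsigned run = avutil_version();
        unsigned hdr = LIBAVUTIL_VERSION_INT;
        if ((run >> 16) != (hdr >> 16))
            av_log(NULL, AV_LOG_WARNING,
                   "libavutil major version mismatch: headers %u, runtime %u\n",
                   hdr >> 16, run >> 16);
    }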
- */ -const char *avutil_configuration(void); - -/** - * Return the libavutil license. - */ -const char *avutil_license(void); - -/** - * @} - */ - -/** - * @addtogroup lavu_media Media Type - * @brief Media Type - */ - -enum AVMediaType { - AVMEDIA_TYPE_UNKNOWN = -1, ///< Usually treated as AVMEDIA_TYPE_DATA - AVMEDIA_TYPE_VIDEO, - AVMEDIA_TYPE_AUDIO, - AVMEDIA_TYPE_DATA, ///< Opaque data information usually continuous - AVMEDIA_TYPE_SUBTITLE, - AVMEDIA_TYPE_ATTACHMENT, ///< Opaque data information usually sparse - AVMEDIA_TYPE_NB -}; - -/** - * Return a string describing the media_type enum, NULL if media_type - * is unknown. - */ -const char *av_get_media_type_string(enum AVMediaType media_type); - -/** - * @defgroup lavu_const Constants - * @{ - * - * @defgroup lavu_enc Encoding specific - * - * @note those definition should move to avcodec - * @{ - */ - -#define FF_LAMBDA_SHIFT 7 -#define FF_LAMBDA_SCALE (1< - -/** - * @defgroup lavu_base64 Base64 - * @ingroup lavu_crypto - * @{ - */ - - -/** - * Decode a base64-encoded string. - * - * @param out buffer for decoded data - * @param in null-terminated input string - * @param out_size size in bytes of the out buffer, must be at - * least 3/4 of the length of in - * @return number of bytes written, or a negative value in case of - * invalid input - */ -int av_base64_decode(uint8_t *out, const char *in, int out_size); - -/** - * Encode data to base64 and null-terminate. - * - * @param out buffer for encoded data - * @param out_size size in bytes of the out buffer (including the - * null terminator), must be at least AV_BASE64_SIZE(in_size) - * @param in input buffer containing the data to encode - * @param in_size size in bytes of the in buffer - * @return out or NULL in case of error - */ -char *av_base64_encode(char *out, int out_size, const uint8_t *in, int in_size); - -/** - * Calculate the output size needed to base64-encode x bytes to a - * null-terminated string. - */ -#define AV_BASE64_SIZE(x) (((x)+2) / 3 * 4 + 1) - - /** - * @} - */ - -#endif /* AVUTIL_BASE64_H */ diff --git a/3rdparty/include/ffmpeg_/libavutil/blowfish.h b/3rdparty/include/ffmpeg_/libavutil/blowfish.h deleted file mode 100644 index 0b004532de..0000000000 --- a/3rdparty/include/ffmpeg_/libavutil/blowfish.h +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Blowfish algorithm - * Copyright (c) 2012 Samuel Pitoiset - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#ifndef AVUTIL_BLOWFISH_H -#define AVUTIL_BLOWFISH_H - -#include - -/** - * @defgroup lavu_blowfish Blowfish - * @ingroup lavu_crypto - * @{ - */ - -#define AV_BF_ROUNDS 16 - -typedef struct AVBlowfish { - uint32_t p[AV_BF_ROUNDS + 2]; - uint32_t s[4][256]; -} AVBlowfish; - -/** - * Initialize an AVBlowfish context. 
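The base64 helpers and the AV_BASE64_SIZE() macro fit together as follows (roundtrip is a made-up self-test):

    #include <stdint.h>
    #include <libavutil/base64.h>

    static int roundtrip(void)
    {
        const uint8_t raw[] = { 0xde, 0xad, 0xbe, 0xef };
        char    b64[AV_BASE64_SIZE(sizeof(raw))];   /* worst case, incl. the NUL */
        uint8_t back[sizeof(raw)];

        if (!av_base64_encode(b64, sizeof(b64), raw, sizeof(raw)))
            return -1;
        return av_base64_decode(back, b64, sizeof(back)) == (int)sizeof(raw) ? 0 : -1;
    }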
- * - * @param ctx an AVBlowfish context - * @param key a key - * @param key_len length of the key - */ -void av_blowfish_init(struct AVBlowfish *ctx, const uint8_t *key, int key_len); - -/** - * Encrypt or decrypt a buffer using a previously initialized context. - * - * @param ctx an AVBlowfish context - * @param xl left four bytes halves of input to be encrypted - * @param xr right four bytes halves of input to be encrypted - * @param decrypt 0 for encryption, 1 for decryption - */ -void av_blowfish_crypt_ecb(struct AVBlowfish *ctx, uint32_t *xl, uint32_t *xr, - int decrypt); - -/** - * Encrypt or decrypt a buffer using a previously initialized context. - * - * @param ctx an AVBlowfish context - * @param dst destination array, can be equal to src - * @param src source array, can be equal to dst - * @param count number of 8 byte blocks - * @param iv initialization vector for CBC mode, if NULL ECB will be used - * @param decrypt 0 for encryption, 1 for decryption - */ -void av_blowfish_crypt(struct AVBlowfish *ctx, uint8_t *dst, const uint8_t *src, - int count, uint8_t *iv, int decrypt); - -/** - * @} - */ - -#endif /* AVUTIL_BLOWFISH_H */ diff --git a/3rdparty/include/ffmpeg_/libavutil/bprint.h b/3rdparty/include/ffmpeg_/libavutil/bprint.h deleted file mode 100644 index dc86f12415..0000000000 --- a/3rdparty/include/ffmpeg_/libavutil/bprint.h +++ /dev/null @@ -1,200 +0,0 @@ -/* - * Copyright (c) 2012 Nicolas George - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#ifndef AVUTIL_BPRINT_H -#define AVUTIL_BPRINT_H - -#include "attributes.h" -#include "avstring.h" - -/** - * Define a structure with extra padding to a fixed size - * This helps ensuring binary compatibility with future versions. - */ -#define FF_PAD_STRUCTURE(size, ...) \ - __VA_ARGS__ \ - char reserved_padding[size - sizeof(struct { __VA_ARGS__ })]; - -/** - * Buffer to print data progressively - * - * The string buffer grows as necessary and is always 0-terminated. - * The content of the string is never accessed, and thus is - * encoding-agnostic and can even hold binary data. - * - * Small buffers are kept in the structure itself, and thus require no - * memory allocation at all (unless the contents of the buffer is needed - * after the structure goes out of scope). This is almost as lightweight as - * declaring a local "char buf[512]". - * - * The length of the string can go beyond the allocated size: the buffer is - * then truncated, but the functions still keep account of the actual total - * length. - * - * In other words, buf->len can be greater than buf->size and records the - * total length of what would have been to the buffer if there had been - * enough memory. 
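With struct AVBlowfish fully visible in the header, the context can simply live on the stack. A small ECB encrypt/decrypt self-test (blowfish_selftest and the key bytes are arbitrary):

    #include <stdint.h>
    #include <libavutil/blowfish.h>

    static int blowfish_selftest(void)
    {
        AVBlowfish bf;
        uint32_t xl = 0x01234567, xr = 0x89abcdef;
        const uint32_t orig_xl = xl, orig_xr = xr;
        static const uint8_t key[] = "0123456789abcdef";

        av_blowfish_init(&bf, key, sizeof(key) - 1);
        av_blowfish_crypt_ecb(&bf, &xl, &xr, 0);   /* encrypt in place */
        av_blowfish_crypt_ecb(&bf, &xl, &xr, 1);   /* decrypt in place */
        return (xl == orig_xl && xr == orig_xr) ? 0 : -1;
    }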
- * - * Append operations do not need to be tested for failure: if a memory - * allocation fails, data stop being appended to the buffer, but the length - * is still updated. This situation can be tested with - * av_bprint_is_complete(). - * - * The size_max field determines several possible behaviours: - * - * size_max = -1 (= UINT_MAX) or any large value will let the buffer be - * reallocated as necessary, with an amortized linear cost. - * - * size_max = 0 prevents writing anything to the buffer: only the total - * length is computed. The write operations can then possibly be repeated in - * a buffer with exactly the necessary size - * (using size_init = size_max = len + 1). - * - * size_max = 1 is automatically replaced by the exact size available in the - * structure itself, thus ensuring no dynamic memory allocation. The - * internal buffer is large enough to hold a reasonable paragraph of text, - * such as the current paragraph. - */ -typedef struct AVBPrint { - FF_PAD_STRUCTURE(1024, - char *str; /**< string so far */ - unsigned len; /**< length so far */ - unsigned size; /**< allocated memory */ - unsigned size_max; /**< maximum allocated memory */ - char reserved_internal_buffer[1]; - ) -} AVBPrint; - -/** - * Convenience macros for special values for av_bprint_init() size_max - * parameter. - */ -#define AV_BPRINT_SIZE_UNLIMITED ((unsigned)-1) -#define AV_BPRINT_SIZE_AUTOMATIC 1 -#define AV_BPRINT_SIZE_COUNT_ONLY 0 - -/** - * Init a print buffer. - * - * @param buf buffer to init - * @param size_init initial size (including the final 0) - * @param size_max maximum size; - * 0 means do not write anything, just count the length; - * 1 is replaced by the maximum value for automatic storage; - * any large value means that the internal buffer will be - * reallocated as needed up to that limit; -1 is converted to - * UINT_MAX, the largest limit possible. - * Check also AV_BPRINT_SIZE_* macros. - */ -void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max); - -/** - * Init a print buffer using a pre-existing buffer. - * - * The buffer will not be reallocated. - * - * @param buf buffer structure to init - * @param buffer byte buffer to use for the string data - * @param size size of buffer - */ -void av_bprint_init_for_buffer(AVBPrint *buf, char *buffer, unsigned size); - -/** - * Append a formatted string to a print buffer. - */ -void av_bprintf(AVBPrint *buf, const char *fmt, ...) av_printf_format(2, 3); - -/** - * Append char c n times to a print buffer. - */ -void av_bprint_chars(AVBPrint *buf, char c, unsigned n); - -struct tm; -/** - * Append a formatted date and time to a print buffer. - * - * param buf bprint buffer to use - * param fmt date and time format string, see strftime() - * param tm broken-down time structure to translate - * - * @note due to poor design of the standard strftime function, it may - * produce poor results if the format string expands to a very long text and - * the bprint buffer is near the limit stated by the size_max option. - */ -void av_bprint_strftime(AVBPrint *buf, const char *fmt, const struct tm *tm); - -/** - * Allocate bytes in the buffer for external use. 
- * - * @param[in] buf buffer structure - * @param[in] size required size - * @param[out] mem pointer to the memory area - * @param[out] actual_size size of the memory area after allocation; - * can be larger or smaller than size - */ -void av_bprint_get_buffer(AVBPrint *buf, unsigned size, - unsigned char **mem, unsigned *actual_size); - -/** - * Reset the string to "" but keep internal allocated data. - */ -void av_bprint_clear(AVBPrint *buf); - -/** - * Test if the print buffer is complete (not truncated). - * - * It may have been truncated due to a memory allocation failure - * or the size_max limit (compare size and size_max if necessary). - */ -static inline int av_bprint_is_complete(AVBPrint *buf) -{ - return buf->len < buf->size; -} - -/** - * Finalize a print buffer. - * - * The print buffer can no longer be used afterwards, - * but the len and size fields are still valid. - * - * @arg[out] ret_str if not NULL, used to return a permanent copy of the - * buffer contents, or NULL if memory allocation fails; - * if NULL, the buffer is discarded and freed - * @return 0 for success or error code (probably AVERROR(ENOMEM)) - */ -int av_bprint_finalize(AVBPrint *buf, char **ret_str); - -/** - * Escape the content in src and append it to dstbuf. - * - * @param dstbuf already inited destination bprint buffer - * @param src string containing the text to escape - * @param special_chars string containing the special characters which - * need to be escaped, can be NULL - * @param mode escape mode to employ, see AV_ESCAPE_MODE_* macros. - * Any unknown value for mode will be considered equivalent to - * AV_ESCAPE_MODE_BACKSLASH, but this behaviour can change without - * notice. - * @param flags flags which control how to escape, see AV_ESCAPE_FLAG_* macros - */ -void av_bprint_escape(AVBPrint *dstbuf, const char *src, const char *special_chars, - enum AVEscapeMode mode, int flags); - -#endif /* AVUTIL_BPRINT_H */ diff --git a/3rdparty/include/ffmpeg_/libavutil/bswap.h b/3rdparty/include/ffmpeg_/libavutil/bswap.h deleted file mode 100644 index 06f654816d..0000000000 --- a/3rdparty/include/ffmpeg_/libavutil/bswap.h +++ /dev/null @@ -1,109 +0,0 @@ -/* - * copyright (c) 2006 Michael Niedermayer - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. 
- * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -/** - * @file - * byte swapping routines - */ - -#ifndef AVUTIL_BSWAP_H -#define AVUTIL_BSWAP_H - -#include -#include "libavutil/avconfig.h" -#include "attributes.h" - -#ifdef HAVE_AV_CONFIG_H - -#include "config.h" - -#if ARCH_ARM -# include "arm/bswap.h" -#elif ARCH_AVR32 -# include "avr32/bswap.h" -#elif ARCH_BFIN -# include "bfin/bswap.h" -#elif ARCH_SH4 -# include "sh4/bswap.h" -#elif ARCH_X86 -# include "x86/bswap.h" -#endif - -#endif /* HAVE_AV_CONFIG_H */ - -#define AV_BSWAP16C(x) (((x) << 8 & 0xff00) | ((x) >> 8 & 0x00ff)) -#define AV_BSWAP32C(x) (AV_BSWAP16C(x) << 16 | AV_BSWAP16C((x) >> 16)) -#define AV_BSWAP64C(x) (AV_BSWAP32C(x) << 32 | AV_BSWAP32C((x) >> 32)) - -#define AV_BSWAPC(s, x) AV_BSWAP##s##C(x) - -#ifndef av_bswap16 -static av_always_inline av_const uint16_t av_bswap16(uint16_t x) -{ - x= (x>>8) | (x<<8); - return x; -} -#endif - -#ifndef av_bswap32 -static av_always_inline av_const uint32_t av_bswap32(uint32_t x) -{ - return AV_BSWAP32C(x); -} -#endif - -#ifndef av_bswap64 -static inline uint64_t av_const av_bswap64(uint64_t x) -{ - return (uint64_t)av_bswap32(x) << 32 | av_bswap32(x >> 32); -} -#endif - -// be2ne ... big-endian to native-endian -// le2ne ... little-endian to native-endian - -#if AV_HAVE_BIGENDIAN -#define av_be2ne16(x) (x) -#define av_be2ne32(x) (x) -#define av_be2ne64(x) (x) -#define av_le2ne16(x) av_bswap16(x) -#define av_le2ne32(x) av_bswap32(x) -#define av_le2ne64(x) av_bswap64(x) -#define AV_BE2NEC(s, x) (x) -#define AV_LE2NEC(s, x) AV_BSWAPC(s, x) -#else -#define av_be2ne16(x) av_bswap16(x) -#define av_be2ne32(x) av_bswap32(x) -#define av_be2ne64(x) av_bswap64(x) -#define av_le2ne16(x) (x) -#define av_le2ne32(x) (x) -#define av_le2ne64(x) (x) -#define AV_BE2NEC(s, x) AV_BSWAPC(s, x) -#define AV_LE2NEC(s, x) (x) -#endif - -#define AV_BE2NE16C(x) AV_BE2NEC(16, x) -#define AV_BE2NE32C(x) AV_BE2NEC(32, x) -#define AV_BE2NE64C(x) AV_BE2NEC(64, x) -#define AV_LE2NE16C(x) AV_LE2NEC(16, x) -#define AV_LE2NE32C(x) AV_LE2NEC(32, x) -#define AV_LE2NE64C(x) AV_LE2NEC(64, x) - -#endif /* AVUTIL_BSWAP_H */ diff --git a/3rdparty/include/ffmpeg_/libavutil/buffer.h b/3rdparty/include/ffmpeg_/libavutil/buffer.h deleted file mode 100644 index b4399fd39f..0000000000 --- a/3rdparty/include/ffmpeg_/libavutil/buffer.h +++ /dev/null @@ -1,274 +0,0 @@ -/* - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. 
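/* A short sketch of the byte-swapping helpers above; the constants are
   arbitrary test values: */
#include <stdint.h>
#include <libavutil/bswap.h>

static void bswap_sketch(void)
{
    uint32_t v  = av_bswap32(0x11223344u);           /* 0x44332211 */
    uint16_t le = av_le2ne16(0x0102);                /* no-op on little-endian hosts */
    uint64_t be = av_be2ne64(0x0102030405060708ULL); /* swapped on little-endian hosts */
    (void)v; (void)le; (void)be;
}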
- * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -/** - * @file - * @ingroup lavu_buffer - * refcounted data buffer API - */ - -#ifndef AVUTIL_BUFFER_H -#define AVUTIL_BUFFER_H - -#include - -/** - * @defgroup lavu_buffer AVBuffer - * @ingroup lavu_data - * - * @{ - * AVBuffer is an API for reference-counted data buffers. - * - * There are two core objects in this API -- AVBuffer and AVBufferRef. AVBuffer - * represents the data buffer itself; it is opaque and not meant to be accessed - * by the caller directly, but only through AVBufferRef. However, the caller may - * e.g. compare two AVBuffer pointers to check whether two different references - * are describing the same data buffer. AVBufferRef represents a single - * reference to an AVBuffer and it is the object that may be manipulated by the - * caller directly. - * - * There are two functions provided for creating a new AVBuffer with a single - * reference -- av_buffer_alloc() to just allocate a new buffer, and - * av_buffer_create() to wrap an existing array in an AVBuffer. From an existing - * reference, additional references may be created with av_buffer_ref(). - * Use av_buffer_unref() to free a reference (this will automatically free the - * data once all the references are freed). - * - * The convention throughout this API and the rest of FFmpeg is such that the - * buffer is considered writable if there exists only one reference to it (and - * it has not been marked as read-only). The av_buffer_is_writable() function is - * provided to check whether this is true and av_buffer_make_writable() will - * automatically create a new writable buffer when necessary. - * Of course nothing prevents the calling code from violating this convention, - * however that is safe only when all the existing references are under its - * control. - * - * @note Referencing and unreferencing the buffers is thread-safe and thus - * may be done from multiple threads simultaneously without any need for - * additional locking. - * - * @note Two different references to the same buffer can point to different - * parts of the buffer (i.e. their AVBufferRef.data will not be equal). - */ - -/** - * A reference counted buffer type. It is opaque and is meant to be used through - * references (AVBufferRef). - */ -typedef struct AVBuffer AVBuffer; - -/** - * A reference to a data buffer. - * - * The size of this struct is not a part of the public ABI and it is not meant - * to be allocated directly. - */ -typedef struct AVBufferRef { - AVBuffer *buffer; - - /** - * The data buffer. It is considered writable if and only if - * this is the only reference to the buffer, in which case - * av_buffer_is_writable() returns 1. - */ - uint8_t *data; - /** - * Size of data in bytes. - */ - int size; -} AVBufferRef; - -/** - * Allocate an AVBuffer of the given size using av_malloc(). - * - * @return an AVBufferRef of given size or NULL when out of memory - */ -AVBufferRef *av_buffer_alloc(int size); - -/** - * Same as av_buffer_alloc(), except the returned buffer will be initialized - * to zero. - */ -AVBufferRef *av_buffer_allocz(int size); - -/** - * Always treat the buffer as read-only, even when it has only one - * reference. - */ -#define AV_BUFFER_FLAG_READONLY (1 << 0) - -/** - * Create an AVBuffer from an existing array. - * - * If this function is successful, data is owned by the AVBuffer. 
The caller may - * only access data through the returned AVBufferRef and references derived from - * it. - * If this function fails, data is left untouched. - * @param data data array - * @param size size of data in bytes - * @param free a callback for freeing this buffer's data - * @param opaque parameter to be got for processing or passed to free - * @param flags a combination of AV_BUFFER_FLAG_* - * - * @return an AVBufferRef referring to data on success, NULL on failure. - */ -AVBufferRef *av_buffer_create(uint8_t *data, int size, - void (*free)(void *opaque, uint8_t *data), - void *opaque, int flags); - -/** - * Default free callback, which calls av_free() on the buffer data. - * This function is meant to be passed to av_buffer_create(), not called - * directly. - */ -void av_buffer_default_free(void *opaque, uint8_t *data); - -/** - * Create a new reference to an AVBuffer. - * - * @return a new AVBufferRef referring to the same AVBuffer as buf or NULL on - * failure. - */ -AVBufferRef *av_buffer_ref(AVBufferRef *buf); - -/** - * Free a given reference and automatically free the buffer if there are no more - * references to it. - * - * @param buf the reference to be freed. The pointer is set to NULL on return. - */ -void av_buffer_unref(AVBufferRef **buf); - -/** - * @return 1 if the caller may write to the data referred to by buf (which is - * true if and only if buf is the only reference to the underlying AVBuffer). - * Return 0 otherwise. - * A positive answer is valid until av_buffer_ref() is called on buf. - */ -int av_buffer_is_writable(const AVBufferRef *buf); - -/** - * @return the opaque parameter set by av_buffer_create. - */ -void *av_buffer_get_opaque(const AVBufferRef *buf); - -int av_buffer_get_ref_count(const AVBufferRef *buf); - -/** - * Create a writable reference from a given buffer reference, avoiding data copy - * if possible. - * - * @param buf buffer reference to make writable. On success, buf is either left - * untouched, or it is unreferenced and a new writable AVBufferRef is - * written in its place. On failure, buf is left untouched. - * @return 0 on success, a negative AVERROR on failure. - */ -int av_buffer_make_writable(AVBufferRef **buf); - -/** - * Reallocate a given buffer. - * - * @param buf a buffer reference to reallocate. On success, buf will be - * unreferenced and a new reference with the required size will be - * written in its place. On failure buf will be left untouched. *buf - * may be NULL, then a new buffer is allocated. - * @param size required new buffer size. - * @return 0 on success, a negative AVERROR on failure. - * - * @note the buffer is actually reallocated with av_realloc() only if it was - * initially allocated through av_buffer_realloc(NULL) and there is only one - * reference to it (i.e. the one passed to this function). In all other cases - * a new buffer is allocated and the data is copied. - */ -int av_buffer_realloc(AVBufferRef **buf, int size); - -/** - * @} - */ - -/** - * @defgroup lavu_bufferpool AVBufferPool - * @ingroup lavu_data - * - * @{ - * AVBufferPool is an API for a lock-free thread-safe pool of AVBuffers. - * - * Frequently allocating and freeing large buffers may be slow. AVBufferPool is - * meant to solve this in cases when the caller needs a set of buffers of the - * same size (the most obvious use case being buffers for raw video or audio - * frames). - * - * At the beginning, the user must call av_buffer_pool_init() to create the - * buffer pool. 
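/* An illustrative sketch of the reference-counting flow described above;
   the 1024-byte size is an arbitrary example: */
#include <libavutil/buffer.h>

static void buffer_sketch(void)
{
    AVBufferRef *buf, *ref2;
    int writable;

    buf = av_buffer_alloc(1024);            /* one reference to a new buffer */
    if (!buf)
        return;
    ref2 = av_buffer_ref(buf);              /* second reference to the same data */
    writable = av_buffer_is_writable(buf);  /* 0 while ref2 exists */
    av_buffer_unref(&ref2);                 /* drop the extra reference */
    av_buffer_unref(&buf);                  /* last reference gone: data is freed */
    (void)writable;
}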
Then whenever a buffer is needed, call av_buffer_pool_get() to - * get a reference to a new buffer, similar to av_buffer_alloc(). This new - * reference works in all aspects the same way as the one created by - * av_buffer_alloc(). However, when the last reference to this buffer is - * unreferenced, it is returned to the pool instead of being freed and will be - * reused for subsequent av_buffer_pool_get() calls. - * - * When the caller is done with the pool and no longer needs to allocate any new - * buffers, av_buffer_pool_uninit() must be called to mark the pool as freeable. - * Once all the buffers are released, it will automatically be freed. - * - * Allocating and releasing buffers with this API is thread-safe as long as - * either the default alloc callback is used, or the user-supplied one is - * thread-safe. - */ - -/** - * The buffer pool. This structure is opaque and not meant to be accessed - * directly. It is allocated with av_buffer_pool_init() and freed with - * av_buffer_pool_uninit(). - */ -typedef struct AVBufferPool AVBufferPool; - -/** - * Allocate and initialize a buffer pool. - * - * @param size size of each buffer in this pool - * @param alloc a function that will be used to allocate new buffers when the - * pool is empty. May be NULL, then the default allocator will be used - * (av_buffer_alloc()). - * @return newly created buffer pool on success, NULL on error. - */ -AVBufferPool *av_buffer_pool_init(int size, AVBufferRef* (*alloc)(int size)); - -/** - * Mark the pool as being available for freeing. It will actually be freed only - * once all the allocated buffers associated with the pool are released. Thus it - * is safe to call this function while some of the allocated buffers are still - * in use. - * - * @param pool pointer to the pool to be freed. It will be set to NULL. - * @see av_buffer_pool_can_uninit() - */ -void av_buffer_pool_uninit(AVBufferPool **pool); - -/** - * Allocate a new AVBuffer, reusing an old buffer from the pool when available. - * This function may be called simultaneously from multiple threads. - * - * @return a reference to the new buffer on success, NULL on error. - */ -AVBufferRef *av_buffer_pool_get(AVBufferPool *pool); - -/** - * @} - */ - -#endif /* AVUTIL_BUFFER_H */ diff --git a/3rdparty/include/ffmpeg_/libavutil/channel_layout.h b/3rdparty/include/ffmpeg_/libavutil/channel_layout.h deleted file mode 100644 index 2906098313..0000000000 --- a/3rdparty/include/ffmpeg_/libavutil/channel_layout.h +++ /dev/null @@ -1,216 +0,0 @@ -/* - * Copyright (c) 2006 Michael Niedermayer - * Copyright (c) 2008 Peter Ross - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. 
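/* A sketch of the pool lifecycle described above, using the default allocator;
   the 4096-byte buffer size is an arbitrary example: */
#include <libavutil/buffer.h>

static void buffer_pool_sketch(void)
{
    AVBufferPool *pool = av_buffer_pool_init(4096, NULL); /* NULL: av_buffer_alloc() is used */
    AVBufferRef  *buf;

    if (!pool)
        return;
    buf = av_buffer_pool_get(pool);   /* new or recycled buffer from the pool */
    if (buf) {
        /* ... fill buf->data ... */
        av_buffer_unref(&buf);        /* goes back to the pool instead of being freed */
    }
    av_buffer_pool_uninit(&pool);     /* pool is freed once all its buffers are back */
}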
- * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#ifndef AVUTIL_CHANNEL_LAYOUT_H -#define AVUTIL_CHANNEL_LAYOUT_H - -#include - -/** - * @file - * audio channel layout utility functions - */ - -/** - * @addtogroup lavu_audio - * @{ - */ - -/** - * @defgroup channel_masks Audio channel masks - * - * A channel layout is a 64-bits integer with a bit set for every channel. - * The number of bits set must be equal to the number of channels. - * The value 0 means that the channel layout is not known. - * @note this data structure is not powerful enough to handle channels - * combinations that have the same channel multiple times, such as - * dual-mono. - * - * @{ - */ -#define AV_CH_FRONT_LEFT 0x00000001 -#define AV_CH_FRONT_RIGHT 0x00000002 -#define AV_CH_FRONT_CENTER 0x00000004 -#define AV_CH_LOW_FREQUENCY 0x00000008 -#define AV_CH_BACK_LEFT 0x00000010 -#define AV_CH_BACK_RIGHT 0x00000020 -#define AV_CH_FRONT_LEFT_OF_CENTER 0x00000040 -#define AV_CH_FRONT_RIGHT_OF_CENTER 0x00000080 -#define AV_CH_BACK_CENTER 0x00000100 -#define AV_CH_SIDE_LEFT 0x00000200 -#define AV_CH_SIDE_RIGHT 0x00000400 -#define AV_CH_TOP_CENTER 0x00000800 -#define AV_CH_TOP_FRONT_LEFT 0x00001000 -#define AV_CH_TOP_FRONT_CENTER 0x00002000 -#define AV_CH_TOP_FRONT_RIGHT 0x00004000 -#define AV_CH_TOP_BACK_LEFT 0x00008000 -#define AV_CH_TOP_BACK_CENTER 0x00010000 -#define AV_CH_TOP_BACK_RIGHT 0x00020000 -#define AV_CH_STEREO_LEFT 0x20000000 ///< Stereo downmix. -#define AV_CH_STEREO_RIGHT 0x40000000 ///< See AV_CH_STEREO_LEFT. -#define AV_CH_WIDE_LEFT 0x0000000080000000ULL -#define AV_CH_WIDE_RIGHT 0x0000000100000000ULL -#define AV_CH_SURROUND_DIRECT_LEFT 0x0000000200000000ULL -#define AV_CH_SURROUND_DIRECT_RIGHT 0x0000000400000000ULL -#define AV_CH_LOW_FREQUENCY_2 0x0000000800000000ULL - -/** Channel mask value used for AVCodecContext.request_channel_layout - to indicate that the user requests the channel order of the decoder output - to be the native codec channel order. 
*/ -#define AV_CH_LAYOUT_NATIVE 0x8000000000000000ULL - -/** - * @} - * @defgroup channel_mask_c Audio channel convenience macros - * @{ - * */ -#define AV_CH_LAYOUT_MONO (AV_CH_FRONT_CENTER) -#define AV_CH_LAYOUT_STEREO (AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT) -#define AV_CH_LAYOUT_2POINT1 (AV_CH_LAYOUT_STEREO|AV_CH_LOW_FREQUENCY) -#define AV_CH_LAYOUT_2_1 (AV_CH_LAYOUT_STEREO|AV_CH_BACK_CENTER) -#define AV_CH_LAYOUT_SURROUND (AV_CH_LAYOUT_STEREO|AV_CH_FRONT_CENTER) -#define AV_CH_LAYOUT_3POINT1 (AV_CH_LAYOUT_SURROUND|AV_CH_LOW_FREQUENCY) -#define AV_CH_LAYOUT_4POINT0 (AV_CH_LAYOUT_SURROUND|AV_CH_BACK_CENTER) -#define AV_CH_LAYOUT_4POINT1 (AV_CH_LAYOUT_4POINT0|AV_CH_LOW_FREQUENCY) -#define AV_CH_LAYOUT_2_2 (AV_CH_LAYOUT_STEREO|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT) -#define AV_CH_LAYOUT_QUAD (AV_CH_LAYOUT_STEREO|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT) -#define AV_CH_LAYOUT_5POINT0 (AV_CH_LAYOUT_SURROUND|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT) -#define AV_CH_LAYOUT_5POINT1 (AV_CH_LAYOUT_5POINT0|AV_CH_LOW_FREQUENCY) -#define AV_CH_LAYOUT_5POINT0_BACK (AV_CH_LAYOUT_SURROUND|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT) -#define AV_CH_LAYOUT_5POINT1_BACK (AV_CH_LAYOUT_5POINT0_BACK|AV_CH_LOW_FREQUENCY) -#define AV_CH_LAYOUT_6POINT0 (AV_CH_LAYOUT_5POINT0|AV_CH_BACK_CENTER) -#define AV_CH_LAYOUT_6POINT0_FRONT (AV_CH_LAYOUT_2_2|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER) -#define AV_CH_LAYOUT_HEXAGONAL (AV_CH_LAYOUT_5POINT0_BACK|AV_CH_BACK_CENTER) -#define AV_CH_LAYOUT_6POINT1 (AV_CH_LAYOUT_5POINT1|AV_CH_BACK_CENTER) -#define AV_CH_LAYOUT_6POINT1_BACK (AV_CH_LAYOUT_5POINT1_BACK|AV_CH_BACK_CENTER) -#define AV_CH_LAYOUT_6POINT1_FRONT (AV_CH_LAYOUT_6POINT0_FRONT|AV_CH_LOW_FREQUENCY) -#define AV_CH_LAYOUT_7POINT0 (AV_CH_LAYOUT_5POINT0|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT) -#define AV_CH_LAYOUT_7POINT0_FRONT (AV_CH_LAYOUT_5POINT0|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER) -#define AV_CH_LAYOUT_7POINT1 (AV_CH_LAYOUT_5POINT1|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT) -#define AV_CH_LAYOUT_7POINT1_WIDE (AV_CH_LAYOUT_5POINT1|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER) -#define AV_CH_LAYOUT_7POINT1_WIDE_BACK (AV_CH_LAYOUT_5POINT1_BACK|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER) -#define AV_CH_LAYOUT_OCTAGONAL (AV_CH_LAYOUT_5POINT0|AV_CH_BACK_LEFT|AV_CH_BACK_CENTER|AV_CH_BACK_RIGHT) -#define AV_CH_LAYOUT_STEREO_DOWNMIX (AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT) - -enum AVMatrixEncoding { - AV_MATRIX_ENCODING_NONE, - AV_MATRIX_ENCODING_DOLBY, - AV_MATRIX_ENCODING_DPLII, - AV_MATRIX_ENCODING_NB -}; - -/** - * @} - */ - -/** - * Return a channel layout id that matches name, or 0 if no match is found. - * - * name can be one or several of the following notations, - * separated by '+' or '|': - * - the name of an usual channel layout (mono, stereo, 4.0, quad, 5.0, - * 5.0(side), 5.1, 5.1(side), 7.1, 7.1(wide), downmix); - * - the name of a single channel (FL, FR, FC, LFE, BL, BR, FLC, FRC, BC, - * SL, SR, TC, TFL, TFC, TFR, TBL, TBC, TBR, DL, DR); - * - a number of channels, in decimal, optionally followed by 'c', yielding - * the default channel layout for that number of channels (@see - * av_get_default_channel_layout); - * - a channel layout mask, in hexadecimal starting with "0x" (see the - * AV_CH_* macros). - * - * Example: "stereo+FC" = "2+FC" = "2c+1c" = "0x7" - */ -uint64_t av_get_channel_layout(const char *name); - -/** - * Return a description of a channel layout. - * If nb_channels is <= 0, it is guessed from the channel_layout. 
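/* A small sketch of the layout helpers above; "stereo+FC" is the example name
   given in the documentation, and av_get_channel_layout_nb_channels() is
   declared just below: */
#include <stdint.h>
#include <libavutil/channel_layout.h>

static void layout_sketch(void)
{
    uint64_t surround = av_get_channel_layout("stereo+FC");  /* 0x7, as in the example above */
    int channels = av_get_channel_layout_nb_channels(AV_CH_LAYOUT_5POINT1); /* 6 */
    (void)surround; (void)channels;
}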
- * - * @param buf put here the string containing the channel layout - * @param buf_size size in bytes of the buffer - */ -void av_get_channel_layout_string(char *buf, int buf_size, int nb_channels, uint64_t channel_layout); - -struct AVBPrint; -/** - * Append a description of a channel layout to a bprint buffer. - */ -void av_bprint_channel_layout(struct AVBPrint *bp, int nb_channels, uint64_t channel_layout); - -/** - * Return the number of channels in the channel layout. - */ -int av_get_channel_layout_nb_channels(uint64_t channel_layout); - -/** - * Return default channel layout for a given number of channels. - */ -int64_t av_get_default_channel_layout(int nb_channels); - -/** - * Get the index of a channel in channel_layout. - * - * @param channel a channel layout describing exactly one channel which must be - * present in channel_layout. - * - * @return index of channel in channel_layout on success, a negative AVERROR - * on error. - */ -int av_get_channel_layout_channel_index(uint64_t channel_layout, - uint64_t channel); - -/** - * Get the channel with the given index in channel_layout. - */ -uint64_t av_channel_layout_extract_channel(uint64_t channel_layout, int index); - -/** - * Get the name of a given channel. - * - * @return channel name on success, NULL on error. - */ -const char *av_get_channel_name(uint64_t channel); - -/** - * Get the description of a given channel. - * - * @param channel a channel layout with a single channel - * @return channel description on success, NULL on error - */ -const char *av_get_channel_description(uint64_t channel); - -/** - * Get the value and name of a standard channel layout. - * - * @param[in] index index in an internal list, starting at 0 - * @param[out] layout channel layout mask - * @param[out] name name of the layout - * @return 0 if the layout exists, - * <0 if index is beyond the limits - */ -int av_get_standard_channel_layout(unsigned index, uint64_t *layout, - const char **name); - -/** - * @} - */ - -#endif /* AVUTIL_CHANNEL_LAYOUT_H */ diff --git a/3rdparty/include/ffmpeg_/libavutil/common.h b/3rdparty/include/ffmpeg_/libavutil/common.h deleted file mode 100644 index c7c32fd363..0000000000 --- a/3rdparty/include/ffmpeg_/libavutil/common.h +++ /dev/null @@ -1,459 +0,0 @@ -/* - * copyright (c) 2006 Michael Niedermayer - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -/** - * @file - * common internal and external API header - */ - -#ifndef AVUTIL_COMMON_H -#define AVUTIL_COMMON_H - -#include -#include -#include -#include -#include -#include -#include - -#include "attributes.h" -#include "version.h" -#include "libavutil/avconfig.h" - -#if AV_HAVE_BIGENDIAN -# define AV_NE(be, le) (be) -#else -# define AV_NE(be, le) (le) -#endif - -//rounded division & shift -#define RSHIFT(a,b) ((a) > 0 ? 
((a) + ((1<<(b))>>1))>>(b) : ((a) + ((1<<(b))>>1)-1)>>(b)) -/* assume b>0 */ -#define ROUNDED_DIV(a,b) (((a)>0 ? (a) + ((b)>>1) : (a) - ((b)>>1))/(b)) -/* assume a>0 and b>0 */ -#define FF_CEIL_RSHIFT(a,b) (!av_builtin_constant_p(b) ? -((-(a)) >> (b)) \ - : ((a) + (1<<(b)) - 1) >> (b)) -#define FFUDIV(a,b) (((a)>0 ?(a):(a)-(b)+1) / (b)) -#define FFUMOD(a,b) ((a)-(b)*FFUDIV(a,b)) -#define FFABS(a) ((a) >= 0 ? (a) : (-(a))) -#define FFSIGN(a) ((a) > 0 ? 1 : -1) - -#define FFMAX(a,b) ((a) > (b) ? (a) : (b)) -#define FFMAX3(a,b,c) FFMAX(FFMAX(a,b),c) -#define FFMIN(a,b) ((a) > (b) ? (b) : (a)) -#define FFMIN3(a,b,c) FFMIN(FFMIN(a,b),c) - -#define FFSWAP(type,a,b) do{type SWAP_tmp= b; b= a; a= SWAP_tmp;}while(0) -#define FF_ARRAY_ELEMS(a) (sizeof(a) / sizeof((a)[0])) -#define FFALIGN(x, a) (((x)+(a)-1)&~((a)-1)) - -/* misc math functions */ - -/** - * Reverse the order of the bits of an 8-bits unsigned integer. - */ -#if FF_API_AV_REVERSE -extern attribute_deprecated const uint8_t av_reverse[256]; -#endif - -#ifdef HAVE_AV_CONFIG_H -# include "config.h" -# include "intmath.h" -#endif - -/* Pull in unguarded fallback defines at the end of this file. */ -#include "common.h" - -#ifndef av_log2 -av_const int av_log2(unsigned v); -#endif - -#ifndef av_log2_16bit -av_const int av_log2_16bit(unsigned v); -#endif - -/** - * Clip a signed integer value into the amin-amax range. - * @param a value to clip - * @param amin minimum value of the clip range - * @param amax maximum value of the clip range - * @return clipped value - */ -static av_always_inline av_const int av_clip_c(int a, int amin, int amax) -{ -#if defined(HAVE_AV_CONFIG_H) && defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2 - if (amin > amax) abort(); -#endif - if (a < amin) return amin; - else if (a > amax) return amax; - else return a; -} - -/** - * Clip a signed 64bit integer value into the amin-amax range. - * @param a value to clip - * @param amin minimum value of the clip range - * @param amax maximum value of the clip range - * @return clipped value - */ -static av_always_inline av_const int64_t av_clip64_c(int64_t a, int64_t amin, int64_t amax) -{ -#if defined(HAVE_AV_CONFIG_H) && defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2 - if (amin > amax) abort(); -#endif - if (a < amin) return amin; - else if (a > amax) return amax; - else return a; -} - -/** - * Clip a signed integer value into the 0-255 range. - * @param a value to clip - * @return clipped value - */ -static av_always_inline av_const uint8_t av_clip_uint8_c(int a) -{ - if (a&(~0xFF)) return (-a)>>31; - else return a; -} - -/** - * Clip a signed integer value into the -128,127 range. - * @param a value to clip - * @return clipped value - */ -static av_always_inline av_const int8_t av_clip_int8_c(int a) -{ - if ((a+0x80) & ~0xFF) return (a>>31) ^ 0x7F; - else return a; -} - -/** - * Clip a signed integer value into the 0-65535 range. - * @param a value to clip - * @return clipped value - */ -static av_always_inline av_const uint16_t av_clip_uint16_c(int a) -{ - if (a&(~0xFFFF)) return (-a)>>31; - else return a; -} - -/** - * Clip a signed integer value into the -32768,32767 range. - * @param a value to clip - * @return clipped value - */ -static av_always_inline av_const int16_t av_clip_int16_c(int a) -{ - if ((a+0x8000) & ~0xFFFF) return (a>>31) ^ 0x7FFF; - else return a; -} - -/** - * Clip a signed 64-bit integer value into the -2147483648,2147483647 range. 
- * @param a value to clip - * @return clipped value - */ -static av_always_inline av_const int32_t av_clipl_int32_c(int64_t a) -{ - if ((a+0x80000000u) & ~UINT64_C(0xFFFFFFFF)) return (a>>63) ^ 0x7FFFFFFF; - else return (int32_t)a; -} - -/** - * Clip a signed integer to an unsigned power of two range. - * @param a value to clip - * @param p bit position to clip at - * @return clipped value - */ -static av_always_inline av_const unsigned av_clip_uintp2_c(int a, int p) -{ - if (a & ~((1<> 31 & ((1<= 2 - if (amin > amax) abort(); -#endif - if (a < amin) return amin; - else if (a > amax) return amax; - else return a; -} - -/** - * Clip a double value into the amin-amax range. - * @param a value to clip - * @param amin minimum value of the clip range - * @param amax maximum value of the clip range - * @return clipped value - */ -static av_always_inline av_const double av_clipd_c(double a, double amin, double amax) -{ -#if defined(HAVE_AV_CONFIG_H) && defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2 - if (amin > amax) abort(); -#endif - if (a < amin) return amin; - else if (a > amax) return amax; - else return a; -} - -/** Compute ceil(log2(x)). - * @param x value used to compute ceil(log2(x)) - * @return computed ceiling of log2(x) - */ -static av_always_inline av_const int av_ceil_log2_c(int x) -{ - return av_log2((x - 1) << 1); -} - -/** - * Count number of bits set to one in x - * @param x value to count bits of - * @return the number of bits set to one in x - */ -static av_always_inline av_const int av_popcount_c(uint32_t x) -{ - x -= (x >> 1) & 0x55555555; - x = (x & 0x33333333) + ((x >> 2) & 0x33333333); - x = (x + (x >> 4)) & 0x0F0F0F0F; - x += x >> 8; - return (x + (x >> 16)) & 0x3F; -} - -/** - * Count number of bits set to one in x - * @param x value to count bits of - * @return the number of bits set to one in x - */ -static av_always_inline av_const int av_popcount64_c(uint64_t x) -{ - return av_popcount((uint32_t)x) + av_popcount(x >> 32); -} - -#define MKTAG(a,b,c,d) ((a) | ((b) << 8) | ((c) << 16) | ((unsigned)(d) << 24)) -#define MKBETAG(a,b,c,d) ((d) | ((c) << 8) | ((b) << 16) | ((unsigned)(a) << 24)) - -/** - * Convert a UTF-8 character (up to 4 bytes) to its 32-bit UCS-4 encoded form. - * - * @param val Output value, must be an lvalue of type uint32_t. - * @param GET_BYTE Expression reading one byte from the input. - * Evaluated up to 7 times (4 for the currently - * assigned Unicode range). With a memory buffer - * input, this could be *ptr++. - * @param ERROR Expression to be evaluated on invalid input, - * typically a goto statement. - */ -#define GET_UTF8(val, GET_BYTE, ERROR)\ - val= GET_BYTE;\ - {\ - uint32_t top = (val & 128) >> 1;\ - if ((val & 0xc0) == 0x80 || val >= 0xFE)\ - ERROR\ - while (val & top) {\ - int tmp= GET_BYTE - 128;\ - if(tmp>>6)\ - ERROR\ - val= (val<<6) + tmp;\ - top <<= 5;\ - }\ - val &= (top << 1) - 1;\ - } - -/** - * Convert a UTF-16 character (2 or 4 bytes) to its 32-bit UCS-4 encoded form. - * - * @param val Output value, must be an lvalue of type uint32_t. - * @param GET_16BIT Expression returning two bytes of UTF-16 data converted - * to native byte order. Evaluated one or two times. - * @param ERROR Expression to be evaluated on invalid input, - * typically a goto statement. 
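/* An illustrative sketch of the arithmetic helpers above; the un-suffixed
   names (av_clip, av_popcount, ...) are mapped to these *_c fallbacks by the
   defines at the end of this header, and the values are arbitrary examples: */
#include <stdint.h>
#include <libavutil/common.h>

static void common_sketch(void)
{
    int      clipped = av_clip(300, 0, 255);      /* 255 */
    uint8_t  byte    = av_clip_uint8(-7);         /* 0 */
    int      bits    = av_popcount(0xF0F0F0F0u);  /* 16 */
    unsigned tag     = MKTAG('a', 'v', 'c', '1'); /* four characters packed into 32 bits */
    (void)clipped; (void)byte; (void)bits; (void)tag;
}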
- */ -#define GET_UTF16(val, GET_16BIT, ERROR)\ - val = GET_16BIT;\ - {\ - unsigned int hi = val - 0xD800;\ - if (hi < 0x800) {\ - val = GET_16BIT - 0xDC00;\ - if (val > 0x3FFU || hi > 0x3FFU)\ - ERROR\ - val += (hi<<10) + 0x10000;\ - }\ - }\ - -/** - * @def PUT_UTF8(val, tmp, PUT_BYTE) - * Convert a 32-bit Unicode character to its UTF-8 encoded form (up to 4 bytes long). - * @param val is an input-only argument and should be of type uint32_t. It holds - * a UCS-4 encoded Unicode character that is to be converted to UTF-8. If - * val is given as a function it is executed only once. - * @param tmp is a temporary variable and should be of type uint8_t. It - * represents an intermediate value during conversion that is to be - * output by PUT_BYTE. - * @param PUT_BYTE writes the converted UTF-8 bytes to any proper destination. - * It could be a function or a statement, and uses tmp as the input byte. - * For example, PUT_BYTE could be "*output++ = tmp;" PUT_BYTE will be - * executed up to 4 times for values in the valid UTF-8 range and up to - * 7 times in the general case, depending on the length of the converted - * Unicode character. - */ -#define PUT_UTF8(val, tmp, PUT_BYTE)\ - {\ - int bytes, shift;\ - uint32_t in = val;\ - if (in < 0x80) {\ - tmp = in;\ - PUT_BYTE\ - } else {\ - bytes = (av_log2(in) + 4) / 5;\ - shift = (bytes - 1) * 6;\ - tmp = (256 - (256 >> bytes)) | (in >> shift);\ - PUT_BYTE\ - while (shift >= 6) {\ - shift -= 6;\ - tmp = 0x80 | ((in >> shift) & 0x3f);\ - PUT_BYTE\ - }\ - }\ - } - -/** - * @def PUT_UTF16(val, tmp, PUT_16BIT) - * Convert a 32-bit Unicode character to its UTF-16 encoded form (2 or 4 bytes). - * @param val is an input-only argument and should be of type uint32_t. It holds - * a UCS-4 encoded Unicode character that is to be converted to UTF-16. If - * val is given as a function it is executed only once. - * @param tmp is a temporary variable and should be of type uint16_t. It - * represents an intermediate value during conversion that is to be - * output by PUT_16BIT. - * @param PUT_16BIT writes the converted UTF-16 data to any proper destination - * in desired endianness. It could be a function or a statement, and uses tmp - * as the input byte. For example, PUT_BYTE could be "*output++ = tmp;" - * PUT_BYTE will be executed 1 or 2 times depending on input character. - */ -#define PUT_UTF16(val, tmp, PUT_16BIT)\ - {\ - uint32_t in = val;\ - if (in < 0x10000) {\ - tmp = in;\ - PUT_16BIT\ - } else {\ - tmp = 0xD800 | ((in - 0x10000) >> 10);\ - PUT_16BIT\ - tmp = 0xDC00 | ((in - 0x10000) & 0x3FF);\ - PUT_16BIT\ - }\ - }\ - - - -#include "mem.h" - -#ifdef HAVE_AV_CONFIG_H -# include "internal.h" -#endif /* HAVE_AV_CONFIG_H */ - -#endif /* AVUTIL_COMMON_H */ - -/* - * The following definitions are outside the multiple inclusion guard - * to ensure they are immediately available in intmath.h. 
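/* A sketch of PUT_UTF8() from above encoding a single code point into a byte
   buffer; U+20AC (the euro sign) is an arbitrary example value: */
#include <stdint.h>
#include <libavutil/common.h>

static int utf8_sketch(uint8_t *out)
{
    uint8_t *p = out;
    uint8_t tmp;
    PUT_UTF8(0x20AC, tmp, *p++ = tmp;)  /* writes 0xE2 0x82 0xAC */
    return (int)(p - out);              /* 3 bytes */
}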
- */ - -#ifndef av_ceil_log2 -# define av_ceil_log2 av_ceil_log2_c -#endif -#ifndef av_clip -# define av_clip av_clip_c -#endif -#ifndef av_clip64 -# define av_clip64 av_clip64_c -#endif -#ifndef av_clip_uint8 -# define av_clip_uint8 av_clip_uint8_c -#endif -#ifndef av_clip_int8 -# define av_clip_int8 av_clip_int8_c -#endif -#ifndef av_clip_uint16 -# define av_clip_uint16 av_clip_uint16_c -#endif -#ifndef av_clip_int16 -# define av_clip_int16 av_clip_int16_c -#endif -#ifndef av_clipl_int32 -# define av_clipl_int32 av_clipl_int32_c -#endif -#ifndef av_clip_uintp2 -# define av_clip_uintp2 av_clip_uintp2_c -#endif -#ifndef av_sat_add32 -# define av_sat_add32 av_sat_add32_c -#endif -#ifndef av_sat_dadd32 -# define av_sat_dadd32 av_sat_dadd32_c -#endif -#ifndef av_clipf -# define av_clipf av_clipf_c -#endif -#ifndef av_clipd -# define av_clipd av_clipd_c -#endif -#ifndef av_popcount -# define av_popcount av_popcount_c -#endif -#ifndef av_popcount64 -# define av_popcount64 av_popcount64_c -#endif diff --git a/3rdparty/include/ffmpeg_/libavutil/cpu.h b/3rdparty/include/ffmpeg_/libavutil/cpu.h deleted file mode 100644 index df8ef8728a..0000000000 --- a/3rdparty/include/ffmpeg_/libavutil/cpu.h +++ /dev/null @@ -1,113 +0,0 @@ -/* - * Copyright (c) 2000, 2001, 2002 Fabrice Bellard - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. 
- * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#ifndef AVUTIL_CPU_H -#define AVUTIL_CPU_H - -#include "attributes.h" - -#define AV_CPU_FLAG_FORCE 0x80000000 /* force usage of selected flags (OR) */ - - /* lower 16 bits - CPU features */ -#define AV_CPU_FLAG_MMX 0x0001 ///< standard MMX -#define AV_CPU_FLAG_MMXEXT 0x0002 ///< SSE integer functions or AMD MMX ext -#define AV_CPU_FLAG_MMX2 0x0002 ///< SSE integer functions or AMD MMX ext -#define AV_CPU_FLAG_3DNOW 0x0004 ///< AMD 3DNOW -#define AV_CPU_FLAG_SSE 0x0008 ///< SSE functions -#define AV_CPU_FLAG_SSE2 0x0010 ///< PIV SSE2 functions -#define AV_CPU_FLAG_SSE2SLOW 0x40000000 ///< SSE2 supported, but usually not faster -#define AV_CPU_FLAG_3DNOWEXT 0x0020 ///< AMD 3DNowExt -#define AV_CPU_FLAG_SSE3 0x0040 ///< Prescott SSE3 functions -#define AV_CPU_FLAG_SSE3SLOW 0x20000000 ///< SSE3 supported, but usually not faster -#define AV_CPU_FLAG_SSSE3 0x0080 ///< Conroe SSSE3 functions -#define AV_CPU_FLAG_ATOM 0x10000000 ///< Atom processor, some SSSE3 instructions are slower -#define AV_CPU_FLAG_SSE4 0x0100 ///< Penryn SSE4.1 functions -#define AV_CPU_FLAG_SSE42 0x0200 ///< Nehalem SSE4.2 functions -#define AV_CPU_FLAG_AVX 0x4000 ///< AVX functions: requires OS support even if YMM registers aren't used -#define AV_CPU_FLAG_XOP 0x0400 ///< Bulldozer XOP functions -#define AV_CPU_FLAG_FMA4 0x0800 ///< Bulldozer FMA4 functions -// #if LIBAVUTIL_VERSION_MAJOR <52 -#define AV_CPU_FLAG_CMOV 0x1001000 ///< supports cmov instruction -// #else -// #define AV_CPU_FLAG_CMOV 0x1000 ///< supports cmov instruction -// #endif - -#define AV_CPU_FLAG_ALTIVEC 0x0001 ///< standard - -#define AV_CPU_FLAG_ARMV5TE (1 << 0) -#define AV_CPU_FLAG_ARMV6 (1 << 1) -#define AV_CPU_FLAG_ARMV6T2 (1 << 2) -#define AV_CPU_FLAG_VFP (1 << 3) -#define AV_CPU_FLAG_VFPV3 (1 << 4) -#define AV_CPU_FLAG_NEON (1 << 5) - -/** - * Return the flags which specify extensions supported by the CPU. - * The returned value is affected by av_force_cpu_flags() if that was used - * before. So av_get_cpu_flags() can easily be used in a application to - * detect the enabled cpu flags. - */ -int av_get_cpu_flags(void); - -/** - * Disables cpu detection and forces the specified flags. - * -1 is a special case that disables forcing of specific flags. - */ -void av_force_cpu_flags(int flags); - -/** - * Set a mask on flags returned by av_get_cpu_flags(). - * This function is mainly useful for testing. - * Please use av_force_cpu_flags() and av_get_cpu_flags() instead which are more flexible - * - * @warning this function is not thread safe. - */ -attribute_deprecated void av_set_cpu_flags_mask(int mask); - -/** - * Parse CPU flags from a string. - * - * The returned flags contain the specified flags as well as related unspecified flags. - * - * This function exists only for compatibility with libav. - * Please use av_parse_cpu_caps() when possible. - * @return a combination of AV_CPU_* flags, negative on error. - */ -attribute_deprecated -int av_parse_cpu_flags(const char *s); - -/** - * Parse CPU caps from a string and update the given AV_CPU_* flags based on that. - * - * @return negative on error. - */ -int av_parse_cpu_caps(unsigned *flags, const char *s); - -/** - * @return the number of logical CPU cores present. - */ -int av_cpu_count(void); - -/* The following CPU-specific functions shall not be called directly. 
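/* A sketch of runtime CPU detection with the flags above; the SSE2 check is
   just an example of dispatching on one flag: */
#include <libavutil/cpu.h>

static void cpu_sketch(void)
{
    int flags = av_get_cpu_flags();
    int cores = av_cpu_count();
    if (flags & AV_CPU_FLAG_SSE2) {
        /* select an SSE2 code path */
    }
    (void)cores;
}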
*/ -int ff_get_cpu_flags_arm(void); -int ff_get_cpu_flags_ppc(void); -int ff_get_cpu_flags_x86(void); - -#endif /* AVUTIL_CPU_H */ diff --git a/3rdparty/include/ffmpeg_/libavutil/crc.h b/3rdparty/include/ffmpeg_/libavutil/crc.h deleted file mode 100644 index 1bb0cc76e2..0000000000 --- a/3rdparty/include/ffmpeg_/libavutil/crc.h +++ /dev/null @@ -1,84 +0,0 @@ -/* - * copyright (c) 2006 Michael Niedermayer - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#ifndef AVUTIL_CRC_H -#define AVUTIL_CRC_H - -#include -#include -#include "attributes.h" - -/** - * @defgroup lavu_crc32 CRC32 - * @ingroup lavu_crypto - * @{ - */ - -typedef uint32_t AVCRC; - -typedef enum { - AV_CRC_8_ATM, - AV_CRC_16_ANSI, - AV_CRC_16_CCITT, - AV_CRC_32_IEEE, - AV_CRC_32_IEEE_LE, /*< reversed bitorder version of AV_CRC_32_IEEE */ - AV_CRC_MAX, /*< Not part of public API! Do not use outside libavutil. */ -}AVCRCId; - -/** - * Initialize a CRC table. - * @param ctx must be an array of size sizeof(AVCRC)*257 or sizeof(AVCRC)*1024 - * @param le If 1, the lowest bit represents the coefficient for the highest - * exponent of the corresponding polynomial (both for poly and - * actual CRC). - * If 0, you must swap the CRC parameter and the result of av_crc - * if you need the standard representation (can be simplified in - * most cases to e.g. bswap16): - * av_bswap32(crc << (32-bits)) - * @param bits number of bits for the CRC - * @param poly generator polynomial without the x**bits coefficient, in the - * representation as specified by le - * @param ctx_size size of ctx in bytes - * @return <0 on failure - */ -int av_crc_init(AVCRC *ctx, int le, int bits, uint32_t poly, int ctx_size); - -/** - * Get an initialized standard CRC table. - * @param crc_id ID of a standard CRC - * @return a pointer to the CRC table or NULL on failure - */ -const AVCRC *av_crc_get_table(AVCRCId crc_id); - -/** - * Calculate the CRC of a block. - * @param crc CRC of previous blocks if any or initial value for CRC - * @return CRC updated with the data from the given block - * - * @see av_crc_init() "le" parameter - */ -uint32_t av_crc(const AVCRC *ctx, uint32_t crc, - const uint8_t *buffer, size_t length) av_pure; - -/** - * @} - */ - -#endif /* AVUTIL_CRC_H */ diff --git a/3rdparty/include/ffmpeg_/libavutil/dict.h b/3rdparty/include/ffmpeg_/libavutil/dict.h deleted file mode 100644 index 38f03a407f..0000000000 --- a/3rdparty/include/ffmpeg_/libavutil/dict.h +++ /dev/null @@ -1,152 +0,0 @@ -/* - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. 
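/* A sketch of the CRC helpers above; the choice of the AV_CRC_32_IEEE_LE table
   and the 0xFFFFFFFF start value is one common convention, not something this
   header mandates: */
#include <stddef.h>
#include <stdint.h>
#include <libavutil/crc.h>

static uint32_t crc_sketch(const uint8_t *data, size_t size)
{
    const AVCRC *table = av_crc_get_table(AV_CRC_32_IEEE_LE);
    uint32_t crc = 0xFFFFFFFFu;            /* assumed initial value */
    crc = av_crc(table, crc, data, size);  /* feed the result back in to continue over more blocks */
    return crc;
}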
- * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -/** - * @file - * Public dictionary API. - * @deprecated - * AVDictionary is provided for compatibility with libav. It is both in - * implementation as well as API inefficient. It does not scale and is - * extremely slow with large dictionaries. - * It is recommended that new code uses our tree container from tree.c/h - * where applicable, which uses AVL trees to achieve O(log n) performance. - */ - -#ifndef AVUTIL_DICT_H -#define AVUTIL_DICT_H - -/** - * @addtogroup lavu_dict AVDictionary - * @ingroup lavu_data - * - * @brief Simple key:value store - * - * @{ - * Dictionaries are used for storing key:value pairs. To create - * an AVDictionary, simply pass an address of a NULL pointer to - * av_dict_set(). NULL can be used as an empty dictionary wherever - * a pointer to an AVDictionary is required. - * Use av_dict_get() to retrieve an entry or iterate over all - * entries and finally av_dict_free() to free the dictionary - * and all its contents. - * - * @code - * AVDictionary *d = NULL; // "create" an empty dictionary - * av_dict_set(&d, "foo", "bar", 0); // add an entry - * - * char *k = av_strdup("key"); // if your strings are already allocated, - * char *v = av_strdup("value"); // you can avoid copying them like this - * av_dict_set(&d, k, v, AV_DICT_DONT_STRDUP_KEY | AV_DICT_DONT_STRDUP_VAL); - * - * AVDictionaryEntry *t = NULL; - * while (t = av_dict_get(d, "", t, AV_DICT_IGNORE_SUFFIX)) { - * <....> // iterate over all entries in d - * } - * - * av_dict_free(&d); - * @endcode - * - */ - -#define AV_DICT_MATCH_CASE 1 -#define AV_DICT_IGNORE_SUFFIX 2 -#define AV_DICT_DONT_STRDUP_KEY 4 /**< Take ownership of a key that's been - allocated with av_malloc() and children. */ -#define AV_DICT_DONT_STRDUP_VAL 8 /**< Take ownership of a value that's been - allocated with av_malloc() and chilren. */ -#define AV_DICT_DONT_OVERWRITE 16 ///< Don't overwrite existing entries. -#define AV_DICT_APPEND 32 /**< If the entry already exists, append to it. Note that no - delimiter is added, the strings are simply concatenated. */ - -typedef struct AVDictionaryEntry { - char *key; - char *value; -} AVDictionaryEntry; - -typedef struct AVDictionary AVDictionary; - -/** - * Get a dictionary entry with matching key. - * - * @param prev Set to the previous matching element to find the next. - * If set to NULL the first matching element is returned. - * @param flags Allows case as well as suffix-insensitive comparisons. - * @return Found entry or NULL, changing key or value leads to undefined behavior. - */ -AVDictionaryEntry * -av_dict_get(AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags); - -/** - * Get number of entries in dictionary. - * - * @param m dictionary - * @return number of entries in dictionary - */ -int av_dict_count(const AVDictionary *m); - -/** - * Set the given entry in *pm, overwriting an existing entry. - * - * @param pm pointer to a pointer to a dictionary struct. If *pm is NULL - * a dictionary struct is allocated and put in *pm. 
- * @param key entry key to add to *pm (will be av_strduped depending on flags) - * @param value entry value to add to *pm (will be av_strduped depending on flags). - * Passing a NULL value will cause an existing entry to be deleted. - * @return >= 0 on success otherwise an error code <0 - */ -int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags); - -/** - * Parse the key/value pairs list and add to a dictionary. - * - * @param key_val_sep a 0-terminated list of characters used to separate - * key from value - * @param pairs_sep a 0-terminated list of characters used to separate - * two pairs from each other - * @param flags flags to use when adding to dictionary. - * AV_DICT_DONT_STRDUP_KEY and AV_DICT_DONT_STRDUP_VAL - * are ignored since the key/value tokens will always - * be duplicated. - * @return 0 on success, negative AVERROR code on failure - */ -int av_dict_parse_string(AVDictionary **pm, const char *str, - const char *key_val_sep, const char *pairs_sep, - int flags); - -/** - * Copy entries from one AVDictionary struct into another. - * @param dst pointer to a pointer to a AVDictionary struct. If *dst is NULL, - * this function will allocate a struct for you and put it in *dst - * @param src pointer to source AVDictionary struct - * @param flags flags to use when setting entries in *dst - * @note metadata is read using the AV_DICT_IGNORE_SUFFIX flag - */ -void av_dict_copy(AVDictionary **dst, AVDictionary *src, int flags); - -/** - * Free all the memory allocated for an AVDictionary struct - * and all keys and values. - */ -void av_dict_free(AVDictionary **m); - -/** - * @} - */ - -#endif /* AVUTIL_DICT_H */ diff --git a/3rdparty/include/ffmpeg_/libavutil/error.h b/3rdparty/include/ffmpeg_/libavutil/error.h deleted file mode 100644 index f3fd7bbff6..0000000000 --- a/3rdparty/include/ffmpeg_/libavutil/error.h +++ /dev/null @@ -1,117 +0,0 @@ -/* - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -/** - * @file - * error code definitions - */ - -#ifndef AVUTIL_ERROR_H -#define AVUTIL_ERROR_H - -#include -#include - -/** - * @addtogroup lavu_error - * - * @{ - */ - - -/* error handling */ -#if EDOM > 0 -#define AVERROR(e) (-(e)) ///< Returns a negative error code from a POSIX error code, to return from library functions. -#define AVUNERROR(e) (-(e)) ///< Returns a POSIX error code from a library function error return value. -#else -/* Some platforms have E* and errno already negated. 
*/ -#define AVERROR(e) (e) -#define AVUNERROR(e) (e) -#endif - -#define FFERRTAG(a, b, c, d) (-(int)MKTAG(a, b, c, d)) - -#define AVERROR_BSF_NOT_FOUND FFERRTAG(0xF8,'B','S','F') ///< Bitstream filter not found -#define AVERROR_BUG FFERRTAG( 'B','U','G','!') ///< Internal bug, also see AVERROR_BUG2 -#define AVERROR_BUFFER_TOO_SMALL FFERRTAG( 'B','U','F','S') ///< Buffer too small -#define AVERROR_DECODER_NOT_FOUND FFERRTAG(0xF8,'D','E','C') ///< Decoder not found -#define AVERROR_DEMUXER_NOT_FOUND FFERRTAG(0xF8,'D','E','M') ///< Demuxer not found -#define AVERROR_ENCODER_NOT_FOUND FFERRTAG(0xF8,'E','N','C') ///< Encoder not found -#define AVERROR_EOF FFERRTAG( 'E','O','F',' ') ///< End of file -#define AVERROR_EXIT FFERRTAG( 'E','X','I','T') ///< Immediate exit was requested; the called function should not be restarted -#define AVERROR_EXTERNAL FFERRTAG( 'E','X','T',' ') ///< Generic error in an external library -#define AVERROR_FILTER_NOT_FOUND FFERRTAG(0xF8,'F','I','L') ///< Filter not found -#define AVERROR_INVALIDDATA FFERRTAG( 'I','N','D','A') ///< Invalid data found when processing input -#define AVERROR_MUXER_NOT_FOUND FFERRTAG(0xF8,'M','U','X') ///< Muxer not found -#define AVERROR_OPTION_NOT_FOUND FFERRTAG(0xF8,'O','P','T') ///< Option not found -#define AVERROR_PATCHWELCOME FFERRTAG( 'P','A','W','E') ///< Not yet implemented in FFmpeg, patches welcome -#define AVERROR_PROTOCOL_NOT_FOUND FFERRTAG(0xF8,'P','R','O') ///< Protocol not found - -#define AVERROR_STREAM_NOT_FOUND FFERRTAG(0xF8,'S','T','R') ///< Stream not found -/** - * This is semantically identical to AVERROR_BUG - * it has been introduced in Libav after our AVERROR_BUG and with a modified value. - */ -#define AVERROR_BUG2 FFERRTAG( 'B','U','G',' ') -#define AVERROR_UNKNOWN FFERRTAG( 'U','N','K','N') ///< Unknown error, typically from an external library -#define AVERROR_EXPERIMENTAL (-0x2bb2afa8) ///< Requested feature is flagged experimental. Set strict_std_compliance if you really want to use it. - -#define AV_ERROR_MAX_STRING_SIZE 64 - -/** - * Put a description of the AVERROR code errnum in errbuf. - * In case of failure the global variable errno is set to indicate the - * error. Even in case of failure av_strerror() will print a generic - * error message indicating the errnum provided to errbuf. - * - * @param errnum error code to describe - * @param errbuf buffer to which description is written - * @param errbuf_size the size in bytes of errbuf - * @return 0 on success, a negative value if a description for errnum - * cannot be found - */ -int av_strerror(int errnum, char *errbuf, size_t errbuf_size); - -/** - * Fill the provided buffer with a string containing an error string - * corresponding to the AVERROR code errnum. - * - * @param errbuf a buffer - * @param errbuf_size size in bytes of errbuf - * @param errnum error code to describe - * @return the buffer in input, filled with the error description - * @see av_strerror() - */ -static inline char *av_make_error_string(char *errbuf, size_t errbuf_size, int errnum) -{ - av_strerror(errnum, errbuf, errbuf_size); - return errbuf; -} - -/** - * Convenience macro, the return value should be used only directly in - * function arguments but never stand-alone. 
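/* A sketch of turning an AVERROR code back into text with the helpers above;
   EINVAL is just an example errno value: */
#include <errno.h>
#include <stdio.h>
#include <libavutil/error.h>

static void error_sketch(void)
{
    char msg[AV_ERROR_MAX_STRING_SIZE];
    int  err = AVERROR(EINVAL);          /* negative error code built from a POSIX errno */
    av_strerror(err, msg, sizeof(msg));  /* fills msg with a message even for unknown codes */
    fprintf(stderr, "operation failed: %s\n", msg);
}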
- */ -#define av_err2str(errnum) \ - av_make_error_string((char[AV_ERROR_MAX_STRING_SIZE]){0}, AV_ERROR_MAX_STRING_SIZE, errnum) - -/** - * @} - */ - -#endif /* AVUTIL_ERROR_H */ diff --git a/3rdparty/include/ffmpeg_/libavutil/eval.h b/3rdparty/include/ffmpeg_/libavutil/eval.h deleted file mode 100644 index a1d1fe345c..0000000000 --- a/3rdparty/include/ffmpeg_/libavutil/eval.h +++ /dev/null @@ -1,113 +0,0 @@ -/* - * Copyright (c) 2002 Michael Niedermayer - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -/** - * @file - * simple arithmetic expression evaluator - */ - -#ifndef AVUTIL_EVAL_H -#define AVUTIL_EVAL_H - -#include "avutil.h" - -typedef struct AVExpr AVExpr; - -/** - * Parse and evaluate an expression. - * Note, this is significantly slower than av_expr_eval(). - * - * @param res a pointer to a double where is put the result value of - * the expression, or NAN in case of error - * @param s expression as a zero terminated string, for example "1+2^3+5*5+sin(2/3)" - * @param const_names NULL terminated array of zero terminated strings of constant identifiers, for example {"PI", "E", 0} - * @param const_values a zero terminated array of values for the identifiers from const_names - * @param func1_names NULL terminated array of zero terminated strings of funcs1 identifiers - * @param funcs1 NULL terminated array of function pointers for functions which take 1 argument - * @param func2_names NULL terminated array of zero terminated strings of funcs2 identifiers - * @param funcs2 NULL terminated array of function pointers for functions which take 2 arguments - * @param opaque a pointer which will be passed to all functions from funcs1 and funcs2 - * @param log_ctx parent logging context - * @return 0 in case of success, a negative value corresponding to an - * AVERROR code otherwise - */ -int av_expr_parse_and_eval(double *res, const char *s, - const char * const *const_names, const double *const_values, - const char * const *func1_names, double (* const *funcs1)(void *, double), - const char * const *func2_names, double (* const *funcs2)(void *, double, double), - void *opaque, int log_offset, void *log_ctx); - -/** - * Parse an expression. - * - * @param expr a pointer where is put an AVExpr containing the parsed - * value in case of successful parsing, or NULL otherwise. - * The pointed to AVExpr must be freed with av_expr_free() by the user - * when it is not needed anymore. 
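/* A sketch of one-shot expression evaluation with av_expr_parse_and_eval()
   above; the constant names "W" and "H" and the expression are arbitrary
   examples: */
#include <libavutil/eval.h>

static double eval_sketch(void)
{
    static const char * const names[] = { "W", "H", NULL };
    const double values[] = { 1920, 1080 };
    double res = 0;
    int ret = av_expr_parse_and_eval(&res, "W*H/2",
                                     names, values,
                                     NULL, NULL, NULL, NULL, /* no custom functions */
                                     NULL, 0, NULL);         /* opaque, log_offset, log_ctx */
    return ret < 0 ? 0 : res;                                /* 1036800 on success */
}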
- * @param s expression as a zero terminated string, for example "1+2^3+5*5+sin(2/3)" - * @param const_names NULL terminated array of zero terminated strings of constant identifiers, for example {"PI", "E", 0} - * @param func1_names NULL terminated array of zero terminated strings of funcs1 identifiers - * @param funcs1 NULL terminated array of function pointers for functions which take 1 argument - * @param func2_names NULL terminated array of zero terminated strings of funcs2 identifiers - * @param funcs2 NULL terminated array of function pointers for functions which take 2 arguments - * @param log_ctx parent logging context - * @return 0 in case of success, a negative value corresponding to an - * AVERROR code otherwise - */ -int av_expr_parse(AVExpr **expr, const char *s, - const char * const *const_names, - const char * const *func1_names, double (* const *funcs1)(void *, double), - const char * const *func2_names, double (* const *funcs2)(void *, double, double), - int log_offset, void *log_ctx); - -/** - * Evaluate a previously parsed expression. - * - * @param const_values a zero terminated array of values for the identifiers from av_expr_parse() const_names - * @param opaque a pointer which will be passed to all functions from funcs1 and funcs2 - * @return the value of the expression - */ -double av_expr_eval(AVExpr *e, const double *const_values, void *opaque); - -/** - * Free a parsed expression previously created with av_expr_parse(). - */ -void av_expr_free(AVExpr *e); - -/** - * Parse the string in numstr and return its value as a double. If - * the string is empty, contains only whitespaces, or does not contain - * an initial substring that has the expected syntax for a - * floating-point number, no conversion is performed. In this case, - * returns a value of zero and the value returned in tail is the value - * of numstr. - * - * @param numstr a string representing a number, may contain one of - * the International System number postfixes, for example 'K', 'M', - * 'G'. If 'i' is appended after the postfix, powers of 2 are used - * instead of powers of 10. The 'B' postfix multiplies the value for - * 8, and can be appended after another postfix or used alone. This - * allows using for example 'KB', 'MiB', 'G' and 'B' as postfix. - * @param tail if non-NULL puts here the pointer to the char next - * after the last parsed character - */ -double av_strtod(const char *numstr, char **tail); - -#endif /* AVUTIL_EVAL_H */ diff --git a/3rdparty/include/ffmpeg_/libavutil/fifo.h b/3rdparty/include/ffmpeg_/libavutil/fifo.h deleted file mode 100644 index 849b9a6b81..0000000000 --- a/3rdparty/include/ffmpeg_/libavutil/fifo.h +++ /dev/null @@ -1,144 +0,0 @@ -/* - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. 
- * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -/** - * @file - * a very simple circular buffer FIFO implementation - */ - -#ifndef AVUTIL_FIFO_H -#define AVUTIL_FIFO_H - -#include -#include "avutil.h" -#include "attributes.h" - -typedef struct AVFifoBuffer { - uint8_t *buffer; - uint8_t *rptr, *wptr, *end; - uint32_t rndx, wndx; -} AVFifoBuffer; - -/** - * Initialize an AVFifoBuffer. - * @param size of FIFO - * @return AVFifoBuffer or NULL in case of memory allocation failure - */ -AVFifoBuffer *av_fifo_alloc(unsigned int size); - -/** - * Free an AVFifoBuffer. - * @param f AVFifoBuffer to free - */ -void av_fifo_free(AVFifoBuffer *f); - -/** - * Reset the AVFifoBuffer to the state right after av_fifo_alloc, in particular it is emptied. - * @param f AVFifoBuffer to reset - */ -void av_fifo_reset(AVFifoBuffer *f); - -/** - * Return the amount of data in bytes in the AVFifoBuffer, that is the - * amount of data you can read from it. - * @param f AVFifoBuffer to read from - * @return size - */ -int av_fifo_size(AVFifoBuffer *f); - -/** - * Return the amount of space in bytes in the AVFifoBuffer, that is the - * amount of data you can write into it. - * @param f AVFifoBuffer to write into - * @return size - */ -int av_fifo_space(AVFifoBuffer *f); - -/** - * Feed data from an AVFifoBuffer to a user-supplied callback. - * @param f AVFifoBuffer to read from - * @param buf_size number of bytes to read - * @param func generic read function - * @param dest data destination - */ -int av_fifo_generic_read(AVFifoBuffer *f, void *dest, int buf_size, void (*func)(void*, void*, int)); - -/** - * Feed data from a user-supplied callback to an AVFifoBuffer. - * @param f AVFifoBuffer to write to - * @param src data source; non-const since it may be used as a - * modifiable context by the function defined in func - * @param size number of bytes to write - * @param func generic write function; the first parameter is src, - * the second is dest_buf, the third is dest_buf_size. - * func must return the number of bytes written to dest_buf, or <= 0 to - * indicate no more data available to write. - * If func is NULL, src is interpreted as a simple byte array for source data. - * @return the number of bytes written to the FIFO - */ -int av_fifo_generic_write(AVFifoBuffer *f, void *src, int size, int (*func)(void*, void*, int)); - -/** - * Resize an AVFifoBuffer. - * In case of reallocation failure, the old FIFO is kept unchanged. - * - * @param f AVFifoBuffer to resize - * @param size new AVFifoBuffer size in bytes - * @return <0 for failure, >=0 otherwise - */ -int av_fifo_realloc2(AVFifoBuffer *f, unsigned int size); - -/** - * Enlarge an AVFifoBuffer. - * In case of reallocation failure, the old FIFO is kept unchanged. - * The new fifo size may be larger than the requested size. - * - * @param f AVFifoBuffer to resize - * @param additional_space the amount of space in bytes to allocate in addition to av_fifo_size() - * @return <0 for failure, >=0 otherwise - */ -int av_fifo_grow(AVFifoBuffer *f, unsigned int additional_space); - -/** - * Read and discard the specified amount of data from an AVFifoBuffer. - * @param f AVFifoBuffer to read from - * @param size amount of data to read in bytes - */ -void av_fifo_drain(AVFifoBuffer *f, int size); - -/** - * Return a pointer to the data stored in a FIFO buffer at a certain offset. 
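/* Editor's note: minimal sketch, not part of the original fifo.h. It exercises
 * the FIFO calls declared above in plain byte-array mode (func == NULL); the
 * buffer sizes are arbitrary example values. */
#include <stdint.h>
#include <string.h>
#include <libavutil/fifo.h>

static void fifo_example(void)
{
    uint8_t in[16], out[16];
    AVFifoBuffer *f = av_fifo_alloc(1024);
    if (!f)
        return;

    memset(in, 0xAB, sizeof(in));
    if (av_fifo_space(f) >= (int)sizeof(in))
        av_fifo_generic_write(f, in, sizeof(in), NULL);   /* src as byte array */

    if (av_fifo_size(f) >= (int)sizeof(out))
        av_fifo_generic_read(f, out, sizeof(out), NULL);  /* plain copy to out */

    av_fifo_free(f);
}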
- * The FIFO buffer is not modified. - * - * @param f AVFifoBuffer to peek at, f must be non-NULL - * @param offs an offset in bytes, its absolute value must be less - * than the used buffer size or the returned pointer will - * point outside to the buffer data. - * The used buffer size can be checked with av_fifo_size(). - */ -static inline uint8_t *av_fifo_peek2(const AVFifoBuffer *f, int offs) -{ - uint8_t *ptr = f->rptr + offs; - if (ptr >= f->end) - ptr = f->buffer + (ptr - f->end); - else if (ptr < f->buffer) - ptr = f->end - (f->buffer - ptr); - return ptr; -} - -#endif /* AVUTIL_FIFO_H */ diff --git a/3rdparty/include/ffmpeg_/libavutil/file.h b/3rdparty/include/ffmpeg_/libavutil/file.h deleted file mode 100644 index a7364fe8fe..0000000000 --- a/3rdparty/include/ffmpeg_/libavutil/file.h +++ /dev/null @@ -1,66 +0,0 @@ -/* - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#ifndef AVUTIL_FILE_H -#define AVUTIL_FILE_H - -#include - -#include "avutil.h" - -/** - * @file - * Misc file utilities. - */ - -/** - * Read the file with name filename, and put its content in a newly - * allocated buffer or map it with mmap() when available. - * In case of success set *bufptr to the read or mmapped buffer, and - * *size to the size in bytes of the buffer in *bufptr. - * The returned buffer must be released with av_file_unmap(). - * - * @param log_offset loglevel offset used for logging - * @param log_ctx context used for logging - * @return a non negative number in case of success, a negative value - * corresponding to an AVERROR error code in case of failure - */ -int av_file_map(const char *filename, uint8_t **bufptr, size_t *size, - int log_offset, void *log_ctx); - -/** - * Unmap or free the buffer bufptr created by av_file_map(). - * - * @param size size in bytes of bufptr, must be the same as returned - * by av_file_map() - */ -void av_file_unmap(uint8_t *bufptr, size_t size); - -/** - * Wrapper to work around the lack of mkstemp() on mingw. - * Also, tries to create file in /tmp first, if possible. - * *prefix can be a character constant; *filename will be allocated internally. - * @return file descriptor of opened file (or -1 on error) - * and opened file name in **filename. - * @note On very old libcs it is necessary to set a secure umask before - * calling this, av_tempfile() can't call umask itself as it is used in - * libraries and could interfere with the calling application. - */ -int av_tempfile(const char *prefix, char **filename, int log_offset, void *log_ctx); - -#endif /* AVUTIL_FILE_H */ diff --git a/3rdparty/include/ffmpeg_/libavutil/frame.h b/3rdparty/include/ffmpeg_/libavutil/frame.h deleted file mode 100644 index 39a664fde5..0000000000 --- a/3rdparty/include/ffmpeg_/libavutil/frame.h +++ /dev/null @@ -1,607 +0,0 @@ -/* - * - * This file is part of FFmpeg. 
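/* Editor's note: hypothetical sketch, not part of the original file.h shown
 * above, of the av_file_map()/av_file_unmap() pair; the filename is
 * caller-supplied. */
#include <stdio.h>
#include <stdint.h>
#include <libavutil/file.h>

static int print_file_size(const char *filename)
{
    uint8_t *buf  = NULL;
    size_t   size = 0;

    int ret = av_file_map(filename, &buf, &size, 0, NULL);
    if (ret < 0)
        return ret;                    /* negative AVERROR code */

    printf("%s is %zu bytes\n", filename, size);
    av_file_unmap(buf, size);          /* size must match av_file_map() output */
    return 0;
}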
- * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#ifndef AVUTIL_FRAME_H -#define AVUTIL_FRAME_H - -#include - -#include "libavcodec/version.h" - -#include "avutil.h" -#include "buffer.h" -#include "dict.h" -#include "rational.h" -#include "samplefmt.h" - -enum AVFrameSideDataType { - /** - * The data is the AVPanScan struct defined in libavcodec. - */ - AV_FRAME_DATA_PANSCAN, -}; - -typedef struct AVFrameSideData { - enum AVFrameSideDataType type; - uint8_t *data; - int size; - AVDictionary *metadata; -} AVFrameSideData; - -/** - * This structure describes decoded (raw) audio or video data. - * - * AVFrame must be allocated using av_frame_alloc(). Note that this only - * allocates the AVFrame itself, the buffers for the data must be managed - * through other means (see below). - * AVFrame must be freed with av_frame_free(). - * - * AVFrame is typically allocated once and then reused multiple times to hold - * different data (e.g. a single AVFrame to hold frames received from a - * decoder). In such a case, av_frame_unref() will free any references held by - * the frame and reset it to its original clean state before it - * is reused again. - * - * The data described by an AVFrame is usually reference counted through the - * AVBuffer API. The underlying buffer references are stored in AVFrame.buf / - * AVFrame.extended_buf. An AVFrame is considered to be reference counted if at - * least one reference is set, i.e. if AVFrame.buf[0] != NULL. In such a case, - * every single data plane must be contained in one of the buffers in - * AVFrame.buf or AVFrame.extended_buf. - * There may be a single buffer for all the data, or one separate buffer for - * each plane, or anything in between. - * - * sizeof(AVFrame) is not a part of the public ABI, so new fields may be added - * to the end with a minor bump. - * Similarly fields that are marked as to be only accessed by - * av_opt_ptr() can be reordered. This allows 2 forks to add fields - * without breaking compatibility with each other. - */ -typedef struct AVFrame { -#define AV_NUM_DATA_POINTERS 8 - /** - * pointer to the picture/channel planes. - * This might be different from the first allocated byte - * - * Some decoders access areas outside 0,0 - width,height, please - * see avcodec_align_dimensions2(). Some filters and swscale can read - * up to 16 bytes beyond the planes, if these filters are to be used, - * then 16 extra bytes must be allocated. - */ - uint8_t *data[AV_NUM_DATA_POINTERS]; - - /** - * For video, size in bytes of each picture line. - * For audio, size in bytes of each plane. - * - * For audio, only linesize[0] may be set. For planar audio, each channel - * plane must be the same size. - * - * For video the linesizes should be multiplies of the CPUs alignment - * preference, this is 16 or 32 for modern desktop CPUs. 
- * Some code requires such alignment other code can be slower without - * correct alignment, for yet other it makes no difference. - */ - int linesize[AV_NUM_DATA_POINTERS]; - - /** - * pointers to the data planes/channels. - * - * For video, this should simply point to data[]. - * - * For planar audio, each channel has a separate data pointer, and - * linesize[0] contains the size of each channel buffer. - * For packed audio, there is just one data pointer, and linesize[0] - * contains the total size of the buffer for all channels. - * - * Note: Both data and extended_data should always be set in a valid frame, - * but for planar audio with more channels that can fit in data, - * extended_data must be used in order to access all channels. - */ - uint8_t **extended_data; - - /** - * width and height of the video frame - */ - int width, height; - - /** - * number of audio samples (per channel) described by this frame - */ - int nb_samples; - - /** - * format of the frame, -1 if unknown or unset - * Values correspond to enum AVPixelFormat for video frames, - * enum AVSampleFormat for audio) - */ - int format; - - /** - * 1 -> keyframe, 0-> not - */ - int key_frame; - - /** - * Picture type of the frame. - */ - enum AVPictureType pict_type; - -#if FF_API_AVFRAME_LAVC - attribute_deprecated - uint8_t *base[AV_NUM_DATA_POINTERS]; -#endif - - /** - * Sample aspect ratio for the video frame, 0/1 if unknown/unspecified. - */ - AVRational sample_aspect_ratio; - - /** - * Presentation timestamp in time_base units (time when frame should be shown to user). - */ - int64_t pts; - - /** - * PTS copied from the AVPacket that was decoded to produce this frame. - */ - int64_t pkt_pts; - - /** - * DTS copied from the AVPacket that triggered returning this frame. (if frame threading isnt used) - * This is also the Presentation time of this AVFrame calculated from - * only AVPacket.dts values without pts values. - */ - int64_t pkt_dts; - - /** - * picture number in bitstream order - */ - int coded_picture_number; - /** - * picture number in display order - */ - int display_picture_number; - - /** - * quality (between 1 (good) and FF_LAMBDA_MAX (bad)) - */ - int quality; - -#if FF_API_AVFRAME_LAVC - attribute_deprecated - int reference; - - /** - * QP table - */ - attribute_deprecated - int8_t *qscale_table; - /** - * QP store stride - */ - attribute_deprecated - int qstride; - - attribute_deprecated - int qscale_type; - - /** - * mbskip_table[mb]>=1 if MB didn't change - * stride= mb_width = (width+15)>>4 - */ - attribute_deprecated - uint8_t *mbskip_table; - - /** - * motion vector table - * @code - * example: - * int mv_sample_log2= 4 - motion_subsample_log2; - * int mb_width= (width+15)>>4; - * int mv_stride= (mb_width << mv_sample_log2) + 1; - * motion_val[direction][x + y*mv_stride][0->mv_x, 1->mv_y]; - * @endcode - */ - attribute_deprecated - int16_t (*motion_val[2])[2]; - - /** - * macroblock type table - * mb_type_base + mb_width + 2 - */ - attribute_deprecated - uint32_t *mb_type; - - /** - * DCT coefficients - */ - attribute_deprecated - short *dct_coeff; - - /** - * motion reference frame index - * the order in which these are stored can depend on the codec. - */ - attribute_deprecated - int8_t *ref_index[2]; -#endif - - /** - * for some private data of the user - */ - void *opaque; - - /** - * error - */ - uint64_t error[AV_NUM_DATA_POINTERS]; - -#if FF_API_AVFRAME_LAVC - attribute_deprecated - int type; -#endif - - /** - * When decoding, this signals how much the picture must be delayed. 
- * extra_delay = repeat_pict / (2*fps) - */ - int repeat_pict; - - /** - * The content of the picture is interlaced. - */ - int interlaced_frame; - - /** - * If the content is interlaced, is top field displayed first. - */ - int top_field_first; - - /** - * Tell user application that palette has changed from previous frame. - */ - int palette_has_changed; - -#if FF_API_AVFRAME_LAVC - attribute_deprecated - int buffer_hints; - - /** - * Pan scan. - */ - attribute_deprecated - struct AVPanScan *pan_scan; -#endif - - /** - * reordered opaque 64bit (generally an integer or a double precision float - * PTS but can be anything). - * The user sets AVCodecContext.reordered_opaque to represent the input at - * that time, - * the decoder reorders values as needed and sets AVFrame.reordered_opaque - * to exactly one of the values provided by the user through AVCodecContext.reordered_opaque - * @deprecated in favor of pkt_pts - */ - int64_t reordered_opaque; - -#if FF_API_AVFRAME_LAVC - /** - * @deprecated this field is unused - */ - attribute_deprecated void *hwaccel_picture_private; - - attribute_deprecated - struct AVCodecContext *owner; - attribute_deprecated - void *thread_opaque; - - /** - * log2 of the size of the block which a single vector in motion_val represents: - * (4->16x16, 3->8x8, 2-> 4x4, 1-> 2x2) - */ - attribute_deprecated - uint8_t motion_subsample_log2; -#endif - - /** - * Sample rate of the audio data. - */ - int sample_rate; - - /** - * Channel layout of the audio data. - */ - uint64_t channel_layout; - - /** - * AVBuffer references backing the data for this frame. If all elements of - * this array are NULL, then this frame is not reference counted. - * - * There may be at most one AVBuffer per data plane, so for video this array - * always contains all the references. For planar audio with more than - * AV_NUM_DATA_POINTERS channels, there may be more buffers than can fit in - * this array. Then the extra AVBufferRef pointers are stored in the - * extended_buf array. - */ - AVBufferRef *buf[AV_NUM_DATA_POINTERS]; - - /** - * For planar audio which requires more than AV_NUM_DATA_POINTERS - * AVBufferRef pointers, this array will hold all the references which - * cannot fit into AVFrame.buf. - * - * Note that this is different from AVFrame.extended_data, which always - * contains all the pointers. This array only contains the extra pointers, - * which cannot fit into AVFrame.buf. - * - * This array is always allocated using av_malloc() by whoever constructs - * the frame. It is freed in av_frame_unref(). - */ - AVBufferRef **extended_buf; - /** - * Number of elements in extended_buf. - */ - int nb_extended_buf; - - AVFrameSideData **side_data; - int nb_side_data; - - /** - * frame timestamp estimated using various heuristics, in stream time base - * Code outside libavcodec should access this field using: - * av_frame_get_best_effort_timestamp(frame) - * - encoding: unused - * - decoding: set by libavcodec, read by user. - */ - int64_t best_effort_timestamp; - - /** - * reordered pos from the last AVPacket that has been input into the decoder - * Code outside libavcodec should access this field using: - * av_frame_get_pkt_pos(frame) - * - encoding: unused - * - decoding: Read by user. - */ - int64_t pkt_pos; - - /** - * duration of the corresponding packet, expressed in - * AVStream->time_base units, 0 if unknown. - * Code outside libavcodec should access this field using: - * av_frame_get_pkt_duration(frame) - * - encoding: unused - * - decoding: Read by user. 
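/* Editor's note: illustrative sketch only, not part of the original frame.h.
 * It walks one plane of an AVFrame using the data[]/linesize[] fields
 * documented above; the frame is assumed to be already allocated, writable,
 * and to use one byte per sample in plane 0 (e.g. the luma plane of a YUV
 * frame). */
#include <stdint.h>
#include <string.h>
#include <libavutil/frame.h>

static void fill_plane0(AVFrame *frame, uint8_t value)
{
    int y;
    for (y = 0; y < frame->height; y++)
        memset(frame->data[0] + y * frame->linesize[0],  /* start of line y  */
               value, frame->width);                     /* width bytes/line */
}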
- */ - int64_t pkt_duration; - - /** - * metadata. - * Code outside libavcodec should access this field using: - * av_frame_get_metadata(frame) - * - encoding: Set by user. - * - decoding: Set by libavcodec. - */ - AVDictionary *metadata; - - /** - * decode error flags of the frame, set to a combination of - * FF_DECODE_ERROR_xxx flags if the decoder produced a frame, but there - * were errors during the decoding. - * Code outside libavcodec should access this field using: - * av_frame_get_decode_error_flags(frame) - * - encoding: unused - * - decoding: set by libavcodec, read by user. - */ - int decode_error_flags; -#define FF_DECODE_ERROR_INVALID_BITSTREAM 1 -#define FF_DECODE_ERROR_MISSING_REFERENCE 2 - - /** - * number of audio channels, only used for audio. - * Code outside libavcodec should access this field using: - * av_frame_get_channels(frame) - * - encoding: unused - * - decoding: Read by user. - */ - int channels; - - /** - * size of the corresponding packet containing the compressed - * frame. It must be accessed using av_frame_get_pkt_size() and - * av_frame_set_pkt_size(). - * It is set to a negative value if unknown. - * - encoding: unused - * - decoding: set by libavcodec, read by user. - */ - int pkt_size; - - /** - * Not to be accessed directly from outside libavutil - */ - AVBufferRef *qp_table_buf; -} AVFrame; - -/** - * Accessors for some AVFrame fields. - * The position of these field in the structure is not part of the ABI, - * they should not be accessed directly outside libavcodec. - */ -int64_t av_frame_get_best_effort_timestamp(const AVFrame *frame); -void av_frame_set_best_effort_timestamp(AVFrame *frame, int64_t val); -int64_t av_frame_get_pkt_duration (const AVFrame *frame); -void av_frame_set_pkt_duration (AVFrame *frame, int64_t val); -int64_t av_frame_get_pkt_pos (const AVFrame *frame); -void av_frame_set_pkt_pos (AVFrame *frame, int64_t val); -int64_t av_frame_get_channel_layout (const AVFrame *frame); -void av_frame_set_channel_layout (AVFrame *frame, int64_t val); -int av_frame_get_channels (const AVFrame *frame); -void av_frame_set_channels (AVFrame *frame, int val); -int av_frame_get_sample_rate (const AVFrame *frame); -void av_frame_set_sample_rate (AVFrame *frame, int val); -AVDictionary *av_frame_get_metadata (const AVFrame *frame); -void av_frame_set_metadata (AVFrame *frame, AVDictionary *val); -int av_frame_get_decode_error_flags (const AVFrame *frame); -void av_frame_set_decode_error_flags (AVFrame *frame, int val); -int av_frame_get_pkt_size(const AVFrame *frame); -void av_frame_set_pkt_size(AVFrame *frame, int val); -AVDictionary **avpriv_frame_get_metadatap(AVFrame *frame); -int8_t *av_frame_get_qp_table(AVFrame *f, int *stride, int *type); -int av_frame_set_qp_table(AVFrame *f, AVBufferRef *buf, int stride, int type); - -/** - * Allocate an AVFrame and set its fields to default values. The resulting - * struct must be freed using av_frame_free(). - * - * @return An AVFrame filled with default values or NULL on failure. - * - * @note this only allocates the AVFrame itself, not the data buffers. Those - * must be allocated through other means, e.g. with av_frame_get_buffer() or - * manually. - */ -AVFrame *av_frame_alloc(void); - -/** - * Free the frame and any dynamically allocated objects in it, - * e.g. extended_data. If the frame is reference counted, it will be - * unreferenced first. - * - * @param frame frame to be freed. The pointer will be set to NULL. 
- */ -void av_frame_free(AVFrame **frame); - -/** - * Setup a new reference to the data described by an given frame. - * - * Copy frame properties from src to dst and create a new reference for each - * AVBufferRef from src. - * - * If src is not reference counted, new buffers are allocated and the data is - * copied. - * - * @return 0 on success, a negative AVERROR on error - */ -int av_frame_ref(AVFrame *dst, AVFrame *src); - -/** - * Create a new frame that references the same data as src. - * - * This is a shortcut for av_frame_alloc()+av_frame_ref(). - * - * @return newly created AVFrame on success, NULL on error. - */ -AVFrame *av_frame_clone(AVFrame *src); - -/** - * Unreference all the buffers referenced by frame and reset the frame fields. - */ -void av_frame_unref(AVFrame *frame); - -/** - * Move everythnig contained in src to dst and reset src. - */ -void av_frame_move_ref(AVFrame *dst, AVFrame *src); - -/** - * Allocate new buffer(s) for audio or video data. - * - * The following fields must be set on frame before calling this function: - * - format (pixel format for video, sample format for audio) - * - width and height for video - * - nb_samples and channel_layout for audio - * - * This function will fill AVFrame.data and AVFrame.buf arrays and, if - * necessary, allocate and fill AVFrame.extended_data and AVFrame.extended_buf. - * For planar formats, one buffer will be allocated for each plane. - * - * @param frame frame in which to store the new buffers. - * @param align required buffer size alignment - * - * @return 0 on success, a negative AVERROR on error. - */ -int av_frame_get_buffer(AVFrame *frame, int align); - -/** - * Check if the frame data is writable. - * - * @return A positive value if the frame data is writable (which is true if and - * only if each of the underlying buffers has only one reference, namely the one - * stored in this frame). Return 0 otherwise. - * - * If 1 is returned the answer is valid until av_buffer_ref() is called on any - * of the underlying AVBufferRefs (e.g. through av_frame_ref() or directly). - * - * @see av_frame_make_writable(), av_buffer_is_writable() - */ -int av_frame_is_writable(AVFrame *frame); - -/** - * Ensure that the frame data is writable, avoiding data copy if possible. - * - * Do nothing if the frame is writable, allocate new buffers and copy the data - * if it is not. - * - * @return 0 on success, a negative AVERROR on error. - * - * @see av_frame_is_writable(), av_buffer_is_writable(), - * av_buffer_make_writable() - */ -int av_frame_make_writable(AVFrame *frame); - -/** - * Copy only "metadata" fields from src to dst. - * - * Metadata for the purpose of this function are those fields that do not affect - * the data layout in the buffers. E.g. pts, sample rate (for audio) or sample - * aspect ratio (for video), but not width/height or channel layout. - * Side data is also copied. - */ -int av_frame_copy_props(AVFrame *dst, const AVFrame *src); - -/** - * Get the buffer reference a given data plane is stored in. - * - * @param plane index of the data plane of interest in frame->extended_data. - * - * @return the buffer reference that contains the plane or NULL if the input - * frame is not valid. - */ -AVBufferRef *av_frame_get_plane_buffer(AVFrame *frame, int plane); - -/** - * Add a new side data to a frame. 
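/* Editor's note: a minimal sketch, not part of the original frame.h, of the
 * allocation pattern documented above (av_frame_alloc + av_frame_get_buffer +
 * av_frame_free). Pixel format, size and alignment are arbitrary examples. */
#include <libavutil/frame.h>
#include <libavutil/pixfmt.h>

static AVFrame *alloc_video_frame(void)
{
    AVFrame *frame = av_frame_alloc();        /* allocates the struct only */
    if (!frame)
        return NULL;

    frame->format = AV_PIX_FMT_YUV420P;       /* must be set before ...    */
    frame->width  = 640;                      /* ... av_frame_get_buffer() */
    frame->height = 480;

    if (av_frame_get_buffer(frame, 32) < 0) { /* 32-byte aligned planes    */
        av_frame_free(&frame);                /* frees and NULLs the frame */
        return NULL;
    }
    return frame;
}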
- * - * @param frame a frame to which the side data should be added - * @param type type of the added side data - * @param size size of the side data - * - * @return newly added side data on success, NULL on error - */ -AVFrameSideData *av_frame_new_side_data(AVFrame *frame, - enum AVFrameSideDataType type, - int size); - -/** - * @return a pointer to the side data of a given type on success, NULL if there - * is no side data with such type in this frame. - */ -AVFrameSideData *av_frame_get_side_data(AVFrame *frame, - enum AVFrameSideDataType type); - -#endif /* AVUTIL_FRAME_H */ diff --git a/3rdparty/include/ffmpeg_/libavutil/hmac.h b/3rdparty/include/ffmpeg_/libavutil/hmac.h deleted file mode 100644 index d36d4de19e..0000000000 --- a/3rdparty/include/ffmpeg_/libavutil/hmac.h +++ /dev/null @@ -1,99 +0,0 @@ -/* - * Copyright (C) 2012 Martin Storsjo - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#ifndef AVUTIL_HMAC_H -#define AVUTIL_HMAC_H - -#include - -/** - * @defgroup lavu_hmac HMAC - * @ingroup lavu_crypto - * @{ - */ - -enum AVHMACType { - AV_HMAC_MD5, - AV_HMAC_SHA1, - AV_HMAC_SHA224 = 10, - AV_HMAC_SHA256, - AV_HMAC_SHA384, - AV_HMAC_SHA512, -}; - -typedef struct AVHMAC AVHMAC; - -/** - * Allocate an AVHMAC context. - * @param type The hash function used for the HMAC. - */ -AVHMAC *av_hmac_alloc(enum AVHMACType type); - -/** - * Free an AVHMAC context. - * @param ctx The context to free, may be NULL - */ -void av_hmac_free(AVHMAC *ctx); - -/** - * Initialize an AVHMAC context with an authentication key. - * @param ctx The HMAC context - * @param key The authentication key - * @param keylen The length of the key, in bytes - */ -void av_hmac_init(AVHMAC *ctx, const uint8_t *key, unsigned int keylen); - -/** - * Hash data with the HMAC. - * @param ctx The HMAC context - * @param data The data to hash - * @param len The length of the data, in bytes - */ -void av_hmac_update(AVHMAC *ctx, const uint8_t *data, unsigned int len); - -/** - * Finish hashing and output the HMAC digest. - * @param ctx The HMAC context - * @param out The output buffer to write the digest into - * @param outlen The length of the out buffer, in bytes - * @return The number of bytes written to out, or a negative error code. - */ -int av_hmac_final(AVHMAC *ctx, uint8_t *out, unsigned int outlen); - -/** - * Hash an array of data with a key. - * @param ctx The HMAC context - * @param data The data to hash - * @param len The length of the data, in bytes - * @param key The authentication key - * @param keylen The length of the key, in bytes - * @param out The output buffer to write the digest into - * @param outlen The length of the out buffer, in bytes - * @return The number of bytes written to out, or a negative error code. 
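/* Editor's note: sketch only, not part of the original hmac.h. It strings the
 * AVHMAC calls declared above into a one-shot HMAC-SHA256 helper; the 32-byte
 * output size matches SHA-256's digest length. */
#include <stdint.h>
#include <libavutil/hmac.h>

static int hmac_sha256(const uint8_t *key, unsigned keylen,
                       const uint8_t *msg, unsigned msglen,
                       uint8_t out[32])
{
    int written;
    AVHMAC *ctx = av_hmac_alloc(AV_HMAC_SHA256);
    if (!ctx)
        return -1;

    av_hmac_init(ctx, key, keylen);
    av_hmac_update(ctx, msg, msglen);
    written = av_hmac_final(ctx, out, 32);   /* bytes written or negative error */
    av_hmac_free(ctx);
    return written;
}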
- */ -int av_hmac_calc(AVHMAC *ctx, const uint8_t *data, unsigned int len, - const uint8_t *key, unsigned int keylen, - uint8_t *out, unsigned int outlen); - -/** - * @} - */ - -#endif /* AVUTIL_HMAC_H */ diff --git a/3rdparty/include/ffmpeg_/libavutil/imgutils.h b/3rdparty/include/ffmpeg_/libavutil/imgutils.h deleted file mode 100644 index ab32d667d3..0000000000 --- a/3rdparty/include/ffmpeg_/libavutil/imgutils.h +++ /dev/null @@ -1,200 +0,0 @@ -/* - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#ifndef AVUTIL_IMGUTILS_H -#define AVUTIL_IMGUTILS_H - -/** - * @file - * misc image utilities - * - * @addtogroup lavu_picture - * @{ - */ - -#include "avutil.h" -#include "pixdesc.h" - -/** - * Compute the max pixel step for each plane of an image with a - * format described by pixdesc. - * - * The pixel step is the distance in bytes between the first byte of - * the group of bytes which describe a pixel component and the first - * byte of the successive group in the same plane for the same - * component. - * - * @param max_pixsteps an array which is filled with the max pixel step - * for each plane. Since a plane may contain different pixel - * components, the computed max_pixsteps[plane] is relative to the - * component in the plane with the max pixel step. - * @param max_pixstep_comps an array which is filled with the component - * for each plane which has the max pixel step. May be NULL. - */ -void av_image_fill_max_pixsteps(int max_pixsteps[4], int max_pixstep_comps[4], - const AVPixFmtDescriptor *pixdesc); - -/** - * Compute the size of an image line with format pix_fmt and width - * width for the plane plane. - * - * @return the computed size in bytes - */ -int av_image_get_linesize(enum AVPixelFormat pix_fmt, int width, int plane); - -/** - * Fill plane linesizes for an image with pixel format pix_fmt and - * width width. - * - * @param linesizes array to be filled with the linesize for each plane - * @return >= 0 in case of success, a negative error code otherwise - */ -int av_image_fill_linesizes(int linesizes[4], enum AVPixelFormat pix_fmt, int width); - -/** - * Fill plane data pointers for an image with pixel format pix_fmt and - * height height. 
- * - * @param data pointers array to be filled with the pointer for each image plane - * @param ptr the pointer to a buffer which will contain the image - * @param linesizes the array containing the linesize for each - * plane, should be filled by av_image_fill_linesizes() - * @return the size in bytes required for the image buffer, a negative - * error code in case of failure - */ -int av_image_fill_pointers(uint8_t *data[4], enum AVPixelFormat pix_fmt, int height, - uint8_t *ptr, const int linesizes[4]); - -/** - * Allocate an image with size w and h and pixel format pix_fmt, and - * fill pointers and linesizes accordingly. - * The allocated image buffer has to be freed by using - * av_freep(&pointers[0]). - * - * @param align the value to use for buffer size alignment - * @return the size in bytes required for the image buffer, a negative - * error code in case of failure - */ -int av_image_alloc(uint8_t *pointers[4], int linesizes[4], - int w, int h, enum AVPixelFormat pix_fmt, int align); - -/** - * Copy image plane from src to dst. - * That is, copy "height" number of lines of "bytewidth" bytes each. - * The first byte of each successive line is separated by *_linesize - * bytes. - * - * bytewidth must be contained by both absolute values of dst_linesize - * and src_linesize, otherwise the function behavior is undefined. - * - * @param dst_linesize linesize for the image plane in dst - * @param src_linesize linesize for the image plane in src - */ -void av_image_copy_plane(uint8_t *dst, int dst_linesize, - const uint8_t *src, int src_linesize, - int bytewidth, int height); - -/** - * Copy image in src_data to dst_data. - * - * @param dst_linesizes linesizes for the image in dst_data - * @param src_linesizes linesizes for the image in src_data - */ -void av_image_copy(uint8_t *dst_data[4], int dst_linesizes[4], - const uint8_t *src_data[4], const int src_linesizes[4], - enum AVPixelFormat pix_fmt, int width, int height); - -/** - * Setup the data pointers and linesizes based on the specified image - * parameters and the provided array. - * - * The fields of the given image are filled in by using the src - * address which points to the image data buffer. Depending on the - * specified pixel format, one or multiple image data pointers and - * line sizes will be set. If a planar format is specified, several - * pointers will be set pointing to the different picture planes and - * the line sizes of the different planes will be stored in the - * lines_sizes array. Call with src == NULL to get the required - * size for the src buffer. - * - * To allocate the buffer and fill in the dst_data and dst_linesize in - * one call, use av_image_alloc(). - * - * @param dst_data data pointers to be filled in - * @param dst_linesizes linesizes for the image in dst_data to be filled in - * @param src buffer which will contain or contains the actual image data, can be NULL - * @param pix_fmt the pixel format of the image - * @param width the width of the image in pixels - * @param height the height of the image in pixels - * @param align the value used in src for linesize alignment - * @return the size in bytes required for src, a negative error code - * in case of failure - */ -int av_image_fill_arrays(uint8_t *dst_data[4], int dst_linesize[4], - const uint8_t *src, - enum AVPixelFormat pix_fmt, int width, int height, int align); - -/** - * Return the size in bytes of the amount of data required to store an - * image with the given parameters. 
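/* Editor's note: illustrative sketch, not part of the original imgutils.h, of
 * av_image_alloc(), released with av_freep(&pointers[0]) exactly as the
 * documentation above prescribes. Pixel format and alignment are examples. */
#include <stdint.h>
#include <libavutil/imgutils.h>
#include <libavutil/mem.h>

static int alloc_rgb_image(int w, int h)
{
    uint8_t *data[4];
    int      linesize[4];

    int size = av_image_alloc(data, linesize, w, h, AV_PIX_FMT_RGB24, 16);
    if (size < 0)
        return size;          /* negative AVERROR code */

    /* ... fill data[0] line by line using linesize[0] ... */

    av_freep(&data[0]);       /* one buffer backs all planes */
    return 0;
}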
- * - * @param[in] align the assumed linesize alignment - */ -int av_image_get_buffer_size(enum AVPixelFormat pix_fmt, int width, int height, int align); - -/** - * Copy image data from an image into a buffer. - * - * av_image_get_buffer_size() can be used to compute the required size - * for the buffer to fill. - * - * @param dst a buffer into which picture data will be copied - * @param dst_size the size in bytes of dst - * @param src_data pointers containing the source image data - * @param src_linesizes linesizes for the image in src_data - * @param pix_fmt the pixel format of the source image - * @param width the width of the source image in pixels - * @param height the height of the source image in pixels - * @param align the assumed linesize alignment for dst - * @return the number of bytes written to dst, or a negative value - * (error code) on error - */ -int av_image_copy_to_buffer(uint8_t *dst, int dst_size, - const uint8_t * const src_data[4], const int src_linesize[4], - enum AVPixelFormat pix_fmt, int width, int height, int align); - -/** - * Check if the given dimension of an image is valid, meaning that all - * bytes of the image can be addressed with a signed int. - * - * @param w the width of the picture - * @param h the height of the picture - * @param log_offset the offset to sum to the log level for logging with log_ctx - * @param log_ctx the parent logging context, it may be NULL - * @return >= 0 if valid, a negative error code otherwise - */ -int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx); - -int avpriv_set_systematic_pal2(uint32_t pal[256], enum AVPixelFormat pix_fmt); - -/** - * @} - */ - - -#endif /* AVUTIL_IMGUTILS_H */ diff --git a/3rdparty/include/ffmpeg_/libavutil/intfloat.h b/3rdparty/include/ffmpeg_/libavutil/intfloat.h deleted file mode 100644 index 38d26ad87e..0000000000 --- a/3rdparty/include/ffmpeg_/libavutil/intfloat.h +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Copyright (c) 2011 Mans Rullgard - * - * This file is part of Libav. - * - * Libav is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * Libav is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with Libav; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#ifndef AVUTIL_INTFLOAT_H -#define AVUTIL_INTFLOAT_H - -#include -#include "attributes.h" - -union av_intfloat32 { - uint32_t i; - float f; -}; - -union av_intfloat64 { - uint64_t i; - double f; -}; - -/** - * Reinterpret a 32-bit integer as a float. - */ -static av_always_inline float av_int2float(uint32_t i) -{ - union av_intfloat32 v; - v.i = i; - return v.f; -} - -/** - * Reinterpret a float as a 32-bit integer. - */ -static av_always_inline uint32_t av_float2int(float f) -{ - union av_intfloat32 v; - v.f = f; - return v.i; -} - -/** - * Reinterpret a 64-bit integer as a double. - */ -static av_always_inline double av_int2double(uint64_t i) -{ - union av_intfloat64 v; - v.i = i; - return v.f; -} - -/** - * Reinterpret a double as a 64-bit integer. 
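/* Editor's note: tiny sketch, not part of the original intfloat.h, of the
 * bit-reinterpretation helpers declared above. */
#include <stdio.h>
#include <stdint.h>
#include <libavutil/intfloat.h>

static void intfloat_example(void)
{
    uint32_t bits = av_float2int(1.0f);   /* IEEE-754 pattern 0x3F800000 */
    float    back = av_int2float(bits);   /* exactly 1.0f again          */
    printf("0x%08X -> %f\n", (unsigned)bits, back);
}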
- */ -static av_always_inline uint64_t av_double2int(double f) -{ - union av_intfloat64 v; - v.f = f; - return v.i; -} - -#endif /* AVUTIL_INTFLOAT_H */ diff --git a/3rdparty/include/ffmpeg_/libavutil/intfloat_readwrite.h b/3rdparty/include/ffmpeg_/libavutil/intfloat_readwrite.h deleted file mode 100644 index 9709f4dae4..0000000000 --- a/3rdparty/include/ffmpeg_/libavutil/intfloat_readwrite.h +++ /dev/null @@ -1,40 +0,0 @@ -/* - * copyright (c) 2005 Michael Niedermayer - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#ifndef AVUTIL_INTFLOAT_READWRITE_H -#define AVUTIL_INTFLOAT_READWRITE_H - -#include -#include "attributes.h" - -/* IEEE 80 bits extended float */ -typedef struct AVExtFloat { - uint8_t exponent[2]; - uint8_t mantissa[8]; -} AVExtFloat; - -attribute_deprecated double av_int2dbl(int64_t v) av_const; -attribute_deprecated float av_int2flt(int32_t v) av_const; -attribute_deprecated double av_ext2dbl(const AVExtFloat ext) av_const; -attribute_deprecated int64_t av_dbl2int(double d) av_const; -attribute_deprecated int32_t av_flt2int(float d) av_const; -attribute_deprecated AVExtFloat av_dbl2ext(double d) av_const; - -#endif /* AVUTIL_INTFLOAT_READWRITE_H */ diff --git a/3rdparty/include/ffmpeg_/libavutil/intreadwrite.h b/3rdparty/include/ffmpeg_/libavutil/intreadwrite.h deleted file mode 100644 index 7ee6977554..0000000000 --- a/3rdparty/include/ffmpeg_/libavutil/intreadwrite.h +++ /dev/null @@ -1,621 +0,0 @@ -/* - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. 
- * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#ifndef AVUTIL_INTREADWRITE_H -#define AVUTIL_INTREADWRITE_H - -#include -#include "libavutil/avconfig.h" -#include "attributes.h" -#include "bswap.h" - -typedef union { - uint64_t u64; - uint32_t u32[2]; - uint16_t u16[4]; - uint8_t u8 [8]; - double f64; - float f32[2]; -} av_alias av_alias64; - -typedef union { - uint32_t u32; - uint16_t u16[2]; - uint8_t u8 [4]; - float f32; -} av_alias av_alias32; - -typedef union { - uint16_t u16; - uint8_t u8 [2]; -} av_alias av_alias16; - -/* - * Arch-specific headers can provide any combination of - * AV_[RW][BLN](16|24|32|48|64) and AV_(COPY|SWAP|ZERO)(64|128) macros. - * Preprocessor symbols must be defined, even if these are implemented - * as inline functions. - */ - -#ifdef HAVE_AV_CONFIG_H - -#include "config.h" - -#if ARCH_ARM -# include "arm/intreadwrite.h" -#elif ARCH_AVR32 -# include "avr32/intreadwrite.h" -#elif ARCH_MIPS -# include "mips/intreadwrite.h" -#elif ARCH_PPC -# include "ppc/intreadwrite.h" -#elif ARCH_TOMI -# include "tomi/intreadwrite.h" -#elif ARCH_X86 -# include "x86/intreadwrite.h" -#endif - -#endif /* HAVE_AV_CONFIG_H */ - -/* - * Map AV_RNXX <-> AV_R[BL]XX for all variants provided by per-arch headers. - */ - -#if AV_HAVE_BIGENDIAN - -# if defined(AV_RN16) && !defined(AV_RB16) -# define AV_RB16(p) AV_RN16(p) -# elif !defined(AV_RN16) && defined(AV_RB16) -# define AV_RN16(p) AV_RB16(p) -# endif - -# if defined(AV_WN16) && !defined(AV_WB16) -# define AV_WB16(p, v) AV_WN16(p, v) -# elif !defined(AV_WN16) && defined(AV_WB16) -# define AV_WN16(p, v) AV_WB16(p, v) -# endif - -# if defined(AV_RN24) && !defined(AV_RB24) -# define AV_RB24(p) AV_RN24(p) -# elif !defined(AV_RN24) && defined(AV_RB24) -# define AV_RN24(p) AV_RB24(p) -# endif - -# if defined(AV_WN24) && !defined(AV_WB24) -# define AV_WB24(p, v) AV_WN24(p, v) -# elif !defined(AV_WN24) && defined(AV_WB24) -# define AV_WN24(p, v) AV_WB24(p, v) -# endif - -# if defined(AV_RN32) && !defined(AV_RB32) -# define AV_RB32(p) AV_RN32(p) -# elif !defined(AV_RN32) && defined(AV_RB32) -# define AV_RN32(p) AV_RB32(p) -# endif - -# if defined(AV_WN32) && !defined(AV_WB32) -# define AV_WB32(p, v) AV_WN32(p, v) -# elif !defined(AV_WN32) && defined(AV_WB32) -# define AV_WN32(p, v) AV_WB32(p, v) -# endif - -# if defined(AV_RN48) && !defined(AV_RB48) -# define AV_RB48(p) AV_RN48(p) -# elif !defined(AV_RN48) && defined(AV_RB48) -# define AV_RN48(p) AV_RB48(p) -# endif - -# if defined(AV_WN48) && !defined(AV_WB48) -# define AV_WB48(p, v) AV_WN48(p, v) -# elif !defined(AV_WN48) && defined(AV_WB48) -# define AV_WN48(p, v) AV_WB48(p, v) -# endif - -# if defined(AV_RN64) && !defined(AV_RB64) -# define AV_RB64(p) AV_RN64(p) -# elif !defined(AV_RN64) && defined(AV_RB64) -# define AV_RN64(p) AV_RB64(p) -# endif - -# if defined(AV_WN64) && !defined(AV_WB64) -# define AV_WB64(p, v) AV_WN64(p, v) -# elif !defined(AV_WN64) && defined(AV_WB64) -# define AV_WN64(p, v) AV_WB64(p, v) -# endif - -#else /* AV_HAVE_BIGENDIAN */ - -# if defined(AV_RN16) && !defined(AV_RL16) -# define AV_RL16(p) AV_RN16(p) -# elif !defined(AV_RN16) && defined(AV_RL16) -# define AV_RN16(p) AV_RL16(p) -# endif - -# if defined(AV_WN16) && !defined(AV_WL16) -# define AV_WL16(p, v) AV_WN16(p, v) -# elif !defined(AV_WN16) && defined(AV_WL16) -# define AV_WN16(p, v) AV_WL16(p, v) -# endif - -# if 
defined(AV_RN24) && !defined(AV_RL24) -# define AV_RL24(p) AV_RN24(p) -# elif !defined(AV_RN24) && defined(AV_RL24) -# define AV_RN24(p) AV_RL24(p) -# endif - -# if defined(AV_WN24) && !defined(AV_WL24) -# define AV_WL24(p, v) AV_WN24(p, v) -# elif !defined(AV_WN24) && defined(AV_WL24) -# define AV_WN24(p, v) AV_WL24(p, v) -# endif - -# if defined(AV_RN32) && !defined(AV_RL32) -# define AV_RL32(p) AV_RN32(p) -# elif !defined(AV_RN32) && defined(AV_RL32) -# define AV_RN32(p) AV_RL32(p) -# endif - -# if defined(AV_WN32) && !defined(AV_WL32) -# define AV_WL32(p, v) AV_WN32(p, v) -# elif !defined(AV_WN32) && defined(AV_WL32) -# define AV_WN32(p, v) AV_WL32(p, v) -# endif - -# if defined(AV_RN48) && !defined(AV_RL48) -# define AV_RL48(p) AV_RN48(p) -# elif !defined(AV_RN48) && defined(AV_RL48) -# define AV_RN48(p) AV_RL48(p) -# endif - -# if defined(AV_WN48) && !defined(AV_WL48) -# define AV_WL48(p, v) AV_WN48(p, v) -# elif !defined(AV_WN48) && defined(AV_WL48) -# define AV_WN48(p, v) AV_WL48(p, v) -# endif - -# if defined(AV_RN64) && !defined(AV_RL64) -# define AV_RL64(p) AV_RN64(p) -# elif !defined(AV_RN64) && defined(AV_RL64) -# define AV_RN64(p) AV_RL64(p) -# endif - -# if defined(AV_WN64) && !defined(AV_WL64) -# define AV_WL64(p, v) AV_WN64(p, v) -# elif !defined(AV_WN64) && defined(AV_WL64) -# define AV_WN64(p, v) AV_WL64(p, v) -# endif - -#endif /* !AV_HAVE_BIGENDIAN */ - -/* - * Define AV_[RW]N helper macros to simplify definitions not provided - * by per-arch headers. - */ - -#if defined(__GNUC__) && !defined(__TI_COMPILER_VERSION__) - -union unaligned_64 { uint64_t l; } __attribute__((packed)) av_alias; -union unaligned_32 { uint32_t l; } __attribute__((packed)) av_alias; -union unaligned_16 { uint16_t l; } __attribute__((packed)) av_alias; - -# define AV_RN(s, p) (((const union unaligned_##s *) (p))->l) -# define AV_WN(s, p, v) ((((union unaligned_##s *) (p))->l) = (v)) - -#elif defined(__DECC) - -# define AV_RN(s, p) (*((const __unaligned uint##s##_t*)(p))) -# define AV_WN(s, p, v) (*((__unaligned uint##s##_t*)(p)) = (v)) - -#elif AV_HAVE_FAST_UNALIGNED - -# define AV_RN(s, p) (((const av_alias##s*)(p))->u##s) -# define AV_WN(s, p, v) (((av_alias##s*)(p))->u##s = (v)) - -#else - -#ifndef AV_RB16 -# define AV_RB16(x) \ - ((((const uint8_t*)(x))[0] << 8) | \ - ((const uint8_t*)(x))[1]) -#endif -#ifndef AV_WB16 -# define AV_WB16(p, darg) do { \ - unsigned d = (darg); \ - ((uint8_t*)(p))[1] = (d); \ - ((uint8_t*)(p))[0] = (d)>>8; \ - } while(0) -#endif - -#ifndef AV_RL16 -# define AV_RL16(x) \ - ((((const uint8_t*)(x))[1] << 8) | \ - ((const uint8_t*)(x))[0]) -#endif -#ifndef AV_WL16 -# define AV_WL16(p, darg) do { \ - unsigned d = (darg); \ - ((uint8_t*)(p))[0] = (d); \ - ((uint8_t*)(p))[1] = (d)>>8; \ - } while(0) -#endif - -#ifndef AV_RB32 -# define AV_RB32(x) \ - (((uint32_t)((const uint8_t*)(x))[0] << 24) | \ - (((const uint8_t*)(x))[1] << 16) | \ - (((const uint8_t*)(x))[2] << 8) | \ - ((const uint8_t*)(x))[3]) -#endif -#ifndef AV_WB32 -# define AV_WB32(p, darg) do { \ - unsigned d = (darg); \ - ((uint8_t*)(p))[3] = (d); \ - ((uint8_t*)(p))[2] = (d)>>8; \ - ((uint8_t*)(p))[1] = (d)>>16; \ - ((uint8_t*)(p))[0] = (d)>>24; \ - } while(0) -#endif - -#ifndef AV_RL32 -# define AV_RL32(x) \ - (((uint32_t)((const uint8_t*)(x))[3] << 24) | \ - (((const uint8_t*)(x))[2] << 16) | \ - (((const uint8_t*)(x))[1] << 8) | \ - ((const uint8_t*)(x))[0]) -#endif -#ifndef AV_WL32 -# define AV_WL32(p, darg) do { \ - unsigned d = (darg); \ - ((uint8_t*)(p))[0] = (d); \ - ((uint8_t*)(p))[1] = (d)>>8; \ 
- ((uint8_t*)(p))[2] = (d)>>16; \ - ((uint8_t*)(p))[3] = (d)>>24; \ - } while(0) -#endif - -#ifndef AV_RB64 -# define AV_RB64(x) \ - (((uint64_t)((const uint8_t*)(x))[0] << 56) | \ - ((uint64_t)((const uint8_t*)(x))[1] << 48) | \ - ((uint64_t)((const uint8_t*)(x))[2] << 40) | \ - ((uint64_t)((const uint8_t*)(x))[3] << 32) | \ - ((uint64_t)((const uint8_t*)(x))[4] << 24) | \ - ((uint64_t)((const uint8_t*)(x))[5] << 16) | \ - ((uint64_t)((const uint8_t*)(x))[6] << 8) | \ - (uint64_t)((const uint8_t*)(x))[7]) -#endif -#ifndef AV_WB64 -# define AV_WB64(p, darg) do { \ - uint64_t d = (darg); \ - ((uint8_t*)(p))[7] = (d); \ - ((uint8_t*)(p))[6] = (d)>>8; \ - ((uint8_t*)(p))[5] = (d)>>16; \ - ((uint8_t*)(p))[4] = (d)>>24; \ - ((uint8_t*)(p))[3] = (d)>>32; \ - ((uint8_t*)(p))[2] = (d)>>40; \ - ((uint8_t*)(p))[1] = (d)>>48; \ - ((uint8_t*)(p))[0] = (d)>>56; \ - } while(0) -#endif - -#ifndef AV_RL64 -# define AV_RL64(x) \ - (((uint64_t)((const uint8_t*)(x))[7] << 56) | \ - ((uint64_t)((const uint8_t*)(x))[6] << 48) | \ - ((uint64_t)((const uint8_t*)(x))[5] << 40) | \ - ((uint64_t)((const uint8_t*)(x))[4] << 32) | \ - ((uint64_t)((const uint8_t*)(x))[3] << 24) | \ - ((uint64_t)((const uint8_t*)(x))[2] << 16) | \ - ((uint64_t)((const uint8_t*)(x))[1] << 8) | \ - (uint64_t)((const uint8_t*)(x))[0]) -#endif -#ifndef AV_WL64 -# define AV_WL64(p, darg) do { \ - uint64_t d = (darg); \ - ((uint8_t*)(p))[0] = (d); \ - ((uint8_t*)(p))[1] = (d)>>8; \ - ((uint8_t*)(p))[2] = (d)>>16; \ - ((uint8_t*)(p))[3] = (d)>>24; \ - ((uint8_t*)(p))[4] = (d)>>32; \ - ((uint8_t*)(p))[5] = (d)>>40; \ - ((uint8_t*)(p))[6] = (d)>>48; \ - ((uint8_t*)(p))[7] = (d)>>56; \ - } while(0) -#endif - -#if AV_HAVE_BIGENDIAN -# define AV_RN(s, p) AV_RB##s(p) -# define AV_WN(s, p, v) AV_WB##s(p, v) -#else -# define AV_RN(s, p) AV_RL##s(p) -# define AV_WN(s, p, v) AV_WL##s(p, v) -#endif - -#endif /* HAVE_FAST_UNALIGNED */ - -#ifndef AV_RN16 -# define AV_RN16(p) AV_RN(16, p) -#endif - -#ifndef AV_RN32 -# define AV_RN32(p) AV_RN(32, p) -#endif - -#ifndef AV_RN64 -# define AV_RN64(p) AV_RN(64, p) -#endif - -#ifndef AV_WN16 -# define AV_WN16(p, v) AV_WN(16, p, v) -#endif - -#ifndef AV_WN32 -# define AV_WN32(p, v) AV_WN(32, p, v) -#endif - -#ifndef AV_WN64 -# define AV_WN64(p, v) AV_WN(64, p, v) -#endif - -#if AV_HAVE_BIGENDIAN -# define AV_RB(s, p) AV_RN##s(p) -# define AV_WB(s, p, v) AV_WN##s(p, v) -# define AV_RL(s, p) av_bswap##s(AV_RN##s(p)) -# define AV_WL(s, p, v) AV_WN##s(p, av_bswap##s(v)) -#else -# define AV_RB(s, p) av_bswap##s(AV_RN##s(p)) -# define AV_WB(s, p, v) AV_WN##s(p, av_bswap##s(v)) -# define AV_RL(s, p) AV_RN##s(p) -# define AV_WL(s, p, v) AV_WN##s(p, v) -#endif - -#define AV_RB8(x) (((const uint8_t*)(x))[0]) -#define AV_WB8(p, d) do { ((uint8_t*)(p))[0] = (d); } while(0) - -#define AV_RL8(x) AV_RB8(x) -#define AV_WL8(p, d) AV_WB8(p, d) - -#ifndef AV_RB16 -# define AV_RB16(p) AV_RB(16, p) -#endif -#ifndef AV_WB16 -# define AV_WB16(p, v) AV_WB(16, p, v) -#endif - -#ifndef AV_RL16 -# define AV_RL16(p) AV_RL(16, p) -#endif -#ifndef AV_WL16 -# define AV_WL16(p, v) AV_WL(16, p, v) -#endif - -#ifndef AV_RB32 -# define AV_RB32(p) AV_RB(32, p) -#endif -#ifndef AV_WB32 -# define AV_WB32(p, v) AV_WB(32, p, v) -#endif - -#ifndef AV_RL32 -# define AV_RL32(p) AV_RL(32, p) -#endif -#ifndef AV_WL32 -# define AV_WL32(p, v) AV_WL(32, p, v) -#endif - -#ifndef AV_RB64 -# define AV_RB64(p) AV_RB(64, p) -#endif -#ifndef AV_WB64 -# define AV_WB64(p, v) AV_WB(64, p, v) -#endif - -#ifndef AV_RL64 -# define AV_RL64(p) AV_RL(64, p) -#endif -#ifndef 
AV_WL64 -# define AV_WL64(p, v) AV_WL(64, p, v) -#endif - -#ifndef AV_RB24 -# define AV_RB24(x) \ - ((((const uint8_t*)(x))[0] << 16) | \ - (((const uint8_t*)(x))[1] << 8) | \ - ((const uint8_t*)(x))[2]) -#endif -#ifndef AV_WB24 -# define AV_WB24(p, d) do { \ - ((uint8_t*)(p))[2] = (d); \ - ((uint8_t*)(p))[1] = (d)>>8; \ - ((uint8_t*)(p))[0] = (d)>>16; \ - } while(0) -#endif - -#ifndef AV_RL24 -# define AV_RL24(x) \ - ((((const uint8_t*)(x))[2] << 16) | \ - (((const uint8_t*)(x))[1] << 8) | \ - ((const uint8_t*)(x))[0]) -#endif -#ifndef AV_WL24 -# define AV_WL24(p, d) do { \ - ((uint8_t*)(p))[0] = (d); \ - ((uint8_t*)(p))[1] = (d)>>8; \ - ((uint8_t*)(p))[2] = (d)>>16; \ - } while(0) -#endif - -#ifndef AV_RB48 -# define AV_RB48(x) \ - (((uint64_t)((const uint8_t*)(x))[0] << 40) | \ - ((uint64_t)((const uint8_t*)(x))[1] << 32) | \ - ((uint64_t)((const uint8_t*)(x))[2] << 24) | \ - ((uint64_t)((const uint8_t*)(x))[3] << 16) | \ - ((uint64_t)((const uint8_t*)(x))[4] << 8) | \ - (uint64_t)((const uint8_t*)(x))[5]) -#endif -#ifndef AV_WB48 -# define AV_WB48(p, darg) do { \ - uint64_t d = (darg); \ - ((uint8_t*)(p))[5] = (d); \ - ((uint8_t*)(p))[4] = (d)>>8; \ - ((uint8_t*)(p))[3] = (d)>>16; \ - ((uint8_t*)(p))[2] = (d)>>24; \ - ((uint8_t*)(p))[1] = (d)>>32; \ - ((uint8_t*)(p))[0] = (d)>>40; \ - } while(0) -#endif - -#ifndef AV_RL48 -# define AV_RL48(x) \ - (((uint64_t)((const uint8_t*)(x))[5] << 40) | \ - ((uint64_t)((const uint8_t*)(x))[4] << 32) | \ - ((uint64_t)((const uint8_t*)(x))[3] << 24) | \ - ((uint64_t)((const uint8_t*)(x))[2] << 16) | \ - ((uint64_t)((const uint8_t*)(x))[1] << 8) | \ - (uint64_t)((const uint8_t*)(x))[0]) -#endif -#ifndef AV_WL48 -# define AV_WL48(p, darg) do { \ - uint64_t d = (darg); \ - ((uint8_t*)(p))[0] = (d); \ - ((uint8_t*)(p))[1] = (d)>>8; \ - ((uint8_t*)(p))[2] = (d)>>16; \ - ((uint8_t*)(p))[3] = (d)>>24; \ - ((uint8_t*)(p))[4] = (d)>>32; \ - ((uint8_t*)(p))[5] = (d)>>40; \ - } while(0) -#endif - -/* - * The AV_[RW]NA macros access naturally aligned data - * in a type-safe way. - */ - -#define AV_RNA(s, p) (((const av_alias##s*)(p))->u##s) -#define AV_WNA(s, p, v) (((av_alias##s*)(p))->u##s = (v)) - -#ifndef AV_RN16A -# define AV_RN16A(p) AV_RNA(16, p) -#endif - -#ifndef AV_RN32A -# define AV_RN32A(p) AV_RNA(32, p) -#endif - -#ifndef AV_RN64A -# define AV_RN64A(p) AV_RNA(64, p) -#endif - -#ifndef AV_WN16A -# define AV_WN16A(p, v) AV_WNA(16, p, v) -#endif - -#ifndef AV_WN32A -# define AV_WN32A(p, v) AV_WNA(32, p, v) -#endif - -#ifndef AV_WN64A -# define AV_WN64A(p, v) AV_WNA(64, p, v) -#endif - -/* - * The AV_COPYxxU macros are suitable for copying data to/from unaligned - * memory locations. - */ - -#define AV_COPYU(n, d, s) AV_WN##n(d, AV_RN##n(s)); - -#ifndef AV_COPY16U -# define AV_COPY16U(d, s) AV_COPYU(16, d, s) -#endif - -#ifndef AV_COPY32U -# define AV_COPY32U(d, s) AV_COPYU(32, d, s) -#endif - -#ifndef AV_COPY64U -# define AV_COPY64U(d, s) AV_COPYU(64, d, s) -#endif - -#ifndef AV_COPY128U -# define AV_COPY128U(d, s) \ - do { \ - AV_COPY64U(d, s); \ - AV_COPY64U((char *)(d) + 8, (const char *)(s) + 8); \ - } while(0) -#endif - -/* Parameters for AV_COPY*, AV_SWAP*, AV_ZERO* must be - * naturally aligned. They may be implemented using MMX, - * so emms_c() must be called before using any float code - * afterwards. 
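/* Editor's note: toy sketch, not part of the original intreadwrite.h, of the
 * byte-order helpers defined above; the byte values are arbitrary. */
#include <stdio.h>
#include <stdint.h>
#include <libavutil/intreadwrite.h>

static void endian_example(void)
{
    uint8_t le[4] = { 0x78, 0x56, 0x34, 0x12 };
    uint8_t be[4];

    uint32_t v = AV_RL32(le);  /* 0x12345678 on any host endianness */
    AV_WB32(be, v);            /* be[] now holds 12 34 56 78        */
    printf("0x%08X %02X%02X%02X%02X\n", (unsigned)v, be[0], be[1], be[2], be[3]);
}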
- */ - -#define AV_COPY(n, d, s) \ - (((av_alias##n*)(d))->u##n = ((const av_alias##n*)(s))->u##n) - -#ifndef AV_COPY16 -# define AV_COPY16(d, s) AV_COPY(16, d, s) -#endif - -#ifndef AV_COPY32 -# define AV_COPY32(d, s) AV_COPY(32, d, s) -#endif - -#ifndef AV_COPY64 -# define AV_COPY64(d, s) AV_COPY(64, d, s) -#endif - -#ifndef AV_COPY128 -# define AV_COPY128(d, s) \ - do { \ - AV_COPY64(d, s); \ - AV_COPY64((char*)(d)+8, (char*)(s)+8); \ - } while(0) -#endif - -#define AV_SWAP(n, a, b) FFSWAP(av_alias##n, *(av_alias##n*)(a), *(av_alias##n*)(b)) - -#ifndef AV_SWAP64 -# define AV_SWAP64(a, b) AV_SWAP(64, a, b) -#endif - -#define AV_ZERO(n, d) (((av_alias##n*)(d))->u##n = 0) - -#ifndef AV_ZERO16 -# define AV_ZERO16(d) AV_ZERO(16, d) -#endif - -#ifndef AV_ZERO32 -# define AV_ZERO32(d) AV_ZERO(32, d) -#endif - -#ifndef AV_ZERO64 -# define AV_ZERO64(d) AV_ZERO(64, d) -#endif - -#ifndef AV_ZERO128 -# define AV_ZERO128(d) \ - do { \ - AV_ZERO64(d); \ - AV_ZERO64((char*)(d)+8); \ - } while(0) -#endif - -#endif /* AVUTIL_INTREADWRITE_H */ diff --git a/3rdparty/include/ffmpeg_/libavutil/lfg.h b/3rdparty/include/ffmpeg_/libavutil/lfg.h deleted file mode 100644 index ec90562cf2..0000000000 --- a/3rdparty/include/ffmpeg_/libavutil/lfg.h +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Lagged Fibonacci PRNG - * Copyright (c) 2008 Michael Niedermayer - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#ifndef AVUTIL_LFG_H -#define AVUTIL_LFG_H - -typedef struct AVLFG { - unsigned int state[64]; - int index; -} AVLFG; - -void av_lfg_init(AVLFG *c, unsigned int seed); - -/** - * Get the next random unsigned 32-bit number using an ALFG. - * - * Please also consider a simple LCG like state= state*1664525+1013904223, - * it may be good enough and faster for your specific use case. - */ -static inline unsigned int av_lfg_get(AVLFG *c){ - c->state[c->index & 63] = c->state[(c->index-24) & 63] + c->state[(c->index-55) & 63]; - return c->state[c->index++ & 63]; -} - -/** - * Get the next random unsigned 32-bit number using a MLFG. - * - * Please also consider av_lfg_get() above, it is faster. - */ -static inline unsigned int av_mlfg_get(AVLFG *c){ - unsigned int a= c->state[(c->index-55) & 63]; - unsigned int b= c->state[(c->index-24) & 63]; - return c->state[c->index++ & 63] = 2*a*b+a+b; -} - -/** - * Get the next two numbers generated by a Box-Muller Gaussian - * generator using the random numbers issued by lfg. 
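/* Editor's note: small sketch, not part of the original lfg.h, of seeding the
 * lagged Fibonacci generator declared above and pulling a few values. */
#include <stdio.h>
#include <libavutil/lfg.h>

static void lfg_example(void)
{
    int i;
    AVLFG rng;
    av_lfg_init(&rng, 0xDEADBEEFu);          /* fixed seed: reproducible run */

    for (i = 0; i < 4; i++)
        printf("%u\n", av_lfg_get(&rng));    /* next unsigned 32-bit number  */
}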
- * - * @param out array where the two generated numbers are placed - */ -void av_bmg_get(AVLFG *lfg, double out[2]); - -#endif /* AVUTIL_LFG_H */ diff --git a/3rdparty/include/ffmpeg_/libavutil/log.h b/3rdparty/include/ffmpeg_/libavutil/log.h deleted file mode 100644 index 7ea95fa503..0000000000 --- a/3rdparty/include/ffmpeg_/libavutil/log.h +++ /dev/null @@ -1,222 +0,0 @@ -/* - * copyright (c) 2006 Michael Niedermayer - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#ifndef AVUTIL_LOG_H -#define AVUTIL_LOG_H - -#include -#include "avutil.h" -#include "attributes.h" - -typedef enum { - AV_CLASS_CATEGORY_NA = 0, - AV_CLASS_CATEGORY_INPUT, - AV_CLASS_CATEGORY_OUTPUT, - AV_CLASS_CATEGORY_MUXER, - AV_CLASS_CATEGORY_DEMUXER, - AV_CLASS_CATEGORY_ENCODER, - AV_CLASS_CATEGORY_DECODER, - AV_CLASS_CATEGORY_FILTER, - AV_CLASS_CATEGORY_BITSTREAM_FILTER, - AV_CLASS_CATEGORY_SWSCALER, - AV_CLASS_CATEGORY_SWRESAMPLER, - AV_CLASS_CATEGORY_NB, ///< not part of ABI/API -}AVClassCategory; - -struct AVOptionRanges; - -/** - * Describe the class of an AVClass context structure. That is an - * arbitrary struct of which the first field is a pointer to an - * AVClass struct (e.g. AVCodecContext, AVFormatContext etc.). - */ -typedef struct AVClass { - /** - * The name of the class; usually it is the same name as the - * context structure type to which the AVClass is associated. - */ - const char* class_name; - - /** - * A pointer to a function which returns the name of a context - * instance ctx associated with the class. - */ - const char* (*item_name)(void* ctx); - - /** - * a pointer to the first option specified in the class if any or NULL - * - * @see av_set_default_options() - */ - const struct AVOption *option; - - /** - * LIBAVUTIL_VERSION with which this structure was created. - * This is used to allow fields to be added without requiring major - * version bumps everywhere. - */ - - int version; - - /** - * Offset in the structure where log_level_offset is stored. - * 0 means there is no such variable - */ - int log_level_offset_offset; - - /** - * Offset in the structure where a pointer to the parent context for - * logging is stored. For example a decoder could pass its AVCodecContext - * to eval as such a parent context, which an av_log() implementation - * could then leverage to display the parent context. - * The offset can be NULL. - */ - int parent_log_context_offset; - - /** - * Return next AVOptions-enabled child or NULL - */ - void* (*child_next)(void *obj, void *prev); - - /** - * Return an AVClass corresponding to the next potential - * AVOptions-enabled child. - * - * The difference between child_next and this is that - * child_next iterates over _already existing_ objects, while - * child_class_next iterates over _all possible_ children. 
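
A minimal sketch of hooking a custom context into AVClass so that av_log() (declared further down in this header) can prefix messages with the context name. MyContext and my_class are illustrative names, not part of the FFmpeg API; linking against libavutil is assumed:

#include <libavutil/log.h>

typedef struct MyContext {
    const AVClass *class;        /* must be the first field of the struct */
    int frames_seen;
} MyContext;

static const AVClass my_class = {
    .class_name = "MyContext",
    .item_name  = av_default_item_name,   /* declared later in this header */
    .version    = LIBAVUTIL_VERSION_INT,
};

int main(void)
{
    MyContext ctx = { .class = &my_class };
    av_log(&ctx, AV_LOG_INFO, "processed %d frames\n", ctx.frames_seen);
    return 0;
}
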
- */ - const struct AVClass* (*child_class_next)(const struct AVClass *prev); - - /** - * Category used for visualization (like color) - * This is only set if the category is equal for all objects using this class. - * available since version (51 << 16 | 56 << 8 | 100) - */ - AVClassCategory category; - - /** - * Callback to return the category. - * available since version (51 << 16 | 59 << 8 | 100) - */ - AVClassCategory (*get_category)(void* ctx); - - /** - * Callback to return the supported/allowed ranges. - * available since version (52.12) - */ - int (*query_ranges)(struct AVOptionRanges **, void *obj, const char *key, int flags); -} AVClass; - -/* av_log API */ - -#define AV_LOG_QUIET -8 - -/** - * Something went really wrong and we will crash now. - */ -#define AV_LOG_PANIC 0 - -/** - * Something went wrong and recovery is not possible. - * For example, no header was found for a format which depends - * on headers or an illegal combination of parameters is used. - */ -#define AV_LOG_FATAL 8 - -/** - * Something went wrong and cannot losslessly be recovered. - * However, not all future data is affected. - */ -#define AV_LOG_ERROR 16 - -/** - * Something somehow does not look correct. This may or may not - * lead to problems. An example would be the use of '-vstrict -2'. - */ -#define AV_LOG_WARNING 24 - -#define AV_LOG_INFO 32 -#define AV_LOG_VERBOSE 40 - -/** - * Stuff which is only useful for libav* developers. - */ -#define AV_LOG_DEBUG 48 - -#define AV_LOG_MAX_OFFSET (AV_LOG_DEBUG - AV_LOG_QUIET) - -/** - * Send the specified message to the log if the level is less than or equal - * to the current av_log_level. By default, all logging messages are sent to - * stderr. This behavior can be altered by setting a different av_vlog callback - * function. - * - * @param avcl A pointer to an arbitrary struct of which the first field is a - * pointer to an AVClass struct. - * @param level The importance level of the message, lower values signifying - * higher importance. - * @param fmt The format string (printf-compatible) that specifies how - * subsequent arguments are converted to output. - * @see av_vlog - */ -void av_log(void *avcl, int level, const char *fmt, ...) av_printf_format(3, 4); - -void av_vlog(void *avcl, int level, const char *fmt, va_list); -int av_log_get_level(void); -void av_log_set_level(int); -void av_log_set_callback(void (*)(void*, int, const char*, va_list)); -void av_log_default_callback(void* ptr, int level, const char* fmt, va_list vl); -const char* av_default_item_name(void* ctx); -AVClassCategory av_default_get_category(void *ptr); - -/** - * Format a line of log the same way as the default callback. - * @param line buffer to receive the formated line - * @param line_size size of the buffer - * @param print_prefix used to store whether the prefix must be printed; - * must point to a persistent integer initially set to 1 - */ -void av_log_format_line(void *ptr, int level, const char *fmt, va_list vl, - char *line, int line_size, int *print_prefix); - -/** - * av_dlog macros - * Useful to print debug messages that shouldn't get compiled in normally. - */ - -#ifdef DEBUG -# define av_dlog(pctx, ...) av_log(pctx, AV_LOG_DEBUG, __VA_ARGS__) -#else -# define av_dlog(pctx, ...) 
do { if (0) av_log(pctx, AV_LOG_DEBUG, __VA_ARGS__); } while (0) -#endif - -/** - * Skip repeated messages, this requires the user app to use av_log() instead of - * (f)printf as the 2 would otherwise interfere and lead to - * "Last message repeated x times" messages below (f)printf messages with some - * bad luck. - * Also to receive the last, "last repeated" line if any, the user app must - * call av_log(NULL, AV_LOG_QUIET, "%s", ""); at the end - */ -#define AV_LOG_SKIP_REPEATED 1 -void av_log_set_flags(int arg); - -#endif /* AVUTIL_LOG_H */ diff --git a/3rdparty/include/ffmpeg_/libavutil/lzo.h b/3rdparty/include/ffmpeg_/libavutil/lzo.h deleted file mode 100644 index c03403992d..0000000000 --- a/3rdparty/include/ffmpeg_/libavutil/lzo.h +++ /dev/null @@ -1,66 +0,0 @@ -/* - * LZO 1x decompression - * copyright (c) 2006 Reimar Doeffinger - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#ifndef AVUTIL_LZO_H -#define AVUTIL_LZO_H - -/** - * @defgroup lavu_lzo LZO - * @ingroup lavu_crypto - * - * @{ - */ - -#include - -/** @name Error flags returned by av_lzo1x_decode - * @{ */ -/// end of the input buffer reached before decoding finished -#define AV_LZO_INPUT_DEPLETED 1 -/// decoded data did not fit into output buffer -#define AV_LZO_OUTPUT_FULL 2 -/// a reference to previously decoded data was wrong -#define AV_LZO_INVALID_BACKPTR 4 -/// a non-specific error in the compressed bitstream -#define AV_LZO_ERROR 8 -/** @} */ - -#define AV_LZO_INPUT_PADDING 8 -#define AV_LZO_OUTPUT_PADDING 12 - -/** - * @brief Decodes LZO 1x compressed data. - * @param out output buffer - * @param outlen size of output buffer, number of bytes left are returned here - * @param in input buffer - * @param inlen size of input buffer, number of bytes left are returned here - * @return 0 on success, otherwise a combination of the error flags above - * - * Make sure all buffers are appropriately padded, in must provide - * AV_LZO_INPUT_PADDING, out must provide AV_LZO_OUTPUT_PADDING additional bytes. - */ -int av_lzo1x_decode(void *out, int *outlen, const void *in, int *inlen); - -/** - * @} - */ - -#endif /* AVUTIL_LZO_H */ diff --git a/3rdparty/include/ffmpeg_/libavutil/mathematics.h b/3rdparty/include/ffmpeg_/libavutil/mathematics.h deleted file mode 100644 index 71f0392218..0000000000 --- a/3rdparty/include/ffmpeg_/libavutil/mathematics.h +++ /dev/null @@ -1,147 +0,0 @@ -/* - * copyright (c) 2005-2012 Michael Niedermayer - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. 
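
A usage sketch for av_lzo1x_decode() as documented above: both length parameters are in/out counts of bytes left, and both buffers must carry the required padding. lzo_decompress() is a hypothetical wrapper, and linking against libavutil is assumed:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <libavutil/lzo.h>

int lzo_decompress(const uint8_t *compressed, int compressed_size,
                   uint8_t *dst, int out_size)
{
    /* The input must stay readable for AV_LZO_INPUT_PADDING bytes past its
     * end and the output writable for AV_LZO_OUTPUT_PADDING bytes past its
     * end, so both are copied into padded temporary buffers here. */
    uint8_t *in  = malloc(compressed_size + AV_LZO_INPUT_PADDING);
    uint8_t *out = malloc(out_size + AV_LZO_OUTPUT_PADDING);
    int inlen  = compressed_size;   /* in/out: bytes of input left */
    int outlen = out_size;          /* in/out: bytes of output space left */
    int ret;

    if (!in || !out) {
        free(in);
        free(out);
        return -1;
    }
    memcpy(in, compressed, compressed_size);

    ret = av_lzo1x_decode(out, &outlen, in, &inlen);
    if (ret == 0)
        memcpy(dst, out, out_size - outlen);   /* bytes actually produced */
    else
        fprintf(stderr, "LZO error flags: 0x%x\n", ret);

    free(in);
    free(out);
    return ret;
}
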
- * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#ifndef AVUTIL_MATHEMATICS_H -#define AVUTIL_MATHEMATICS_H - -#include -#include -#include "attributes.h" -#include "rational.h" -#include "intfloat.h" - -#ifndef M_E -#define M_E 2.7182818284590452354 /* e */ -#endif -#ifndef M_LN2 -#define M_LN2 0.69314718055994530942 /* log_e 2 */ -#endif -#ifndef M_LN10 -#define M_LN10 2.30258509299404568402 /* log_e 10 */ -#endif -#ifndef M_LOG2_10 -#define M_LOG2_10 3.32192809488736234787 /* log_2 10 */ -#endif -#ifndef M_PHI -#define M_PHI 1.61803398874989484820 /* phi / golden ratio */ -#endif -#ifndef M_PI -#define M_PI 3.14159265358979323846 /* pi */ -#endif -#ifndef M_SQRT1_2 -#define M_SQRT1_2 0.70710678118654752440 /* 1/sqrt(2) */ -#endif -#ifndef M_SQRT2 -#define M_SQRT2 1.41421356237309504880 /* sqrt(2) */ -#endif -#ifndef NAN -#define NAN av_int2float(0x7fc00000) -#endif -#ifndef INFINITY -#define INFINITY av_int2float(0x7f800000) -#endif - -/** - * @addtogroup lavu_math - * @{ - */ - - -enum AVRounding { - AV_ROUND_ZERO = 0, ///< Round toward zero. - AV_ROUND_INF = 1, ///< Round away from zero. - AV_ROUND_DOWN = 2, ///< Round toward -infinity. - AV_ROUND_UP = 3, ///< Round toward +infinity. - AV_ROUND_NEAR_INF = 5, ///< Round to nearest and halfway cases away from zero. - AV_ROUND_PASS_MINMAX = 8192, ///< Flag to pass INT64_MIN/MAX through instead of rescaling, this avoids special cases for AV_NOPTS_VALUE -}; - -/** - * Return the greatest common divisor of a and b. - * If both a and b are 0 or either or both are <0 then behavior is - * undefined. - */ -int64_t av_const av_gcd(int64_t a, int64_t b); - -/** - * Rescale a 64-bit integer with rounding to nearest. - * A simple a*b/c isn't possible as it can overflow. - */ -int64_t av_rescale(int64_t a, int64_t b, int64_t c) av_const; - -/** - * Rescale a 64-bit integer with specified rounding. - * A simple a*b/c isn't possible as it can overflow. - * - * @return rescaled value a, or if AV_ROUND_PASS_MINMAX is set and a is - * INT64_MIN or INT64_MAX then a is passed through unchanged. - */ -int64_t av_rescale_rnd(int64_t a, int64_t b, int64_t c, enum AVRounding) av_const; - -/** - * Rescale a 64-bit integer by 2 rational numbers. - */ -int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq) av_const; - -/** - * Rescale a 64-bit integer by 2 rational numbers with specified rounding. - * - * @return rescaled value a, or if AV_ROUND_PASS_MINMAX is set and a is - * INT64_MIN or INT64_MAX then a is passed through unchanged. - */ -int64_t av_rescale_q_rnd(int64_t a, AVRational bq, AVRational cq, - enum AVRounding) av_const; - -/** - * Compare 2 timestamps each in its own timebases. - * The result of the function is undefined if one of the timestamps - * is outside the int64_t range when represented in the others timebase. - * @return -1 if ts_a is before ts_b, 1 if ts_a is after ts_b or 0 if they represent the same position - */ -int av_compare_ts(int64_t ts_a, AVRational tb_a, int64_t ts_b, AVRational tb_b); - -/** - * Compare 2 integers modulo mod. 
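
A small sketch of the rescaling helpers declared above: convert a timestamp from a 1/90000 time base to milliseconds with av_rescale_q(), and compare two timestamps that live in different time bases with av_compare_ts(). Linking against libavutil is assumed:

#include <stdio.h>
#include <libavutil/mathematics.h>

int main(void)
{
    AVRational tb_90k = { 1, 90000 };   /* MPEG-TS style time base */
    AVRational tb_ms  = { 1, 1000 };    /* milliseconds */
    int64_t pts = 450000;               /* 5 seconds in 1/90000 units */

    /* a * bq / cq, rounded to nearest: 450000 * 1000 / 90000 = 5000 */
    printf("%lld ms\n", (long long)av_rescale_q(pts, tb_90k, tb_ms));

    /* negative if the first timestamp is earlier, 0 if equal, positive if later */
    printf("%d\n", av_compare_ts(4500, tb_ms, pts, tb_90k));   /* 4.5s vs 5s */
    return 0;
}
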
- * That is we compare integers a and b for which only the least - * significant log2(mod) bits are known. - * - * @param mod must be a power of 2 - * @return a negative value if a is smaller than b - * a positive value if a is greater than b - * 0 if a equals b - */ -int64_t av_compare_mod(uint64_t a, uint64_t b, uint64_t mod); - -/** - * Rescale a timestamp while preserving known durations. - * - * @param in_ts Input timestamp - * @param in_tb Input timesbase - * @param fs_tb Duration and *last timebase - * @param duration duration till the next call - * @param out_tb Output timesbase - */ -int64_t av_rescale_delta(AVRational in_tb, int64_t in_ts, AVRational fs_tb, int duration, int64_t *last, AVRational out_tb); - -/** - * @} - */ - -#endif /* AVUTIL_MATHEMATICS_H */ diff --git a/3rdparty/include/ffmpeg_/libavutil/md5.h b/3rdparty/include/ffmpeg_/libavutil/md5.h deleted file mode 100644 index 79702c88c2..0000000000 --- a/3rdparty/include/ffmpeg_/libavutil/md5.h +++ /dev/null @@ -1,81 +0,0 @@ -/* - * copyright (c) 2006 Michael Niedermayer - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#ifndef AVUTIL_MD5_H -#define AVUTIL_MD5_H - -#include - -#include "attributes.h" -#include "version.h" - -/** - * @defgroup lavu_md5 MD5 - * @ingroup lavu_crypto - * @{ - */ - -extern const int av_md5_size; - -struct AVMD5; - -/** - * Allocate an AVMD5 context. - */ -struct AVMD5 *av_md5_alloc(void); - -/** - * Initialize MD5 hashing. - * - * @param ctx pointer to the function context (of size av_md5_size) - */ -void av_md5_init(struct AVMD5 *ctx); - -/** - * Update hash value. - * - * @param ctx hash function context - * @param src input data to update hash with - * @param len input data length - */ -void av_md5_update(struct AVMD5 *ctx, const uint8_t *src, int len); - -/** - * Finish hashing and output digest value. - * - * @param ctx hash function context - * @param dst buffer where output digest value is stored - */ -void av_md5_final(struct AVMD5 *ctx, uint8_t *dst); - -/** - * Hash an array of data. - * - * @param dst The output buffer to write the digest into - * @param src The data to hash - * @param len The length of the data, in bytes - */ -void av_md5_sum(uint8_t *dst, const uint8_t *src, const int len); - -/** - * @} - */ - -#endif /* AVUTIL_MD5_H */ diff --git a/3rdparty/include/ffmpeg_/libavutil/mem.h b/3rdparty/include/ffmpeg_/libavutil/mem.h deleted file mode 100644 index fb23a69094..0000000000 --- a/3rdparty/include/ffmpeg_/libavutil/mem.h +++ /dev/null @@ -1,307 +0,0 @@ -/* - * copyright (c) 2006 Michael Niedermayer - * - * This file is part of FFmpeg. 
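
A minimal sketch of the two ways to use the MD5 helpers declared above: one-shot hashing with av_md5_sum(), or incremental hashing through an allocated context. Linking against libavutil is assumed, and av_free() (from the mem.h removed further below) is used to release the context:

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <libavutil/md5.h>
#include <libavutil/mem.h>

int main(void)
{
    const uint8_t msg[] = "hello";
    uint8_t digest[16];                       /* an MD5 digest is 16 bytes */

    av_md5_sum(digest, msg, strlen((const char *)msg));

    /* Incremental variant, e.g. when data arrives in chunks: */
    struct AVMD5 *ctx = av_md5_alloc();
    av_md5_init(ctx);
    av_md5_update(ctx, msg, 3);
    av_md5_update(ctx, msg + 3, 2);
    av_md5_final(ctx, digest);                /* same digest as the one-shot call */
    av_free(ctx);

    for (int i = 0; i < 16; i++)
        printf("%02x", digest[i]);
    printf("\n");
    return 0;
}
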
- * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -/** - * @file - * memory handling functions - */ - -#ifndef AVUTIL_MEM_H -#define AVUTIL_MEM_H - -#include -#include - -#include "attributes.h" -#include "error.h" -#include "avutil.h" - -/** - * @addtogroup lavu_mem - * @{ - */ - - -#if defined(__INTEL_COMPILER) && __INTEL_COMPILER < 1110 || defined(__SUNPRO_C) - #define DECLARE_ALIGNED(n,t,v) t __attribute__ ((aligned (n))) v - #define DECLARE_ASM_CONST(n,t,v) const t __attribute__ ((aligned (n))) v -#elif defined(__TI_COMPILER_VERSION__) - #define DECLARE_ALIGNED(n,t,v) \ - AV_PRAGMA(DATA_ALIGN(v,n)) \ - t __attribute__((aligned(n))) v - #define DECLARE_ASM_CONST(n,t,v) \ - AV_PRAGMA(DATA_ALIGN(v,n)) \ - static const t __attribute__((aligned(n))) v -#elif defined(__GNUC__) - #define DECLARE_ALIGNED(n,t,v) t __attribute__ ((aligned (n))) v - #define DECLARE_ASM_CONST(n,t,v) static const t av_used __attribute__ ((aligned (n))) v -#elif defined(_MSC_VER) - #define DECLARE_ALIGNED(n,t,v) __declspec(align(n)) t v - #define DECLARE_ASM_CONST(n,t,v) __declspec(align(n)) static const t v -#else - #define DECLARE_ALIGNED(n,t,v) t v - #define DECLARE_ASM_CONST(n,t,v) static const t v -#endif - -#if AV_GCC_VERSION_AT_LEAST(3,1) - #define av_malloc_attrib __attribute__((__malloc__)) -#else - #define av_malloc_attrib -#endif - -#if AV_GCC_VERSION_AT_LEAST(4,3) - #define av_alloc_size(...) __attribute__((alloc_size(__VA_ARGS__))) -#else - #define av_alloc_size(...) -#endif - -/** - * Allocate a block of size bytes with alignment suitable for all - * memory accesses (including vectors if available on the CPU). - * @param size Size in bytes for the memory block to be allocated. - * @return Pointer to the allocated block, NULL if the block cannot - * be allocated. - * @see av_mallocz() - */ -void *av_malloc(size_t size) av_malloc_attrib av_alloc_size(1); - -/** - * Helper function to allocate a block of size * nmemb bytes with - * using av_malloc() - * @param nmemb Number of elements - * @param size Size of the single element - * @return Pointer to the allocated block, NULL if the block cannot - * be allocated. - * @see av_malloc() - */ -av_alloc_size(1, 2) static inline void *av_malloc_array(size_t nmemb, size_t size) -{ - if (size <= 0 || nmemb >= INT_MAX / size) - return NULL; - return av_malloc(nmemb * size); -} - -/** - * Allocate or reallocate a block of memory. - * If ptr is NULL and size > 0, allocate a new block. If - * size is zero, free the memory block pointed to by ptr. - * @param ptr Pointer to a memory block already allocated with - * av_malloc(z)() or av_realloc() or NULL. - * @param size Size in bytes for the memory block to be allocated or - * reallocated. - * @return Pointer to a newly reallocated block or NULL if the block - * cannot be reallocated or the function is used to free the memory block. 
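
A short sketch of the allocation helpers above: DECLARE_ALIGNED gives a static buffer a guaranteed alignment, and av_malloc_array() rejects element counts whose product would overflow before calling av_malloc(). Linking against libavutil is assumed:

#include <stdint.h>
#include <stdio.h>
#include <libavutil/mem.h>

DECLARE_ALIGNED(16, uint8_t, scratch)[64];   /* 64-byte static buffer, 16-byte aligned */

int main(void)
{
    /* An overflowing nmemb * size product returns NULL instead of a
     * silently truncated allocation. */
    int *samples = av_malloc_array(1024, sizeof(*samples));
    if (!samples)
        return 1;
    samples[0] = 42;
    printf("%d bytes of aligned scratch, first sample %d\n",
           (int)sizeof(scratch), samples[0]);
    av_free(samples);
    return 0;
}
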
- * @see av_fast_realloc() - */ -void *av_realloc(void *ptr, size_t size) av_alloc_size(2); - -/** - * Allocate or reallocate a block of memory. - * This function does the same thing as av_realloc, except: - * - It takes two arguments and checks the result of the multiplication for - * integer overflow. - * - It frees the input block in case of failure, thus avoiding the memory - * leak with the classic "buf = realloc(buf); if (!buf) return -1;". - */ -void *av_realloc_f(void *ptr, size_t nelem, size_t elsize); - -/** - * Allocate or reallocate an array. - * If ptr is NULL and nmemb > 0, allocate a new block. If - * nmemb is zero, free the memory block pointed to by ptr. - * @param ptr Pointer to a memory block already allocated with - * av_malloc(z)() or av_realloc() or NULL. - * @param nmemb Number of elements - * @param size Size of the single element - * @return Pointer to a newly reallocated block or NULL if the block - * cannot be reallocated or the function is used to free the memory block. - */ -av_alloc_size(2, 3) void *av_realloc_array(void *ptr, size_t nmemb, size_t size); - -/** - * Allocate or reallocate an array. - * If *ptr is NULL and nmemb > 0, allocate a new block. If - * nmemb is zero, free the memory block pointed to by ptr. - * @param ptr Pointer to a pointer to a memory block already allocated - * with av_malloc(z)() or av_realloc(), or pointer to a pointer to NULL. - * The pointer is updated on success, or freed on failure. - * @param nmemb Number of elements - * @param size Size of the single element - * @return Zero on success, an AVERROR error code on failure. - */ -av_alloc_size(2, 3) int av_reallocp_array(void *ptr, size_t nmemb, size_t size); - -/** - * Free a memory block which has been allocated with av_malloc(z)() or - * av_realloc(). - * @param ptr Pointer to the memory block which should be freed. - * @note ptr = NULL is explicitly allowed. - * @note It is recommended that you use av_freep() instead. - * @see av_freep() - */ -void av_free(void *ptr); - -/** - * Allocate a block of size bytes with alignment suitable for all - * memory accesses (including vectors if available on the CPU) and - * zero all the bytes of the block. - * @param size Size in bytes for the memory block to be allocated. - * @return Pointer to the allocated block, NULL if it cannot be allocated. - * @see av_malloc() - */ -void *av_mallocz(size_t size) av_malloc_attrib av_alloc_size(1); - -/** - * Allocate a block of nmemb * size bytes with alignment suitable for all - * memory accesses (including vectors if available on the CPU) and - * zero all the bytes of the block. - * The allocation will fail if nmemb * size is greater than or equal - * to INT_MAX. - * @param nmemb - * @param size - * @return Pointer to the allocated block, NULL if it cannot be allocated. - */ -void *av_calloc(size_t nmemb, size_t size) av_malloc_attrib; - -/** - * Helper function to allocate a block of size * nmemb bytes with - * using av_mallocz() - * @param nmemb Number of elements - * @param size Size of the single element - * @return Pointer to the allocated block, NULL if the block cannot - * be allocated. - * @see av_mallocz() - * @see av_malloc_array() - */ -av_alloc_size(1, 2) static inline void *av_mallocz_array(size_t nmemb, size_t size) -{ - if (size <= 0 || nmemb >= INT_MAX / size) - return NULL; - return av_mallocz(nmemb * size); -} - -/** - * Duplicate the string s. 
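
A sketch of growing a heap array with av_realloc_array(), which, like av_malloc_array(), checks the element-count multiplication for overflow. append_value() is a hypothetical helper; linking against libavutil is assumed:

#include <stddef.h>
#include <libavutil/mem.h>

/* Append `value`, doubling the capacity when needed; on failure the original
 * array is left untouched for the caller to free. */
int append_value(int **tab, size_t *capacity, size_t used, int value)
{
    if (used >= *capacity) {
        size_t new_cap = *capacity ? *capacity * 2 : 16;
        int *grown = av_realloc_array(*tab, new_cap, sizeof(**tab));
        if (!grown)
            return -1;
        *tab = grown;
        *capacity = new_cap;
    }
    (*tab)[used] = value;
    return 0;
}
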
- * @param s string to be duplicated - * @return Pointer to a newly allocated string containing a - * copy of s or NULL if the string cannot be allocated. - */ -char *av_strdup(const char *s) av_malloc_attrib; - -/** - * Duplicate the buffer p. - * @param p buffer to be duplicated - * @return Pointer to a newly allocated buffer containing a - * copy of p or NULL if the buffer cannot be allocated. - */ -void *av_memdup(const void *p, size_t size); - -/** - * Free a memory block which has been allocated with av_malloc(z)() or - * av_realloc() and set the pointer pointing to it to NULL. - * @param ptr Pointer to the pointer to the memory block which should - * be freed. - * @see av_free() - */ -void av_freep(void *ptr); - -/** - * Add an element to a dynamic array. - * - * The array to grow is supposed to be an array of pointers to - * structures, and the element to add must be a pointer to an already - * allocated structure. - * - * The array is reallocated when its size reaches powers of 2. - * Therefore, the amortized cost of adding an element is constant. - * - * In case of success, the pointer to the array is updated in order to - * point to the new grown array, and the number pointed to by nb_ptr - * is incremented. - * In case of failure, the array is freed, *tab_ptr is set to NULL and - * *nb_ptr is set to 0. - * - * @param tab_ptr pointer to the array to grow - * @param nb_ptr pointer to the number of elements in the array - * @param elem element to add - * @see av_dynarray2_add() - */ -void av_dynarray_add(void *tab_ptr, int *nb_ptr, void *elem); - -/** - * Add an element of size elem_size to a dynamic array. - * - * The array is reallocated when its number of elements reaches powers of 2. - * Therefore, the amortized cost of adding an element is constant. - * - * In case of success, the pointer to the array is updated in order to - * point to the new grown array, and the number pointed to by nb_ptr - * is incremented. - * In case of failure, the array is freed, *tab_ptr is set to NULL and - * *nb_ptr is set to 0. - * - * @param tab_ptr pointer to the array to grow - * @param nb_ptr pointer to the number of elements in the array - * @param elem_size size in bytes of the elements in the array - * @param elem_data pointer to the data of the element to add. If NULL, the space of - * the new added element is not filled. - * @return pointer to the data of the element to copy in the new allocated space. - * If NULL, the new allocated space is left uninitialized." - * @see av_dynarray_add() - */ -void *av_dynarray2_add(void **tab_ptr, int *nb_ptr, size_t elem_size, - const uint8_t *elem_data); - -/** - * Multiply two size_t values checking for overflow. - * @return 0 if success, AVERROR(EINVAL) if overflow. - */ -static inline int av_size_mult(size_t a, size_t b, size_t *r) -{ - size_t t = a * b; - /* Hack inspired from glibc: only try the division if nelem and elsize - * are both greater than sqrt(SIZE_MAX). */ - if ((a | b) >= ((size_t)1 << (sizeof(size_t) * 4)) && a && t / a != b) - return AVERROR(EINVAL); - *r = t; - return 0; -} - -/** - * Set the maximum size that may me allocated in one block. 
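
A sketch of av_dynarray_add() as documented above: it grows an array of pointers and increments the element count, or frees the array and resets the count to 0 on allocation failure. Linking against libavutil is assumed:

#include <stdio.h>
#include <libavutil/mem.h>

int main(void)
{
    char **names = NULL;
    int nb_names = 0;

    av_dynarray_add(&names, &nb_names, av_strdup("foo"));
    av_dynarray_add(&names, &nb_names, av_strdup("bar"));

    if (!names)                 /* allocation failed: nb_names was reset to 0 */
        return 1;
    for (int i = 0; i < nb_names; i++)
        printf("%s\n", names[i]);

    for (int i = 0; i < nb_names; i++)
        av_free(names[i]);
    av_freep(&names);           /* frees the table and NULLs the pointer */
    return 0;
}
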
- */ -void av_max_alloc(size_t max); - -/** - * @brief deliberately overlapping memcpy implementation - * @param dst destination buffer - * @param back how many bytes back we start (the initial size of the overlapping window), must be > 0 - * @param cnt number of bytes to copy, must be >= 0 - * - * cnt > back is valid, this will copy the bytes we just copied, - * thus creating a repeating pattern with a period length of back. - */ -void av_memcpy_backptr(uint8_t *dst, int back, int cnt); - -/** - * @} - */ - -#endif /* AVUTIL_MEM_H */ diff --git a/3rdparty/include/ffmpeg_/libavutil/murmur3.h b/3rdparty/include/ffmpeg_/libavutil/murmur3.h deleted file mode 100644 index f29ed973e9..0000000000 --- a/3rdparty/include/ffmpeg_/libavutil/murmur3.h +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright (C) 2013 Reimar Döffinger - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#ifndef AVUTIL_MURMUR3_H -#define AVUTIL_MURMUR3_H - -#include - -struct AVMurMur3 *av_murmur3_alloc(void); -void av_murmur3_init_seeded(struct AVMurMur3 *c, uint64_t seed); -void av_murmur3_init(struct AVMurMur3 *c); -void av_murmur3_update(struct AVMurMur3 *c, const uint8_t *src, int len); -void av_murmur3_final(struct AVMurMur3 *c, uint8_t dst[16]); - -#endif /* AVUTIL_MURMUR3_H */ diff --git a/3rdparty/include/ffmpeg_/libavutil/old_pix_fmts.h b/3rdparty/include/ffmpeg_/libavutil/old_pix_fmts.h deleted file mode 100644 index 57b699220f..0000000000 --- a/3rdparty/include/ffmpeg_/libavutil/old_pix_fmts.h +++ /dev/null @@ -1,171 +0,0 @@ -/* - * copyright (c) 2006-2012 Michael Niedermayer - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#ifndef AVUTIL_OLD_PIX_FMTS_H -#define AVUTIL_OLD_PIX_FMTS_H - -/* - * This header exists to prevent new pixel formats from being accidentally added - * to the deprecated list. - * Do not include it directly. It will be removed on next major bump - * - * Do not add new items to this list. Use the AVPixelFormat enum instead. 
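
A small sketch of the MurmurHash3 helpers declared above; the digest is written into a 16-byte buffer, matching the uint8_t dst[16] parameter of av_murmur3_final(). Linking against libavutil is assumed, and releasing the opaque context with av_free() is an assumption based on av_murmur3_alloc() being a plain allocation:

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <libavutil/murmur3.h>
#include <libavutil/mem.h>

int main(void)
{
    const uint8_t data[] = "some bytes to hash";
    uint8_t hash[16];

    struct AVMurMur3 *ctx = av_murmur3_alloc();
    av_murmur3_init(ctx);                      /* default seed */
    av_murmur3_update(ctx, data, strlen((const char *)data));
    av_murmur3_final(ctx, hash);
    av_free(ctx);

    for (int i = 0; i < 16; i++)
        printf("%02x", hash[i]);
    printf("\n");
    return 0;
}
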
- */ - PIX_FMT_NONE = AV_PIX_FMT_NONE, - PIX_FMT_YUV420P, ///< planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples) - PIX_FMT_YUYV422, ///< packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr - PIX_FMT_RGB24, ///< packed RGB 8:8:8, 24bpp, RGBRGB... - PIX_FMT_BGR24, ///< packed RGB 8:8:8, 24bpp, BGRBGR... - PIX_FMT_YUV422P, ///< planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples) - PIX_FMT_YUV444P, ///< planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples) - PIX_FMT_YUV410P, ///< planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples) - PIX_FMT_YUV411P, ///< planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) - PIX_FMT_GRAY8, ///< Y , 8bpp - PIX_FMT_MONOWHITE, ///< Y , 1bpp, 0 is white, 1 is black, in each byte pixels are ordered from the msb to the lsb - PIX_FMT_MONOBLACK, ///< Y , 1bpp, 0 is black, 1 is white, in each byte pixels are ordered from the msb to the lsb - PIX_FMT_PAL8, ///< 8 bit with PIX_FMT_RGB32 palette - PIX_FMT_YUVJ420P, ///< planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV420P and setting color_range - PIX_FMT_YUVJ422P, ///< planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV422P and setting color_range - PIX_FMT_YUVJ444P, ///< planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV444P and setting color_range - PIX_FMT_XVMC_MPEG2_MC,///< XVideo Motion Acceleration via common packet passing - PIX_FMT_XVMC_MPEG2_IDCT, - PIX_FMT_UYVY422, ///< packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1 - PIX_FMT_UYYVYY411, ///< packed YUV 4:1:1, 12bpp, Cb Y0 Y1 Cr Y2 Y3 - PIX_FMT_BGR8, ///< packed RGB 3:3:2, 8bpp, (msb)2B 3G 3R(lsb) - PIX_FMT_BGR4, ///< packed RGB 1:2:1 bitstream, 4bpp, (msb)1B 2G 1R(lsb), a byte contains two pixels, the first pixel in the byte is the one composed by the 4 msb bits - PIX_FMT_BGR4_BYTE, ///< packed RGB 1:2:1, 8bpp, (msb)1B 2G 1R(lsb) - PIX_FMT_RGB8, ///< packed RGB 3:3:2, 8bpp, (msb)2R 3G 3B(lsb) - PIX_FMT_RGB4, ///< packed RGB 1:2:1 bitstream, 4bpp, (msb)1R 2G 1B(lsb), a byte contains two pixels, the first pixel in the byte is the one composed by the 4 msb bits - PIX_FMT_RGB4_BYTE, ///< packed RGB 1:2:1, 8bpp, (msb)1R 2G 1B(lsb) - PIX_FMT_NV12, ///< planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (first byte U and the following byte V) - PIX_FMT_NV21, ///< as above, but U and V bytes are swapped - - PIX_FMT_ARGB, ///< packed ARGB 8:8:8:8, 32bpp, ARGBARGB... - PIX_FMT_RGBA, ///< packed RGBA 8:8:8:8, 32bpp, RGBARGBA... - PIX_FMT_ABGR, ///< packed ABGR 8:8:8:8, 32bpp, ABGRABGR... - PIX_FMT_BGRA, ///< packed BGRA 8:8:8:8, 32bpp, BGRABGRA... 
- - PIX_FMT_GRAY16BE, ///< Y , 16bpp, big-endian - PIX_FMT_GRAY16LE, ///< Y , 16bpp, little-endian - PIX_FMT_YUV440P, ///< planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples) - PIX_FMT_YUVJ440P, ///< planar YUV 4:4:0 full scale (JPEG), deprecated in favor of PIX_FMT_YUV440P and setting color_range - PIX_FMT_YUVA420P, ///< planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples) - PIX_FMT_VDPAU_H264,///< H.264 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers - PIX_FMT_VDPAU_MPEG1,///< MPEG-1 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers - PIX_FMT_VDPAU_MPEG2,///< MPEG-2 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers - PIX_FMT_VDPAU_WMV3,///< WMV3 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers - PIX_FMT_VDPAU_VC1, ///< VC-1 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers - PIX_FMT_RGB48BE, ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as big-endian - PIX_FMT_RGB48LE, ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as little-endian - - PIX_FMT_RGB565BE, ///< packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), big-endian - PIX_FMT_RGB565LE, ///< packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), little-endian - PIX_FMT_RGB555BE, ///< packed RGB 5:5:5, 16bpp, (msb)1A 5R 5G 5B(lsb), big-endian, most significant bit to 0 - PIX_FMT_RGB555LE, ///< packed RGB 5:5:5, 16bpp, (msb)1A 5R 5G 5B(lsb), little-endian, most significant bit to 0 - - PIX_FMT_BGR565BE, ///< packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), big-endian - PIX_FMT_BGR565LE, ///< packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), little-endian - PIX_FMT_BGR555BE, ///< packed BGR 5:5:5, 16bpp, (msb)1A 5B 5G 5R(lsb), big-endian, most significant bit to 1 - PIX_FMT_BGR555LE, ///< packed BGR 5:5:5, 16bpp, (msb)1A 5B 5G 5R(lsb), little-endian, most significant bit to 1 - - PIX_FMT_VAAPI_MOCO, ///< HW acceleration through VA API at motion compensation entry-point, Picture.data[3] contains a vaapi_render_state struct which contains macroblocks as well as various fields extracted from headers - PIX_FMT_VAAPI_IDCT, ///< HW acceleration through VA API at IDCT entry-point, Picture.data[3] contains a vaapi_render_state struct which contains fields extracted from headers - PIX_FMT_VAAPI_VLD, ///< HW decoding through VA API, Picture.data[3] contains a vaapi_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers - - PIX_FMT_YUV420P16LE, ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian - PIX_FMT_YUV420P16BE, ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian - PIX_FMT_YUV422P16LE, ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian - PIX_FMT_YUV422P16BE, ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian - PIX_FMT_YUV444P16LE, ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian - 
PIX_FMT_YUV444P16BE, ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian - PIX_FMT_VDPAU_MPEG4, ///< MPEG4 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers - PIX_FMT_DXVA2_VLD, ///< HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer - - PIX_FMT_RGB444LE, ///< packed RGB 4:4:4, 16bpp, (msb)4A 4R 4G 4B(lsb), little-endian, most significant bits to 0 - PIX_FMT_RGB444BE, ///< packed RGB 4:4:4, 16bpp, (msb)4A 4R 4G 4B(lsb), big-endian, most significant bits to 0 - PIX_FMT_BGR444LE, ///< packed BGR 4:4:4, 16bpp, (msb)4A 4B 4G 4R(lsb), little-endian, most significant bits to 1 - PIX_FMT_BGR444BE, ///< packed BGR 4:4:4, 16bpp, (msb)4A 4B 4G 4R(lsb), big-endian, most significant bits to 1 - PIX_FMT_GRAY8A, ///< 8bit gray, 8bit alpha - PIX_FMT_BGR48BE, ///< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as big-endian - PIX_FMT_BGR48LE, ///< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as little-endian - - //the following 10 formats have the disadvantage of needing 1 format for each bit depth, thus - //If you want to support multiple bit depths, then using PIX_FMT_YUV420P16* with the bpp stored separately - //is better - PIX_FMT_YUV420P9BE, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian - PIX_FMT_YUV420P9LE, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian - PIX_FMT_YUV420P10BE,///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian - PIX_FMT_YUV420P10LE,///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian - PIX_FMT_YUV422P10BE,///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian - PIX_FMT_YUV422P10LE,///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian - PIX_FMT_YUV444P9BE, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian - PIX_FMT_YUV444P9LE, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian - PIX_FMT_YUV444P10BE,///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian - PIX_FMT_YUV444P10LE,///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian - PIX_FMT_YUV422P9BE, ///< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian - PIX_FMT_YUV422P9LE, ///< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian - PIX_FMT_VDA_VLD, ///< hardware decoding through VDA - -#ifdef AV_PIX_FMT_ABI_GIT_MASTER - PIX_FMT_RGBA64BE, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian - PIX_FMT_RGBA64LE, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian - PIX_FMT_BGRA64BE, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian - PIX_FMT_BGRA64LE, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian -#endif - PIX_FMT_GBRP, ///< planar GBR 4:4:4 24bpp - PIX_FMT_GBRP9BE, ///< planar GBR 4:4:4 27bpp, big endian - PIX_FMT_GBRP9LE, ///< planar GBR 4:4:4 27bpp, little endian - PIX_FMT_GBRP10BE, ///< planar GBR 4:4:4 30bpp, big endian - 
PIX_FMT_GBRP10LE, ///< planar GBR 4:4:4 30bpp, little endian - PIX_FMT_GBRP16BE, ///< planar GBR 4:4:4 48bpp, big endian - PIX_FMT_GBRP16LE, ///< planar GBR 4:4:4 48bpp, little endian - -#ifndef AV_PIX_FMT_ABI_GIT_MASTER - PIX_FMT_RGBA64BE=0x123, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian - PIX_FMT_RGBA64LE, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian - PIX_FMT_BGRA64BE, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian - PIX_FMT_BGRA64LE, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian -#endif - PIX_FMT_0RGB=0x123+4, ///< packed RGB 8:8:8, 32bpp, 0RGB0RGB... - PIX_FMT_RGB0, ///< packed RGB 8:8:8, 32bpp, RGB0RGB0... - PIX_FMT_0BGR, ///< packed BGR 8:8:8, 32bpp, 0BGR0BGR... - PIX_FMT_BGR0, ///< packed BGR 8:8:8, 32bpp, BGR0BGR0... - PIX_FMT_YUVA444P, ///< planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples) - PIX_FMT_YUVA422P, ///< planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples) - - PIX_FMT_YUV420P12BE, ///< planar YUV 4:2:0,18bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian - PIX_FMT_YUV420P12LE, ///< planar YUV 4:2:0,18bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian - PIX_FMT_YUV420P14BE, ///< planar YUV 4:2:0,21bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian - PIX_FMT_YUV420P14LE, ///< planar YUV 4:2:0,21bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian - PIX_FMT_YUV422P12BE, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian - PIX_FMT_YUV422P12LE, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian - PIX_FMT_YUV422P14BE, ///< planar YUV 4:2:2,28bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian - PIX_FMT_YUV422P14LE, ///< planar YUV 4:2:2,28bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian - PIX_FMT_YUV444P12BE, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian - PIX_FMT_YUV444P12LE, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian - PIX_FMT_YUV444P14BE, ///< planar YUV 4:4:4,42bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian - PIX_FMT_YUV444P14LE, ///< planar YUV 4:4:4,42bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian - PIX_FMT_GBRP12BE, ///< planar GBR 4:4:4 36bpp, big endian - PIX_FMT_GBRP12LE, ///< planar GBR 4:4:4 36bpp, little endian - PIX_FMT_GBRP14BE, ///< planar GBR 4:4:4 42bpp, big endian - PIX_FMT_GBRP14LE, ///< planar GBR 4:4:4 42bpp, little endian - - PIX_FMT_NB, ///< number of pixel formats, DO NOT USE THIS if you want to link with shared libav* because the number of formats might differ between versions -#endif /* AVUTIL_OLD_PIX_FMTS_H */ diff --git a/3rdparty/include/ffmpeg_/libavutil/opt.h b/3rdparty/include/ffmpeg_/libavutil/opt.h deleted file mode 100644 index 2344aa7b4d..0000000000 --- a/3rdparty/include/ffmpeg_/libavutil/opt.h +++ /dev/null @@ -1,754 +0,0 @@ -/* - * AVOptions - * copyright (c) 2005 Michael Niedermayer - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. 
- * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#ifndef AVUTIL_OPT_H -#define AVUTIL_OPT_H - -/** - * @file - * AVOptions - */ - -#include "rational.h" -#include "avutil.h" -#include "dict.h" -#include "log.h" -#include "pixfmt.h" -#include "samplefmt.h" - -/** - * @defgroup avoptions AVOptions - * @ingroup lavu_data - * @{ - * AVOptions provide a generic system to declare options on arbitrary structs - * ("objects"). An option can have a help text, a type and a range of possible - * values. Options may then be enumerated, read and written to. - * - * @section avoptions_implement Implementing AVOptions - * This section describes how to add AVOptions capabilities to a struct. - * - * All AVOptions-related information is stored in an AVClass. Therefore - * the first member of the struct should be a pointer to an AVClass describing it. - * The option field of the AVClass must be set to a NULL-terminated static array - * of AVOptions. Each AVOption must have a non-empty name, a type, a default - * value and for number-type AVOptions also a range of allowed values. It must - * also declare an offset in bytes from the start of the struct, where the field - * associated with this AVOption is located. Other fields in the AVOption struct - * should also be set when applicable, but are not required. - * - * The following example illustrates an AVOptions-enabled struct: - * @code - * typedef struct test_struct { - * AVClass *class; - * int int_opt; - * char *str_opt; - * uint8_t *bin_opt; - * int bin_len; - * } test_struct; - * - * static const AVOption options[] = { - * { "test_int", "This is a test option of int type.", offsetof(test_struct, int_opt), - * AV_OPT_TYPE_INT, { .i64 = -1 }, INT_MIN, INT_MAX }, - * { "test_str", "This is a test option of string type.", offsetof(test_struct, str_opt), - * AV_OPT_TYPE_STRING }, - * { "test_bin", "This is a test option of binary type.", offsetof(test_struct, bin_opt), - * AV_OPT_TYPE_BINARY }, - * { NULL }, - * }; - * - * static const AVClass test_class = { - * .class_name = "test class", - * .item_name = av_default_item_name, - * .option = options, - * .version = LIBAVUTIL_VERSION_INT, - * }; - * @endcode - * - * Next, when allocating your struct, you must ensure that the AVClass pointer - * is set to the correct value. Then, av_opt_set_defaults() can be called to - * initialize defaults. After that the struct is ready to be used with the - * AVOptions API. - * - * When cleaning up, you may use the av_opt_free() function to automatically - * free all the allocated string and binary options. - * - * Continuing with the above example: - * - * @code - * test_struct *alloc_test_struct(void) - * { - * test_struct *ret = av_malloc(sizeof(*ret)); - * ret->class = &test_class; - * av_opt_set_defaults(ret); - * return ret; - * } - * void free_test_struct(test_struct **foo) - * { - * av_opt_free(*foo); - * av_freep(foo); - * } - * @endcode - * - * @subsection avoptions_implement_nesting Nesting - * It may happen that an AVOptions-enabled struct contains another - * AVOptions-enabled struct as a member (e.g. 
AVCodecContext in - * libavcodec exports generic options, while its priv_data field exports - * codec-specific options). In such a case, it is possible to set up the - * parent struct to export a child's options. To do that, simply - * implement AVClass.child_next() and AVClass.child_class_next() in the - * parent struct's AVClass. - * Assuming that the test_struct from above now also contains a - * child_struct field: - * - * @code - * typedef struct child_struct { - * AVClass *class; - * int flags_opt; - * } child_struct; - * static const AVOption child_opts[] = { - * { "test_flags", "This is a test option of flags type.", - * offsetof(child_struct, flags_opt), AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT_MIN, INT_MAX }, - * { NULL }, - * }; - * static const AVClass child_class = { - * .class_name = "child class", - * .item_name = av_default_item_name, - * .option = child_opts, - * .version = LIBAVUTIL_VERSION_INT, - * }; - * - * void *child_next(void *obj, void *prev) - * { - * test_struct *t = obj; - * if (!prev && t->child_struct) - * return t->child_struct; - * return NULL - * } - * const AVClass child_class_next(const AVClass *prev) - * { - * return prev ? NULL : &child_class; - * } - * @endcode - * Putting child_next() and child_class_next() as defined above into - * test_class will now make child_struct's options accessible through - * test_struct (again, proper setup as described above needs to be done on - * child_struct right after it is created). - * - * From the above example it might not be clear why both child_next() - * and child_class_next() are needed. The distinction is that child_next() - * iterates over actually existing objects, while child_class_next() - * iterates over all possible child classes. E.g. if an AVCodecContext - * was initialized to use a codec which has private options, then its - * child_next() will return AVCodecContext.priv_data and finish - * iterating. OTOH child_class_next() on AVCodecContext.av_class will - * iterate over all available codecs with private options. - * - * @subsection avoptions_implement_named_constants Named constants - * It is possible to create named constants for options. Simply set the unit - * field of the option the constants should apply to to a string and - * create the constants themselves as options of type AV_OPT_TYPE_CONST - * with their unit field set to the same string. - * Their default_val field should contain the value of the named - * constant. - * For example, to add some named constants for the test_flags option - * above, put the following into the child_opts array: - * @code - * { "test_flags", "This is a test option of flags type.", - * offsetof(child_struct, flags_opt), AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT_MIN, INT_MAX, "test_unit" }, - * { "flag1", "This is a flag with value 16", 0, AV_OPT_TYPE_CONST, { .i64 = 16 }, 0, 0, "test_unit" }, - * @endcode - * - * @section avoptions_use Using AVOptions - * This section deals with accessing options in an AVOptions-enabled struct. - * Such structs in FFmpeg are e.g. AVCodecContext in libavcodec or - * AVFormatContext in libavformat. - * - * @subsection avoptions_use_examine Examining AVOptions - * The basic functions for examining options are av_opt_next(), which iterates - * over all options defined for one object, and av_opt_find(), which searches - * for an option with the given name. - * - * The situation is more complicated with nesting. An AVOptions-enabled struct - * may have AVOptions-enabled children. 
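
A sketch of enumerating the options of an AVOptions-enabled object with av_opt_next(), as described above. The exact av_opt_next() prototype is not reproduced in this excerpt, so the (object, previous option) calling convention used here is an assumption; dump_options() is a hypothetical helper and libavutil linking is assumed:

#include <stdio.h>
#include <libavutil/opt.h>

/* obj must be a struct whose first field points to an AVClass with a
 * populated option table. */
static void dump_options(void *obj)
{
    const AVOption *opt = NULL;
    while ((opt = av_opt_next(obj, opt)))
        printf("option '%s': %s\n", opt->name, opt->help ? opt->help : "");
}
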
Passing the AV_OPT_SEARCH_CHILDREN flag - * to av_opt_find() will make the function search children recursively. - * - * For enumerating there are basically two cases. The first is when you want to - * get all options that may potentially exist on the struct and its children - * (e.g. when constructing documentation). In that case you should call - * av_opt_child_class_next() recursively on the parent struct's AVClass. The - * second case is when you have an already initialized struct with all its - * children and you want to get all options that can be actually written or read - * from it. In that case you should call av_opt_child_next() recursively (and - * av_opt_next() on each result). - * - * @subsection avoptions_use_get_set Reading and writing AVOptions - * When setting options, you often have a string read directly from the - * user. In such a case, simply passing it to av_opt_set() is enough. For - * non-string type options, av_opt_set() will parse the string according to the - * option type. - * - * Similarly av_opt_get() will read any option type and convert it to a string - * which will be returned. Do not forget that the string is allocated, so you - * have to free it with av_free(). - * - * In some cases it may be more convenient to put all options into an - * AVDictionary and call av_opt_set_dict() on it. A specific case of this - * are the format/codec open functions in lavf/lavc which take a dictionary - * filled with option as a parameter. This allows to set some options - * that cannot be set otherwise, since e.g. the input file format is not known - * before the file is actually opened. - */ - -enum AVOptionType{ - AV_OPT_TYPE_FLAGS, - AV_OPT_TYPE_INT, - AV_OPT_TYPE_INT64, - AV_OPT_TYPE_DOUBLE, - AV_OPT_TYPE_FLOAT, - AV_OPT_TYPE_STRING, - AV_OPT_TYPE_RATIONAL, - AV_OPT_TYPE_BINARY, ///< offset must point to a pointer immediately followed by an int for the length - AV_OPT_TYPE_CONST = 128, - AV_OPT_TYPE_IMAGE_SIZE = MKBETAG('S','I','Z','E'), ///< offset must point to two consecutive integers - AV_OPT_TYPE_PIXEL_FMT = MKBETAG('P','F','M','T'), - AV_OPT_TYPE_SAMPLE_FMT = MKBETAG('S','F','M','T'), - AV_OPT_TYPE_VIDEO_RATE = MKBETAG('V','R','A','T'), ///< offset must point to AVRational - AV_OPT_TYPE_DURATION = MKBETAG('D','U','R',' '), - AV_OPT_TYPE_COLOR = MKBETAG('C','O','L','R'), -#if FF_API_OLD_AVOPTIONS - FF_OPT_TYPE_FLAGS = 0, - FF_OPT_TYPE_INT, - FF_OPT_TYPE_INT64, - FF_OPT_TYPE_DOUBLE, - FF_OPT_TYPE_FLOAT, - FF_OPT_TYPE_STRING, - FF_OPT_TYPE_RATIONAL, - FF_OPT_TYPE_BINARY, ///< offset must point to a pointer immediately followed by an int for the length - FF_OPT_TYPE_CONST=128, -#endif -}; - -/** - * AVOption - */ -typedef struct AVOption { - const char *name; - - /** - * short English help text - * @todo What about other languages? - */ - const char *help; - - /** - * The offset relative to the context structure where the option - * value is stored. It should be 0 for named constants. 
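
A sketch of reading and writing options by name as described above: av_opt_set() parses the string according to the option's type, and av_opt_get() returns a newly allocated string that must be released with av_free(). The four-argument av_opt_set()/av_opt_get() signatures are assumptions based on the libavutil API of this era; tweak() and the "test_int" option (taken from the header's own sample table) are illustrative:

#include <stdint.h>
#include <stdio.h>
#include <libavutil/opt.h>
#include <libavutil/mem.h>

static int tweak(void *obj)   /* obj: any AVOptions-enabled struct */
{
    uint8_t *value = NULL;

    /* AV_OPT_SEARCH_CHILDREN also searches child objects, as described above. */
    int ret = av_opt_set(obj, "test_int", "25", AV_OPT_SEARCH_CHILDREN);
    if (ret < 0)
        return ret;                      /* e.g. AVERROR_OPTION_NOT_FOUND */

    if (av_opt_get(obj, "test_int", 0, &value) >= 0) {
        printf("test_int is now %s\n", value);
        av_free(value);                  /* av_opt_get() allocates the string */
    }
    return 0;
}
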
- */ - int offset; - enum AVOptionType type; - - /** - * the default value for scalar options - */ - union { - int64_t i64; - double dbl; - const char *str; - /* TODO those are unused now */ - AVRational q; - } default_val; - double min; ///< minimum valid value for the option - double max; ///< maximum valid value for the option - - int flags; -#define AV_OPT_FLAG_ENCODING_PARAM 1 ///< a generic parameter which can be set by the user for muxing or encoding -#define AV_OPT_FLAG_DECODING_PARAM 2 ///< a generic parameter which can be set by the user for demuxing or decoding -#define AV_OPT_FLAG_METADATA 4 ///< some data extracted or inserted into the file like title, comment, ... -#define AV_OPT_FLAG_AUDIO_PARAM 8 -#define AV_OPT_FLAG_VIDEO_PARAM 16 -#define AV_OPT_FLAG_SUBTITLE_PARAM 32 -#define AV_OPT_FLAG_FILTERING_PARAM (1<<16) ///< a generic parameter which can be set by the user for filtering -//FIXME think about enc-audio, ... style flags - - /** - * The logical unit to which the option belongs. Non-constant - * options and corresponding named constants share the same - * unit. May be NULL. - */ - const char *unit; -} AVOption; - -/** - * A single allowed range of values, or a single allowed value. - */ -typedef struct AVOptionRange { - const char *str; - double value_min, value_max; ///< For string ranges this represents the min/max length, for dimensions this represents the min/max pixel count - double component_min, component_max; ///< For string this represents the unicode range for chars, 0-127 limits to ASCII - int is_range; ///< if set to 1 the struct encodes a range, if set to 0 a single value -} AVOptionRange; - -/** - * List of AVOptionRange structs - */ -typedef struct AVOptionRanges { - AVOptionRange **range; - int nb_ranges; -} AVOptionRanges; - - -#if FF_API_FIND_OPT -/** - * Look for an option in obj. Look only for the options which - * have the flags set as specified in mask and flags (that is, - * for which it is the case that opt->flags & mask == flags). - * - * @param[in] obj a pointer to a struct whose first element is a - * pointer to an AVClass - * @param[in] name the name of the option to look for - * @param[in] unit the unit of the option to look for, or any if NULL - * @return a pointer to the option found, or NULL if no option - * has been found - * - * @deprecated use av_opt_find. - */ -attribute_deprecated -const AVOption *av_find_opt(void *obj, const char *name, const char *unit, int mask, int flags); -#endif - -#if FF_API_OLD_AVOPTIONS -/** - * Set the field of obj with the given name to value. - * - * @param[in] obj A struct whose first element is a pointer to an - * AVClass. - * @param[in] name the name of the field to set - * @param[in] val The value to set. If the field is not of a string - * type, then the given string is parsed. - * SI postfixes and some named scalars are supported. - * If the field is of a numeric type, it has to be a numeric or named - * scalar. Behavior with more than one scalar and +- infix operators - * is undefined. - * If the field is of a flags type, it has to be a sequence of numeric - * scalars or named flags separated by '+' or '-'. Prefixing a flag - * with '+' causes it to be set without affecting the other flags; - * similarly, '-' unsets a flag. 
- * @param[out] o_out if non-NULL put here a pointer to the AVOption - * found - * @param alloc this parameter is currently ignored - * @return 0 if the value has been set, or an AVERROR code in case of - * error: - * AVERROR_OPTION_NOT_FOUND if no matching option exists - * AVERROR(ERANGE) if the value is out of range - * AVERROR(EINVAL) if the value is not valid - * @deprecated use av_opt_set() - */ -attribute_deprecated -int av_set_string3(void *obj, const char *name, const char *val, int alloc, const AVOption **o_out); - -attribute_deprecated const AVOption *av_set_double(void *obj, const char *name, double n); -attribute_deprecated const AVOption *av_set_q(void *obj, const char *name, AVRational n); -attribute_deprecated const AVOption *av_set_int(void *obj, const char *name, int64_t n); - -double av_get_double(void *obj, const char *name, const AVOption **o_out); -AVRational av_get_q(void *obj, const char *name, const AVOption **o_out); -int64_t av_get_int(void *obj, const char *name, const AVOption **o_out); -attribute_deprecated const char *av_get_string(void *obj, const char *name, const AVOption **o_out, char *buf, int buf_len); -attribute_deprecated const AVOption *av_next_option(void *obj, const AVOption *last); -#endif - -/** - * Show the obj options. - * - * @param req_flags requested flags for the options to show. Show only the - * options for which it is opt->flags & req_flags. - * @param rej_flags rejected flags for the options to show. Show only the - * options for which it is !(opt->flags & req_flags). - * @param av_log_obj log context to use for showing the options - */ -int av_opt_show2(void *obj, void *av_log_obj, int req_flags, int rej_flags); - -/** - * Set the values of all AVOption fields to their default values. - * - * @param s an AVOption-enabled struct (its first member must be a pointer to AVClass) - */ -void av_opt_set_defaults(void *s); - -#if FF_API_OLD_AVOPTIONS -attribute_deprecated -void av_opt_set_defaults2(void *s, int mask, int flags); -#endif - -/** - * Parse the key/value pairs list in opts. For each key/value pair - * found, stores the value in the field in ctx that is named like the - * key. ctx must be an AVClass context, storing is done using - * AVOptions. - * - * @param opts options string to parse, may be NULL - * @param key_val_sep a 0-terminated list of characters used to - * separate key from value - * @param pairs_sep a 0-terminated list of characters used to separate - * two pairs from each other - * @return the number of successfully set key/value pairs, or a negative - * value corresponding to an AVERROR code in case of error: - * AVERROR(EINVAL) if opts cannot be parsed, - * the error code issued by av_set_string3() if a key/value pair - * cannot be set - */ -int av_set_options_string(void *ctx, const char *opts, - const char *key_val_sep, const char *pairs_sep); - -/** - * Parse the key-value pairs list in opts. For each key=value pair found, - * set the value of the corresponding option in ctx. 
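av_set_options_string() applies several key=value pairs in one call. A small sketch, again using an AVFormatContext and real demuxer option names purely as an example; the separators match the documentation just above:

    #include <libavformat/avformat.h>
    #include <libavutil/opt.h>

    /* Returns the number of pairs set, or a negative AVERROR code. */
    static int apply_option_string(AVFormatContext *s)
    {
        /* '=' separates key from value, ':' separates the pairs */
        return av_set_options_string(s, "probesize=65536:analyzeduration=2000000", "=", ":");
    }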
- * - * @param ctx the AVClass object to set options on - * @param opts the options string, key-value pairs separated by a - * delimiter - * @param shorthand a NULL-terminated array of options names for shorthand - * notation: if the first field in opts has no key part, - * the key is taken from the first element of shorthand; - * then again for the second, etc., until either opts is - * finished, shorthand is finished or a named option is - * found; after that, all options must be named - * @param key_val_sep a 0-terminated list of characters used to separate - * key from value, for example '=' - * @param pairs_sep a 0-terminated list of characters used to separate - * two pairs from each other, for example ':' or ',' - * @return the number of successfully set key=value pairs, or a negative - * value corresponding to an AVERROR code in case of error: - * AVERROR(EINVAL) if opts cannot be parsed, - * the error code issued by av_set_string3() if a key/value pair - * cannot be set - * - * Options names must use only the following characters: a-z A-Z 0-9 - . / _ - * Separators must use characters distinct from option names and from each - * other. - */ -int av_opt_set_from_string(void *ctx, const char *opts, - const char *const *shorthand, - const char *key_val_sep, const char *pairs_sep); -/** - * Free all string and binary options in obj. - */ -void av_opt_free(void *obj); - -/** - * Check whether a particular flag is set in a flags field. - * - * @param field_name the name of the flag field option - * @param flag_name the name of the flag to check - * @return non-zero if the flag is set, zero if the flag isn't set, - * isn't of the right type, or the flags field doesn't exist. - */ -int av_opt_flag_is_set(void *obj, const char *field_name, const char *flag_name); - -/** - * Set all the options from a given dictionary on an object. - * - * @param obj a struct whose first element is a pointer to AVClass - * @param options options to process. This dictionary will be freed and replaced - * by a new one containing all options not found in obj. - * Of course this new dictionary needs to be freed by caller - * with av_dict_free(). - * - * @return 0 on success, a negative AVERROR if some option was found in obj, - * but could not be set. - * - * @see av_dict_copy() - */ -int av_opt_set_dict(void *obj, struct AVDictionary **options); - -/** - * Extract a key-value pair from the beginning of a string. - * - * @param ropts pointer to the options string, will be updated to - * point to the rest of the string (one of the pairs_sep - * or the final NUL) - * @param key_val_sep a 0-terminated list of characters used to separate - * key from value, for example '=' - * @param pairs_sep a 0-terminated list of characters used to separate - * two pairs from each other, for example ':' or ',' - * @param flags flags; see the AV_OPT_FLAG_* values below - * @param rkey parsed key; must be freed using av_free() - * @param rval parsed value; must be freed using av_free() - * - * @return >=0 for success, or a negative value corresponding to an - * AVERROR code in case of error; in particular: - * AVERROR(EINVAL) if no key is present - * - */ -int av_opt_get_key_value(const char **ropts, - const char *key_val_sep, const char *pairs_sep, - unsigned flags, - char **rkey, char **rval); - -enum { - - /** - * Accept to parse a value without a key; the key will then be returned - * as NULL. 
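A sketch of the av_opt_set_dict() pattern described above: recognized entries are consumed, and whatever remains in the dictionary was not found on the object. The option names are illustrative only:

    #include <libavformat/avformat.h>
    #include <libavutil/dict.h>
    #include <libavutil/log.h>
    #include <libavutil/opt.h>

    static int apply_dict(AVFormatContext *s)
    {
        AVDictionary *opts = NULL;
        AVDictionaryEntry *e = NULL;
        int ret;

        av_dict_set(&opts, "probesize", "65536", 0);
        av_dict_set(&opts, "no_such_option", "42", 0);

        ret = av_opt_set_dict(s, &opts);        /* consumes the entries it recognizes */

        /* leftovers were not found on the object */
        while ((e = av_dict_get(opts, "", e, AV_DICT_IGNORE_SUFFIX)))
            av_log(s, AV_LOG_WARNING, "unused option %s=%s\n", e->key, e->value);

        av_dict_free(&opts);
        return ret;
    }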
- */ - AV_OPT_FLAG_IMPLICIT_KEY = 1, -}; - -/** - * @defgroup opt_eval_funcs Evaluating option strings - * @{ - * This group of functions can be used to evaluate option strings - * and get numbers out of them. They do the same thing as av_opt_set(), - * except the result is written into the caller-supplied pointer. - * - * @param obj a struct whose first element is a pointer to AVClass. - * @param o an option for which the string is to be evaluated. - * @param val string to be evaluated. - * @param *_out value of the string will be written here. - * - * @return 0 on success, a negative number on failure. - */ -int av_opt_eval_flags (void *obj, const AVOption *o, const char *val, int *flags_out); -int av_opt_eval_int (void *obj, const AVOption *o, const char *val, int *int_out); -int av_opt_eval_int64 (void *obj, const AVOption *o, const char *val, int64_t *int64_out); -int av_opt_eval_float (void *obj, const AVOption *o, const char *val, float *float_out); -int av_opt_eval_double(void *obj, const AVOption *o, const char *val, double *double_out); -int av_opt_eval_q (void *obj, const AVOption *o, const char *val, AVRational *q_out); -/** - * @} - */ - -#define AV_OPT_SEARCH_CHILDREN 0x0001 /**< Search in possible children of the - given object first. */ -/** - * The obj passed to av_opt_find() is fake -- only a double pointer to AVClass - * instead of a required pointer to a struct containing AVClass. This is - * useful for searching for options without needing to allocate the corresponding - * object. - */ -#define AV_OPT_SEARCH_FAKE_OBJ 0x0002 - -/** - * Look for an option in an object. Consider only options which - * have all the specified flags set. - * - * @param[in] obj A pointer to a struct whose first element is a - * pointer to an AVClass. - * Alternatively a double pointer to an AVClass, if - * AV_OPT_SEARCH_FAKE_OBJ search flag is set. - * @param[in] name The name of the option to look for. - * @param[in] unit When searching for named constants, name of the unit - * it belongs to. - * @param opt_flags Find only options with all the specified flags set (AV_OPT_FLAG). - * @param search_flags A combination of AV_OPT_SEARCH_*. - * - * @return A pointer to the option found, or NULL if no option - * was found. - * - * @note Options found with AV_OPT_SEARCH_CHILDREN flag may not be settable - * directly with av_set_string3(). Use special calls which take an options - * AVDictionary (e.g. avformat_open_input()) to set options found with this - * flag. - */ -const AVOption *av_opt_find(void *obj, const char *name, const char *unit, - int opt_flags, int search_flags); - -/** - * Look for an option in an object. Consider only options which - * have all the specified flags set. - * - * @param[in] obj A pointer to a struct whose first element is a - * pointer to an AVClass. - * Alternatively a double pointer to an AVClass, if - * AV_OPT_SEARCH_FAKE_OBJ search flag is set. - * @param[in] name The name of the option to look for. - * @param[in] unit When searching for named constants, name of the unit - * it belongs to. - * @param opt_flags Find only options with all the specified flags set (AV_OPT_FLAG). - * @param search_flags A combination of AV_OPT_SEARCH_*. - * @param[out] target_obj if non-NULL, an object to which the option belongs will be - * written here. It may be different from obj if AV_OPT_SEARCH_CHILDREN is present - * in search_flags. This parameter is ignored if search_flags contain - * AV_OPT_SEARCH_FAKE_OBJ. 
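A sketch of the AV_OPT_SEARCH_FAKE_OBJ usage described above, probing for an option without allocating the corresponding object; avformat_get_class() is used here only as a convenient source of an AVClass:

    #include <libavformat/avformat.h>
    #include <libavutil/opt.h>

    static int format_layer_has_option(const char *name)
    {
        const AVClass *cls = avformat_get_class();
        /* with AV_OPT_SEARCH_FAKE_OBJ the "object" is just a pointer to the AVClass pointer */
        return av_opt_find((void *)&cls, name, NULL, 0, AV_OPT_SEARCH_FAKE_OBJ) != NULL;
    }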
- * - * @return A pointer to the option found, or NULL if no option - * was found. - */ -const AVOption *av_opt_find2(void *obj, const char *name, const char *unit, - int opt_flags, int search_flags, void **target_obj); - -/** - * Iterate over all AVOptions belonging to obj. - * - * @param obj an AVOptions-enabled struct or a double pointer to an - * AVClass describing it. - * @param prev result of the previous call to av_opt_next() on this object - * or NULL - * @return next AVOption or NULL - */ -const AVOption *av_opt_next(void *obj, const AVOption *prev); - -/** - * Iterate over AVOptions-enabled children of obj. - * - * @param prev result of a previous call to this function or NULL - * @return next AVOptions-enabled child or NULL - */ -void *av_opt_child_next(void *obj, void *prev); - -/** - * Iterate over potential AVOptions-enabled children of parent. - * - * @param prev result of a previous call to this function or NULL - * @return AVClass corresponding to next potential child or NULL - */ -const AVClass *av_opt_child_class_next(const AVClass *parent, const AVClass *prev); - -/** - * @defgroup opt_set_funcs Option setting functions - * @{ - * Those functions set the field of obj with the given name to value. - * - * @param[in] obj A struct whose first element is a pointer to an AVClass. - * @param[in] name the name of the field to set - * @param[in] val The value to set. In case of av_opt_set() if the field is not - * of a string type, then the given string is parsed. - * SI postfixes and some named scalars are supported. - * If the field is of a numeric type, it has to be a numeric or named - * scalar. Behavior with more than one scalar and +- infix operators - * is undefined. - * If the field is of a flags type, it has to be a sequence of numeric - * scalars or named flags separated by '+' or '-'. Prefixing a flag - * with '+' causes it to be set without affecting the other flags; - * similarly, '-' unsets a flag. - * @param search_flags flags passed to av_opt_find2. I.e. if AV_OPT_SEARCH_CHILDREN - * is passed here, then the option may be set on a child of obj. - * - * @return 0 if the value has been set, or an AVERROR code in case of - * error: - * AVERROR_OPTION_NOT_FOUND if no matching option exists - * AVERROR(ERANGE) if the value is out of range - * AVERROR(EINVAL) if the value is not valid - */ -int av_opt_set (void *obj, const char *name, const char *val, int search_flags); -int av_opt_set_int (void *obj, const char *name, int64_t val, int search_flags); -int av_opt_set_double(void *obj, const char *name, double val, int search_flags); -int av_opt_set_q (void *obj, const char *name, AVRational val, int search_flags); -int av_opt_set_bin (void *obj, const char *name, const uint8_t *val, int size, int search_flags); -int av_opt_set_image_size(void *obj, const char *name, int w, int h, int search_flags); -int av_opt_set_pixel_fmt (void *obj, const char *name, enum AVPixelFormat fmt, int search_flags); -int av_opt_set_sample_fmt(void *obj, const char *name, enum AVSampleFormat fmt, int search_flags); -int av_opt_set_video_rate(void *obj, const char *name, AVRational val, int search_flags); - -/** - * Set a binary option to an integer list. 
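A sketch of the enumeration pattern recommended earlier in this header for an already-initialized object: walk its own options with av_opt_next(), then recurse into attached children via av_opt_child_next():

    #include <libavutil/log.h>
    #include <libavutil/opt.h>

    static void list_options(void *obj)
    {
        const AVOption *opt = NULL;
        void *child = NULL;

        while ((opt = av_opt_next(obj, opt)))
            av_log(obj, AV_LOG_INFO, "%s: %s\n", opt->name, opt->help ? opt->help : "");

        /* options of already-attached children (e.g. a private context) */
        while ((child = av_opt_child_next(obj, child)))
            list_options(child);
    }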
- * - * @param obj AVClass object to set options on - * @param name name of the binary option - * @param val pointer to an integer list (must have the correct type with - * regard to the contents of the list) - * @param term list terminator (usually 0 or -1) - * @param flags search flags - */ -#define av_opt_set_int_list(obj, name, val, term, flags) \ - (av_int_list_length(val, term) > INT_MAX / sizeof(*(val)) ? \ - AVERROR(EINVAL) : \ - av_opt_set_bin(obj, name, (const uint8_t *)(val), \ - av_int_list_length(val, term) * sizeof(*(val)), flags)) -/** - * @} - */ - -/** - * @defgroup opt_get_funcs Option getting functions - * @{ - * Those functions get a value of the option with the given name from an object. - * - * @param[in] obj a struct whose first element is a pointer to an AVClass. - * @param[in] name name of the option to get. - * @param[in] search_flags flags passed to av_opt_find2. I.e. if AV_OPT_SEARCH_CHILDREN - * is passed here, then the option may be found in a child of obj. - * @param[out] out_val value of the option will be written here - * @return 0 on success, a negative error code otherwise - */ -/** - * @note the returned string will av_malloc()ed and must be av_free()ed by the caller - */ -int av_opt_get (void *obj, const char *name, int search_flags, uint8_t **out_val); -int av_opt_get_int (void *obj, const char *name, int search_flags, int64_t *out_val); -int av_opt_get_double(void *obj, const char *name, int search_flags, double *out_val); -int av_opt_get_q (void *obj, const char *name, int search_flags, AVRational *out_val); -int av_opt_get_image_size(void *obj, const char *name, int search_flags, int *w_out, int *h_out); -int av_opt_get_pixel_fmt (void *obj, const char *name, int search_flags, enum AVPixelFormat *out_fmt); -int av_opt_get_sample_fmt(void *obj, const char *name, int search_flags, enum AVSampleFormat *out_fmt); -int av_opt_get_video_rate(void *obj, const char *name, int search_flags, AVRational *out_val); -/** - * @} - */ -/** - * Gets a pointer to the requested field in a struct. - * This function allows accessing a struct even when its fields are moved or - * renamed since the application making the access has been compiled, - * - * @returns a pointer to the field, it can be cast to the correct type and read - * or written to. - */ -void *av_opt_ptr(const AVClass *avclass, void *obj, const char *name); - -/** - * Free an AVOptionRanges struct and set it to NULL. - */ -void av_opt_freep_ranges(AVOptionRanges **ranges); - -/** - * Get a list of allowed ranges for the given option. - * - * The returned list may depend on other fields in obj like for example profile. - * - * @param flags is a bitmask of flags, undefined flags should not be set and should be ignored - * AV_OPT_SEARCH_FAKE_OBJ indicates that the obj is a double pointer to a AVClass instead of a full instance - * - * The result must be freed with av_opt_freep_ranges. - * - * @return >= 0 on success, a negative errro code otherwise - */ -int av_opt_query_ranges(AVOptionRanges **, void *obj, const char *key, int flags); - -/** - * Get a default list of allowed ranges for the given option. - * - * This list is constructed without using the AVClass.query_ranges() callback - * and can be used as fallback from within the callback. - * - * @param flags is a bitmask of flags, undefined flags should not be set and should be ignored - * AV_OPT_SEARCH_FAKE_OBJ indicates that the obj is a double pointer to a AVClass instead of a full instance - * - * The result must be freed with av_opt_free_ranges. 
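The av_opt_set_int_list() macro declared here wraps av_opt_set_bin() for a terminated integer list. A sketch of the common use, setting a binary "pix_fmts" option such as the one exposed by libavfilter's buffersink; the sink pointer is assumed to be such an AVOptions-enabled object:

    #include <libavutil/opt.h>
    #include <libavutil/pixfmt.h>

    static int set_allowed_formats(void *sink /* assumed: object with a binary "pix_fmts" option */)
    {
        static const enum AVPixelFormat pix_fmts[] = {
            AV_PIX_FMT_YUV420P, AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE   /* terminator */
        };
        return av_opt_set_int_list(sink, "pix_fmts", pix_fmts, AV_PIX_FMT_NONE,
                                   AV_OPT_SEARCH_CHILDREN);
    }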
- * - * @return >= 0 on success, a negative errro code otherwise - */ -int av_opt_query_ranges_default(AVOptionRanges **, void *obj, const char *key, int flags); - -/** - * @} - */ - -#endif /* AVUTIL_OPT_H */ diff --git a/3rdparty/include/ffmpeg_/libavutil/parseutils.h b/3rdparty/include/ffmpeg_/libavutil/parseutils.h deleted file mode 100644 index 3eb35fc050..0000000000 --- a/3rdparty/include/ffmpeg_/libavutil/parseutils.h +++ /dev/null @@ -1,174 +0,0 @@ -/* - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#ifndef AVUTIL_PARSEUTILS_H -#define AVUTIL_PARSEUTILS_H - -#include - -#include "rational.h" - -/** - * @file - * misc parsing utilities - */ - -/** - * Parse str and store the parsed ratio in q. - * - * Note that a ratio with infinite (1/0) or negative value is - * considered valid, so you should check on the returned value if you - * want to exclude those values. - * - * The undefined value can be expressed using the "0:0" string. - * - * @param[in,out] q pointer to the AVRational which will contain the ratio - * @param[in] str the string to parse: it has to be a string in the format - * num:den, a float number or an expression - * @param[in] max the maximum allowed numerator and denominator - * @param[in] log_offset log level offset which is applied to the log - * level of log_ctx - * @param[in] log_ctx parent logging context - * @return >= 0 on success, a negative error code otherwise - */ -int av_parse_ratio(AVRational *q, const char *str, int max, - int log_offset, void *log_ctx); - -#define av_parse_ratio_quiet(rate, str, max) \ - av_parse_ratio(rate, str, max, AV_LOG_MAX_OFFSET, NULL) - -/** - * Parse str and put in width_ptr and height_ptr the detected values. - * - * @param[in,out] width_ptr pointer to the variable which will contain the detected - * width value - * @param[in,out] height_ptr pointer to the variable which will contain the detected - * height value - * @param[in] str the string to parse: it has to be a string in the format - * width x height or a valid video size abbreviation. - * @return >= 0 on success, a negative error code otherwise - */ -int av_parse_video_size(int *width_ptr, int *height_ptr, const char *str); - -/** - * Parse str and store the detected values in *rate. - * - * @param[in,out] rate pointer to the AVRational which will contain the detected - * frame rate - * @param[in] str the string to parse: it has to be a string in the format - * rate_num / rate_den, a float number or a valid video rate abbreviation - * @return >= 0 on success, a negative error code otherwise - */ -int av_parse_video_rate(AVRational *rate, const char *str); - -/** - * Put the RGBA values that correspond to color_string in rgba_color. - * - * @param color_string a string specifying a color. 
It can be the name of - * a color (case insensitive match) or a [0x|#]RRGGBB[AA] sequence, - * possibly followed by "@" and a string representing the alpha - * component. - * The alpha component may be a string composed by "0x" followed by an - * hexadecimal number or a decimal number between 0.0 and 1.0, which - * represents the opacity value (0x00/0.0 means completely transparent, - * 0xff/1.0 completely opaque). - * If the alpha component is not specified then 0xff is assumed. - * The string "random" will result in a random color. - * @param slen length of the initial part of color_string containing the - * color. It can be set to -1 if color_string is a null terminated string - * containing nothing else than the color. - * @return >= 0 in case of success, a negative value in case of - * failure (for example if color_string cannot be parsed). - */ -int av_parse_color(uint8_t *rgba_color, const char *color_string, int slen, - void *log_ctx); - -/** - * Parse timestr and return in *time a corresponding number of - * microseconds. - * - * @param timeval puts here the number of microseconds corresponding - * to the string in timestr. If the string represents a duration, it - * is the number of microseconds contained in the time interval. If - * the string is a date, is the number of microseconds since 1st of - * January, 1970 up to the time of the parsed date. If timestr cannot - * be successfully parsed, set *time to INT64_MIN. - - * @param timestr a string representing a date or a duration. - * - If a date the syntax is: - * @code - * [{YYYY-MM-DD|YYYYMMDD}[T|t| ]]{{HH:MM:SS[.m...]]]}|{HHMMSS[.m...]]]}}[Z] - * now - * @endcode - * If the value is "now" it takes the current time. - * Time is local time unless Z is appended, in which case it is - * interpreted as UTC. - * If the year-month-day part is not specified it takes the current - * year-month-day. - * - If a duration the syntax is: - * @code - * [-][HH:]MM:SS[.m...] - * [-]S+[.m...] - * @endcode - * @param duration flag which tells how to interpret timestr, if not - * zero timestr is interpreted as a duration, otherwise as a date - * @return 0 in case of success, a negative value corresponding to an - * AVERROR code otherwise - */ -int av_parse_time(int64_t *timeval, const char *timestr, int duration); - -/** - * Parse the input string p according to the format string fmt and - * store its results in the structure dt. - * This implementation supports only a subset of the formats supported - * by the standard strptime(). - * - * In particular it actually supports the parameters: - * - %H: the hour as a decimal number, using a 24-hour clock, in the - * range '00' through '23' - * - %J: hours as a decimal number, in the range '0' through INT_MAX - * - %M: the minute as a decimal number, using a 24-hour clock, in the - * range '00' through '59' - * - %S: the second as a decimal number, using a 24-hour clock, in the - * range '00' through '59' - * - %Y: the year as a decimal number, using the Gregorian calendar - * - %m: the month as a decimal number, in the range '1' through '12' - * - %d: the day of the month as a decimal number, in the range '1' - * through '31' - * - %%: a literal '%' - * - * @return a pointer to the first character not processed in this - * function call, or NULL in case the function fails to match all of - * the fmt string and therefore an error occurred - */ -char *av_small_strptime(const char *p, const char *fmt, struct tm *dt); - -/** - * Attempt to find a specific tag in a URL. 
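A short sketch exercising the parsing helpers declared in this header; the input strings are arbitrary examples of the syntaxes documented above:

    #include <stdint.h>
    #include <libavutil/parseutils.h>
    #include <libavutil/rational.h>

    static int parse_demo(void)
    {
        int w, h;
        AVRational rate;
        uint8_t rgba[4];
        int64_t t;

        if (av_parse_video_size(&w, &h, "vga") < 0)           /* size abbreviation -> 640x480 */
            return -1;
        if (av_parse_video_rate(&rate, "30000/1001") < 0)     /* NTSC frame rate */
            return -1;
        if (av_parse_color(rgba, "red@0.5", -1, NULL) < 0)    /* half-transparent red */
            return -1;
        if (av_parse_time(&t, "00:00:01.5", 1) < 0)           /* duration: 1.5 s in microseconds */
            return -1;
        return 0;
    }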
- * - * syntax: '?tag1=val1&tag2=val2...'. Little URL decoding is done. - * Return 1 if found. - */ -int av_find_info_tag(char *arg, int arg_size, const char *tag1, const char *info); - -/** - * Convert the decomposed UTC time in tm to a time_t value. - */ -time_t av_timegm(struct tm *tm); - -#endif /* AVUTIL_PARSEUTILS_H */ diff --git a/3rdparty/include/ffmpeg_/libavutil/pixdesc.h b/3rdparty/include/ffmpeg_/libavutil/pixdesc.h deleted file mode 100644 index f4482e81fd..0000000000 --- a/3rdparty/include/ffmpeg_/libavutil/pixdesc.h +++ /dev/null @@ -1,289 +0,0 @@ -/* - * pixel format descriptor - * Copyright (c) 2009 Michael Niedermayer - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#ifndef AVUTIL_PIXDESC_H -#define AVUTIL_PIXDESC_H - -#include -#include "pixfmt.h" - -typedef struct AVComponentDescriptor{ - uint16_t plane :2; ///< which of the 4 planes contains the component - - /** - * Number of elements between 2 horizontally consecutive pixels minus 1. - * Elements are bits for bitstream formats, bytes otherwise. - */ - uint16_t step_minus1 :3; - - /** - * Number of elements before the component of the first pixel plus 1. - * Elements are bits for bitstream formats, bytes otherwise. - */ - uint16_t offset_plus1 :3; - uint16_t shift :3; ///< number of least significant bits that must be shifted away to get the value - uint16_t depth_minus1 :4; ///< number of bits in the component minus 1 -}AVComponentDescriptor; - -/** - * Descriptor that unambiguously describes how the bits of a pixel are - * stored in the up to 4 data planes of an image. It also stores the - * subsampling factors and number of components. - * - * @note This is separate of the colorspace (RGB, YCbCr, YPbPr, JPEG-style YUV - * and all the YUV variants) AVPixFmtDescriptor just stores how values - * are stored not what these values represent. - */ -typedef struct AVPixFmtDescriptor{ - const char *name; - uint8_t nb_components; ///< The number of components each pixel has, (1-4) - - /** - * Amount to shift the luma width right to find the chroma width. - * For YV12 this is 1 for example. - * chroma_width = -((-luma_width) >> log2_chroma_w) - * The note above is needed to ensure rounding up. - * This value only refers to the chroma components. - */ - uint8_t log2_chroma_w; ///< chroma_width = -((-luma_width )>>log2_chroma_w) - - /** - * Amount to shift the luma height right to find the chroma height. - * For YV12 this is 1 for example. - * chroma_height= -((-luma_height) >> log2_chroma_h) - * The note above is needed to ensure rounding up. - * This value only refers to the chroma components. - */ - uint8_t log2_chroma_h; - uint8_t flags; - - /** - * Parameters that describe how pixels are packed. - * If the format has 2 or 4 components, then alpha is last. 
- * If the format has 1 or 2 components, then luma is 0. - * If the format has 3 or 4 components, - * if the RGB flag is set then 0 is red, 1 is green and 2 is blue; - * otherwise 0 is luma, 1 is chroma-U and 2 is chroma-V. - */ - AVComponentDescriptor comp[4]; -}AVPixFmtDescriptor; - -/** - * Pixel format is big-endian. - */ -#define AV_PIX_FMT_FLAG_BE (1 << 0) -/** - * Pixel format has a palette in data[1], values are indexes in this palette. - */ -#define AV_PIX_FMT_FLAG_PAL (1 << 1) -/** - * All values of a component are bit-wise packed end to end. - */ -#define AV_PIX_FMT_FLAG_BITSTREAM (1 << 2) -/** - * Pixel format is an HW accelerated format. - */ -#define AV_PIX_FMT_FLAG_HWACCEL (1 << 3) -/** - * At least one pixel component is not in the first data plane. - */ -#define AV_PIX_FMT_FLAG_PLANAR (1 << 4) -/** - * The pixel format contains RGB-like data (as opposed to YUV/grayscale). - */ -#define AV_PIX_FMT_FLAG_RGB (1 << 5) -/** - * The pixel format is "pseudo-paletted". This means that FFmpeg treats it as - * paletted internally, but the palette is generated by the decoder and is not - * stored in the file. - */ -#define AV_PIX_FMT_FLAG_PSEUDOPAL (1 << 6) -/** - * The pixel format has an alpha channel. - */ -#define AV_PIX_FMT_FLAG_ALPHA (1 << 7) - -#if FF_API_PIX_FMT -/** - * @deprecate use the AV_PIX_FMT_FLAG_* flags - */ -#define PIX_FMT_BE AV_PIX_FMT_FLAG_BE -#define PIX_FMT_PAL AV_PIX_FMT_FLAG_PAL -#define PIX_FMT_BITSTREAM AV_PIX_FMT_FLAG_BITSTREAM -#define PIX_FMT_HWACCEL AV_PIX_FMT_FLAG_HWACCEL -#define PIX_FMT_PLANAR AV_PIX_FMT_FLAG_PLANAR -#define PIX_FMT_RGB AV_PIX_FMT_FLAG_RGB -#define PIX_FMT_PSEUDOPAL AV_PIX_FMT_FLAG_PSEUDOPAL -#define PIX_FMT_ALPHA AV_PIX_FMT_FLAG_ALPHA -#endif - -#if FF_API_PIX_FMT_DESC -/** - * The array of all the pixel format descriptors. - */ -extern const AVPixFmtDescriptor av_pix_fmt_descriptors[]; -#endif - -/** - * Read a line from an image, and write the values of the - * pixel format component c to dst. - * - * @param data the array containing the pointers to the planes of the image - * @param linesize the array containing the linesizes of the image - * @param desc the pixel format descriptor for the image - * @param x the horizontal coordinate of the first pixel to read - * @param y the vertical coordinate of the first pixel to read - * @param w the width of the line to read, that is the number of - * values to write to dst - * @param read_pal_component if not zero and the format is a paletted - * format writes the values corresponding to the palette - * component c in data[1] to dst, rather than the palette indexes in - * data[0]. The behavior is undefined if the format is not paletted. - */ -void av_read_image_line(uint16_t *dst, const uint8_t *data[4], const int linesize[4], - const AVPixFmtDescriptor *desc, int x, int y, int c, int w, int read_pal_component); - -/** - * Write the values from src to the pixel format component c of an - * image line. - * - * @param src array containing the values to write - * @param data the array containing the pointers to the planes of the - * image to write into. It is supposed to be zeroed. 
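A sketch of av_read_image_line() for the simplest case, one row of a GRAY8 image; the buffer handling is illustrative only:

    #include <stdint.h>
    #include <libavutil/pixdesc.h>

    /* Read one 8-pixel row of a GRAY8 image into 16-bit values. */
    static uint16_t read_gray_row(const uint8_t *row)
    {
        const uint8_t *data[4]     = { row, NULL, NULL, NULL };
        const int      linesize[4] = { 8, 0, 0, 0 };
        uint16_t       dst[8];
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(AV_PIX_FMT_GRAY8);

        /* component 0 (luma), starting at x=0, y=0, 8 values, no palette lookup */
        av_read_image_line(dst, data, linesize, desc, 0, 0, 0, 8, 0);
        return dst[0];
    }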
- * @param linesize the array containing the linesizes of the image - * @param desc the pixel format descriptor for the image - * @param x the horizontal coordinate of the first pixel to write - * @param y the vertical coordinate of the first pixel to write - * @param w the width of the line to write, that is the number of - * values to write to the image line - */ -void av_write_image_line(const uint16_t *src, uint8_t *data[4], const int linesize[4], - const AVPixFmtDescriptor *desc, int x, int y, int c, int w); - -/** - * Return the pixel format corresponding to name. - * - * If there is no pixel format with name name, then looks for a - * pixel format with the name corresponding to the native endian - * format of name. - * For example in a little-endian system, first looks for "gray16", - * then for "gray16le". - * - * Finally if no pixel format has been found, returns AV_PIX_FMT_NONE. - */ -enum AVPixelFormat av_get_pix_fmt(const char *name); - -/** - * Return the short name for a pixel format, NULL in case pix_fmt is - * unknown. - * - * @see av_get_pix_fmt(), av_get_pix_fmt_string() - */ -const char *av_get_pix_fmt_name(enum AVPixelFormat pix_fmt); - -/** - * Print in buf the string corresponding to the pixel format with - * number pix_fmt, or an header if pix_fmt is negative. - * - * @param buf the buffer where to write the string - * @param buf_size the size of buf - * @param pix_fmt the number of the pixel format to print the - * corresponding info string, or a negative value to print the - * corresponding header. - */ -char *av_get_pix_fmt_string (char *buf, int buf_size, enum AVPixelFormat pix_fmt); - -/** - * Return the number of bits per pixel used by the pixel format - * described by pixdesc. Note that this is not the same as the number - * of bits per sample. - * - * The returned number of bits refers to the number of bits actually - * used for storing the pixel information, that is padding bits are - * not counted. - */ -int av_get_bits_per_pixel(const AVPixFmtDescriptor *pixdesc); - -/** - * Return the number of bits per pixel for the pixel format - * described by pixdesc, including any padding or unused bits. - */ -int av_get_padded_bits_per_pixel(const AVPixFmtDescriptor *pixdesc); - -/** - * @return a pixel format descriptor for provided pixel format or NULL if - * this pixel format is unknown. - */ -const AVPixFmtDescriptor *av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt); - -/** - * Iterate over all pixel format descriptors known to libavutil. - * - * @param prev previous descriptor. NULL to get the first descriptor. - * - * @return next descriptor or NULL after the last descriptor - */ -const AVPixFmtDescriptor *av_pix_fmt_desc_next(const AVPixFmtDescriptor *prev); - -/** - * @return an AVPixelFormat id described by desc, or AV_PIX_FMT_NONE if desc - * is not a valid pointer to a pixel format descriptor. - */ -enum AVPixelFormat av_pix_fmt_desc_get_id(const AVPixFmtDescriptor *desc); - -/** - * Utility function to access log2_chroma_w log2_chroma_h from - * the pixel format AVPixFmtDescriptor. - * - * See avcodec_get_chroma_sub_sample() for a function that asserts a - * valid pixel format instead of returning an error code. - * Its recommanded that you use avcodec_get_chroma_sub_sample unless - * you do check the return code! 
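A sketch that queries a format through the descriptor API declared in this header (bits per pixel, plane count, chroma subsampling); the output goes through av_log() purely for illustration:

    #include <libavutil/log.h>
    #include <libavutil/pixdesc.h>

    static void describe(enum AVPixelFormat fmt)
    {
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(fmt);
        int h_shift, v_shift;

        if (!desc)
            return;

        av_log(NULL, AV_LOG_INFO, "%s: %d bpp, %d plane(s)\n",
               av_get_pix_fmt_name(fmt),
               av_get_bits_per_pixel(desc),
               av_pix_fmt_count_planes(fmt));

        if (av_pix_fmt_get_chroma_sub_sample(fmt, &h_shift, &v_shift) >= 0)
            av_log(NULL, AV_LOG_INFO, "chroma subsampling: %d/%d\n", h_shift, v_shift);
    }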
- * - * @param[in] pix_fmt the pixel format - * @param[out] h_shift store log2_chroma_w - * @param[out] v_shift store log2_chroma_h - * - * @return 0 on success, AVERROR(ENOSYS) on invalid or unknown pixel format - */ -int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, - int *h_shift, int *v_shift); - -/** - * @return number of planes in pix_fmt, a negative AVERROR if pix_fmt is not a - * valid pixel format. - */ -int av_pix_fmt_count_planes(enum AVPixelFormat pix_fmt); - -void ff_check_pixfmt_descriptors(void); - -/** - * Utility function to swap the endianness of a pixel format. - * - * @param[in] pix_fmt the pixel format - * - * @return pixel format with swapped endianness if it exists, - * otherwise AV_PIX_FMT_NONE - */ -enum AVPixelFormat av_pix_fmt_swap_endianness(enum AVPixelFormat pix_fmt); - - -#endif /* AVUTIL_PIXDESC_H */ diff --git a/3rdparty/include/ffmpeg_/libavutil/pixfmt.h b/3rdparty/include/ffmpeg_/libavutil/pixfmt.h deleted file mode 100644 index ae32a8f3e2..0000000000 --- a/3rdparty/include/ffmpeg_/libavutil/pixfmt.h +++ /dev/null @@ -1,366 +0,0 @@ -/* - * copyright (c) 2006 Michael Niedermayer - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#ifndef AVUTIL_PIXFMT_H -#define AVUTIL_PIXFMT_H - -/** - * @file - * pixel format definitions - * - */ - -#include "libavutil/avconfig.h" -#include "libavutil/version.h" - -#define AVPALETTE_SIZE 1024 -#define AVPALETTE_COUNT 256 - -/** - * Pixel format. - * - * @note - * PIX_FMT_RGB32 is handled in an endian-specific manner. An RGBA - * color is put together as: - * (A << 24) | (R << 16) | (G << 8) | B - * This is stored as BGRA on little-endian CPU architectures and ARGB on - * big-endian CPUs. - * - * @par - * When the pixel format is palettized RGB (PIX_FMT_PAL8), the palettized - * image data is stored in AVFrame.data[0]. The palette is transported in - * AVFrame.data[1], is 1024 bytes long (256 4-byte entries) and is - * formatted the same as in PIX_FMT_RGB32 described above (i.e., it is - * also endian-specific). Note also that the individual RGB palette - * components stored in AVFrame.data[1] should be in the range 0..255. - * This is important as many custom PAL8 video codecs that were designed - * to run on the IBM VGA graphics adapter use 6-bit palette components. - * - * @par - * For all the 8bit per pixel formats, an RGB32 palette is in data[1] like - * for pal8. This palette is filled in automatically by the function - * allocating the picture. - * - * @note - * Make sure that all newly added big-endian formats have pix_fmt & 1 == 1 - * and that all newly added little-endian formats have pix_fmt & 1 == 0. - * This allows simpler detection of big vs little-endian. 
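To make the palette layout described above concrete, a sketch that fills the 256-entry, 1024-byte palette expected in data[1] for PAL8, packing each entry exactly as the (A << 24) | (R << 16) | (G << 8) | B rule states; the grayscale ramp is an arbitrary example:

    #include <stdint.h>

    /* Each entry is packed like PIX_FMT_RGB32, stored in native endianness. */
    static void fill_gray_palette(uint32_t palette[256])
    {
        int i;
        for (i = 0; i < 256; i++)
            palette[i] = (0xFFu << 24) | ((unsigned)i << 16) | (i << 8) | i;
    }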
- */ -enum AVPixelFormat { - AV_PIX_FMT_NONE = -1, - AV_PIX_FMT_YUV420P, ///< planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples) - AV_PIX_FMT_YUYV422, ///< packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr - AV_PIX_FMT_RGB24, ///< packed RGB 8:8:8, 24bpp, RGBRGB... - AV_PIX_FMT_BGR24, ///< packed RGB 8:8:8, 24bpp, BGRBGR... - AV_PIX_FMT_YUV422P, ///< planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples) - AV_PIX_FMT_YUV444P, ///< planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples) - AV_PIX_FMT_YUV410P, ///< planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples) - AV_PIX_FMT_YUV411P, ///< planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) - AV_PIX_FMT_GRAY8, ///< Y , 8bpp - AV_PIX_FMT_MONOWHITE, ///< Y , 1bpp, 0 is white, 1 is black, in each byte pixels are ordered from the msb to the lsb - AV_PIX_FMT_MONOBLACK, ///< Y , 1bpp, 0 is black, 1 is white, in each byte pixels are ordered from the msb to the lsb - AV_PIX_FMT_PAL8, ///< 8 bit with PIX_FMT_RGB32 palette - AV_PIX_FMT_YUVJ420P, ///< planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV420P and setting color_range - AV_PIX_FMT_YUVJ422P, ///< planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV422P and setting color_range - AV_PIX_FMT_YUVJ444P, ///< planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV444P and setting color_range - AV_PIX_FMT_XVMC_MPEG2_MC,///< XVideo Motion Acceleration via common packet passing - AV_PIX_FMT_XVMC_MPEG2_IDCT, - AV_PIX_FMT_UYVY422, ///< packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1 - AV_PIX_FMT_UYYVYY411, ///< packed YUV 4:1:1, 12bpp, Cb Y0 Y1 Cr Y2 Y3 - AV_PIX_FMT_BGR8, ///< packed RGB 3:3:2, 8bpp, (msb)2B 3G 3R(lsb) - AV_PIX_FMT_BGR4, ///< packed RGB 1:2:1 bitstream, 4bpp, (msb)1B 2G 1R(lsb), a byte contains two pixels, the first pixel in the byte is the one composed by the 4 msb bits - AV_PIX_FMT_BGR4_BYTE, ///< packed RGB 1:2:1, 8bpp, (msb)1B 2G 1R(lsb) - AV_PIX_FMT_RGB8, ///< packed RGB 3:3:2, 8bpp, (msb)2R 3G 3B(lsb) - AV_PIX_FMT_RGB4, ///< packed RGB 1:2:1 bitstream, 4bpp, (msb)1R 2G 1B(lsb), a byte contains two pixels, the first pixel in the byte is the one composed by the 4 msb bits - AV_PIX_FMT_RGB4_BYTE, ///< packed RGB 1:2:1, 8bpp, (msb)1R 2G 1B(lsb) - AV_PIX_FMT_NV12, ///< planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (first byte U and the following byte V) - AV_PIX_FMT_NV21, ///< as above, but U and V bytes are swapped - - AV_PIX_FMT_ARGB, ///< packed ARGB 8:8:8:8, 32bpp, ARGBARGB... - AV_PIX_FMT_RGBA, ///< packed RGBA 8:8:8:8, 32bpp, RGBARGBA... - AV_PIX_FMT_ABGR, ///< packed ABGR 8:8:8:8, 32bpp, ABGRABGR... - AV_PIX_FMT_BGRA, ///< packed BGRA 8:8:8:8, 32bpp, BGRABGRA... 
- - AV_PIX_FMT_GRAY16BE, ///< Y , 16bpp, big-endian - AV_PIX_FMT_GRAY16LE, ///< Y , 16bpp, little-endian - AV_PIX_FMT_YUV440P, ///< planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples) - AV_PIX_FMT_YUVJ440P, ///< planar YUV 4:4:0 full scale (JPEG), deprecated in favor of PIX_FMT_YUV440P and setting color_range - AV_PIX_FMT_YUVA420P, ///< planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples) - AV_PIX_FMT_VDPAU_H264,///< H.264 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers - AV_PIX_FMT_VDPAU_MPEG1,///< MPEG-1 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers - AV_PIX_FMT_VDPAU_MPEG2,///< MPEG-2 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers - AV_PIX_FMT_VDPAU_WMV3,///< WMV3 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers - AV_PIX_FMT_VDPAU_VC1, ///< VC-1 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers - AV_PIX_FMT_RGB48BE, ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as big-endian - AV_PIX_FMT_RGB48LE, ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as little-endian - - AV_PIX_FMT_RGB565BE, ///< packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), big-endian - AV_PIX_FMT_RGB565LE, ///< packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), little-endian - AV_PIX_FMT_RGB555BE, ///< packed RGB 5:5:5, 16bpp, (msb)1A 5R 5G 5B(lsb), big-endian, most significant bit to 0 - AV_PIX_FMT_RGB555LE, ///< packed RGB 5:5:5, 16bpp, (msb)1A 5R 5G 5B(lsb), little-endian, most significant bit to 0 - - AV_PIX_FMT_BGR565BE, ///< packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), big-endian - AV_PIX_FMT_BGR565LE, ///< packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), little-endian - AV_PIX_FMT_BGR555BE, ///< packed BGR 5:5:5, 16bpp, (msb)1A 5B 5G 5R(lsb), big-endian, most significant bit to 1 - AV_PIX_FMT_BGR555LE, ///< packed BGR 5:5:5, 16bpp, (msb)1A 5B 5G 5R(lsb), little-endian, most significant bit to 1 - - AV_PIX_FMT_VAAPI_MOCO, ///< HW acceleration through VA API at motion compensation entry-point, Picture.data[3] contains a vaapi_render_state struct which contains macroblocks as well as various fields extracted from headers - AV_PIX_FMT_VAAPI_IDCT, ///< HW acceleration through VA API at IDCT entry-point, Picture.data[3] contains a vaapi_render_state struct which contains fields extracted from headers - AV_PIX_FMT_VAAPI_VLD, ///< HW decoding through VA API, Picture.data[3] contains a vaapi_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers - - AV_PIX_FMT_YUV420P16LE, ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian - AV_PIX_FMT_YUV420P16BE, ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian - AV_PIX_FMT_YUV422P16LE, ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian - AV_PIX_FMT_YUV422P16BE, ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian - AV_PIX_FMT_YUV444P16LE, ///< planar 
YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian - AV_PIX_FMT_YUV444P16BE, ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian - AV_PIX_FMT_VDPAU_MPEG4, ///< MPEG4 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers - AV_PIX_FMT_DXVA2_VLD, ///< HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer - - AV_PIX_FMT_RGB444LE, ///< packed RGB 4:4:4, 16bpp, (msb)4A 4R 4G 4B(lsb), little-endian, most significant bits to 0 - AV_PIX_FMT_RGB444BE, ///< packed RGB 4:4:4, 16bpp, (msb)4A 4R 4G 4B(lsb), big-endian, most significant bits to 0 - AV_PIX_FMT_BGR444LE, ///< packed BGR 4:4:4, 16bpp, (msb)4A 4B 4G 4R(lsb), little-endian, most significant bits to 1 - AV_PIX_FMT_BGR444BE, ///< packed BGR 4:4:4, 16bpp, (msb)4A 4B 4G 4R(lsb), big-endian, most significant bits to 1 - AV_PIX_FMT_GRAY8A, ///< 8bit gray, 8bit alpha - AV_PIX_FMT_BGR48BE, ///< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as big-endian - AV_PIX_FMT_BGR48LE, ///< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as little-endian - - //the following 10 formats have the disadvantage of needing 1 format for each bit depth, thus - //If you want to support multiple bit depths, then using AV_PIX_FMT_YUV420P16* with the bpp stored separately - //is better - AV_PIX_FMT_YUV420P9BE, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian - AV_PIX_FMT_YUV420P9LE, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian - AV_PIX_FMT_YUV420P10BE,///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian - AV_PIX_FMT_YUV420P10LE,///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian - AV_PIX_FMT_YUV422P10BE,///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian - AV_PIX_FMT_YUV422P10LE,///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian - AV_PIX_FMT_YUV444P9BE, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian - AV_PIX_FMT_YUV444P9LE, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian - AV_PIX_FMT_YUV444P10BE,///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian - AV_PIX_FMT_YUV444P10LE,///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian - AV_PIX_FMT_YUV422P9BE, ///< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian - AV_PIX_FMT_YUV422P9LE, ///< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian - AV_PIX_FMT_VDA_VLD, ///< hardware decoding through VDA - -#ifdef AV_PIX_FMT_ABI_GIT_MASTER - AV_PIX_FMT_RGBA64BE, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian - AV_PIX_FMT_RGBA64LE, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian - AV_PIX_FMT_BGRA64BE, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian - AV_PIX_FMT_BGRA64LE, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian -#endif - AV_PIX_FMT_GBRP, ///< planar GBR 4:4:4 24bpp - AV_PIX_FMT_GBRP9BE, ///< planar GBR 
4:4:4 27bpp, big-endian - AV_PIX_FMT_GBRP9LE, ///< planar GBR 4:4:4 27bpp, little-endian - AV_PIX_FMT_GBRP10BE, ///< planar GBR 4:4:4 30bpp, big-endian - AV_PIX_FMT_GBRP10LE, ///< planar GBR 4:4:4 30bpp, little-endian - AV_PIX_FMT_GBRP16BE, ///< planar GBR 4:4:4 48bpp, big-endian - AV_PIX_FMT_GBRP16LE, ///< planar GBR 4:4:4 48bpp, little-endian - - /** - * duplicated pixel formats for compatibility with libav. - * FFmpeg supports these formats since May 8 2012 and Jan 28 2012 (commits f9ca1ac7 and 143a5c55) - * Libav added them Oct 12 2012 with incompatible values (commit 6d5600e85) - */ - AV_PIX_FMT_YUVA422P_LIBAV, ///< planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples) - AV_PIX_FMT_YUVA444P_LIBAV, ///< planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples) - - AV_PIX_FMT_YUVA420P9BE, ///< planar YUV 4:2:0 22.5bpp, (1 Cr & Cb sample per 2x2 Y & A samples), big-endian - AV_PIX_FMT_YUVA420P9LE, ///< planar YUV 4:2:0 22.5bpp, (1 Cr & Cb sample per 2x2 Y & A samples), little-endian - AV_PIX_FMT_YUVA422P9BE, ///< planar YUV 4:2:2 27bpp, (1 Cr & Cb sample per 2x1 Y & A samples), big-endian - AV_PIX_FMT_YUVA422P9LE, ///< planar YUV 4:2:2 27bpp, (1 Cr & Cb sample per 2x1 Y & A samples), little-endian - AV_PIX_FMT_YUVA444P9BE, ///< planar YUV 4:4:4 36bpp, (1 Cr & Cb sample per 1x1 Y & A samples), big-endian - AV_PIX_FMT_YUVA444P9LE, ///< planar YUV 4:4:4 36bpp, (1 Cr & Cb sample per 1x1 Y & A samples), little-endian - AV_PIX_FMT_YUVA420P10BE, ///< planar YUV 4:2:0 25bpp, (1 Cr & Cb sample per 2x2 Y & A samples, big-endian) - AV_PIX_FMT_YUVA420P10LE, ///< planar YUV 4:2:0 25bpp, (1 Cr & Cb sample per 2x2 Y & A samples, little-endian) - AV_PIX_FMT_YUVA422P10BE, ///< planar YUV 4:2:2 30bpp, (1 Cr & Cb sample per 2x1 Y & A samples, big-endian) - AV_PIX_FMT_YUVA422P10LE, ///< planar YUV 4:2:2 30bpp, (1 Cr & Cb sample per 2x1 Y & A samples, little-endian) - AV_PIX_FMT_YUVA444P10BE, ///< planar YUV 4:4:4 40bpp, (1 Cr & Cb sample per 1x1 Y & A samples, big-endian) - AV_PIX_FMT_YUVA444P10LE, ///< planar YUV 4:4:4 40bpp, (1 Cr & Cb sample per 1x1 Y & A samples, little-endian) - AV_PIX_FMT_YUVA420P16BE, ///< planar YUV 4:2:0 40bpp, (1 Cr & Cb sample per 2x2 Y & A samples, big-endian) - AV_PIX_FMT_YUVA420P16LE, ///< planar YUV 4:2:0 40bpp, (1 Cr & Cb sample per 2x2 Y & A samples, little-endian) - AV_PIX_FMT_YUVA422P16BE, ///< planar YUV 4:2:2 48bpp, (1 Cr & Cb sample per 2x1 Y & A samples, big-endian) - AV_PIX_FMT_YUVA422P16LE, ///< planar YUV 4:2:2 48bpp, (1 Cr & Cb sample per 2x1 Y & A samples, little-endian) - AV_PIX_FMT_YUVA444P16BE, ///< planar YUV 4:4:4 64bpp, (1 Cr & Cb sample per 1x1 Y & A samples, big-endian) - AV_PIX_FMT_YUVA444P16LE, ///< planar YUV 4:4:4 64bpp, (1 Cr & Cb sample per 1x1 Y & A samples, little-endian) - - AV_PIX_FMT_VDPAU, ///< HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface - - AV_PIX_FMT_XYZ12LE, ///< packed XYZ 4:4:4, 36 bpp, (msb) 12X, 12Y, 12Z (lsb), the 2-byte value for each X/Y/Z is stored as little-endian, the 4 lower bits are set to 0 - AV_PIX_FMT_XYZ12BE, ///< packed XYZ 4:4:4, 36 bpp, (msb) 12X, 12Y, 12Z (lsb), the 2-byte value for each X/Y/Z is stored as big-endian, the 4 lower bits are set to 0 - -#ifndef AV_PIX_FMT_ABI_GIT_MASTER - AV_PIX_FMT_RGBA64BE=0x123, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian - AV_PIX_FMT_RGBA64LE, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as 
little-endian - AV_PIX_FMT_BGRA64BE, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian - AV_PIX_FMT_BGRA64LE, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian -#endif - AV_PIX_FMT_0RGB=0x123+4, ///< packed RGB 8:8:8, 32bpp, 0RGB0RGB... - AV_PIX_FMT_RGB0, ///< packed RGB 8:8:8, 32bpp, RGB0RGB0... - AV_PIX_FMT_0BGR, ///< packed BGR 8:8:8, 32bpp, 0BGR0BGR... - AV_PIX_FMT_BGR0, ///< packed BGR 8:8:8, 32bpp, BGR0BGR0... - AV_PIX_FMT_YUVA444P, ///< planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples) - AV_PIX_FMT_YUVA422P, ///< planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples) - - AV_PIX_FMT_YUV420P12BE, ///< planar YUV 4:2:0,18bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian - AV_PIX_FMT_YUV420P12LE, ///< planar YUV 4:2:0,18bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian - AV_PIX_FMT_YUV420P14BE, ///< planar YUV 4:2:0,21bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian - AV_PIX_FMT_YUV420P14LE, ///< planar YUV 4:2:0,21bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian - AV_PIX_FMT_YUV422P12BE, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian - AV_PIX_FMT_YUV422P12LE, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian - AV_PIX_FMT_YUV422P14BE, ///< planar YUV 4:2:2,28bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian - AV_PIX_FMT_YUV422P14LE, ///< planar YUV 4:2:2,28bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian - AV_PIX_FMT_YUV444P12BE, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian - AV_PIX_FMT_YUV444P12LE, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian - AV_PIX_FMT_YUV444P14BE, ///< planar YUV 4:4:4,42bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian - AV_PIX_FMT_YUV444P14LE, ///< planar YUV 4:4:4,42bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian - AV_PIX_FMT_GBRP12BE, ///< planar GBR 4:4:4 36bpp, big-endian - AV_PIX_FMT_GBRP12LE, ///< planar GBR 4:4:4 36bpp, little-endian - AV_PIX_FMT_GBRP14BE, ///< planar GBR 4:4:4 42bpp, big-endian - AV_PIX_FMT_GBRP14LE, ///< planar GBR 4:4:4 42bpp, little-endian - AV_PIX_FMT_GBRAP, ///< planar GBRA 4:4:4:4 32bpp - AV_PIX_FMT_GBRAP16BE, ///< planar GBRA 4:4:4:4 64bpp, big-endian - AV_PIX_FMT_GBRAP16LE, ///< planar GBRA 4:4:4:4 64bpp, little-endian - AV_PIX_FMT_YUVJ411P, ///< planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) full scale (JPEG), deprecated in favor of PIX_FMT_YUV411P and setting color_range - AV_PIX_FMT_NB, ///< number of pixel formats, DO NOT USE THIS if you want to link with shared libav* because the number of formats might differ between versions - -#if FF_API_PIX_FMT -#include "old_pix_fmts.h" -#endif -}; - -#if AV_HAVE_INCOMPATIBLE_LIBAV_ABI -#define AV_PIX_FMT_YUVA422P AV_PIX_FMT_YUVA422P_LIBAV -#define AV_PIX_FMT_YUVA444P AV_PIX_FMT_YUVA444P_LIBAV -#endif - - -#define AV_PIX_FMT_Y400A AV_PIX_FMT_GRAY8A -#define AV_PIX_FMT_GBR24P AV_PIX_FMT_GBRP - -#if AV_HAVE_BIGENDIAN -# define AV_PIX_FMT_NE(be, le) AV_PIX_FMT_##be -#else -# define AV_PIX_FMT_NE(be, le) AV_PIX_FMT_##le -#endif - -#define AV_PIX_FMT_RGB32 AV_PIX_FMT_NE(ARGB, BGRA) -#define AV_PIX_FMT_RGB32_1 AV_PIX_FMT_NE(RGBA, ABGR) -#define AV_PIX_FMT_BGR32 AV_PIX_FMT_NE(ABGR, RGBA) -#define AV_PIX_FMT_BGR32_1 AV_PIX_FMT_NE(BGRA, ARGB) -#define AV_PIX_FMT_0RGB32 AV_PIX_FMT_NE(0RGB, BGR0) -#define AV_PIX_FMT_0BGR32 
AV_PIX_FMT_NE(0BGR, RGB0) - -#define AV_PIX_FMT_GRAY16 AV_PIX_FMT_NE(GRAY16BE, GRAY16LE) -#define AV_PIX_FMT_RGB48 AV_PIX_FMT_NE(RGB48BE, RGB48LE) -#define AV_PIX_FMT_RGB565 AV_PIX_FMT_NE(RGB565BE, RGB565LE) -#define AV_PIX_FMT_RGB555 AV_PIX_FMT_NE(RGB555BE, RGB555LE) -#define AV_PIX_FMT_RGB444 AV_PIX_FMT_NE(RGB444BE, RGB444LE) -#define AV_PIX_FMT_BGR48 AV_PIX_FMT_NE(BGR48BE, BGR48LE) -#define AV_PIX_FMT_BGR565 AV_PIX_FMT_NE(BGR565BE, BGR565LE) -#define AV_PIX_FMT_BGR555 AV_PIX_FMT_NE(BGR555BE, BGR555LE) -#define AV_PIX_FMT_BGR444 AV_PIX_FMT_NE(BGR444BE, BGR444LE) - -#define AV_PIX_FMT_YUV420P9 AV_PIX_FMT_NE(YUV420P9BE , YUV420P9LE) -#define AV_PIX_FMT_YUV422P9 AV_PIX_FMT_NE(YUV422P9BE , YUV422P9LE) -#define AV_PIX_FMT_YUV444P9 AV_PIX_FMT_NE(YUV444P9BE , YUV444P9LE) -#define AV_PIX_FMT_YUV420P10 AV_PIX_FMT_NE(YUV420P10BE, YUV420P10LE) -#define AV_PIX_FMT_YUV422P10 AV_PIX_FMT_NE(YUV422P10BE, YUV422P10LE) -#define AV_PIX_FMT_YUV444P10 AV_PIX_FMT_NE(YUV444P10BE, YUV444P10LE) -#define AV_PIX_FMT_YUV420P12 AV_PIX_FMT_NE(YUV420P12BE, YUV420P12LE) -#define AV_PIX_FMT_YUV422P12 AV_PIX_FMT_NE(YUV422P12BE, YUV422P12LE) -#define AV_PIX_FMT_YUV444P12 AV_PIX_FMT_NE(YUV444P12BE, YUV444P12LE) -#define AV_PIX_FMT_YUV420P14 AV_PIX_FMT_NE(YUV420P14BE, YUV420P14LE) -#define AV_PIX_FMT_YUV422P14 AV_PIX_FMT_NE(YUV422P14BE, YUV422P14LE) -#define AV_PIX_FMT_YUV444P14 AV_PIX_FMT_NE(YUV444P14BE, YUV444P14LE) -#define AV_PIX_FMT_YUV420P16 AV_PIX_FMT_NE(YUV420P16BE, YUV420P16LE) -#define AV_PIX_FMT_YUV422P16 AV_PIX_FMT_NE(YUV422P16BE, YUV422P16LE) -#define AV_PIX_FMT_YUV444P16 AV_PIX_FMT_NE(YUV444P16BE, YUV444P16LE) - -#define AV_PIX_FMT_RGBA64 AV_PIX_FMT_NE(RGBA64BE, RGBA64LE) -#define AV_PIX_FMT_BGRA64 AV_PIX_FMT_NE(BGRA64BE, BGRA64LE) -#define AV_PIX_FMT_GBRP9 AV_PIX_FMT_NE(GBRP9BE , GBRP9LE) -#define AV_PIX_FMT_GBRP10 AV_PIX_FMT_NE(GBRP10BE, GBRP10LE) -#define AV_PIX_FMT_GBRP12 AV_PIX_FMT_NE(GBRP12BE, GBRP12LE) -#define AV_PIX_FMT_GBRP14 AV_PIX_FMT_NE(GBRP14BE, GBRP14LE) -#define AV_PIX_FMT_GBRP16 AV_PIX_FMT_NE(GBRP16BE, GBRP16LE) - -#define AV_PIX_FMT_YUVA420P9 AV_PIX_FMT_NE(YUVA420P9BE , YUVA420P9LE) -#define AV_PIX_FMT_YUVA422P9 AV_PIX_FMT_NE(YUVA422P9BE , YUVA422P9LE) -#define AV_PIX_FMT_YUVA444P9 AV_PIX_FMT_NE(YUVA444P9BE , YUVA444P9LE) -#define AV_PIX_FMT_YUVA420P10 AV_PIX_FMT_NE(YUVA420P10BE, YUVA420P10LE) -#define AV_PIX_FMT_YUVA422P10 AV_PIX_FMT_NE(YUVA422P10BE, YUVA422P10LE) -#define AV_PIX_FMT_YUVA444P10 AV_PIX_FMT_NE(YUVA444P10BE, YUVA444P10LE) -#define AV_PIX_FMT_YUVA420P16 AV_PIX_FMT_NE(YUVA420P16BE, YUVA420P16LE) -#define AV_PIX_FMT_YUVA422P16 AV_PIX_FMT_NE(YUVA422P16BE, YUVA422P16LE) -#define AV_PIX_FMT_YUVA444P16 AV_PIX_FMT_NE(YUVA444P16BE, YUVA444P16LE) - -#define AV_PIX_FMT_XYZ12 AV_PIX_FMT_NE(XYZ12BE, XYZ12LE) - -#if FF_API_PIX_FMT -#define PixelFormat AVPixelFormat - -#define PIX_FMT_Y400A AV_PIX_FMT_Y400A -#define PIX_FMT_GBR24P AV_PIX_FMT_GBR24P - -#define PIX_FMT_NE(be, le) AV_PIX_FMT_NE(be, le) - -#define PIX_FMT_RGB32 AV_PIX_FMT_RGB32 -#define PIX_FMT_RGB32_1 AV_PIX_FMT_RGB32_1 -#define PIX_FMT_BGR32 AV_PIX_FMT_BGR32 -#define PIX_FMT_BGR32_1 AV_PIX_FMT_BGR32_1 -#define PIX_FMT_0RGB32 AV_PIX_FMT_0RGB32 -#define PIX_FMT_0BGR32 AV_PIX_FMT_0BGR32 - -#define PIX_FMT_GRAY16 AV_PIX_FMT_GRAY16 -#define PIX_FMT_RGB48 AV_PIX_FMT_RGB48 -#define PIX_FMT_RGB565 AV_PIX_FMT_RGB565 -#define PIX_FMT_RGB555 AV_PIX_FMT_RGB555 -#define PIX_FMT_RGB444 AV_PIX_FMT_RGB444 -#define PIX_FMT_BGR48 AV_PIX_FMT_BGR48 -#define PIX_FMT_BGR565 AV_PIX_FMT_BGR565 -#define PIX_FMT_BGR555 AV_PIX_FMT_BGR555 -#define 
PIX_FMT_BGR444 AV_PIX_FMT_BGR444 - -#define PIX_FMT_YUV420P9 AV_PIX_FMT_YUV420P9 -#define PIX_FMT_YUV422P9 AV_PIX_FMT_YUV422P9 -#define PIX_FMT_YUV444P9 AV_PIX_FMT_YUV444P9 -#define PIX_FMT_YUV420P10 AV_PIX_FMT_YUV420P10 -#define PIX_FMT_YUV422P10 AV_PIX_FMT_YUV422P10 -#define PIX_FMT_YUV444P10 AV_PIX_FMT_YUV444P10 -#define PIX_FMT_YUV420P12 AV_PIX_FMT_YUV420P12 -#define PIX_FMT_YUV422P12 AV_PIX_FMT_YUV422P12 -#define PIX_FMT_YUV444P12 AV_PIX_FMT_YUV444P12 -#define PIX_FMT_YUV420P14 AV_PIX_FMT_YUV420P14 -#define PIX_FMT_YUV422P14 AV_PIX_FMT_YUV422P14 -#define PIX_FMT_YUV444P14 AV_PIX_FMT_YUV444P14 -#define PIX_FMT_YUV420P16 AV_PIX_FMT_YUV420P16 -#define PIX_FMT_YUV422P16 AV_PIX_FMT_YUV422P16 -#define PIX_FMT_YUV444P16 AV_PIX_FMT_YUV444P16 - -#define PIX_FMT_RGBA64 AV_PIX_FMT_RGBA64 -#define PIX_FMT_BGRA64 AV_PIX_FMT_BGRA64 -#define PIX_FMT_GBRP9 AV_PIX_FMT_GBRP9 -#define PIX_FMT_GBRP10 AV_PIX_FMT_GBRP10 -#define PIX_FMT_GBRP12 AV_PIX_FMT_GBRP12 -#define PIX_FMT_GBRP14 AV_PIX_FMT_GBRP14 -#define PIX_FMT_GBRP16 AV_PIX_FMT_GBRP16 -#endif - -#endif /* AVUTIL_PIXFMT_H */ diff --git a/3rdparty/include/ffmpeg_/libavutil/random_seed.h b/3rdparty/include/ffmpeg_/libavutil/random_seed.h deleted file mode 100644 index 0462a048e0..0000000000 --- a/3rdparty/include/ffmpeg_/libavutil/random_seed.h +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Copyright (c) 2009 Baptiste Coudurier - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#ifndef AVUTIL_RANDOM_SEED_H -#define AVUTIL_RANDOM_SEED_H - -#include -/** - * @addtogroup lavu_crypto - * @{ - */ - -/** - * Get a seed to use in conjunction with random functions. - * This function tries to provide a good seed at a best effort bases. - * Its possible to call this function multiple times if more bits are needed. - * It can be quite slow, which is why it should only be used as seed for a faster - * PRNG. The quality of the seed depends on the platform. - */ -uint32_t av_get_random_seed(void); - -/** - * @} - */ - -#endif /* AVUTIL_RANDOM_SEED_H */ diff --git a/3rdparty/include/ffmpeg_/libavutil/rational.h b/3rdparty/include/ffmpeg_/libavutil/rational.h deleted file mode 100644 index 417e29e577..0000000000 --- a/3rdparty/include/ffmpeg_/libavutil/rational.h +++ /dev/null @@ -1,155 +0,0 @@ -/* - * rational numbers - * Copyright (c) 2003 Michael Niedermayer - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
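The random_seed.h header removed above documents av_get_random_seed() as a slow, best-effort seed source meant only to seed a faster PRNG. A minimal usage sketch (illustrative only, not part of this patch; it assumes libavutil's AVLFG generator from lfg.h, which is not among the headers quoted here):

    #include <stdio.h>
    #include <stdint.h>
    #include <libavutil/random_seed.h>
    #include <libavutil/lfg.h>

    int main(void)
    {
        /* av_get_random_seed() can be slow, so call it once and use the
         * result to seed a fast PRNG such as the lagged-Fibonacci AVLFG. */
        uint32_t seed = av_get_random_seed();

        AVLFG prng;
        av_lfg_init(&prng, seed);

        printf("seed=%u first=%u\n", (unsigned)seed, av_lfg_get(&prng));
        return 0;
    }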
See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -/** - * @file - * rational numbers - * @author Michael Niedermayer - */ - -#ifndef AVUTIL_RATIONAL_H -#define AVUTIL_RATIONAL_H - -#include -#include -#include "attributes.h" - -/** - * @addtogroup lavu_math - * @{ - */ - -/** - * rational number numerator/denominator - */ -typedef struct AVRational{ - int num; ///< numerator - int den; ///< denominator -} AVRational; - -/** - * Compare two rationals. - * @param a first rational - * @param b second rational - * @return 0 if a==b, 1 if a>b, -1 if a>63)|1; - else if(b.den && a.den) return 0; - else if(a.num && b.num) return (a.num>>31) - (b.num>>31); - else return INT_MIN; -} - -/** - * Convert rational to double. - * @param a rational to convert - * @return (double) a - */ -static inline double av_q2d(AVRational a){ - return a.num / (double) a.den; -} - -/** - * Reduce a fraction. - * This is useful for framerate calculations. - * @param dst_num destination numerator - * @param dst_den destination denominator - * @param num source numerator - * @param den source denominator - * @param max the maximum allowed for dst_num & dst_den - * @return 1 if exact, 0 otherwise - */ -int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max); - -/** - * Multiply two rationals. - * @param b first rational - * @param c second rational - * @return b*c - */ -AVRational av_mul_q(AVRational b, AVRational c) av_const; - -/** - * Divide one rational by another. - * @param b first rational - * @param c second rational - * @return b/c - */ -AVRational av_div_q(AVRational b, AVRational c) av_const; - -/** - * Add two rationals. - * @param b first rational - * @param c second rational - * @return b+c - */ -AVRational av_add_q(AVRational b, AVRational c) av_const; - -/** - * Subtract one rational from another. - * @param b first rational - * @param c second rational - * @return b-c - */ -AVRational av_sub_q(AVRational b, AVRational c) av_const; - -/** - * Invert a rational. - * @param q value - * @return 1 / q - */ -static av_always_inline AVRational av_inv_q(AVRational q) -{ - AVRational r = { q.den, q.num }; - return r; -} - -/** - * Convert a double precision floating point number to a rational. - * inf is expressed as {1,0} or {-1,0} depending on the sign. - * - * @param d double to convert - * @param max the maximum allowed numerator and denominator - * @return (AVRational) d - */ -AVRational av_d2q(double d, int max) av_const; - -/** - * @return 1 if q1 is nearer to q than q2, -1 if q2 is nearer - * than q1, 0 if they have the same distance. - */ -int av_nearer_q(AVRational q, AVRational q1, AVRational q2); - -/** - * Find the nearest value in q_list to q. - * @param q_list an array of rationals terminated by {0, 0} - * @return the index of the nearest value found in the array - */ -int av_find_nearest_q_idx(AVRational q, const AVRational* q_list); - -/** - * @} - */ - -#endif /* AVUTIL_RATIONAL_H */ diff --git a/3rdparty/include/ffmpeg_/libavutil/ripemd.h b/3rdparty/include/ffmpeg_/libavutil/ripemd.h deleted file mode 100644 index 7b0c8bc89c..0000000000 --- a/3rdparty/include/ffmpeg_/libavutil/ripemd.h +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Copyright (C) 2007 Michael Niedermayer - * Copyright (C) 2013 James Almer - * - * This file is part of FFmpeg. 
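A short sketch of the AVRational helpers declared in the rational.h header removed above (illustrative only; the 30000/1001 frame rate is just an example value):

    #include <stdio.h>
    #include <libavutil/rational.h>

    int main(void)
    {
        AVRational fps = { 30000, 1001 };        /* exact NTSC rate, ~29.97 fps */
        AVRational frame_dur = av_inv_q(fps);    /* 1/fps = duration of one frame */

        printf("fps = %d/%d (%.5f), frame duration = %.6f s\n",
               fps.num, fps.den, av_q2d(fps), av_q2d(frame_dur));

        /* av_d2q() turns a double back into a bounded rational and
         * av_cmp_q() compares two rationals exactly, without rounding. */
        AVRational approx = av_d2q(29.97, 100000);
        printf("29.97 ~ %d/%d, cmp with 30000/1001: %d\n",
               approx.num, approx.den, av_cmp_q(approx, fps));
        return 0;
    }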
- * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#ifndef AVUTIL_RIPEMD_H -#define AVUTIL_RIPEMD_H - -#include - -#include "attributes.h" -#include "version.h" - -/** - * @defgroup lavu_ripemd RIPEMD - * @ingroup lavu_crypto - * @{ - */ - -extern const int av_ripemd_size; - -struct AVRIPEMD; - -/** - * Allocate an AVRIPEMD context. - */ -struct AVRIPEMD *av_ripemd_alloc(void); - -/** - * Initialize RIPEMD hashing. - * - * @param context pointer to the function context (of size av_ripemd_size) - * @param bits number of bits in digest (128, 160, 256 or 320 bits) - * @return zero if initialization succeeded, -1 otherwise - */ -int av_ripemd_init(struct AVRIPEMD* context, int bits); - -/** - * Update hash value. - * - * @param context hash function context - * @param data input data to update hash with - * @param len input data length - */ -void av_ripemd_update(struct AVRIPEMD* context, const uint8_t* data, unsigned int len); - -/** - * Finish hashing and output digest value. - * - * @param context hash function context - * @param digest buffer where output digest value is stored - */ -void av_ripemd_final(struct AVRIPEMD* context, uint8_t *digest); - -/** - * @} - */ - -#endif /* AVUTIL_RIPEMD_H */ diff --git a/3rdparty/include/ffmpeg_/libavutil/samplefmt.h b/3rdparty/include/ffmpeg_/libavutil/samplefmt.h deleted file mode 100644 index db17d43bcf..0000000000 --- a/3rdparty/include/ffmpeg_/libavutil/samplefmt.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#ifndef AVUTIL_SAMPLEFMT_H -#define AVUTIL_SAMPLEFMT_H - -#include - -#include "avutil.h" -#include "attributes.h" - -/** - * Audio Sample Formats - * - * @par - * The data described by the sample format is always in native-endian order. - * Sample values can be expressed by native C types, hence the lack of a signed - * 24-bit sample format even though it is a common raw audio data format. - * - * @par - * The floating-point formats are based on full volume being in the range - * [-1.0, 1.0]. Any values outside this range are beyond full volume level. 
- * - * @par - * The data layout as used in av_samples_fill_arrays() and elsewhere in FFmpeg - * (such as AVFrame in libavcodec) is as follows: - * - * For planar sample formats, each audio channel is in a separate data plane, - * and linesize is the buffer size, in bytes, for a single plane. All data - * planes must be the same size. For packed sample formats, only the first data - * plane is used, and samples for each channel are interleaved. In this case, - * linesize is the buffer size, in bytes, for the 1 plane. - */ -enum AVSampleFormat { - AV_SAMPLE_FMT_NONE = -1, - AV_SAMPLE_FMT_U8, ///< unsigned 8 bits - AV_SAMPLE_FMT_S16, ///< signed 16 bits - AV_SAMPLE_FMT_S32, ///< signed 32 bits - AV_SAMPLE_FMT_FLT, ///< float - AV_SAMPLE_FMT_DBL, ///< double - - AV_SAMPLE_FMT_U8P, ///< unsigned 8 bits, planar - AV_SAMPLE_FMT_S16P, ///< signed 16 bits, planar - AV_SAMPLE_FMT_S32P, ///< signed 32 bits, planar - AV_SAMPLE_FMT_FLTP, ///< float, planar - AV_SAMPLE_FMT_DBLP, ///< double, planar - - AV_SAMPLE_FMT_NB ///< Number of sample formats. DO NOT USE if linking dynamically -}; - -/** - * Return the name of sample_fmt, or NULL if sample_fmt is not - * recognized. - */ -const char *av_get_sample_fmt_name(enum AVSampleFormat sample_fmt); - -/** - * Return a sample format corresponding to name, or AV_SAMPLE_FMT_NONE - * on error. - */ -enum AVSampleFormat av_get_sample_fmt(const char *name); - -/** - * Return the planar<->packed alternative form of the given sample format, or - * AV_SAMPLE_FMT_NONE on error. If the passed sample_fmt is already in the - * requested planar/packed format, the format returned is the same as the - * input. - */ -enum AVSampleFormat av_get_alt_sample_fmt(enum AVSampleFormat sample_fmt, int planar); - -/** - * Get the packed alternative form of the given sample format. - * - * If the passed sample_fmt is already in packed format, the format returned is - * the same as the input. - * - * @return the packed alternative form of the given sample format or - AV_SAMPLE_FMT_NONE on error. - */ -enum AVSampleFormat av_get_packed_sample_fmt(enum AVSampleFormat sample_fmt); - -/** - * Get the planar alternative form of the given sample format. - * - * If the passed sample_fmt is already in planar format, the format returned is - * the same as the input. - * - * @return the planar alternative form of the given sample format or - AV_SAMPLE_FMT_NONE on error. - */ -enum AVSampleFormat av_get_planar_sample_fmt(enum AVSampleFormat sample_fmt); - -/** - * Generate a string corresponding to the sample format with - * sample_fmt, or a header if sample_fmt is negative. - * - * @param buf the buffer where to write the string - * @param buf_size the size of buf - * @param sample_fmt the number of the sample format to print the - * corresponding info string, or a negative value to print the - * corresponding header. - * @return the pointer to the filled buffer or NULL if sample_fmt is - * unknown or in case of other errors - */ -char *av_get_sample_fmt_string(char *buf, int buf_size, enum AVSampleFormat sample_fmt); - -#if FF_API_GET_BITS_PER_SAMPLE_FMT -/** - * @deprecated Use av_get_bytes_per_sample() instead. - */ -attribute_deprecated -int av_get_bits_per_sample_fmt(enum AVSampleFormat sample_fmt); -#endif - -/** - * Return number of bytes per sample. 
- * - * @param sample_fmt the sample format - * @return number of bytes per sample or zero if unknown for the given - * sample format - */ -int av_get_bytes_per_sample(enum AVSampleFormat sample_fmt); - -/** - * Check if the sample format is planar. - * - * @param sample_fmt the sample format to inspect - * @return 1 if the sample format is planar, 0 if it is interleaved - */ -int av_sample_fmt_is_planar(enum AVSampleFormat sample_fmt); - -/** - * Get the required buffer size for the given audio parameters. - * - * @param[out] linesize calculated linesize, may be NULL - * @param nb_channels the number of channels - * @param nb_samples the number of samples in a single channel - * @param sample_fmt the sample format - * @param align buffer size alignment (0 = default, 1 = no alignment) - * @return required buffer size, or negative error code on failure - */ -int av_samples_get_buffer_size(int *linesize, int nb_channels, int nb_samples, - enum AVSampleFormat sample_fmt, int align); - -/** - * Fill plane data pointers and linesize for samples with sample - * format sample_fmt. - * - * The audio_data array is filled with the pointers to the samples data planes: - * for planar, set the start point of each channel's data within the buffer, - * for packed, set the start point of the entire buffer only. - * - * The value pointed to by linesize is set to the aligned size of each - * channel's data buffer for planar layout, or to the aligned size of the - * buffer for all channels for packed layout. - * - * The buffer in buf must be big enough to contain all the samples - * (use av_samples_get_buffer_size() to compute its minimum size), - * otherwise the audio_data pointers will point to invalid data. - * - * @see enum AVSampleFormat - * The documentation for AVSampleFormat describes the data layout. - * - * @param[out] audio_data array to be filled with the pointer for each channel - * @param[out] linesize calculated linesize, may be NULL - * @param buf the pointer to a buffer containing the samples - * @param nb_channels the number of channels - * @param nb_samples the number of samples in a single channel - * @param sample_fmt the sample format - * @param align buffer size alignment (0 = default, 1 = no alignment) - * @return >=0 on success or a negative error code on failure - * @todo return minimum size in bytes required for the buffer in case - * of success at the next bump - */ -int av_samples_fill_arrays(uint8_t **audio_data, int *linesize, - const uint8_t *buf, - int nb_channels, int nb_samples, - enum AVSampleFormat sample_fmt, int align); - -/** - * Allocate a samples buffer for nb_samples samples, and fill data pointers and - * linesize accordingly. - * The allocated samples buffer can be freed by using av_freep(&audio_data[0]) - * Allocated data will be initialized to silence. - * - * @see enum AVSampleFormat - * The documentation for AVSampleFormat describes the data layout. 
- * - * @param[out] audio_data array to be filled with the pointer for each channel - * @param[out] linesize aligned size for audio buffer(s), may be NULL - * @param nb_channels number of audio channels - * @param nb_samples number of samples per channel - * @param align buffer size alignment (0 = default, 1 = no alignment) - * @return >=0 on success or a negative error code on failure - * @todo return the size of the allocated buffer in case of success at the next bump - * @see av_samples_fill_arrays() - * @see av_samples_alloc_array_and_samples() - */ -int av_samples_alloc(uint8_t **audio_data, int *linesize, int nb_channels, - int nb_samples, enum AVSampleFormat sample_fmt, int align); - -/** - * Allocate a data pointers array, samples buffer for nb_samples - * samples, and fill data pointers and linesize accordingly. - * - * This is the same as av_samples_alloc(), but also allocates the data - * pointers array. - * - * @see av_samples_alloc() - */ -int av_samples_alloc_array_and_samples(uint8_t ***audio_data, int *linesize, int nb_channels, - int nb_samples, enum AVSampleFormat sample_fmt, int align); - -/** - * Copy samples from src to dst. - * - * @param dst destination array of pointers to data planes - * @param src source array of pointers to data planes - * @param dst_offset offset in samples at which the data will be written to dst - * @param src_offset offset in samples at which the data will be read from src - * @param nb_samples number of samples to be copied - * @param nb_channels number of audio channels - * @param sample_fmt audio sample format - */ -int av_samples_copy(uint8_t **dst, uint8_t * const *src, int dst_offset, - int src_offset, int nb_samples, int nb_channels, - enum AVSampleFormat sample_fmt); - -/** - * Fill an audio buffer with silence. - * - * @param audio_data array of pointers to data planes - * @param offset offset in samples at which to start filling - * @param nb_samples number of samples to fill - * @param nb_channels number of audio channels - * @param sample_fmt audio sample format - */ -int av_samples_set_silence(uint8_t **audio_data, int offset, int nb_samples, - int nb_channels, enum AVSampleFormat sample_fmt); - -#endif /* AVUTIL_SAMPLEFMT_H */ diff --git a/3rdparty/include/ffmpeg_/libavutil/sha.h b/3rdparty/include/ffmpeg_/libavutil/sha.h deleted file mode 100644 index bf4377e51b..0000000000 --- a/3rdparty/include/ffmpeg_/libavutil/sha.h +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Copyright (C) 2007 Michael Niedermayer - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#ifndef AVUTIL_SHA_H -#define AVUTIL_SHA_H - -#include - -#include "attributes.h" -#include "version.h" - -/** - * @defgroup lavu_sha SHA - * @ingroup lavu_crypto - * @{ - */ - -extern const int av_sha_size; - -struct AVSHA; - -/** - * Allocate an AVSHA context. 
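The samplefmt.h header removed above documents both the format-introspection helpers and the sample-buffer allocators; a hedged sketch of typical use, with example channel and sample counts:

    #include <stdio.h>
    #include <stdint.h>
    #include <libavutil/samplefmt.h>
    #include <libavutil/mem.h>

    int main(void)
    {
        enum AVSampleFormat fmt = AV_SAMPLE_FMT_FLTP;   /* planar float */
        uint8_t **data = NULL;
        int linesize = 0;

        printf("%s: %d bytes/sample, planar=%d\n",
               av_get_sample_fmt_name(fmt),
               av_get_bytes_per_sample(fmt),
               av_sample_fmt_is_planar(fmt));

        /* 2 channels x 1024 samples; planes are initialized to silence. */
        if (av_samples_alloc_array_and_samples(&data, &linesize, 2, 1024, fmt, 0) < 0)
            return 1;
        printf("linesize per plane: %d bytes\n", linesize);

        av_freep(&data[0]);   /* the sample buffer itself */
        av_freep(&data);      /* the plane-pointer array */
        return 0;
    }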
- */ -struct AVSHA *av_sha_alloc(void); - -/** - * Initialize SHA-1 or SHA-2 hashing. - * - * @param context pointer to the function context (of size av_sha_size) - * @param bits number of bits in digest (SHA-1 - 160 bits, SHA-2 224 or 256 bits) - * @return zero if initialization succeeded, -1 otherwise - */ -int av_sha_init(struct AVSHA* context, int bits); - -/** - * Update hash value. - * - * @param context hash function context - * @param data input data to update hash with - * @param len input data length - */ -void av_sha_update(struct AVSHA* context, const uint8_t* data, unsigned int len); - -/** - * Finish hashing and output digest value. - * - * @param context hash function context - * @param digest buffer where output digest value is stored - */ -void av_sha_final(struct AVSHA* context, uint8_t *digest); - -/** - * @} - */ - -#endif /* AVUTIL_SHA_H */ diff --git a/3rdparty/include/ffmpeg_/libavutil/sha512.h b/3rdparty/include/ffmpeg_/libavutil/sha512.h deleted file mode 100644 index 7b08701477..0000000000 --- a/3rdparty/include/ffmpeg_/libavutil/sha512.h +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Copyright (C) 2007 Michael Niedermayer - * Copyright (C) 2013 James Almer - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#ifndef AVUTIL_SHA512_H -#define AVUTIL_SHA512_H - -#include - -#include "attributes.h" -#include "version.h" - -/** - * @defgroup lavu_sha512 SHA512 - * @ingroup lavu_crypto - * @{ - */ - -extern const int av_sha512_size; - -struct AVSHA512; - -/** - * Allocate an AVSHA512 context. - */ -struct AVSHA512 *av_sha512_alloc(void); - -/** - * Initialize SHA-2 512 hashing. - * - * @param context pointer to the function context (of size av_sha512_size) - * @param bits number of bits in digest (224, 256, 384 or 512 bits) - * @return zero if initialization succeeded, -1 otherwise - */ -int av_sha512_init(struct AVSHA512* context, int bits); - -/** - * Update hash value. - * - * @param context hash function context - * @param data input data to update hash with - * @param len input data length - */ -void av_sha512_update(struct AVSHA512* context, const uint8_t* data, unsigned int len); - -/** - * Finish hashing and output digest value. - * - * @param context hash function context - * @param digest buffer where output digest value is stored - */ -void av_sha512_final(struct AVSHA512* context, uint8_t *digest); - -/** - * @} - */ - -#endif /* AVUTIL_SHA512_H */ diff --git a/3rdparty/include/ffmpeg_/libavutil/time.h b/3rdparty/include/ffmpeg_/libavutil/time.h deleted file mode 100644 index 90eb436949..0000000000 --- a/3rdparty/include/ffmpeg_/libavutil/time.h +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Copyright (c) 2000-2003 Fabrice Bellard - * - * This file is part of FFmpeg. 
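Both sha.h and sha512.h removed above follow the same alloc/init/update/final pattern; a minimal SHA-256 sketch (illustrative only, error handling trimmed):

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>
    #include <libavutil/sha.h>
    #include <libavutil/mem.h>

    int main(void)
    {
        static const char msg[] = "hello";
        uint8_t digest[32];                    /* SHA-256 digest is 32 bytes */

        struct AVSHA *sha = av_sha_alloc();    /* opaque context, heap allocated */
        if (!sha)
            return 1;
        av_sha_init(sha, 256);                 /* 160, 224 or 256 bits */
        av_sha_update(sha, (const uint8_t *)msg, strlen(msg));
        av_sha_final(sha, digest);
        av_free(sha);

        for (int i = 0; i < 32; i++)
            printf("%02x", digest[i]);
        printf("\n");
        return 0;
    }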
- * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#ifndef AVUTIL_TIME_H -#define AVUTIL_TIME_H - -#include - -/** - * Get the current time in microseconds. - */ -int64_t av_gettime(void); - -/** - * Sleep for a period of time. Although the duration is expressed in - * microseconds, the actual delay may be rounded to the precision of the - * system timer. - * - * @param usec Number of microseconds to sleep. - * @return zero on success or (negative) error code. - */ -int av_usleep(unsigned usec); - -#endif /* AVUTIL_TIME_H */ diff --git a/3rdparty/include/ffmpeg_/libavutil/timecode.h b/3rdparty/include/ffmpeg_/libavutil/timecode.h deleted file mode 100644 index 56e3975fd8..0000000000 --- a/3rdparty/include/ffmpeg_/libavutil/timecode.h +++ /dev/null @@ -1,140 +0,0 @@ -/* - * Copyright (c) 2006 Smartjog S.A.S, Baptiste Coudurier - * Copyright (c) 2011-2012 Smartjog S.A.S, Clément Bœsch - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -/** - * @file - * Timecode helpers header - */ - -#ifndef AVUTIL_TIMECODE_H -#define AVUTIL_TIMECODE_H - -#include -#include "rational.h" - -#define AV_TIMECODE_STR_SIZE 16 - -enum AVTimecodeFlag { - AV_TIMECODE_FLAG_DROPFRAME = 1<<0, ///< timecode is drop frame - AV_TIMECODE_FLAG_24HOURSMAX = 1<<1, ///< timecode wraps after 24 hours - AV_TIMECODE_FLAG_ALLOWNEGATIVE = 1<<2, ///< negative time values are allowed -}; - -typedef struct { - int start; ///< timecode frame start (first base frame number) - uint32_t flags; ///< flags such as drop frame, +24 hours support, ... - AVRational rate; ///< frame rate in rational form - unsigned fps; ///< frame per second; must be consistent with the rate field -} AVTimecode; - -/** - * Adjust frame number for NTSC drop frame time code. - * - * @param framenum frame number to adjust - * @param fps frame per second, 30 or 60 - * @return adjusted frame number - * @warning adjustment is only valid in NTSC 29.97 and 59.94 - */ -int av_timecode_adjust_ntsc_framenum2(int framenum, int fps); - -/** - * Convert frame number to SMPTE 12M binary representation. 
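time.h above declares only two functions; a small timing sketch (values are examples, and the sleep precision is system dependent as the header warns):

    #include <stdio.h>
    #include <stdint.h>
    #include <libavutil/time.h>

    int main(void)
    {
        int64_t t0 = av_gettime();     /* wall-clock time in microseconds */
        av_usleep(250000);             /* request a ~250 ms sleep */
        int64_t t1 = av_gettime();

        printf("slept for about %lld us\n", (long long)(t1 - t0));
        return 0;
    }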
- * - * @param tc timecode data correctly initialized - * @param framenum frame number - * @return the SMPTE binary representation - * - * @note Frame number adjustment is automatically done in case of drop timecode, - * you do NOT have to call av_timecode_adjust_ntsc_framenum2(). - * @note The frame number is relative to tc->start. - * @note Color frame (CF), binary group flags (BGF) and biphase mark polarity - * correction (PC) bits are set to zero. - */ -uint32_t av_timecode_get_smpte_from_framenum(const AVTimecode *tc, int framenum); - -/** - * Load timecode string in buf. - * - * @param buf destination buffer, must be at least AV_TIMECODE_STR_SIZE long - * @param tc timecode data correctly initialized - * @param framenum frame number - * @return the buf parameter - * - * @note Timecode representation can be a negative timecode and have more than - * 24 hours, but will only be honored if the flags are correctly set. - * @note The frame number is relative to tc->start. - */ -char *av_timecode_make_string(const AVTimecode *tc, char *buf, int framenum); - -/** - * Get the timecode string from the SMPTE timecode format. - * - * @param buf destination buffer, must be at least AV_TIMECODE_STR_SIZE long - * @param tcsmpte the 32-bit SMPTE timecode - * @param prevent_df prevent the use of a drop flag when it is known the DF bit - * is arbitrary - * @return the buf parameter - */ -char *av_timecode_make_smpte_tc_string(char *buf, uint32_t tcsmpte, int prevent_df); - -/** - * Get the timecode string from the 25-bit timecode format (MPEG GOP format). - * - * @param buf destination buffer, must be at least AV_TIMECODE_STR_SIZE long - * @param tc25bit the 25-bits timecode - * @return the buf parameter - */ -char *av_timecode_make_mpeg_tc_string(char *buf, uint32_t tc25bit); - -/** - * Init a timecode struct with the passed parameters. - * - * @param log_ctx a pointer to an arbitrary struct of which the first field - * is a pointer to an AVClass struct (used for av_log) - * @param tc pointer to an allocated AVTimecode - * @param rate frame rate in rational form - * @param flags miscellaneous flags such as drop frame, +24 hours, ... - * (see AVTimecodeFlag) - * @param frame_start the first frame number - * @return 0 on success, AVERROR otherwise - */ -int av_timecode_init(AVTimecode *tc, AVRational rate, int flags, int frame_start, void *log_ctx); - -/** - * Parse timecode representation (hh:mm:ss[:;.]ff). - * - * @param log_ctx a pointer to an arbitrary struct of which the first field is a - * pointer to an AVClass struct (used for av_log). - * @param tc pointer to an allocated AVTimecode - * @param rate frame rate in rational form - * @param str timecode string which will determine the frame start - * @return 0 on success, AVERROR otherwise - */ -int av_timecode_init_from_string(AVTimecode *tc, AVRational rate, const char *str, void *log_ctx); - -/** - * Check if the timecode feature is available for the given frame rate - * - * @return 0 if supported, <0 otherwise - */ -int av_timecode_check_frame_rate(AVRational rate); - -#endif /* AVUTIL_TIMECODE_H */ diff --git a/3rdparty/include/ffmpeg_/libavutil/timestamp.h b/3rdparty/include/ffmpeg_/libavutil/timestamp.h deleted file mode 100644 index f63a08c579..0000000000 --- a/3rdparty/include/ffmpeg_/libavutil/timestamp.h +++ /dev/null @@ -1,74 +0,0 @@ -/* - * This file is part of FFmpeg. 
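A hedged sketch of the timecode API removed above: build a 29.97 fps drop-frame timecode and format one frame number (log_ctx is left NULL here, which the av_log machinery accepts):

    #include <stdio.h>
    #include <libavutil/timecode.h>

    int main(void)
    {
        AVTimecode tc;
        char buf[AV_TIMECODE_STR_SIZE];
        AVRational rate = { 30000, 1001 };     /* 29.97 fps */

        if (av_timecode_init(&tc, rate, AV_TIMECODE_FLAG_DROPFRAME, 0, NULL) < 0)
            return 1;

        /* Drop-frame adjustment happens inside av_timecode_make_string(). */
        printf("frame 1800 -> %s\n", av_timecode_make_string(&tc, buf, 1800));
        return 0;
    }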
- * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -/** - * @file - * timestamp utils, mostly useful for debugging/logging purposes - */ - -#ifndef AVUTIL_TIMESTAMP_H -#define AVUTIL_TIMESTAMP_H - -#include "common.h" - -#define AV_TS_MAX_STRING_SIZE 32 - -/** - * Fill the provided buffer with a string containing a timestamp - * representation. - * - * @param buf a buffer with size in bytes of at least AV_TS_MAX_STRING_SIZE - * @param ts the timestamp to represent - * @return the buffer in input - */ -static inline char *av_ts_make_string(char *buf, int64_t ts) -{ - if (ts == AV_NOPTS_VALUE) snprintf(buf, AV_TS_MAX_STRING_SIZE, "NOPTS"); - else snprintf(buf, AV_TS_MAX_STRING_SIZE, "%"PRId64, ts); - return buf; -} - -/** - * Convenience macro, the return value should be used only directly in - * function arguments but never stand-alone. - */ -#define av_ts2str(ts) av_ts_make_string((char[AV_TS_MAX_STRING_SIZE]){0}, ts) - -/** - * Fill the provided buffer with a string containing a timestamp time - * representation. - * - * @param buf a buffer with size in bytes of at least AV_TS_MAX_STRING_SIZE - * @param ts the timestamp to represent - * @param tb the timebase of the timestamp - * @return the buffer in input - */ -static inline char *av_ts_make_time_string(char *buf, int64_t ts, AVRational *tb) -{ - if (ts == AV_NOPTS_VALUE) snprintf(buf, AV_TS_MAX_STRING_SIZE, "NOPTS"); - else snprintf(buf, AV_TS_MAX_STRING_SIZE, "%.6g", av_q2d(*tb) * ts); - return buf; -} - -/** - * Convenience macro, the return value should be used only directly in - * function arguments but never stand-alone. - */ -#define av_ts2timestr(ts, tb) av_ts_make_time_string((char[AV_TS_MAX_STRING_SIZE]){0}, ts, tb) - -#endif /* AVUTIL_TIMESTAMP_H */ diff --git a/3rdparty/include/ffmpeg_/libavutil/version.h b/3rdparty/include/ffmpeg_/libavutil/version.h deleted file mode 100644 index b03aa4a871..0000000000 --- a/3rdparty/include/ffmpeg_/libavutil/version.h +++ /dev/null @@ -1,144 +0,0 @@ -/* - * copyright (c) 2003 Fabrice Bellard - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. 
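The timestamp helpers removed above are purely for logging; a small sketch with an assumed 90 kHz time base (the macros expand to temporary buffers, so use them only directly inside a function call, as the header notes):

    #include <stdio.h>
    #include <stdint.h>
    #include <libavutil/rational.h>
    #include <libavutil/timestamp.h>

    static void log_ts(int64_t pts, AVRational time_base)
    {
        printf("pts:%s pts_time:%s\n",
               av_ts2str(pts), av_ts2timestr(pts, &time_base));
    }

    int main(void)
    {
        AVRational tb = { 1, 90000 };   /* MPEG-TS style 90 kHz time base */
        log_ts(180000, tb);             /* 180000 / 90000 = 2 seconds */
        return 0;
    }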
- * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#ifndef AVUTIL_VERSION_H -#define AVUTIL_VERSION_H - -/** - * @defgroup preproc_misc Preprocessor String Macros - * - * String manipulation macros - * - * @{ - */ - -#define AV_STRINGIFY(s) AV_TOSTRING(s) -#define AV_TOSTRING(s) #s - -#define AV_GLUE(a, b) a ## b -#define AV_JOIN(a, b) AV_GLUE(a, b) - -#define AV_PRAGMA(s) _Pragma(#s) - -/** - * @} - */ - -/** - * @defgroup version_utils Library Version Macros - * - * Useful to check and match library version in order to maintain - * backward compatibility. - * - * @{ - */ - -#define AV_VERSION_INT(a, b, c) (a<<16 | b<<8 | c) -#define AV_VERSION_DOT(a, b, c) a ##.## b ##.## c -#define AV_VERSION(a, b, c) AV_VERSION_DOT(a, b, c) - -/** - * @} - */ - - -/** - * @file - * @ingroup lavu - * Libavutil version macros - */ - -/** - * @defgroup lavu_ver Version and Build diagnostics - * - * Macros and function useful to check at compiletime and at runtime - * which version of libavutil is in use. - * - * @{ - */ - -#define LIBAVUTIL_VERSION_MAJOR 52 -#define LIBAVUTIL_VERSION_MINOR 38 -#define LIBAVUTIL_VERSION_MICRO 100 - -#define LIBAVUTIL_VERSION_INT AV_VERSION_INT(LIBAVUTIL_VERSION_MAJOR, \ - LIBAVUTIL_VERSION_MINOR, \ - LIBAVUTIL_VERSION_MICRO) -#define LIBAVUTIL_VERSION AV_VERSION(LIBAVUTIL_VERSION_MAJOR, \ - LIBAVUTIL_VERSION_MINOR, \ - LIBAVUTIL_VERSION_MICRO) -#define LIBAVUTIL_BUILD LIBAVUTIL_VERSION_INT - -#define LIBAVUTIL_IDENT "Lavu" AV_STRINGIFY(LIBAVUTIL_VERSION) - -/** - * @} - * - * @defgroup depr_guards Deprecation guards - * FF_API_* defines may be placed below to indicate public API that will be - * dropped at a future version bump. The defines themselves are not part of - * the public API and may change, break or disappear at any time. 
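A sketch of how the version macros above are normally used to compare the compile-time headers against the linked library (avutil_version() is declared in libavutil/avutil.h, which is not quoted in this hunk):

    #include <stdio.h>
    #include <libavutil/avutil.h>     /* avutil_version() */
    #include <libavutil/version.h>

    int main(void)
    {
        unsigned rt = avutil_version();            /* version linked at run time */
        printf("built with libavutil %d.%d.%d, running %u.%u.%u\n",
               LIBAVUTIL_VERSION_MAJOR, LIBAVUTIL_VERSION_MINOR,
               LIBAVUTIL_VERSION_MICRO,
               rt >> 16, (rt >> 8) & 0xff, rt & 0xff);

        /* AV_VERSION_INT() packs major/minor/micro into one comparable int. */
        if (LIBAVUTIL_VERSION_INT < AV_VERSION_INT(52, 38, 100))
            printf("headers are older than 52.38.100\n");
        return 0;
    }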
- * - * @{ - */ - -#ifndef FF_API_GET_BITS_PER_SAMPLE_FMT -#define FF_API_GET_BITS_PER_SAMPLE_FMT (LIBAVUTIL_VERSION_MAJOR < 53) -#endif -#ifndef FF_API_FIND_OPT -#define FF_API_FIND_OPT (LIBAVUTIL_VERSION_MAJOR < 53) -#endif -#ifndef FF_API_OLD_AVOPTIONS -#define FF_API_OLD_AVOPTIONS (LIBAVUTIL_VERSION_MAJOR < 53) -#endif -#ifndef FF_API_PIX_FMT -#define FF_API_PIX_FMT (LIBAVUTIL_VERSION_MAJOR < 53) -#endif -#ifndef FF_API_CONTEXT_SIZE -#define FF_API_CONTEXT_SIZE (LIBAVUTIL_VERSION_MAJOR < 53) -#endif -#ifndef FF_API_PIX_FMT_DESC -#define FF_API_PIX_FMT_DESC (LIBAVUTIL_VERSION_MAJOR < 53) -#endif -#ifndef FF_API_AV_REVERSE -#define FF_API_AV_REVERSE (LIBAVUTIL_VERSION_MAJOR < 53) -#endif -#ifndef FF_API_AUDIOCONVERT -#define FF_API_AUDIOCONVERT (LIBAVUTIL_VERSION_MAJOR < 53) -#endif -#ifndef FF_API_CPU_FLAG_MMX2 -#define FF_API_CPU_FLAG_MMX2 (LIBAVUTIL_VERSION_MAJOR < 53) -#endif -#ifndef FF_API_SAMPLES_UTILS_RETURN_ZERO -#define FF_API_SAMPLES_UTILS_RETURN_ZERO (LIBAVUTIL_VERSION_MAJOR < 53) -#endif -#ifndef FF_API_LLS_PRIVATE -#define FF_API_LLS_PRIVATE (LIBAVUTIL_VERSION_MAJOR < 53) -#endif -#ifndef FF_API_AVFRAME_LAVC -#define FF_API_AVFRAME_LAVC (LIBAVUTIL_VERSION_MAJOR < 53) -#endif - -/** - * @} - */ - -#endif /* AVUTIL_VERSION_H */ - diff --git a/3rdparty/include/ffmpeg_/libavutil/xtea.h b/3rdparty/include/ffmpeg_/libavutil/xtea.h deleted file mode 100644 index 0899c92bc8..0000000000 --- a/3rdparty/include/ffmpeg_/libavutil/xtea.h +++ /dev/null @@ -1,62 +0,0 @@ -/* - * A 32-bit implementation of the XTEA algorithm - * Copyright (c) 2012 Samuel Pitoiset - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#ifndef AVUTIL_XTEA_H -#define AVUTIL_XTEA_H - -#include - -/** - * @defgroup lavu_xtea XTEA - * @ingroup lavu_crypto - * @{ - */ - -typedef struct AVXTEA { - uint32_t key[16]; -} AVXTEA; - -/** - * Initialize an AVXTEA context. - * - * @param ctx an AVXTEA context - * @param key a key of 16 bytes used for encryption/decryption - */ -void av_xtea_init(struct AVXTEA *ctx, const uint8_t key[16]); - -/** - * Encrypt or decrypt a buffer using a previously initialized context. 
- * - * @param ctx an AVXTEA context - * @param dst destination array, can be equal to src - * @param src source array, can be equal to dst - * @param count number of 8 byte blocks - * @param iv initialization vector for CBC mode, if NULL then ECB will be used - * @param decrypt 0 for encryption, 1 for decryption - */ -void av_xtea_crypt(struct AVXTEA *ctx, uint8_t *dst, const uint8_t *src, - int count, uint8_t *iv, int decrypt); - -/** - * @} - */ - -#endif /* AVUTIL_XTEA_H */ diff --git a/3rdparty/include/ffmpeg_/libswscale/swscale.h b/3rdparty/include/ffmpeg_/libswscale/swscale.h deleted file mode 100644 index 42702b7aa2..0000000000 --- a/3rdparty/include/ffmpeg_/libswscale/swscale.h +++ /dev/null @@ -1,362 +0,0 @@ -/* - * Copyright (C) 2001-2011 Michael Niedermayer - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#ifndef SWSCALE_SWSCALE_H -#define SWSCALE_SWSCALE_H - -/** - * @file - * @ingroup lsws - * external API header - */ - -/** - * @defgroup lsws Libswscale - * @{ - */ - -#include - -#include "libavutil/avutil.h" -#include "libavutil/log.h" -#include "libavutil/pixfmt.h" -#include "version.h" - -/** - * Return the LIBSWSCALE_VERSION_INT constant. - */ -unsigned swscale_version(void); - -/** - * Return the libswscale build-time configuration. - */ -const char *swscale_configuration(void); - -/** - * Return the libswscale license. - */ -const char *swscale_license(void); - -/* values for the flags, the stuff on the command line is different */ -#define SWS_FAST_BILINEAR 1 -#define SWS_BILINEAR 2 -#define SWS_BICUBIC 4 -#define SWS_X 8 -#define SWS_POINT 0x10 -#define SWS_AREA 0x20 -#define SWS_BICUBLIN 0x40 -#define SWS_GAUSS 0x80 -#define SWS_SINC 0x100 -#define SWS_LANCZOS 0x200 -#define SWS_SPLINE 0x400 - -#define SWS_SRC_V_CHR_DROP_MASK 0x30000 -#define SWS_SRC_V_CHR_DROP_SHIFT 16 - -#define SWS_PARAM_DEFAULT 123456 - -#define SWS_PRINT_INFO 0x1000 - -//the following 3 flags are not completely implemented -//internal chrominace subsampling info -#define SWS_FULL_CHR_H_INT 0x2000 -//input subsampling info -#define SWS_FULL_CHR_H_INP 0x4000 -#define SWS_DIRECT_BGR 0x8000 -#define SWS_ACCURATE_RND 0x40000 -#define SWS_BITEXACT 0x80000 -#define SWS_ERROR_DIFFUSION 0x800000 - -#if FF_API_SWS_CPU_CAPS -/** - * CPU caps are autodetected now, those flags - * are only provided for API compatibility. 
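A roundtrip sketch for the XTEA API removed above, using ECB mode on a single 8-byte block (the all-zero key is only a placeholder):

    #include <stdint.h>
    #include <string.h>
    #include <libavutil/xtea.h>

    int xtea_roundtrip(void)
    {
        static const uint8_t key[16] = { 0 };          /* placeholder key */
        uint8_t plain[8] = "8 bytes";                  /* exactly one block */
        uint8_t enc[8], dec[8];
        struct AVXTEA ctx;

        av_xtea_init(&ctx, key);
        av_xtea_crypt(&ctx, enc, plain, 1, NULL, 0);   /* 1 block, iv=NULL -> ECB, encrypt */
        av_xtea_crypt(&ctx, dec, enc, 1, NULL, 1);     /* decrypt */

        return memcmp(plain, dec, sizeof(plain)) == 0 ? 0 : -1;
    }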
- */ -#define SWS_CPU_CAPS_MMX 0x80000000 -#define SWS_CPU_CAPS_MMXEXT 0x20000000 -#define SWS_CPU_CAPS_MMX2 0x20000000 -#define SWS_CPU_CAPS_3DNOW 0x40000000 -#define SWS_CPU_CAPS_ALTIVEC 0x10000000 -#define SWS_CPU_CAPS_BFIN 0x01000000 -#define SWS_CPU_CAPS_SSE2 0x02000000 -#endif - -#define SWS_MAX_REDUCE_CUTOFF 0.002 - -#define SWS_CS_ITU709 1 -#define SWS_CS_FCC 4 -#define SWS_CS_ITU601 5 -#define SWS_CS_ITU624 5 -#define SWS_CS_SMPTE170M 5 -#define SWS_CS_SMPTE240M 7 -#define SWS_CS_DEFAULT 5 - -/** - * Return a pointer to yuv<->rgb coefficients for the given colorspace - * suitable for sws_setColorspaceDetails(). - * - * @param colorspace One of the SWS_CS_* macros. If invalid, - * SWS_CS_DEFAULT is used. - */ -const int *sws_getCoefficients(int colorspace); - -// when used for filters they must have an odd number of elements -// coeffs cannot be shared between vectors -typedef struct SwsVector { - double *coeff; ///< pointer to the list of coefficients - int length; ///< number of coefficients in the vector -} SwsVector; - -// vectors can be shared -typedef struct SwsFilter { - SwsVector *lumH; - SwsVector *lumV; - SwsVector *chrH; - SwsVector *chrV; -} SwsFilter; - -struct SwsContext; - -/** - * Return a positive value if pix_fmt is a supported input format, 0 - * otherwise. - */ -int sws_isSupportedInput(enum AVPixelFormat pix_fmt); - -/** - * Return a positive value if pix_fmt is a supported output format, 0 - * otherwise. - */ -int sws_isSupportedOutput(enum AVPixelFormat pix_fmt); - -/** - * @param[in] pix_fmt the pixel format - * @return a positive value if an endianness conversion for pix_fmt is - * supported, 0 otherwise. - */ -int sws_isSupportedEndiannessConversion(enum AVPixelFormat pix_fmt); - -/** - * Allocate an empty SwsContext. This must be filled and passed to - * sws_init_context(). For filling see AVOptions, options.c and - * sws_setColorspaceDetails(). - */ -struct SwsContext *sws_alloc_context(void); - -/** - * Initialize the swscaler context sws_context. - * - * @return zero or positive value on success, a negative value on - * error - */ -int sws_init_context(struct SwsContext *sws_context, SwsFilter *srcFilter, SwsFilter *dstFilter); - -/** - * Free the swscaler context swsContext. - * If swsContext is NULL, then does nothing. - */ -void sws_freeContext(struct SwsContext *swsContext); - -#if FF_API_SWS_GETCONTEXT -/** - * Allocate and return an SwsContext. You need it to perform - * scaling/conversion operations using sws_scale(). - * - * @param srcW the width of the source image - * @param srcH the height of the source image - * @param srcFormat the source image format - * @param dstW the width of the destination image - * @param dstH the height of the destination image - * @param dstFormat the destination image format - * @param flags specify which algorithm and options to use for rescaling - * @return a pointer to an allocated context, or NULL in case of error - * @note this function is to be removed after a saner alternative is - * written - * @deprecated Use sws_getCachedContext() instead. - */ -struct SwsContext *sws_getContext(int srcW, int srcH, enum AVPixelFormat srcFormat, - int dstW, int dstH, enum AVPixelFormat dstFormat, - int flags, SwsFilter *srcFilter, - SwsFilter *dstFilter, const double *param); -#endif - -/** - * Scale the image slice in srcSlice and put the resulting scaled - * slice in the image in dst. A slice is a sequence of consecutive - * rows in an image. 
- * - * Slices have to be provided in sequential order, either in - * top-bottom or bottom-top order. If slices are provided in - * non-sequential order the behavior of the function is undefined. - * - * @param c the scaling context previously created with - * sws_getContext() - * @param srcSlice the array containing the pointers to the planes of - * the source slice - * @param srcStride the array containing the strides for each plane of - * the source image - * @param srcSliceY the position in the source image of the slice to - * process, that is the number (counted starting from - * zero) in the image of the first row of the slice - * @param srcSliceH the height of the source slice, that is the number - * of rows in the slice - * @param dst the array containing the pointers to the planes of - * the destination image - * @param dstStride the array containing the strides for each plane of - * the destination image - * @return the height of the output slice - */ -int sws_scale(struct SwsContext *c, const uint8_t *const srcSlice[], - const int srcStride[], int srcSliceY, int srcSliceH, - uint8_t *const dst[], const int dstStride[]); - -/** - * @param dstRange flag indicating the while-black range of the output (1=jpeg / 0=mpeg) - * @param srcRange flag indicating the while-black range of the input (1=jpeg / 0=mpeg) - * @param table the yuv2rgb coefficients describing the output yuv space, normally ff_yuv2rgb_coeffs[x] - * @param inv_table the yuv2rgb coefficients describing the input yuv space, normally ff_yuv2rgb_coeffs[x] - * @param brightness 16.16 fixed point brightness correction - * @param contrast 16.16 fixed point contrast correction - * @param saturation 16.16 fixed point saturation correction - * @return -1 if not supported - */ -int sws_setColorspaceDetails(struct SwsContext *c, const int inv_table[4], - int srcRange, const int table[4], int dstRange, - int brightness, int contrast, int saturation); - -/** - * @return -1 if not supported - */ -int sws_getColorspaceDetails(struct SwsContext *c, int **inv_table, - int *srcRange, int **table, int *dstRange, - int *brightness, int *contrast, int *saturation); - -/** - * Allocate and return an uninitialized vector with length coefficients. - */ -SwsVector *sws_allocVec(int length); - -/** - * Return a normalized Gaussian curve used to filter stuff - * quality = 3 is high quality, lower is lower quality. - */ -SwsVector *sws_getGaussianVec(double variance, double quality); - -/** - * Allocate and return a vector with length coefficients, all - * with the same value c. - */ -SwsVector *sws_getConstVec(double c, int length); - -/** - * Allocate and return a vector with just one coefficient, with - * value 1.0. - */ -SwsVector *sws_getIdentityVec(void); - -/** - * Scale all the coefficients of a by the scalar value. - */ -void sws_scaleVec(SwsVector *a, double scalar); - -/** - * Scale all the coefficients of a so that their sum equals height. - */ -void sws_normalizeVec(SwsVector *a, double height); -void sws_convVec(SwsVector *a, SwsVector *b); -void sws_addVec(SwsVector *a, SwsVector *b); -void sws_subVec(SwsVector *a, SwsVector *b); -void sws_shiftVec(SwsVector *a, int shift); - -/** - * Allocate and return a clone of the vector a, that is a vector - * with the same coefficients as a. - */ -SwsVector *sws_cloneVec(SwsVector *a); - -/** - * Print with av_log() a textual representation of the vector a - * if log_level <= av_log_level. 
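A hedged sketch of the sws_getContext()/sws_scale() pair documented above, converting one full YUV420P frame to BGR24 (the 640x480 size is an example; av_image_alloc() from libavutil/imgutils.h is assumed available and is not part of the headers quoted here):

    #include <libswscale/swscale.h>
    #include <libavutil/imgutils.h>   /* av_image_alloc() */
    #include <libavutil/mem.h>        /* av_freep() */

    int convert_frame_example(const uint8_t *src_data[4], const int src_linesize[4])
    {
        uint8_t *dst_data[4];
        int dst_linesize[4];
        int ret;

        struct SwsContext *sws = sws_getContext(640, 480, AV_PIX_FMT_YUV420P,
                                                640, 480, AV_PIX_FMT_BGR24,
                                                SWS_BILINEAR, NULL, NULL, NULL);
        if (!sws)
            return -1;

        ret = av_image_alloc(dst_data, dst_linesize, 640, 480, AV_PIX_FMT_BGR24, 1);
        if (ret < 0) {
            sws_freeContext(sws);
            return ret;
        }

        /* One full-height slice: srcSliceY = 0, srcSliceH = 480. */
        sws_scale(sws, src_data, src_linesize, 0, 480, dst_data, dst_linesize);

        av_freep(&dst_data[0]);
        sws_freeContext(sws);
        return 0;
    }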
- */ -void sws_printVec2(SwsVector *a, AVClass *log_ctx, int log_level); - -void sws_freeVec(SwsVector *a); - -SwsFilter *sws_getDefaultFilter(float lumaGBlur, float chromaGBlur, - float lumaSharpen, float chromaSharpen, - float chromaHShift, float chromaVShift, - int verbose); -void sws_freeFilter(SwsFilter *filter); - -/** - * Check if context can be reused, otherwise reallocate a new one. - * - * If context is NULL, just calls sws_getContext() to get a new - * context. Otherwise, checks if the parameters are the ones already - * saved in context. If that is the case, returns the current - * context. Otherwise, frees context and gets a new context with - * the new parameters. - * - * Be warned that srcFilter and dstFilter are not checked, they - * are assumed to remain the same. - */ -struct SwsContext *sws_getCachedContext(struct SwsContext *context, - int srcW, int srcH, enum AVPixelFormat srcFormat, - int dstW, int dstH, enum AVPixelFormat dstFormat, - int flags, SwsFilter *srcFilter, - SwsFilter *dstFilter, const double *param); - -/** - * Convert an 8-bit paletted frame into a frame with a color depth of 32 bits. - * - * The output frame will have the same packed format as the palette. - * - * @param src source frame buffer - * @param dst destination frame buffer - * @param num_pixels number of pixels to convert - * @param palette array with [256] entries, which must match color arrangement (RGB or BGR) of src - */ -void sws_convertPalette8ToPacked32(const uint8_t *src, uint8_t *dst, int num_pixels, const uint8_t *palette); - -/** - * Convert an 8-bit paletted frame into a frame with a color depth of 24 bits. - * - * With the palette format "ABCD", the destination frame ends up with the format "ABC". - * - * @param src source frame buffer - * @param dst destination frame buffer - * @param num_pixels number of pixels to convert - * @param palette array with [256] entries, which must match color arrangement (RGB or BGR) of src - */ -void sws_convertPalette8ToPacked24(const uint8_t *src, uint8_t *dst, int num_pixels, const uint8_t *palette); - -/** - * Get the AVClass for swsContext. It can be used in combination with - * AV_OPT_SEARCH_FAKE_OBJ for examining options. - * - * @see av_opt_find(). - */ -const AVClass *sws_get_class(void); - -/** - * @} - */ - -#endif /* SWSCALE_SWSCALE_H */ diff --git a/3rdparty/include/ffmpeg_/libswscale/version.h b/3rdparty/include/ffmpeg_/libswscale/version.h deleted file mode 100644 index f635e3d0a7..0000000000 --- a/3rdparty/include/ffmpeg_/libswscale/version.h +++ /dev/null @@ -1,59 +0,0 @@ -/* - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. 
- * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#ifndef SWSCALE_VERSION_H -#define SWSCALE_VERSION_H - -/** - * @file - * swscale version macros - */ - -#include "libavutil/avutil.h" - -#define LIBSWSCALE_VERSION_MAJOR 2 -#define LIBSWSCALE_VERSION_MINOR 3 -#define LIBSWSCALE_VERSION_MICRO 100 - -#define LIBSWSCALE_VERSION_INT AV_VERSION_INT(LIBSWSCALE_VERSION_MAJOR, \ - LIBSWSCALE_VERSION_MINOR, \ - LIBSWSCALE_VERSION_MICRO) -#define LIBSWSCALE_VERSION AV_VERSION(LIBSWSCALE_VERSION_MAJOR, \ - LIBSWSCALE_VERSION_MINOR, \ - LIBSWSCALE_VERSION_MICRO) -#define LIBSWSCALE_BUILD LIBSWSCALE_VERSION_INT - -#define LIBSWSCALE_IDENT "SwS" AV_STRINGIFY(LIBSWSCALE_VERSION) - -/** - * FF_API_* defines may be placed below to indicate public API that will be - * dropped at a future version bump. The defines themselves are not part of - * the public API and may change, break or disappear at any time. - */ - -#ifndef FF_API_SWS_GETCONTEXT -#define FF_API_SWS_GETCONTEXT (LIBSWSCALE_VERSION_MAJOR < 3) -#endif -#ifndef FF_API_SWS_CPU_CAPS -#define FF_API_SWS_CPU_CAPS (LIBSWSCALE_VERSION_MAJOR < 3) -#endif -#ifndef FF_API_SWS_FORMAT_NAME -#define FF_API_SWS_FORMAT_NAME (LIBSWSCALE_VERSION_MAJOR < 3) -#endif - -#endif /* SWSCALE_VERSION_H */ diff --git a/3rdparty/lib/armeabi-v7a/libnative_camera_r2.2.0.so b/3rdparty/lib/armeabi-v7a/libnative_camera_r2.2.0.so deleted file mode 100755 index aac6634b46..0000000000 Binary files a/3rdparty/lib/armeabi-v7a/libnative_camera_r2.2.0.so and /dev/null differ diff --git a/3rdparty/lib/armeabi-v7a/libnative_camera_r2.3.3.so b/3rdparty/lib/armeabi-v7a/libnative_camera_r2.3.3.so deleted file mode 100755 index d523f69dec..0000000000 Binary files a/3rdparty/lib/armeabi-v7a/libnative_camera_r2.3.3.so and /dev/null differ diff --git a/3rdparty/lib/armeabi-v7a/libnative_camera_r3.0.1.so b/3rdparty/lib/armeabi-v7a/libnative_camera_r3.0.1.so deleted file mode 100755 index e386bf4f92..0000000000 Binary files a/3rdparty/lib/armeabi-v7a/libnative_camera_r3.0.1.so and /dev/null differ diff --git a/3rdparty/lib/armeabi-v7a/libnative_camera_r4.0.0.so b/3rdparty/lib/armeabi-v7a/libnative_camera_r4.0.0.so deleted file mode 100755 index 028ab7d1e6..0000000000 Binary files a/3rdparty/lib/armeabi-v7a/libnative_camera_r4.0.0.so and /dev/null differ diff --git a/3rdparty/lib/armeabi-v7a/libnative_camera_r4.0.3.so b/3rdparty/lib/armeabi-v7a/libnative_camera_r4.0.3.so deleted file mode 100755 index 48cbdd096e..0000000000 Binary files a/3rdparty/lib/armeabi-v7a/libnative_camera_r4.0.3.so and /dev/null differ diff --git a/3rdparty/lib/armeabi-v7a/libnative_camera_r4.1.1.so b/3rdparty/lib/armeabi-v7a/libnative_camera_r4.1.1.so deleted file mode 100755 index 7fe50875c6..0000000000 Binary files a/3rdparty/lib/armeabi-v7a/libnative_camera_r4.1.1.so and /dev/null differ diff --git a/3rdparty/lib/armeabi-v7a/libnative_camera_r4.2.0.so b/3rdparty/lib/armeabi-v7a/libnative_camera_r4.2.0.so deleted file mode 100755 index 15827d8186..0000000000 Binary files a/3rdparty/lib/armeabi-v7a/libnative_camera_r4.2.0.so and /dev/null differ diff --git a/3rdparty/lib/armeabi-v7a/libnative_camera_r4.3.0.so b/3rdparty/lib/armeabi-v7a/libnative_camera_r4.3.0.so deleted file mode 100755 index ec1edfb04d..0000000000 Binary files a/3rdparty/lib/armeabi-v7a/libnative_camera_r4.3.0.so and /dev/null differ diff --git 
a/3rdparty/lib/armeabi-v7a/libnative_camera_r4.4.0.so b/3rdparty/lib/armeabi-v7a/libnative_camera_r4.4.0.so deleted file mode 100755 index 4d777edf89..0000000000 Binary files a/3rdparty/lib/armeabi-v7a/libnative_camera_r4.4.0.so and /dev/null differ diff --git a/3rdparty/lib/armeabi/libnative_camera_r2.2.0.so b/3rdparty/lib/armeabi/libnative_camera_r2.2.0.so deleted file mode 100755 index 1707a8850c..0000000000 Binary files a/3rdparty/lib/armeabi/libnative_camera_r2.2.0.so and /dev/null differ diff --git a/3rdparty/lib/armeabi/libnative_camera_r2.3.3.so b/3rdparty/lib/armeabi/libnative_camera_r2.3.3.so deleted file mode 100755 index fb4b125fdb..0000000000 Binary files a/3rdparty/lib/armeabi/libnative_camera_r2.3.3.so and /dev/null differ diff --git a/3rdparty/lib/armeabi/libnative_camera_r3.0.1.so b/3rdparty/lib/armeabi/libnative_camera_r3.0.1.so deleted file mode 100755 index 96b264d0e3..0000000000 Binary files a/3rdparty/lib/armeabi/libnative_camera_r3.0.1.so and /dev/null differ diff --git a/3rdparty/lib/armeabi/libnative_camera_r4.0.0.so b/3rdparty/lib/armeabi/libnative_camera_r4.0.0.so deleted file mode 100755 index 179eef9a94..0000000000 Binary files a/3rdparty/lib/armeabi/libnative_camera_r4.0.0.so and /dev/null differ diff --git a/3rdparty/lib/armeabi/libnative_camera_r4.0.3.so b/3rdparty/lib/armeabi/libnative_camera_r4.0.3.so deleted file mode 100755 index 165dc463c8..0000000000 Binary files a/3rdparty/lib/armeabi/libnative_camera_r4.0.3.so and /dev/null differ diff --git a/3rdparty/lib/armeabi/libnative_camera_r4.1.1.so b/3rdparty/lib/armeabi/libnative_camera_r4.1.1.so deleted file mode 100755 index a9a5d7da74..0000000000 Binary files a/3rdparty/lib/armeabi/libnative_camera_r4.1.1.so and /dev/null differ diff --git a/3rdparty/lib/armeabi/libnative_camera_r4.2.0.so b/3rdparty/lib/armeabi/libnative_camera_r4.2.0.so deleted file mode 100755 index 9037c68600..0000000000 Binary files a/3rdparty/lib/armeabi/libnative_camera_r4.2.0.so and /dev/null differ diff --git a/3rdparty/lib/armeabi/libnative_camera_r4.3.0.so b/3rdparty/lib/armeabi/libnative_camera_r4.3.0.so deleted file mode 100755 index 026f0b48bb..0000000000 Binary files a/3rdparty/lib/armeabi/libnative_camera_r4.3.0.so and /dev/null differ diff --git a/3rdparty/lib/armeabi/libnative_camera_r4.4.0.so b/3rdparty/lib/armeabi/libnative_camera_r4.4.0.so deleted file mode 100755 index 6aebec9234..0000000000 Binary files a/3rdparty/lib/armeabi/libnative_camera_r4.4.0.so and /dev/null differ diff --git a/3rdparty/lib/libavcodec.a b/3rdparty/lib/libavcodec.a deleted file mode 100644 index 45c31c5788..0000000000 Binary files a/3rdparty/lib/libavcodec.a and /dev/null differ diff --git a/3rdparty/lib/libavcodec64.a b/3rdparty/lib/libavcodec64.a deleted file mode 100644 index 899566825d..0000000000 Binary files a/3rdparty/lib/libavcodec64.a and /dev/null differ diff --git a/3rdparty/lib/libavdevice.a b/3rdparty/lib/libavdevice.a deleted file mode 100644 index 41ba6a1299..0000000000 Binary files a/3rdparty/lib/libavdevice.a and /dev/null differ diff --git a/3rdparty/lib/libavdevice64.a b/3rdparty/lib/libavdevice64.a deleted file mode 100644 index 249ab71b2a..0000000000 Binary files a/3rdparty/lib/libavdevice64.a and /dev/null differ diff --git a/3rdparty/lib/libavformat.a b/3rdparty/lib/libavformat.a deleted file mode 100644 index ab267be9aa..0000000000 Binary files a/3rdparty/lib/libavformat.a and /dev/null differ diff --git a/3rdparty/lib/libavformat64.a b/3rdparty/lib/libavformat64.a deleted file mode 100644 index 84f7e76d39..0000000000 
Binary files a/3rdparty/lib/libavformat64.a and /dev/null differ diff --git a/3rdparty/lib/libavutil.a b/3rdparty/lib/libavutil.a deleted file mode 100644 index 7c0cda3410..0000000000 Binary files a/3rdparty/lib/libavutil.a and /dev/null differ diff --git a/3rdparty/lib/libavutil64.a b/3rdparty/lib/libavutil64.a deleted file mode 100644 index dc23d1bc9e..0000000000 Binary files a/3rdparty/lib/libavutil64.a and /dev/null differ diff --git a/3rdparty/lib/libcoldname_.a b/3rdparty/lib/libcoldname_.a deleted file mode 100644 index a3b8d3e56b..0000000000 Binary files a/3rdparty/lib/libcoldname_.a and /dev/null differ diff --git a/3rdparty/lib/libgcc64.a b/3rdparty/lib/libgcc64.a deleted file mode 100644 index 0526c6bdc1..0000000000 Binary files a/3rdparty/lib/libgcc64.a and /dev/null differ diff --git a/3rdparty/lib/libgcc_.a b/3rdparty/lib/libgcc_.a deleted file mode 100644 index 4d0498cd42..0000000000 Binary files a/3rdparty/lib/libgcc_.a and /dev/null differ diff --git a/3rdparty/lib/libmingwex64.a b/3rdparty/lib/libmingwex64.a deleted file mode 100644 index 7bd9d98dcc..0000000000 Binary files a/3rdparty/lib/libmingwex64.a and /dev/null differ diff --git a/3rdparty/lib/libmingwex_.a b/3rdparty/lib/libmingwex_.a deleted file mode 100644 index 41984db59a..0000000000 Binary files a/3rdparty/lib/libmingwex_.a and /dev/null differ diff --git a/3rdparty/lib/libswscale.a b/3rdparty/lib/libswscale.a deleted file mode 100644 index 91f9d7597c..0000000000 Binary files a/3rdparty/lib/libswscale.a and /dev/null differ diff --git a/3rdparty/lib/libswscale64.a b/3rdparty/lib/libswscale64.a deleted file mode 100644 index c05d605d4e..0000000000 Binary files a/3rdparty/lib/libswscale64.a and /dev/null differ diff --git a/3rdparty/lib/libwsock3264.a b/3rdparty/lib/libwsock3264.a deleted file mode 100644 index 86c4156cc7..0000000000 Binary files a/3rdparty/lib/libwsock3264.a and /dev/null differ diff --git a/3rdparty/lib/libwsock32_.a b/3rdparty/lib/libwsock32_.a deleted file mode 100644 index c02bc25927..0000000000 Binary files a/3rdparty/lib/libwsock32_.a and /dev/null differ diff --git a/3rdparty/lib/mips/libnative_camera_r4.0.3.so b/3rdparty/lib/mips/libnative_camera_r4.0.3.so deleted file mode 100755 index 6dee897800..0000000000 Binary files a/3rdparty/lib/mips/libnative_camera_r4.0.3.so and /dev/null differ diff --git a/3rdparty/lib/mips/libnative_camera_r4.1.1.so b/3rdparty/lib/mips/libnative_camera_r4.1.1.so deleted file mode 100755 index 71a6354ac3..0000000000 Binary files a/3rdparty/lib/mips/libnative_camera_r4.1.1.so and /dev/null differ diff --git a/3rdparty/lib/mips/libnative_camera_r4.2.0.so b/3rdparty/lib/mips/libnative_camera_r4.2.0.so deleted file mode 100755 index 21bcffb4af..0000000000 Binary files a/3rdparty/lib/mips/libnative_camera_r4.2.0.so and /dev/null differ diff --git a/3rdparty/lib/mips/libnative_camera_r4.3.0.so b/3rdparty/lib/mips/libnative_camera_r4.3.0.so deleted file mode 100755 index 653c2f1ca6..0000000000 Binary files a/3rdparty/lib/mips/libnative_camera_r4.3.0.so and /dev/null differ diff --git a/3rdparty/lib/mips/libnative_camera_r4.4.0.so b/3rdparty/lib/mips/libnative_camera_r4.4.0.so deleted file mode 100755 index 8d6fdf2bc4..0000000000 Binary files a/3rdparty/lib/mips/libnative_camera_r4.4.0.so and /dev/null differ diff --git a/3rdparty/lib/x86/libnative_camera_r2.3.3.so b/3rdparty/lib/x86/libnative_camera_r2.3.3.so deleted file mode 100755 index a47b8b2ce0..0000000000 Binary files a/3rdparty/lib/x86/libnative_camera_r2.3.3.so and /dev/null differ diff --git 
a/3rdparty/lib/x86/libnative_camera_r3.0.1.so b/3rdparty/lib/x86/libnative_camera_r3.0.1.so deleted file mode 100755 index faa13461f9..0000000000 Binary files a/3rdparty/lib/x86/libnative_camera_r3.0.1.so and /dev/null differ diff --git a/3rdparty/lib/x86/libnative_camera_r4.0.3.so b/3rdparty/lib/x86/libnative_camera_r4.0.3.so deleted file mode 100755 index 2d2fb8eb14..0000000000 Binary files a/3rdparty/lib/x86/libnative_camera_r4.0.3.so and /dev/null differ diff --git a/3rdparty/lib/x86/libnative_camera_r4.1.1.so b/3rdparty/lib/x86/libnative_camera_r4.1.1.so deleted file mode 100755 index f40da0d9db..0000000000 Binary files a/3rdparty/lib/x86/libnative_camera_r4.1.1.so and /dev/null differ diff --git a/3rdparty/lib/x86/libnative_camera_r4.2.0.so b/3rdparty/lib/x86/libnative_camera_r4.2.0.so deleted file mode 100755 index 0d4ac03b55..0000000000 Binary files a/3rdparty/lib/x86/libnative_camera_r4.2.0.so and /dev/null differ diff --git a/3rdparty/lib/x86/libnative_camera_r4.3.0.so b/3rdparty/lib/x86/libnative_camera_r4.3.0.so deleted file mode 100755 index 7e1c5803a1..0000000000 Binary files a/3rdparty/lib/x86/libnative_camera_r4.3.0.so and /dev/null differ diff --git a/3rdparty/lib/x86/libnative_camera_r4.4.0.so b/3rdparty/lib/x86/libnative_camera_r4.4.0.so deleted file mode 100755 index 37ab6d0806..0000000000 Binary files a/3rdparty/lib/x86/libnative_camera_r4.4.0.so and /dev/null differ diff --git a/3rdparty/libwebp/CMakeLists.txt b/3rdparty/libwebp/CMakeLists.txt index 74519ec820..12ca16e8ae 100644 --- a/3rdparty/libwebp/CMakeLists.txt +++ b/3rdparty/libwebp/CMakeLists.txt @@ -40,7 +40,7 @@ if(UNIX) endif() endif() -ocv_warnings_disable(CMAKE_C_FLAGS -Wunused-variable -Wshadow -Wmaybe-uninitialized) +ocv_warnings_disable(CMAKE_C_FLAGS -Wunused-variable -Wunused-function -Wshadow -Wmaybe-uninitialized) ocv_warnings_disable(CMAKE_C_FLAGS /wd4244 /wd4267) # vs2005 set_target_properties(${WEBP_LIBRARY} diff --git a/CMakeLists.txt b/CMakeLists.txt index d9a17b3820..89450fc58f 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -188,7 +188,7 @@ OCV_OPTION(WITH_QUICKTIME "Use QuickTime for Video I/O insted of QTKit" OFF OCV_OPTION(WITH_TBB "Include Intel TBB support" OFF IF (NOT IOS AND NOT WINRT) ) OCV_OPTION(WITH_OPENMP "Include OpenMP support" OFF) OCV_OPTION(WITH_CSTRIPES "Include C= support" OFF IF (WIN32 AND NOT WINRT) ) -OCV_OPTION(WITH_PTHREADS_PF "Use pthreads-based parallel_for" OFF IF (NOT WIN32) ) +OCV_OPTION(WITH_PTHREADS_PF "Use pthreads-based parallel_for" ON IF (NOT WIN32) ) OCV_OPTION(WITH_TIFF "Include TIFF support" ON IF (NOT IOS) ) OCV_OPTION(WITH_UNICAP "Include Unicap support (GPL)" OFF IF (UNIX AND NOT APPLE AND NOT ANDROID) ) OCV_OPTION(WITH_V4L "Include Video 4 Linux support" ON IF (UNIX AND NOT ANDROID) ) @@ -216,8 +216,8 @@ OCV_OPTION(BUILD_ANDROID_EXAMPLES "Build examples for Android platform" OCV_OPTION(BUILD_DOCS "Create build rules for OpenCV Documentation" ON IF NOT WINRT) OCV_OPTION(BUILD_EXAMPLES "Build all examples" OFF ) OCV_OPTION(BUILD_PACKAGE "Enables 'make package_source' command" ON IF NOT WINRT) -OCV_OPTION(BUILD_PERF_TESTS "Build performance tests" ON IF (NOT IOS AND NOT WINRT) ) -OCV_OPTION(BUILD_TESTS "Build accuracy & regression tests" ON IF (NOT IOS AND NOT WINRT) ) +OCV_OPTION(BUILD_PERF_TESTS "Build performance tests" ON IF (NOT IOS) ) +OCV_OPTION(BUILD_TESTS "Build accuracy & regression tests" ON IF (NOT IOS) ) OCV_OPTION(BUILD_WITH_DEBUG_INFO "Include debug info into debug libs (not MSCV only)" ON ) OCV_OPTION(BUILD_WITH_STATIC_CRT "Enables use 
of staticaly linked CRT for staticaly linked OpenCV" ON IF MSVC ) OCV_OPTION(BUILD_WITH_DYNAMIC_IPP "Enables dynamic linking of IPP (only for standalone IPP)" OFF ) @@ -1026,6 +1026,27 @@ if(DEFINED WITH_GPHOTO2) endif(DEFINED WITH_GPHOTO2) +# Order is similar to CV_PARALLEL_FRAMEWORK in core/src/parallel.cpp +ocv_clear_vars(CV_PARALLEL_FRAMEWORK) +if(HAVE_TBB) + set(CV_PARALLEL_FRAMEWORK "TBB (ver ${TBB_VERSION_MAJOR}.${TBB_VERSION_MINOR} interface ${TBB_INTERFACE_VERSION})") +elseif(HAVE_CSTRIPES) + set(CV_PARALLEL_FRAMEWORK "C=") +elseif(HAVE_OPENMP) + set(CV_PARALLEL_FRAMEWORK "OpenMP") +elseif(HAVE_GCD) + set(CV_PARALLEL_FRAMEWORK "GCD") +elseif(WINRT OR HAVE_CONCURRENCY) + set(CV_PARALLEL_FRAMEWORK "Concurrency") +elseif(HAVE_PTHREADS_PF) + set(CV_PARALLEL_FRAMEWORK "pthreads") +else() + set(CV_PARALLEL_FRAMEWORK "none") +endif() +status("") +status(" Parallel framework:" TRUE THEN "${CV_PARALLEL_FRAMEWORK}" ELSE NO) + + # ========================== Other third-party libraries ========================== status("") status(" Other third-party libraries:") @@ -1045,12 +1066,6 @@ status(" Use IPP Async:" HAVE_IPP_A THEN "YES" ELSE NO) endif(DEFINED WITH_IPP_A) status(" Use Eigen:" HAVE_EIGEN THEN "YES (ver ${EIGEN_WORLD_VERSION}.${EIGEN_MAJOR_VERSION}.${EIGEN_MINOR_VERSION})" ELSE NO) -status(" Use TBB:" HAVE_TBB THEN "YES (ver ${TBB_VERSION_MAJOR}.${TBB_VERSION_MINOR} interface ${TBB_INTERFACE_VERSION})" ELSE NO) -status(" Use OpenMP:" HAVE_OPENMP THEN YES ELSE NO) -status(" Use GCD" HAVE_GCD THEN YES ELSE NO) -status(" Use Concurrency" HAVE_CONCURRENCY THEN YES ELSE NO) -status(" Use C=:" HAVE_CSTRIPES THEN YES ELSE NO) -status(" Use pthreads for parallel for:" HAVE_PTHREADS_PF THEN YES ELSE NO) status(" Use Cuda:" HAVE_CUDA THEN "YES (ver ${CUDA_VERSION_STRING})" ELSE NO) status(" Use OpenCL:" HAVE_OPENCL THEN YES ELSE NO) diff --git a/cmake/OpenCVCompilerOptions.cmake b/cmake/OpenCVCompilerOptions.cmake index 6c235ebfbf..fe3de3fed9 100644 --- a/cmake/OpenCVCompilerOptions.cmake +++ b/cmake/OpenCVCompilerOptions.cmake @@ -47,6 +47,18 @@ macro(add_extra_compiler_option option) endif() endmacro() +# Gets environment variable and puts its value to the corresponding preprocessor definition +# Useful for WINRT that has no access to environment variables +macro(add_env_definitions option) + set(value $ENV{${option}}) + if("${value}" STREQUAL "") + message(WARNING "${option} environment variable is empty. 
Please set it to appropriate location to get correct results") + else() + string(REPLACE "\\" "\\\\" value ${value}) + endif() + add_definitions("-D${option}=\"${value}\"") +endmacro() + # OpenCV fails some tests when 'char' is 'unsigned' by default add_extra_compiler_option(-fsigned-char) @@ -286,6 +298,11 @@ if(MSVC12 AND NOT CMAKE_GENERATOR MATCHES "Visual Studio") set(OPENCV_EXTRA_CXX_FLAGS "${OPENCV_EXTRA_CXX_FLAGS} /FS") endif() +# Adding additional using directory for WindowsPhone 8.0 to get Windows.winmd properly +if(WINRT_PHONE AND WINRT_8_0) + set(OPENCV_EXTRA_CXX_FLAGS "${OPENCV_EXTRA_CXX_FLAGS} /AI\$(WindowsSDK_MetadataPath)") +endif() + # Extra link libs if the user selects building static libs: if(NOT BUILD_SHARED_LIBS AND CMAKE_COMPILER_IS_GNUCXX AND NOT ANDROID) # Android does not need these settings because they are already set by toolchain file diff --git a/cmake/OpenCVDetectPython.cmake b/cmake/OpenCVDetectPython.cmake index f883525c80..4bca7bd932 100644 --- a/cmake/OpenCVDetectPython.cmake +++ b/cmake/OpenCVDetectPython.cmake @@ -85,9 +85,9 @@ function(find_python preferred_version min_version library_env include_dir_env # not using _version_string here, because it might not conform to the CMake version format if(CMAKE_CROSSCOMPILING) # builder version can differ from target, matching base version (e.g. 2.7) - find_host_package(PythonLibs "${_version_major_minor}") + find_package(PythonLibs "${_version_major_minor}") else() - find_host_package(PythonLibs "${_version_major_minor}.${_version_patch}" EXACT) + find_package(PythonLibs "${_version_major_minor}.${_version_patch}" EXACT) endif() if(PYTHONLIBS_FOUND) @@ -105,7 +105,7 @@ function(find_python preferred_version min_version library_env include_dir_env set(_include_dir ${PYTHON_INCLUDE_DIR}) set(_include_dir2 ${PYTHON_INCLUDE_DIR2}) - # Clear find_host_package side effects + # Clear find_package side effects unset(PYTHONLIBS_FOUND) unset(PYTHON_LIBRARIES) unset(PYTHON_INCLUDE_PATH) diff --git a/cmake/OpenCVFindLibsGrfmt.cmake b/cmake/OpenCVFindLibsGrfmt.cmake index b5f38279e3..614f844c7b 100644 --- a/cmake/OpenCVFindLibsGrfmt.cmake +++ b/cmake/OpenCVFindLibsGrfmt.cmake @@ -8,7 +8,8 @@ if(BUILD_ZLIB) else() find_package(ZLIB "${MIN_VER_ZLIB}") if(ZLIB_FOUND AND ANDROID) - if(ZLIB_LIBRARIES STREQUAL "${ANDROID_SYSROOT}/usr/lib/libz.so") + if(ZLIB_LIBRARIES STREQUAL "${ANDROID_SYSROOT}/usr/lib/libz.so" OR + ZLIB_LIBRARIES STREQUAL "${ANDROID_SYSROOT}/usr/lib64/libz.so") set(ZLIB_LIBRARIES z) endif() endif() diff --git a/cmake/OpenCVFindLibsPerf.cmake b/cmake/OpenCVFindLibsPerf.cmake index bda5d792a3..d1bc5419ab 100644 --- a/cmake/OpenCVFindLibsPerf.cmake +++ b/cmake/OpenCVFindLibsPerf.cmake @@ -120,12 +120,16 @@ if(WITH_OPENMP) set(HAVE_OPENMP "${OPENMP_FOUND}") endif() -if(UNIX OR ANDROID) -if(NOT APPLE AND NOT HAVE_TBB AND NOT HAVE_OPENMP) - set(HAVE_PTHREADS_PF 1) -else() - set(HAVE_PTHREADS_PF 0) -endif() +if(NOT MSVC AND NOT DEFINED HAVE_PTHREADS) + set(_fname "${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeTmp/pthread_test.cpp") + file(WRITE "${_fname}" "#include <pthread.h>\nint main() { (void)pthread_self(); return 0; }\n") + try_compile(HAVE_PTHREADS "${CMAKE_BINARY_DIR}" "${_fname}") + file(REMOVE "${_fname}") +endif() + +ocv_clear_vars(HAVE_PTHREADS_PF) +if(WITH_PTHREADS_PF) + set(HAVE_PTHREADS_PF ${HAVE_PTHREADS}) else() set(HAVE_PTHREADS_PF 0) endif() diff --git a/cmake/OpenCVFindLibsVideo.cmake b/cmake/OpenCVFindLibsVideo.cmake index 279787a34d..94c735c693 100644 --- a/cmake/OpenCVFindLibsVideo.cmake +++
b/cmake/OpenCVFindLibsVideo.cmake @@ -190,7 +190,7 @@ endif(WITH_XIMEA) ocv_clear_vars(HAVE_FFMPEG HAVE_FFMPEG_CODEC HAVE_FFMPEG_FORMAT HAVE_FFMPEG_UTIL HAVE_FFMPEG_SWSCALE HAVE_FFMPEG_RESAMPLE HAVE_GENTOO_FFMPEG HAVE_FFMPEG_FFMPEG) if(WITH_FFMPEG) if(WIN32 AND NOT ARM) - include("${OpenCV_SOURCE_DIR}/3rdparty/ffmpeg/ffmpeg_version.cmake") + include("${OpenCV_SOURCE_DIR}/3rdparty/ffmpeg/ffmpeg.cmake") elseif(UNIX) CHECK_MODULE(libavcodec HAVE_FFMPEG_CODEC) CHECK_MODULE(libavformat HAVE_FFMPEG_FORMAT) diff --git a/cmake/OpenCVGenAndroidMK.cmake b/cmake/OpenCVGenAndroidMK.cmake index 627d860169..9cc52147af 100644 --- a/cmake/OpenCVGenAndroidMK.cmake +++ b/cmake/OpenCVGenAndroidMK.cmake @@ -30,13 +30,6 @@ if(ANDROID) # replace 'opencv_' -> ''' string(REPLACE "opencv_" "" OPENCV_MODULES_CONFIGMAKE "${OPENCV_MODULES_CONFIGMAKE}") - - # prepare 3rd-party component list without TBB for armeabi and mips platforms. TBB is useless there. - set(OPENCV_3RDPARTY_COMPONENTS_CONFIGMAKE_NO_TBB ${OPENCV_3RDPARTY_COMPONENTS_CONFIGMAKE}) - foreach(mod ${OPENCV_3RDPARTY_COMPONENTS_CONFIGMAKE_NO_TBB}) - string(REPLACE "tbb" "" OPENCV_3RDPARTY_COMPONENTS_CONFIGMAKE_NO_TBB "${OPENCV_3RDPARTY_COMPONENTS_CONFIGMAKE_NO_TBB}") - endforeach() - if(BUILD_FAT_JAVA_LIB) set(OPENCV_LIBS_CONFIGMAKE java3) else() @@ -52,6 +45,7 @@ if(ANDROID) set(OPENCV_3RDPARTY_LIBS_DIR_CONFIGCMAKE "\$(OPENCV_THIS_DIR)/3rdparty/lib/\$(OPENCV_TARGET_ARCH_ABI)") configure_file("${OpenCV_SOURCE_DIR}/cmake/templates/OpenCV.mk.in" "${CMAKE_BINARY_DIR}/OpenCV.mk" @ONLY) + configure_file("${OpenCV_SOURCE_DIR}/cmake/templates/OpenCV-abi.mk.in" "${CMAKE_BINARY_DIR}/OpenCV-${ANDROID_NDK_ABI_NAME}.mk" @ONLY) # ------------------------------------------------------------------------------------------- # Part 2/2: ${BIN_DIR}/unix-install/OpenCV.mk -> For use with "make install" @@ -62,5 +56,7 @@ if(ANDROID) set(OPENCV_3RDPARTY_LIBS_DIR_CONFIGCMAKE "\$(OPENCV_THIS_DIR)/../3rdparty/libs/\$(OPENCV_TARGET_ARCH_ABI)") configure_file("${OpenCV_SOURCE_DIR}/cmake/templates/OpenCV.mk.in" "${CMAKE_BINARY_DIR}/unix-install/OpenCV.mk" @ONLY) + configure_file("${OpenCV_SOURCE_DIR}/cmake/templates/OpenCV-abi.mk.in" "${CMAKE_BINARY_DIR}/unix-install/OpenCV-${ANDROID_NDK_ABI_NAME}.mk" @ONLY) install(FILES ${CMAKE_BINARY_DIR}/unix-install/OpenCV.mk DESTINATION ${OPENCV_CONFIG_INSTALL_PATH} COMPONENT dev) + install(FILES ${CMAKE_BINARY_DIR}/unix-install/OpenCV-${ANDROID_NDK_ABI_NAME}.mk DESTINATION ${OPENCV_CONFIG_INSTALL_PATH} COMPONENT dev) endif(ANDROID) diff --git a/cmake/OpenCVModule.cmake b/cmake/OpenCVModule.cmake index a1a1b90202..57305054d3 100644 --- a/cmake/OpenCVModule.cmake +++ b/cmake/OpenCVModule.cmake @@ -176,15 +176,11 @@ macro(ocv_add_module _name) endif() endif() - # add HAL as dependency - if(NOT "${the_module}" STREQUAL "opencv_hal") - ocv_add_dependencies(${the_module} opencv_hal) - endif() - # add self to the world dependencies if((NOT DEFINED OPENCV_MODULE_IS_PART_OF_WORLD AND NOT OPENCV_MODULE_${the_module}_CLASS STREQUAL "BINDINGS" - AND NOT OPENCV_PROCESSING_EXTRA_MODULES) + AND NOT OPENCV_PROCESSING_EXTRA_MODULES + AND (NOT BUILD_SHARED_LIBS OR NOT "x${OPENCV_MODULE_TYPE}" STREQUAL "xSTATIC")) OR OPENCV_MODULE_IS_PART_OF_WORLD ) set(OPENCV_MODULE_${the_module}_IS_PART_OF_WORLD ON CACHE INTERNAL "") @@ -662,7 +658,7 @@ macro(ocv_glob_module_sources) ocv_include_directories(${OPENCL_INCLUDE_DIRS}) add_custom_command( OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/${OCL_NAME}.cpp" "${CMAKE_CURRENT_BINARY_DIR}/${OCL_NAME}.hpp" - COMMAND ${CMAKE_COMMAND} 
-DMODULE_NAME="${name}" -DCL_DIR="${CMAKE_CURRENT_LIST_DIR}/src/opencl" -DOUTPUT="${CMAKE_CURRENT_BINARY_DIR}/${OCL_NAME}.cpp" -P "${OpenCV_SOURCE_DIR}/cmake/cl2cpp.cmake" + COMMAND ${CMAKE_COMMAND} "-DMODULE_NAME=${name}" "-DCL_DIR=${CMAKE_CURRENT_LIST_DIR}/src/opencl" "-DOUTPUT=${CMAKE_CURRENT_BINARY_DIR}/${OCL_NAME}.cpp" -P "${OpenCV_SOURCE_DIR}/cmake/cl2cpp.cmake" DEPENDS ${cl_kernels} "${OpenCV_SOURCE_DIR}/cmake/cl2cpp.cmake") ocv_source_group("Src\\opencl\\kernels" FILES ${cl_kernels}) ocv_source_group("Src\\opencl\\kernels\\autogenerated" FILES "${CMAKE_CURRENT_BINARY_DIR}/${OCL_NAME}.cpp" "${CMAKE_CURRENT_BINARY_DIR}/${OCL_NAME}.hpp") @@ -688,6 +684,28 @@ macro(ocv_create_module) _ocv_create_module(${ARGN}) set(the_module_target ${the_module}) endif() + + if(WINRT) + # removing APPCONTAINER from modules to run from console + # in case of usual starting of WinRT test apps output is missing + # so starting of console version w/o APPCONTAINER is required to get test results + # also this allows to use opencv_extra test data for these tests + if(NOT "${the_module}" STREQUAL "opencv_ts" AND NOT "${the_module}" STREQUAL "opencv_hal") + add_custom_command(TARGET ${the_module} + POST_BUILD + COMMAND link.exe /edit /APPCONTAINER:NO $(TargetPath)) + endif() + + if("${the_module}" STREQUAL "opencv_ts") + # copy required dll files; WinRT apps need these dlls that are usually substituted by Visual Studio + # however they are not on path and need to be placed with executables to run from console w/o APPCONTAINER + add_custom_command(TARGET ${the_module} + POST_BUILD + COMMAND copy /y "\"$(VCInstallDir)redist\\$(PlatformTarget)\\Microsoft.VC$(PlatformToolsetVersion).CRT\\msvcp$(PlatformToolsetVersion).dll\"" "\"${CMAKE_BINARY_DIR}\\bin\\$(Configuration)\\msvcp$(PlatformToolsetVersion)_app.dll\"" + COMMAND copy /y "\"$(VCInstallDir)redist\\$(PlatformTarget)\\Microsoft.VC$(PlatformToolsetVersion).CRT\\msvcr$(PlatformToolsetVersion).dll\"" "\"${CMAKE_BINARY_DIR}\\bin\\$(Configuration)\\msvcr$(PlatformToolsetVersion)_app.dll\"" + COMMAND copy /y "\"$(VCInstallDir)redist\\$(PlatformTarget)\\Microsoft.VC$(PlatformToolsetVersion).CRT\\vccorlib$(PlatformToolsetVersion).dll\"" "\"${CMAKE_BINARY_DIR}\\bin\\$(Configuration)\\vccorlib$(PlatformToolsetVersion)_app.dll\"") + endif() + endif() endmacro() macro(_ocv_create_module) @@ -702,15 +720,15 @@ macro(_ocv_create_module) set(sub_links "") set(cuda_objs "") if (OPENCV_MODULE_${the_module}_CHILDREN) - status("Complex module ${the_module}") + message(STATUS "Complex module ${the_module}") foreach (m ${OPENCV_MODULE_${the_module}_CHILDREN}) if (BUILD_${m} AND TARGET ${m}_object) get_target_property(_sub_links ${m} LINK_LIBRARIES) list(APPEND sub_objs $) list(APPEND sub_links ${_sub_links}) - status(" + ${m}") + message(STATUS " + ${m}") else() - status(" - ${m}") + message(STATUS " - ${m}") endif() list(APPEND cuda_objs ${OPENCV_MODULE_${m}_CUDA_OBJECTS}) endforeach() @@ -906,6 +924,10 @@ endmacro() function(ocv_add_perf_tests) ocv_debug_message("ocv_add_perf_tests(" ${ARGN} ")") + if(WINRT) + set(OPENCV_DEBUG_POSTFIX "") + endif() + set(perf_path "${CMAKE_CURRENT_LIST_DIR}/perf") if(BUILD_PERF_TESTS AND EXISTS "${perf_path}") __ocv_parse_test_sources(PERF ${ARGN}) @@ -940,11 +962,18 @@ function(ocv_add_perf_tests) DEBUG_POSTFIX "${OPENCV_DEBUG_POSTFIX}" RUNTIME_OUTPUT_DIRECTORY "${EXECUTABLE_OUTPUT_PATH}" ) - if(ENABLE_SOLUTION_FOLDERS) set_target_properties(${the_target} PROPERTIES FOLDER "tests performance") endif() + if(WINRT) + # removing APPCONTAINER 
from tests to run from console + # look for detailed description inside of ocv_create_module macro above + add_custom_command(TARGET "opencv_perf_${name}" + POST_BUILD + COMMAND link.exe /edit /APPCONTAINER:NO $(TargetPath)) + endif() + if(NOT BUILD_opencv_world) _ocv_add_precompiled_headers(${the_target}) endif() @@ -962,6 +991,10 @@ endfunction() function(ocv_add_accuracy_tests) ocv_debug_message("ocv_add_accuracy_tests(" ${ARGN} ")") + if(WINRT) + set(OPENCV_DEBUG_POSTFIX "") + endif() + set(test_path "${CMAKE_CURRENT_LIST_DIR}/test") if(BUILD_TESTS AND EXISTS "${test_path}") __ocv_parse_test_sources(TEST ${ARGN}) @@ -1004,6 +1037,14 @@ function(ocv_add_accuracy_tests) get_target_property(LOC ${the_target} LOCATION) add_test(${the_target} "${LOC}") + if(WINRT) + # removing APPCONTAINER from tests to run from console + # look for detailed description inside of ocv_create_module macro above + add_custom_command(TARGET "opencv_test_${name}" + POST_BUILD + COMMAND link.exe /edit /APPCONTAINER:NO $(TargetPath)) + endif() + if(NOT BUILD_opencv_world) _ocv_add_precompiled_headers(${the_target}) endif() diff --git a/cmake/OpenCVUtils.cmake b/cmake/OpenCVUtils.cmake index 3e2ea8a7a4..336762f9d7 100644 --- a/cmake/OpenCVUtils.cmake +++ b/cmake/OpenCVUtils.cmake @@ -795,8 +795,12 @@ macro(ocv_get_all_libs _modules _extra _3rdparty) set(${_extra} "") set(${_3rdparty} "") foreach(m ${OPENCV_MODULES_PUBLIC}) - get_target_property(deps ${m} INTERFACE_LINK_LIBRARIES) - if(NOT deps) + if(TARGET ${m}) + get_target_property(deps ${m} INTERFACE_LINK_LIBRARIES) + if(NOT deps) + set(deps "") + endif() + else() set(deps "") endif() list(INSERT ${_modules} 0 ${deps} ${m}) @@ -824,7 +828,7 @@ macro(ocv_get_all_libs _modules _extra _3rdparty) endif() # split 3rdparty libs and modules - list(REMOVE_ITEM ${_modules} ${${_3rdparty}} ${${_extra}}) + list(REMOVE_ITEM ${_modules} ${${_3rdparty}} ${${_extra}} non_empty_list) # convert CMake lists to makefile literals foreach(lst ${_modules} ${_3rdparty} ${_extra}) @@ -832,3 +836,76 @@ macro(ocv_get_all_libs _modules _extra _3rdparty) ocv_list_reverse(${lst}) endforeach() endmacro() + +function(ocv_download) + cmake_parse_arguments(DL "" "PACKAGE;HASH;URL;DESTINATION_DIR;DOWNLOAD_DIR" "" ${ARGN}) + if(NOT DL_DOWNLOAD_DIR) + set(DL_DOWNLOAD_DIR "${DL_DESTINATION_DIR}/downloads") + endif() + if(DEFINED DL_DESTINATION_DIR) + set(DESTINATION_TARGET "${DL_DESTINATION_DIR}/${DL_PACKAGE}") + if(EXISTS "${DESTINATION_TARGET}") + file(MD5 "${DESTINATION_TARGET}" target_md5) + if(NOT target_md5 STREQUAL DL_HASH) + file(REMOVE "${DESTINATION_TARGET}") + else() + set(DOWNLOAD_PACKAGE_LOCATION "" PARENT_SCOPE) + unset(DOWNLOAD_PACKAGE_LOCATION) + return() + endif() + endif() + endif() + set(DOWNLOAD_TARGET "${DL_DOWNLOAD_DIR}/${DL_HASH}/${DL_PACKAGE}") + get_filename_component(DOWNLOAD_TARGET_DIR "${DOWNLOAD_TARGET}" PATH) + if(EXISTS "${DOWNLOAD_TARGET}") + file(MD5 "${DOWNLOAD_TARGET}" target_md5) + if(NOT target_md5 STREQUAL DL_HASH) + message(WARNING "Download: Local copy of ${DL_PACKAGE} has invalid MD5 hash: ${target_md5} (expected: ${DL_HASH})") + file(REMOVE "${DOWNLOAD_TARGET}") + file(REMOVE_RECURSE "${DOWNLOAD_TARGET_DIR}") + endif() + endif() + + if(NOT EXISTS "${DOWNLOAD_TARGET}") + set(__url "") + foreach(__url_i ${DL_URL}) + if(NOT ("${__url_i}" STREQUAL "")) + set(__url "${__url_i}") + break() + endif() + endforeach() + if("${__url}" STREQUAL "") + message(FATAL_ERROR "Download URL is not specified for package ${DL_PACKAGE}") + endif() + + if(NOT EXISTS 
"${DOWNLOAD_TARGET_DIR}") + file(MAKE_DIRECTORY ${DOWNLOAD_TARGET_DIR}) + endif() + message(STATUS "Downloading ${DL_PACKAGE}...") + #message(STATUS " ${__url}${DL_PACKAGE}") + file(DOWNLOAD "${__url}${DL_PACKAGE}" "${DOWNLOAD_TARGET}" + TIMEOUT 600 STATUS __status + EXPECTED_MD5 ${DL_HASH}) + if(NOT __status EQUAL 0) + message(FATAL_ERROR "Failed to download ${DL_PACKAGE}. Status=${__status}") + else() + # Don't remove this code, because EXPECTED_MD5 parameter doesn't fail "file(DOWNLOAD)" step on wrong hash + file(MD5 "${DOWNLOAD_TARGET}" target_md5) + if(NOT target_md5 STREQUAL DL_HASH) + message(FATAL_ERROR "Downloaded copy of ${DL_PACKAGE} has invalid MD5 hash: ${target_md5} (expected: ${DL_HASH})") + endif() + endif() + message(STATUS "Downloading ${DL_PACKAGE}... Done") + endif() + + if(DEFINED DL_DESTINATION_DIR) + execute_process(COMMAND ${CMAKE_COMMAND} -E copy_if_different "${DOWNLOAD_TARGET}" "${DL_DESTINATION_DIR}/" + RESULT_VARIABLE __result) + + if(NOT __result EQUAL 0) + message(FATAL_ERROR "Downloader: Failed to copy package from ${DOWNLOAD_TARGET} to ${DL_DESTINATION_DIR} with error ${__result}") + endif() + endif() + + set(DOWNLOAD_PACKAGE_LOCATION ${DOWNLOAD_TARGET} PARENT_SCOPE) +endfunction() diff --git a/cmake/cl2cpp.cmake b/cmake/cl2cpp.cmake index 700f12fb5c..c0e211b900 100644 --- a/cmake/cl2cpp.cmake +++ b/cmake/cl2cpp.cmake @@ -1,6 +1,14 @@ +if (NOT EXISTS "${CL_DIR}") + message(FATAL_ERROR "Specified wrong OpenCL kernels directory: ${CL_DIR}") +endif() + file(GLOB cl_list "${CL_DIR}/*.cl" ) list(SORT cl_list) +if (NOT cl_list) + message(FATAL_ERROR "Can't find OpenCL kernels in directory: ${CL_DIR}") +endif() + string(REPLACE ".cpp" ".hpp" OUTPUT_HPP "${OUTPUT}") get_filename_component(OUTPUT_HPP_NAME "${OUTPUT_HPP}" NAME) diff --git a/cmake/templates/OpenCV-abi.mk.in b/cmake/templates/OpenCV-abi.mk.in new file mode 100644 index 0000000000..41d5054504 --- /dev/null +++ b/cmake/templates/OpenCV-abi.mk.in @@ -0,0 +1,2 @@ +OPENCV_3RDPARTY_COMPONENTS:=@OPENCV_3RDPARTY_COMPONENTS_CONFIGMAKE@ +OPENCV_EXTRA_COMPONENTS:=@OPENCV_EXTRA_COMPONENTS_CONFIGMAKE@ diff --git a/cmake/templates/OpenCV.mk.in b/cmake/templates/OpenCV.mk.in index acbb763c94..e99b1ad8bd 100644 --- a/cmake/templates/OpenCV.mk.in +++ b/cmake/templates/OpenCV.mk.in @@ -19,6 +19,7 @@ OPENCV_3RDPARTY_LIBS_DIR:=@OPENCV_3RDPARTY_LIBS_DIR_CONFIGCMAKE@ OPENCV_BASEDIR:=@OPENCV_BASE_INCLUDE_DIR_CONFIGCMAKE@ OPENCV_LOCAL_C_INCLUDES:=@OPENCV_INCLUDE_DIRS_CONFIGCMAKE@ OPENCV_MODULES:=@OPENCV_MODULES_CONFIGMAKE@ +OPENCV_SUB_MK:=$(call my-dir)/OpenCV-$(TARGET_ARCH_ABI).mk ifeq ($(OPENCV_LIB_TYPE),) OPENCV_LIB_TYPE:=@OPENCV_LIBTYPE_CONFIGMAKE@ @@ -36,22 +37,7 @@ ifeq ($(OPENCV_LIB_TYPE),SHARED) OPENCV_3RDPARTY_COMPONENTS:= OPENCV_EXTRA_COMPONENTS:= else - ifeq ($(TARGET_ARCH_ABI),armeabi-v7a) - OPENCV_3RDPARTY_COMPONENTS:=@OPENCV_3RDPARTY_COMPONENTS_CONFIGMAKE@ - OPENCV_EXTRA_COMPONENTS:=@OPENCV_EXTRA_COMPONENTS_CONFIGMAKE@ - endif - ifeq ($(TARGET_ARCH_ABI),x86) - OPENCV_3RDPARTY_COMPONENTS:=@OPENCV_3RDPARTY_COMPONENTS_CONFIGMAKE@ - OPENCV_EXTRA_COMPONENTS:=@OPENCV_EXTRA_COMPONENTS_CONFIGMAKE@ - endif - ifeq ($(TARGET_ARCH_ABI),armeabi) - OPENCV_3RDPARTY_COMPONENTS:=@OPENCV_3RDPARTY_COMPONENTS_CONFIGMAKE_NO_TBB@ - OPENCV_EXTRA_COMPONENTS:=@OPENCV_EXTRA_COMPONENTS_CONFIGMAKE@ - endif - ifeq ($(TARGET_ARCH_ABI),mips) - OPENCV_3RDPARTY_COMPONENTS:=@OPENCV_3RDPARTY_COMPONENTS_CONFIGMAKE@ - OPENCV_EXTRA_COMPONENTS:=@OPENCV_EXTRA_COMPONENTS_CONFIGMAKE@ - endif + include $(OPENCV_SUB_MK) endif ifeq 
($(OPENCV_LIB_TYPE),SHARED) diff --git a/cmake/templates/OpenCVConfig.cmake.in b/cmake/templates/OpenCVConfig.cmake.in index e5904aba37..ee57ecda63 100644 --- a/cmake/templates/OpenCVConfig.cmake.in +++ b/cmake/templates/OpenCVConfig.cmake.in @@ -7,7 +7,7 @@ # In your CMakeLists.txt, add these lines: # # find_package(OpenCV REQUIRED) -# include_directories(${OpenCV_INCLUDE_DIRS}) +# include_directories(${OpenCV_INCLUDE_DIRS}) # Not needed for CMake >= 2.8.11 # target_link_libraries(MY_TARGET_NAME ${OpenCV_LIBS}) # # Or you can search for specific OpenCV modules: @@ -177,6 +177,20 @@ if(OpenCV2_INCLUDE_DIRS) endif() endif() +if(NOT CMAKE_VERSION VERSION_LESS "2.8.11") + # Target property INTERFACE_INCLUDE_DIRECTORIES available since 2.8.11: + # * http://www.cmake.org/cmake/help/v2.8.11/cmake.html#prop_tgt:INTERFACE_INCLUDE_DIRECTORIES + foreach(__component ${OpenCV_LIB_COMPONENTS}) + if(TARGET ${__component}) + set_target_properties( + ${__component} + PROPERTIES + INTERFACE_INCLUDE_DIRECTORIES "${OpenCV_INCLUDE_DIRS}" + ) + endif() + endforeach() +endif() + # ============================================================== # Check OpenCV availability # ============================================================== diff --git a/cmake/templates/cvconfig.h.in b/cmake/templates/cvconfig.h.in index 4a1d1c632c..3330774c14 100644 --- a/cmake/templates/cvconfig.h.in +++ b/cmake/templates/cvconfig.h.in @@ -139,6 +139,12 @@ /* PNG codec */ #cmakedefine HAVE_PNG +/* Posix threads (pthreads) */ +#cmakedefine HAVE_PTHREADS + +/* parallel_for with pthreads */ +#cmakedefine HAVE_PTHREADS_PF + /* Qt support */ #cmakedefine HAVE_QT diff --git a/doc/pattern_tools/gen_pattern.py b/doc/pattern_tools/gen_pattern.py index fc1e74bbc3..ebeeb123fe 100755 --- a/doc/pattern_tools/gen_pattern.py +++ b/doc/pattern_tools/gen_pattern.py @@ -3,7 +3,6 @@ """gen_pattern.py Usage example: python gen_pattern.py -o out.svg -r 11 -c 8 -T circles -s 20.0 -R 5.0 -u mm -w 216 -h 279 - -o, --output - output file (default out.svg) -r, --rows - pattern rows (default 11) -c, --columns - pattern columns (default 8) @@ -13,6 +12,7 @@ python gen_pattern.py -o out.svg -r 11 -c 8 -T circles -s 20.0 -R 5.0 -u mm -w 2 -u, --units - mm, inches, px, m (default mm) -w, --page_width - page width in units (default 216) -h, --page_height - page height in units (default 279) +-a, --page_size - page size (default A4), supercedes -h -w arguments -H, --help - show help """ @@ -51,11 +51,13 @@ class PatternMaker: def makeCheckerboardPattern(self): spacing = self.square_size - for x in range(1,self.cols+1): - for y in range(1,self.rows+1): + xspacing = (self.width - self.cols * self.square_size) / 2.0 + yspacing = (self.height - self.rows * self.square_size) / 2.0 + for x in range(0,self.cols): + for y in range(0,self.rows): if x%2 == y%2: - dot = SVG("rect", x=x * spacing, y=y * spacing, width=spacing, height=spacing, stroke_width="0", fill="black") - self.g.append(dot) + square = SVG("rect", x=x * spacing + xspacing, y=y * spacing + yspacing, width=spacing, height=spacing, fill="black") + self.g.append(square) def save(self): c = canvas(self.g,width="%d%s"%(self.width,self.units),height="%d%s"%(self.height,self.units),viewBox="0 0 %d %d"%(self.width,self.height)) @@ -65,9 +67,9 @@ class PatternMaker: def main(): # parse command line options, TODO use argparse for better doc try: - opts, args = getopt.getopt(sys.argv[1:], "Ho:c:r:T:u:s:R:w:h:", ["help","output=","columns=","rows=", + opts, args = getopt.getopt(sys.argv[1:], "Ho:c:r:T:u:s:R:w:h:a:", 
["help","output=","columns=","rows=", "type=","units=","square_size=","radius_rate=", - "page_width=","page_height="]) + "page_width=","page_height=", "page_size="]) except getopt.error, msg: print msg print "for help use --help" @@ -79,8 +81,11 @@ def main(): units = "mm" square_size = 20.0 radius_rate = 5.0 - page_width = 216 #8.5 inches - page_height = 279 #11 inches + page_size = "A4" + # page size dict (ISO standard, mm) for easy lookup. format - size: [width, height] + page_sizes = {"A0": [840, 1188], "A1": [594, 840], "A2": [420, 594], "A3": [297, 420], "A4": [210, 297], "A5": [148, 210]} + page_width = page_sizes[page_size.upper()][0] + page_height = page_sizes[page_size.upper()][1] # process options for o, a in opts: if o in ("-H", "--help"): @@ -104,6 +109,11 @@ def main(): page_width = float(a) elif o in ("-h", "--page_height"): page_height = float(a) + elif o in ("-a", "--page_size"): + units = "mm" + page_size = a.upper() + page_width = page_sizes[page_size][0] + page_height = page_sizes[page_size][1] pm = PatternMaker(columns,rows,output,units,square_size,radius_rate,page_width,page_height) #dict for easy lookup of pattern type mp = {"circles":pm.makeCirclesPattern,"acircles":pm.makeACirclesPattern,"checkerboard":pm.makeCheckerboardPattern} diff --git a/modules/calib3d/include/opencv2/calib3d.hpp b/modules/calib3d/include/opencv2/calib3d.hpp index 66ffe2c1d3..0504abf666 100644 --- a/modules/calib3d/include/opencv2/calib3d.hpp +++ b/modules/calib3d/include/opencv2/calib3d.hpp @@ -444,7 +444,7 @@ vector\ ), where N is the number of points in the view. @param cameraMatrix Camera matrix \f$A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{_1}\f$ . @param distCoeffs Input vector of distortion coefficients \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])\f$ of 4, 5, 8 or 12 elements. If -the vector is NULL/empty, the zero distortion coefficients are assumed. +the vector is empty, the zero distortion coefficients are assumed. @param imagePoints Output array of image points, 2xN/Nx2 1-channel or 1xN/Nx1 2-channel, or vector\ . @param jacobian Optional output 2Nx(10+\) jacobian matrix of derivatives of image @@ -1187,7 +1187,7 @@ are feature points from cameras with same focal length and principle point. @param pp principle point of the camera. @param method Method for computing a fundamental matrix. - **RANSAC** for the RANSAC algorithm. -- **MEDS** for the LMedS algorithm. +- **LMEDS** for the LMedS algorithm. @param threshold Parameter used for RANSAC. It is the maximum distance from a point to an epipolar line in pixels, beyond which the point is considered an outlier and is not used for computing the final fundamental matrix. 
It can be set to something like 1-3, depending on the accuracy of the @@ -1555,7 +1555,8 @@ public: enum { MODE_SGBM = 0, - MODE_HH = 1 + MODE_HH = 1, + MODE_SGBM_3WAY = 2 }; CV_WRAP virtual int getPreFilterCap() const = 0; diff --git a/modules/calib3d/perf/perf_pnp.cpp b/modules/calib3d/perf/perf_pnp.cpp index e5a92bf1c4..a980655e8e 100644 --- a/modules/calib3d/perf/perf_pnp.cpp +++ b/modules/calib3d/perf/perf_pnp.cpp @@ -90,6 +90,11 @@ PERF_TEST_P(PointsNum_Algo, solvePnPSmallPoints, warmup(rvec, WARMUP_RNG); warmup(tvec, WARMUP_RNG); + // normalize Rodrigues vector + Mat rvec_tmp = Mat::eye(3, 3, CV_32F); + Rodrigues(rvec, rvec_tmp); + Rodrigues(rvec_tmp, rvec); + projectPoints(points3d, rvec, tvec, intrinsics, distortion, points2d); //add noise diff --git a/modules/calib3d/perf/perf_stereosgbm.cpp b/modules/calib3d/perf/perf_stereosgbm.cpp new file mode 100644 index 0000000000..8dc71625da --- /dev/null +++ b/modules/calib3d/perf/perf_stereosgbm.cpp @@ -0,0 +1,159 @@ +/* + * By downloading, copying, installing or using the software you agree to this license. + * If you do not agree to this license, do not download, install, + * copy or use the software. + * + * + * License Agreement + * For Open Source Computer Vision Library + * (3 - clause BSD License) + * + * Redistribution and use in source and binary forms, with or without modification, + * are permitted provided that the following conditions are met : + * + * *Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and / or other materials provided with the distribution. + * + * * Neither the names of the copyright holders nor the names of the contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * This software is provided by the copyright holders and contributors "as is" and + * any express or implied warranties, including, but not limited to, the implied + * warranties of merchantability and fitness for a particular purpose are disclaimed. + * In no event shall copyright holders or contributors be liable for any direct, + * indirect, incidental, special, exemplary, or consequential damages + * (including, but not limited to, procurement of substitute goods or services; + * loss of use, data, or profits; or business interruption) however caused + * and on any theory of liability, whether in contract, strict liability, + * or tort(including negligence or otherwise) arising in any way out of + * the use of this software, even if advised of the possibility of such damage. 
+ */ + +#include "perf_precomp.hpp" + +namespace cvtest +{ + +using std::tr1::tuple; +using std::tr1::get; +using namespace perf; +using namespace testing; +using namespace cv; + +void MakeArtificialExample(RNG rng, Mat& dst_left_view, Mat& dst_view); + +CV_ENUM(SGBMModes, StereoSGBM::MODE_SGBM, StereoSGBM::MODE_SGBM_3WAY); +typedef tuple SGBMParams; +typedef TestBaseWithParam TestStereoCorresp; + +PERF_TEST_P( TestStereoCorresp, SGBM, Combine(Values(Size(1280,720),Size(640,480)), Values(256,128), SGBMModes::all()) ) +{ + RNG rng(0); + + SGBMParams params = GetParam(); + + Size sz = get<0>(params); + int num_disparities = get<1>(params); + int mode = get<2>(params); + + Mat src_left(sz, CV_8UC3); + Mat src_right(sz, CV_8UC3); + Mat dst(sz, CV_16S); + + MakeArtificialExample(rng,src_left,src_right); + + cv::setNumThreads(cv::getNumberOfCPUs()); + int wsize = 3; + int P1 = 8*src_left.channels()*wsize*wsize; + TEST_CYCLE() + { + Ptr sgbm = StereoSGBM::create(0,num_disparities,wsize,P1,4*P1,1,63,25,0,0,mode); + sgbm->compute(src_left,src_right,dst); + } + + SANITY_CHECK(dst, .01, ERROR_RELATIVE); +} + +void MakeArtificialExample(RNG rng, Mat& dst_left_view, Mat& dst_right_view) +{ + int w = dst_left_view.cols; + int h = dst_left_view.rows; + + //params: + unsigned char bg_level = (unsigned char)rng.uniform(0.0,255.0); + unsigned char fg_level = (unsigned char)rng.uniform(0.0,255.0); + int rect_width = (int)rng.uniform(w/16,w/2); + int rect_height = (int)rng.uniform(h/16,h/2); + int rect_disparity = (int)(0.15*w); + double sigma = 3.0; + + int rect_x_offset = (w-rect_width) /2; + int rect_y_offset = (h-rect_height)/2; + + if(dst_left_view.channels()==3) + { + dst_left_view = Scalar(Vec3b(bg_level,bg_level,bg_level)); + dst_right_view = Scalar(Vec3b(bg_level,bg_level,bg_level)); + } + else + { + dst_left_view = Scalar(bg_level); + dst_right_view = Scalar(bg_level); + } + + Mat dst_left_view_rect = Mat(dst_left_view, Rect(rect_x_offset,rect_y_offset,rect_width,rect_height)); + if(dst_left_view.channels()==3) + dst_left_view_rect = Scalar(Vec3b(fg_level,fg_level,fg_level)); + else + dst_left_view_rect = Scalar(fg_level); + + rect_x_offset-=rect_disparity; + + Mat dst_right_view_rect = Mat(dst_right_view, Rect(rect_x_offset,rect_y_offset,rect_width,rect_height)); + if(dst_right_view.channels()==3) + dst_right_view_rect = Scalar(Vec3b(fg_level,fg_level,fg_level)); + else + dst_right_view_rect = Scalar(fg_level); + + //add some gaussian noise: + unsigned char *l, *r; + for(int i=0;i(l[0] + rng.gaussian(sigma)); + l[1] = saturate_cast(l[1] + rng.gaussian(sigma)); + l[2] = saturate_cast(l[2] + rng.gaussian(sigma)); + l+=3; + + r[0] = saturate_cast(r[0] + rng.gaussian(sigma)); + r[1] = saturate_cast(r[1] + rng.gaussian(sigma)); + r[2] = saturate_cast(r[2] + rng.gaussian(sigma)); + r+=3; + } + } + else + { + for(int j=0;j(l[0] + rng.gaussian(sigma)); + l++; + + r[0] = saturate_cast(r[0] + rng.gaussian(sigma)); + r++; + } + } + } +} + +} diff --git a/modules/calib3d/src/calibration.cpp b/modules/calib3d/src/calibration.cpp index 98deab6396..5dcd274e34 100644 --- a/modules/calib3d/src/calibration.cpp +++ b/modules/calib3d/src/calibration.cpp @@ -2194,7 +2194,7 @@ void cvStereoRectify( const CvMat* _cameraMatrix1, const CvMat* _cameraMatrix2, for( k = 0; k < 2; k++ ) { const CvMat* A = k == 0 ? _cameraMatrix1 : _cameraMatrix2; const CvMat* Dk = k == 0 ? _distCoeffs1 : _distCoeffs2; - double dk1 = Dk ? cvmGet(Dk, 0, 0) : 0; + double dk1 = Dk && Dk->data.ptr ? 
cvmGet(Dk, 0, 0) : 0; double fc = cvmGet(A,idx^1,idx^1); if( dk1 < 0 ) { fc *= 1 + dk1*(nx*nx + ny*ny)/(4*fc*fc); @@ -3372,7 +3372,9 @@ void cv::stereoRectify( InputArray _cameraMatrix1, InputArray _distCoeffs1, p_Q = &(c_Q = _Qmat.getMat()); } - cvStereoRectify( &c_cameraMatrix1, &c_cameraMatrix2, &c_distCoeffs1, &c_distCoeffs2, + CvMat *p_distCoeffs1 = distCoeffs1.empty() ? NULL : &c_distCoeffs1; + CvMat *p_distCoeffs2 = distCoeffs2.empty() ? NULL : &c_distCoeffs2; + cvStereoRectify( &c_cameraMatrix1, &c_cameraMatrix2, p_distCoeffs1, p_distCoeffs2, imageSize, &c_R, &c_T, &c_R1, &c_R2, &c_P1, &c_P2, p_Q, flags, alpha, newImageSize, (CvRect*)validPixROI1, (CvRect*)validPixROI2); } diff --git a/modules/calib3d/src/fundam.cpp b/modules/calib3d/src/fundam.cpp index 230182e8c9..5d7e706a41 100644 --- a/modules/calib3d/src/fundam.cpp +++ b/modules/calib3d/src/fundam.cpp @@ -547,45 +547,32 @@ static int run7Point( const Mat& _m1, const Mat& _m2, Mat& _fmatrix ) static int run8Point( const Mat& _m1, const Mat& _m2, Mat& _fmatrix ) { - double a[9*9], w[9], v[9*9]; - Mat W( 9, 1, CV_64F, w ); - Mat V( 9, 9, CV_64F, v ); - Mat A( 9, 9, CV_64F, a ); - Mat U, F0, TF; - Point2d m1c(0,0), m2c(0,0); double t, scale1 = 0, scale2 = 0; const Point2f* m1 = _m1.ptr(); const Point2f* m2 = _m2.ptr(); - double* fmatrix = _fmatrix.ptr(); CV_Assert( (_m1.cols == 1 || _m1.rows == 1) && _m1.size() == _m2.size()); - int i, j, k, count = _m1.checkVector(2); + int i, count = _m1.checkVector(2); // compute centers and average distances for each of the two point sets for( i = 0; i < count; i++ ) { - double x = m1[i].x, y = m1[i].y; - m1c.x += x; m1c.y += y; - - x = m2[i].x, y = m2[i].y; - m2c.x += x; m2c.y += y; + m1c += Point2d(m1[i]); + m2c += Point2d(m2[i]); } // calculate the normalizing transformations for each of the point sets: // after the transformation each set will have the mass center at the coordinate origin // and the average distance from the origin will be ~sqrt(2). 
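// (This is the isotropic point normalization from Hartley's "In Defence of the Eight-Point Algorithm":
// shifting each point set to its centroid and scaling it so the mean distance from the origin is ~sqrt(2)
// keeps the 9x9 system accumulated in A below well conditioned before its eigen/SVD decomposition.)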
t = 1./count; - m1c.x *= t; m1c.y *= t; - m2c.x *= t; m2c.y *= t; + m1c *= t; + m2c *= t; for( i = 0; i < count; i++ ) { - double x = m1[i].x - m1c.x, y = m1[i].y - m1c.y; - scale1 += std::sqrt(x*x + y*y); - - x = m2[i].x - m2c.x, y = m2[i].y - m2c.y; - scale2 += std::sqrt(x*x + y*y); + scale1 += norm(Point2d(m1[i].x - m1c.x, m1[i].y - m1c.y)); + scale2 += norm(Point2d(m2[i].x - m2c.x, m2[i].y - m2c.y)); } scale1 *= t; @@ -597,7 +584,7 @@ static int run8Point( const Mat& _m1, const Mat& _m2, Mat& _fmatrix ) scale1 = std::sqrt(2.)/scale1; scale2 = std::sqrt(2.)/scale2; - A.setTo(Scalar::all(0)); + Matx A; // form a linear system Ax=0: for each selected pair of points m1 & m2, // the row of A(=a) represents the coefficients of equation: (m2, 1)'*F*(m1, 1) = 0 @@ -608,56 +595,50 @@ static int run8Point( const Mat& _m1, const Mat& _m2, Mat& _fmatrix ) double y1 = (m1[i].y - m1c.y)*scale1; double x2 = (m2[i].x - m2c.x)*scale2; double y2 = (m2[i].y - m2c.y)*scale2; - double r[9] = { x2*x1, x2*y1, x2, y2*x1, y2*y1, y2, x1, y1, 1 }; - for( j = 0; j < 9; j++ ) - for( k = 0; k < 9; k++ ) - a[j*9+k] += r[j]*r[k]; + Vec r( x2*x1, x2*y1, x2, y2*x1, y2*y1, y2, x1, y1, 1 ); + A += r*r.t(); } + Vec W; + Matx V; + eigen(A, W, V); for( i = 0; i < 9; i++ ) { - if( fabs(w[i]) < DBL_EPSILON ) + if( fabs(W[i]) < DBL_EPSILON ) break; } if( i < 8 ) return 0; - F0 = Mat( 3, 3, CV_64F, v + 9*8 ); // take the last column of v as a solution of Af = 0 + Matx33d F0( V.val + 9*8 ); // take the last column of v as a solution of Af = 0 // make F0 singular (of rank 2) by decomposing it with SVD, // zeroing the last diagonal element of W and then composing the matrices back. - // use v as a temporary storage for different 3x3 matrices - W = U = V = TF = F0; - W = Mat(3, 1, CV_64F, v); - U = Mat(3, 3, CV_64F, v + 9); - V = Mat(3, 3, CV_64F, v + 18); - TF = Mat(3, 3, CV_64F, v + 27); + Vec3d w; + Matx33d U; + Matx33d Vt; - SVDecomp( F0, W, U, V, SVD::MODIFY_A ); - W.at(2) = 0.; + SVD::compute( F0, w, U, Vt); + w[2] = 0.; - // F0 <- U*diag([W(1), W(2), 0])*V' - gemm( U, Mat::diag(W), 1., 0, 0., TF, 0 ); - gemm( TF, V, 1., 0, 0., F0, 0/*CV_GEMM_B_T*/ ); + F0 = U*Matx33d::diag(w)*Vt; // apply the transformation that is inverse // to what we used to normalize the point coordinates - double tt1[] = { scale1, 0, -scale1*m1c.x, 0, scale1, -scale1*m1c.y, 0, 0, 1 }; - double tt2[] = { scale2, 0, -scale2*m2c.x, 0, scale2, -scale2*m2c.y, 0, 0, 1 }; - Mat T1(3, 3, CV_64F, tt1), T2(3, 3, CV_64F, tt2); + Matx33d T1( scale1, 0, -scale1*m1c.x, 0, scale1, -scale1*m1c.y, 0, 0, 1 ); + Matx33d T2( scale2, 0, -scale2*m2c.x, 0, scale2, -scale2*m2c.y, 0, 0, 1 ); - // F0 <- T2'*F0*T1 - gemm( T2, F0, 1., 0, 0., TF, GEMM_1_T ); - F0 = Mat(3, 3, CV_64F, fmatrix); - gemm( TF, T1, 1., 0, 0., F0, 0 ); + F0 = T2.t()*F0*T1; // make F(3,3) = 1 - if( fabs(F0.at(2,2)) > FLT_EPSILON ) - F0 *= 1./F0.at(2,2); + if( fabs(F0(2,2)) > FLT_EPSILON ) + F0 *= 1./F0(2,2); + + Mat(F0).copyTo(_fmatrix); return 1; } diff --git a/modules/calib3d/src/stereobm.cpp b/modules/calib3d/src/stereobm.cpp index 78152b33d5..760abefa84 100644 --- a/modules/calib3d/src/stereobm.cpp +++ b/modules/calib3d/src/stereobm.cpp @@ -367,7 +367,7 @@ static void findStereoCorrespondenceBM_SSE2( const Mat& left, const Mat& right, { hsad = hsad0 - dy0*ndisp; cbuf = cbuf0 + (x + wsz2 + 1)*cstep - dy0*ndisp; lptr = lptr0 + MIN(MAX(x, -lofs), width-lofs-1) - dy0*sstep; - rptr = rptr0 + MIN(MAX(x, -rofs), width-rofs-1) - dy0*sstep; + rptr = rptr0 + MIN(MAX(x, -rofs), width-rofs-ndisp) - dy0*sstep; for( 
y = -dy0; y < height + dy1; y++, hsad += ndisp, cbuf += ndisp, lptr += sstep, rptr += sstep ) { @@ -408,7 +408,7 @@ static void findStereoCorrespondenceBM_SSE2( const Mat& left, const Mat& right, hsad = hsad0 - dy0*ndisp; lptr_sub = lptr0 + MIN(MAX(x0, -lofs), width-1-lofs) - dy0*sstep; lptr = lptr0 + MIN(MAX(x1, -lofs), width-1-lofs) - dy0*sstep; - rptr = rptr0 + MIN(MAX(x1, -rofs), width-1-rofs) - dy0*sstep; + rptr = rptr0 + MIN(MAX(x1, -rofs), width-ndisp-rofs) - dy0*sstep; for( y = -dy0; y < height + dy1; y++, cbuf += ndisp, cbuf_sub += ndisp, hsad += ndisp, lptr += sstep, lptr_sub += sstep, rptr += sstep ) @@ -624,7 +624,7 @@ findStereoCorrespondenceBM( const Mat& left, const Mat& right, { hsad = hsad0 - dy0*ndisp; cbuf = cbuf0 + (x + wsz2 + 1)*cstep - dy0*ndisp; lptr = lptr0 + std::min(std::max(x, -lofs), width-lofs-1) - dy0*sstep; - rptr = rptr0 + std::min(std::max(x, -rofs), width-rofs-1) - dy0*sstep; + rptr = rptr0 + std::min(std::max(x, -rofs), width-rofs-ndisp) - dy0*sstep; for( y = -dy0; y < height + dy1; y++, hsad += ndisp, cbuf += ndisp, lptr += sstep, rptr += sstep ) { int lval = lptr[0]; @@ -674,7 +674,7 @@ findStereoCorrespondenceBM( const Mat& left, const Mat& right, hsad = hsad0 - dy0*ndisp; lptr_sub = lptr0 + MIN(MAX(x0, -lofs), width-1-lofs) - dy0*sstep; lptr = lptr0 + MIN(MAX(x1, -lofs), width-1-lofs) - dy0*sstep; - rptr = rptr0 + MIN(MAX(x1, -rofs), width-1-rofs) - dy0*sstep; + rptr = rptr0 + MIN(MAX(x1, -rofs), width-ndisp-rofs) - dy0*sstep; for( y = -dy0; y < height + dy1; y++, cbuf += ndisp, cbuf_sub += ndisp, hsad += ndisp, lptr += sstep, lptr_sub += sstep, rptr += sstep ) diff --git a/modules/calib3d/src/stereosgbm.cpp b/modules/calib3d/src/stereosgbm.cpp index 4b0aa5a25b..9dab0a82c5 100644 --- a/modules/calib3d/src/stereosgbm.cpp +++ b/modules/calib3d/src/stereosgbm.cpp @@ -52,6 +52,7 @@ #include "precomp.hpp" #include +#include "opencv2/hal/intrin.hpp" namespace cv { @@ -110,7 +111,7 @@ struct StereoSGBMParams }; /* - For each pixel row1[x], max(-maxD, 0) <= minX <= x < maxX <= width - max(0, -minD), + For each pixel row1[x], max(maxD, 0) <= minX <= x < maxX <= width - max(0, -minD), and for each disparity minD<=d(y), *row2 = img2.ptr(y); @@ -180,10 +181,6 @@ static void calcPixelCostBT( const Mat& img1, const Mat& img2, int y, buffer -= minX2; cost -= minX1*D + minD; // simplify the cost indices inside the loop -#if CV_SSE2 - volatile bool useSIMD = checkHardwareSupport(CV_CPU_SSE2); -#endif - #if 1 for( c = 0; c < cn*2; c++, prow1 += width, prow2 += width ) { @@ -211,43 +208,39 @@ static void calcPixelCostBT( const Mat& img1, const Mat& img2, int y, int u0 = std::min(ul, ur); u0 = std::min(u0, u); int u1 = std::max(ul, ur); u1 = std::max(u1, u); - #if CV_SSE2 - if( useSIMD ) + #if CV_SIMD128 + v_uint8x16 _u = v_setall_u8((uchar)u), _u0 = v_setall_u8((uchar)u0); + v_uint8x16 _u1 = v_setall_u8((uchar)u1); + + for( int d = minD; d < maxD; d += 16 ) { - __m128i _u = _mm_set1_epi8((char)u), _u0 = _mm_set1_epi8((char)u0); - __m128i _u1 = _mm_set1_epi8((char)u1), z = _mm_setzero_si128(); - __m128i ds = _mm_cvtsi32_si128(diff_scale); + v_uint8x16 _v = v_load(prow2 + width-x-1 + d); + v_uint8x16 _v0 = v_load(buffer + width-x-1 + d); + v_uint8x16 _v1 = v_load(buffer + width-x-1 + d + width2); + v_uint8x16 c0 = v_max(_u - _v1, _v0 - _u); + v_uint8x16 c1 = v_max(_v - _u1, _u0 - _v); + v_uint8x16 diff = v_min(c0, c1); - for( int d = minD; d < maxD; d += 16 ) - { - __m128i _v = _mm_loadu_si128((const __m128i*)(prow2 + width-x-1 + d)); - __m128i _v0 = 
_mm_loadu_si128((const __m128i*)(buffer + width-x-1 + d)); - __m128i _v1 = _mm_loadu_si128((const __m128i*)(buffer + width-x-1 + d + width2)); - __m128i c0 = _mm_max_epu8(_mm_subs_epu8(_u, _v1), _mm_subs_epu8(_v0, _u)); - __m128i c1 = _mm_max_epu8(_mm_subs_epu8(_v, _u1), _mm_subs_epu8(_u0, _v)); - __m128i diff = _mm_min_epu8(c0, c1); + v_int16x8 _c0 = v_load_aligned(cost + x*D + d); + v_int16x8 _c1 = v_load_aligned(cost + x*D + d + 8); - c0 = _mm_load_si128((__m128i*)(cost + x*D + d)); - c1 = _mm_load_si128((__m128i*)(cost + x*D + d + 8)); - - _mm_store_si128((__m128i*)(cost + x*D + d), _mm_adds_epi16(c0, _mm_srl_epi16(_mm_unpacklo_epi8(diff,z), ds))); - _mm_store_si128((__m128i*)(cost + x*D + d + 8), _mm_adds_epi16(c1, _mm_srl_epi16(_mm_unpackhi_epi8(diff,z), ds))); - } + v_uint16x8 diff1,diff2; + v_expand(diff,diff1,diff2); + v_store_aligned(cost + x*D + d, _c0 + v_reinterpret_as_s16(diff1 >> diff_scale)); + v_store_aligned(cost + x*D + d + 8, _c1 + v_reinterpret_as_s16(diff2 >> diff_scale)); + } + #else + for( int d = minD; d < maxD; d++ ) + { + int v = prow2[width-x-1 + d]; + int v0 = buffer[width-x-1 + d]; + int v1 = buffer[width-x-1 + d + width2]; + int c0 = std::max(0, u - v1); c0 = std::max(c0, v0 - u); + int c1 = std::max(0, v - u1); c1 = std::max(c1, u0 - v); + + cost[x*D + d] = (CostType)(cost[x*D+d] + (std::min(c0, c1) >> diff_scale)); } - else #endif - { - for( int d = minD; d < maxD; d++ ) - { - int v = prow2[width-x-1 + d]; - int v0 = buffer[width-x-1 + d]; - int v1 = buffer[width-x-1 + d + width2]; - int c0 = std::max(0, u - v1); c0 = std::max(c0, v0 - u); - int c1 = std::max(0, v - u1); c1 = std::max(c1, u0 - v); - - cost[x*D + d] = (CostType)(cost[x*D+d] + (std::min(c0, c1) >> diff_scale)); - } - } } } #else @@ -340,7 +333,7 @@ static void computeDisparitySGBM( const Mat& img1, const Mat& img2, int disp12MaxDiff = params.disp12MaxDiff > 0 ? params.disp12MaxDiff : 1; int P1 = params.P1 > 0 ? params.P1 : 2, P2 = std::max(params.P2 > 0 ? 
params.P2 : 5, P1+1); int k, width = disp1.cols, height = disp1.rows; - int minX1 = std::max(-maxD, 0), maxX1 = width + std::min(minD, 0); + int minX1 = std::max(maxD, 0), maxX1 = width + std::min(minD, 0); int D = maxD - minD, width1 = maxX1 - minX1; int INVALID_DISP = minD - 1, INVALID_DISP_SCALED = INVALID_DISP*DISP_SCALE; int SW2 = SADWindowSize.width/2, SH2 = SADWindowSize.height/2; @@ -829,6 +822,645 @@ static void computeDisparitySGBM( const Mat& img1, const Mat& img2, } } +////////////////////////////////////////////////////////////////////////////////////////////////////// + +void getBufferPointers(Mat& buffer, int width, int width1, int D, int num_ch, int SH2, int P2, + CostType*& curCostVolumeLine, CostType*& hsumBuf, CostType*& pixDiff, + PixType*& tmpBuf, CostType*& horPassCostVolume, + CostType*& vertPassCostVolume, CostType*& vertPassMin, CostType*& rightPassBuf, + CostType*& disp2CostBuf, short*& disp2Buf); + +struct SGBM3WayMainLoop : public ParallelLoopBody +{ + Mat* buffers; + const Mat *img1, *img2; + Mat* dst_disp; + + int nstripes, stripe_sz; + int stripe_overlap; + + int width,height; + int minD, maxD, D; + int minX1, maxX1, width1; + + int SW2, SH2; + int P1, P2; + int uniquenessRatio, disp12MaxDiff; + + int costBufSize, hsumBufNRows; + int TAB_OFS, ftzero; + + PixType* clipTab; + + SGBM3WayMainLoop(Mat *_buffers, const Mat& _img1, const Mat& _img2, Mat* _dst_disp, const StereoSGBMParams& params, PixType* _clipTab, int _nstripes, int _stripe_overlap); + void getRawMatchingCost(CostType* C, CostType* hsumBuf, CostType* pixDiff, PixType* tmpBuf, int y, int src_start_idx) const; + void operator () (const Range& range) const; +}; + +SGBM3WayMainLoop::SGBM3WayMainLoop(Mat *_buffers, const Mat& _img1, const Mat& _img2, Mat* _dst_disp, const StereoSGBMParams& params, PixType* _clipTab, int _nstripes, int _stripe_overlap): +buffers(_buffers), img1(&_img1), img2(&_img2), dst_disp(_dst_disp), clipTab(_clipTab) +{ + nstripes = _nstripes; + stripe_overlap = _stripe_overlap; + stripe_sz = (int)ceil(img1->rows/(double)nstripes); + + width = img1->cols; height = img1->rows; + minD = params.minDisparity; maxD = minD + params.numDisparities; D = maxD - minD; + minX1 = std::max(maxD, 0); maxX1 = width + std::min(minD, 0); width1 = maxX1 - minX1; + CV_Assert( D % 16 == 0 ); + + SW2 = SH2 = params.SADWindowSize > 0 ? params.SADWindowSize/2 : 1; + + P1 = params.P1 > 0 ? params.P1 : 2; P2 = std::max(params.P2 > 0 ? params.P2 : 5, P1+1); + uniquenessRatio = params.uniquenessRatio >= 0 ? params.uniquenessRatio : 10; + disp12MaxDiff = params.disp12MaxDiff > 0 ? 
params.disp12MaxDiff : 1; + + costBufSize = width1*D; + hsumBufNRows = SH2*2 + 2; + TAB_OFS = 256*4; + ftzero = std::max(params.preFilterCap, 15) | 1; +} + +void getBufferPointers(Mat& buffer, int width, int width1, int D, int num_ch, int SH2, int P2, + CostType*& curCostVolumeLine, CostType*& hsumBuf, CostType*& pixDiff, + PixType*& tmpBuf, CostType*& horPassCostVolume, + CostType*& vertPassCostVolume, CostType*& vertPassMin, CostType*& rightPassBuf, + CostType*& disp2CostBuf, short*& disp2Buf) +{ + // allocating all the required memory: + int costVolumeLineSize = width1*D; + int width1_ext = width1+2; + int costVolumeLineSize_ext = width1_ext*D; + int hsumBufNRows = SH2*2 + 2; + + // main buffer to store matching costs for the current line: + int curCostVolumeLineSize = costVolumeLineSize*sizeof(CostType); + + // auxiliary buffers for the raw matching cost computation: + int hsumBufSize = costVolumeLineSize*hsumBufNRows*sizeof(CostType); + int pixDiffSize = costVolumeLineSize*sizeof(CostType); + int tmpBufSize = width*16*num_ch*sizeof(PixType); + + // auxiliary buffers for the matching cost aggregation: + int horPassCostVolumeSize = costVolumeLineSize_ext*sizeof(CostType); // buffer for the 2-pass horizontal cost aggregation + int vertPassCostVolumeSize = costVolumeLineSize_ext*sizeof(CostType); // buffer for the vertical cost aggregation + int vertPassMinSize = width1_ext*sizeof(CostType); // buffer for storing minimum costs from the previous line + int rightPassBufSize = D*sizeof(CostType); // additional small buffer for the right-to-left pass + + // buffers for the pseudo-LRC check: + int disp2CostBufSize = width*sizeof(CostType); + int disp2BufSize = width*sizeof(short); + + // sum up the sizes of all the buffers: + size_t totalBufSize = curCostVolumeLineSize + + hsumBufSize + + pixDiffSize + + tmpBufSize + + horPassCostVolumeSize + + vertPassCostVolumeSize + + vertPassMinSize + + rightPassBufSize + + disp2CostBufSize + + disp2BufSize + + 16; //to compensate for the alignPtr shifts + + if( buffer.empty() || !buffer.isContinuous() || buffer.cols*buffer.rows*buffer.elemSize() < totalBufSize ) + buffer.create(1, (int)totalBufSize, CV_8U); + + // set up all the pointers: + curCostVolumeLine = (CostType*)alignPtr(buffer.ptr(), 16); + hsumBuf = curCostVolumeLine + costVolumeLineSize; + pixDiff = hsumBuf + costVolumeLineSize*hsumBufNRows; + tmpBuf = (PixType*)(pixDiff + costVolumeLineSize); + horPassCostVolume = (CostType*)(tmpBuf + width*16*num_ch); + vertPassCostVolume = horPassCostVolume + costVolumeLineSize_ext; + rightPassBuf = vertPassCostVolume + costVolumeLineSize_ext; + vertPassMin = rightPassBuf + D; + disp2CostBuf = vertPassMin + width1_ext; + disp2Buf = disp2CostBuf + width; + + // initialize memory: + memset(buffer.ptr(),0,totalBufSize); + for(int i=0;i src_start_idx ) + { + const CostType* hsumSub = hsumBuf + (std::max(y - SH2 - 1, src_start_idx) % hsumBufNRows)*costBufSize; + + for( x = D; x < width1*D; x += D ) + { + const CostType* pixAdd = pixDiff + std::min(x + SW2*D, (width1-1)*D); + const CostType* pixSub = pixDiff + std::max(x - (SW2+1)*D, 0); + +#if CV_SIMD128 + v_int16x8 hv_reg; + for( d = 0; d < D; d+=8 ) + { + hv_reg = v_load_aligned(hsumAdd+x-D+d) + (v_load_aligned(pixAdd+d) - v_load_aligned(pixSub+d)); + v_store_aligned(hsumAdd+x+d,hv_reg); + v_store_aligned(C+x+d,v_load_aligned(C+x+d)+(hv_reg-v_load_aligned(hsumSub+x+d))); + } +#else + for( d = 0; d < D; d++ ) + { + int hv = hsumAdd[x + d] = (CostType)(hsumAdd[x - D + d] + pixAdd[d] - pixSub[d]); + C[x + d] = 
(CostType)(C[x + d] + hv - hsumSub[x + d]); + } +#endif + } + } + else + { + for( x = D; x < width1*D; x += D ) + { + const CostType* pixAdd = pixDiff + std::min(x + SW2*D, (width1-1)*D); + const CostType* pixSub = pixDiff + std::max(x - (SW2+1)*D, 0); + + for( d = 0; d < D; d++ ) + hsumAdd[x + d] = (CostType)(hsumAdd[x - D + d] + pixAdd[d] - pixSub[d]); + } + } + } + + if( y == src_start_idx ) + { + int scale = k == src_start_idx ? SH2 + 1 : 1; + for( x = 0; x < width1*D; x++ ) + C[x] = (CostType)(C[x] + hsumAdd[x]*scale); + } + } +} + +#if CV_SIMD128 && CV_SSE2 +// define some additional reduce operations: +inline short min(const v_int16x8& a) +{ + short CV_DECL_ALIGNED(16) buf[8]; + v_store_aligned(buf, a); + short s0 = std::min(buf[0], buf[1]); + short s1 = std::min(buf[2], buf[3]); + short s2 = std::min(buf[4], buf[5]); + short s3 = std::min(buf[6], buf[7]); + return std::min(std::min(s0, s1),std::min(s2, s3)); +} + +inline short min_pos(const v_int16x8& val,const v_int16x8& pos) +{ + short CV_DECL_ALIGNED(16) val_buf[8]; + v_store_aligned(val_buf, val); + short CV_DECL_ALIGNED(16) pos_buf[8]; + v_store_aligned(pos_buf, pos); + short res_pos = 0; + short min_val = SHRT_MAX; + if(val_buf[0](P1)); + + v_int16x8 leftMinCostP2_reg = v_setall_s16(cv::saturate_cast(leftMinCost+P2)); + v_int16x8 leftMinCost_new_reg = v_setall_s16(SHRT_MAX); + v_int16x8 src0_leftBuf = v_setall_s16(SHRT_MAX); + v_int16x8 src1_leftBuf = v_load_aligned(leftBuf_prev); + + v_int16x8 topMinCostP2_reg = v_setall_s16(cv::saturate_cast(topMinCost+P2)); + v_int16x8 topMinCost_new_reg = v_setall_s16(SHRT_MAX); + v_int16x8 src0_topBuf = v_setall_s16(SHRT_MAX); + v_int16x8 src1_topBuf = v_load_aligned(topBuf); + + v_int16x8 src2; + v_int16x8 src_shifted_left,src_shifted_right; + v_int16x8 res; + + for(int i=0;i(costs[i] + std::min(std::min(leftBuf_prev_i_minus_1+P1,leftBuf_prev[i+1]+P1),std::min((int)leftBuf_prev[i],leftMinCost_P2))-leftMinCost_P2); + leftBuf_prev_i_minus_1 = leftBuf_prev[i]; + leftMinCost_new = std::min(leftMinCost_new,leftBuf[i]); + + tmp = topBuf[i]; + topBuf[i] = cv::saturate_cast(costs[i] + std::min(std::min(topBuf_i_minus_1+P1,topBuf[i+1]+P1),std::min((int)topBuf[i],topMinCost_P2))-topMinCost_P2); + topBuf_i_minus_1 = tmp; + topMinCost_new = std::min(topMinCost_new,topBuf[i]); + } + + leftBuf[D-1] = cv::saturate_cast(costs[D-1] + std::min(leftBuf_prev_i_minus_1+P1,std::min((int)leftBuf_prev[D-1],leftMinCost_P2))-leftMinCost_P2); + leftMinCost = std::min(leftMinCost_new,leftBuf[D-1]); + + topBuf[D-1] = cv::saturate_cast(costs[D-1] + std::min(topBuf_i_minus_1+P1,std::min((int)topBuf[D-1],topMinCost_P2))-topMinCost_P2); + topMinCost = std::min(topMinCost_new,topBuf[D-1]); +#endif +} + +// performing in-place SGM cost accumulation from right to left (the result is stored in rightBuf) and +// summing rightBuf, topBuf, leftBuf together (the result is stored in leftBuf), as well as finding the +// optimal disparity value with minimum accumulated cost +inline void accumulateCostsRight(CostType* rightBuf, CostType* topBuf, CostType* leftBuf, CostType* costs, + CostType& rightMinCost, int D, int P1, int P2, int& optimal_disp, CostType& min_cost) +{ +#if CV_SIMD128 && CV_SSE2 + v_int16x8 P1_reg = v_setall_s16(cv::saturate_cast(P1)); + + v_int16x8 rightMinCostP2_reg = v_setall_s16(cv::saturate_cast(rightMinCost+P2)); + v_int16x8 rightMinCost_new_reg = v_setall_s16(SHRT_MAX); + v_int16x8 src0_rightBuf = v_setall_s16(SHRT_MAX); + v_int16x8 src1_rightBuf = v_load(rightBuf); + + v_int16x8 src2; + v_int16x8 
src_shifted_left,src_shifted_right; + v_int16x8 res; + + v_int16x8 min_sum_cost_reg = v_setall_s16(SHRT_MAX); + v_int16x8 min_sum_pos_reg = v_setall_s16(0); + v_int16x8 loop_idx(0,1,2,3,4,5,6,7); + v_int16x8 eight_reg = v_setall_s16(8); + + for(int i=0;i(costs[i] + std::min(std::min(rightBuf_i_minus_1+P1,rightBuf[i+1]+P1),std::min((int)rightBuf[i],rightMinCost_P2))-rightMinCost_P2); + rightBuf_i_minus_1 = tmp; + rightMinCost_new = std::min(rightMinCost_new,rightBuf[i]); + leftBuf[i] = cv::saturate_cast((int)leftBuf[i]+rightBuf[i]+topBuf[i]); + if(leftBuf[i](costs[D-1] + std::min(rightBuf_i_minus_1+P1,std::min((int)rightBuf[D-1],rightMinCost_P2))-rightMinCost_P2); + rightMinCost = std::min(rightMinCost_new,rightBuf[D-1]); + leftBuf[D-1] = cv::saturate_cast((int)leftBuf[D-1]+rightBuf[D-1]+topBuf[D-1]); + if(leftBuf[D-1]range.start+1) + { + for(int n=range.start;nchannels(),SH2,P2, + curCostVolumeLine,hsumBuf,pixDiff,tmpBuf,horPassCostVolume, + vertPassCostVolume,vertPassMin,rightPassBuf,disp2CostBuf,disp2Buf); + + // start real processing: + for(int y=src_start_idx;y=D;x-=D) + { + accumulateCostsRight(rightPassBuf,vertPassCostVolume+x,horPassCostVolume+x,C+x,prev_min,D,P1,P2,best_d,min_cost); + + if(uniquenessRatio>0) + { +#if CV_SIMD128 + horPassCostVolume+=x; + int thresh = (100*min_cost)/(100-uniquenessRatio); + v_int16x8 thresh_reg = v_setall_s16((short)(thresh+1)); + v_int16x8 d1 = v_setall_s16((short)(best_d-1)); + v_int16x8 d2 = v_setall_s16((short)(best_d+1)); + v_int16x8 eight_reg = v_setall_s16(8); + v_int16x8 cur_d(0,1,2,3,4,5,6,7); + v_int16x8 mask,cost1,cost2; + + for( d = 0; d < D; d+=16 ) + { + cost1 = v_load_aligned(horPassCostVolume+d); + cost2 = v_load_aligned(horPassCostVolume+d+8); + + mask = cost1 < thresh_reg; + mask = mask & ( (cur_dd2) ); + if( v_signmask(mask) ) + break; + + cur_d = cur_d+eight_reg; + + mask = cost2 < thresh_reg; + mask = mask & ( (cur_dd2) ); + if( v_signmask(mask) ) + break; + + cur_d = cur_d+eight_reg; + } + horPassCostVolume-=x; +#else + for( d = 0; d < D; d++ ) + { + if( horPassCostVolume[x+d]*(100 - uniquenessRatio) < min_cost*100 && std::abs(d - best_d) > 1 ) + break; + } +#endif + if( d < D ) + continue; + } + d = best_d; + + int _x2 = x/D - 1 + minX1 - d - minD; + if( _x2>=0 && _x2 min_cost ) + { + disp2CostBuf[_x2] = min_cost; + disp2Buf[_x2] = (short)(d + minD); + } + + if( 0 < d && d < D-1 ) + { + // do subpixel quadratic interpolation: + // fit parabola into (x1=d-1, y1=Sp[d-1]), (x2=d, y2=Sp[d]), (x3=d+1, y3=Sp[d+1]) + // then find minimum of the parabola. 
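For readability, the subpixel refinement performed by the next few lines of the patch can be read in isolation as the following self-contained helper. It is a sketch of the same arithmetic, not code from this patch: the name refineDisparitySubpixel and the Sm1/S0/Sp1 parameters are invented for illustration, and the result is expressed in DISP_SCALE fixed-point units, matching the hunk below.

    #include <algorithm>  // std::max

    // Sm1 = S[d-1], S0 = S[d], Sp1 = S[d+1]: aggregated costs around the winning integer disparity d.
    // Fits a parabola through the three costs and returns d shifted towards the parabola minimum,
    // scaled by DISP_SCALE, with rounding; the curvature is clamped to 1 so the division is safe.
    static inline int refineDisparitySubpixel(int d, int Sm1, int S0, int Sp1, int DISP_SCALE)
    {
        int denom2 = std::max(Sm1 + Sp1 - 2*S0, 1);
        return d*DISP_SCALE + ((Sm1 - Sp1)*DISP_SCALE + denom2) / (denom2*2);
    }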
+ int denom2 = std::max(horPassCostVolume[x+d-1] + horPassCostVolume[x+d+1] - 2*horPassCostVolume[x+d], 1); + d = d*DISP_SCALE + ((horPassCostVolume[x+d-1] - horPassCostVolume[x+d+1])*DISP_SCALE + denom2)/(denom2*2); + } + else + d *= DISP_SCALE; + + disp_row[(x/D)-1 + minX1] = (DispType)(d + minD*DISP_SCALE); + } + + for(int x = minX1; x < maxX1; x++ ) + { + // pseudo LRC consistency check using only one disparity map; + // pixels with difference more than disp12MaxDiff are invalidated + int d1 = disp_row[x]; + if( d1 == INVALID_DISP_SCALED ) + continue; + int _d = d1 >> StereoMatcher::DISP_SHIFT; + int d_ = (d1 + DISP_SCALE-1) >> StereoMatcher::DISP_SHIFT; + int _x = x - _d, x_ = x - d_; + if( 0 <= _x && _x < width && disp2Buf[_x] >= minD && std::abs(disp2Buf[_x] - _d) > disp12MaxDiff && + 0 <= x_ && x_ < width && disp2Buf[x_] >= minD && std::abs(disp2Buf[x_] - d_) > disp12MaxDiff ) + disp_row[x] = (short)INVALID_DISP_SCALED; + } + } +} + +static void computeDisparity3WaySGBM( const Mat& img1, const Mat& img2, + Mat& disp1, const StereoSGBMParams& params, + Mat* buffers, int nstripes ) +{ + // precompute a lookup table for the raw matching cost computation: + const int TAB_OFS = 256*4, TAB_SIZE = 256 + TAB_OFS*2; + PixType* clipTab = new PixType[TAB_SIZE]; + int ftzero = std::max(params.preFilterCap, 15) | 1; + for(int k = 0; k < TAB_SIZE; k++ ) + clipTab[k] = (PixType)(std::min(std::max(k - TAB_OFS, -ftzero), ftzero) + ftzero); + + // allocate separate dst_disp arrays to avoid conflicts due to stripe overlap: + int stripe_sz = (int)ceil(img1.rows/(double)nstripes); + int stripe_overlap = (params.SADWindowSize/2+1) + (int)ceil(0.1*stripe_sz); + Mat* dst_disp = new Mat[nstripes]; + for(int i=0;i 0 ) @@ -933,6 +1569,12 @@ public: StereoSGBMParams params; Mat buffer; + + // the number of stripes is fixed, disregarding the number of threads/processors + // to make the results fully reproducible: + static const int num_stripes = 4; + Mat buffers[num_stripes]; + static const char* name_; }; @@ -1157,6 +1799,10 @@ void cv::validateDisparity( InputOutputArray _disp, InputArray _cost, int minDis for( x = minX1; x < maxX1; x++ ) { int d = dptr[x], c = cptr[x]; + + if( d == INVALID_DISP_SCALED ) + continue; + int x2 = x - ((d + DISP_SCALE/2) >> DISP_SHIFT); if( disp2cost[x2] > c ) @@ -1173,9 +1819,13 @@ void cv::validateDisparity( InputOutputArray _disp, InputArray _cost, int minDis for( x = minX1; x < maxX1; x++ ) { int d = dptr[x], c = cptr[x]; + + if( d == INVALID_DISP_SCALED ) + continue; + int x2 = x - ((d + DISP_SCALE/2) >> DISP_SHIFT); - if( disp2cost[x2] < c ) + if( disp2cost[x2] > c ) { disp2cost[x2] = c; disp2buf[x2] = d; diff --git a/modules/calib3d/src/upnp.cpp b/modules/calib3d/src/upnp.cpp index 378f5a11b4..1054e0bffe 100644 --- a/modules/calib3d/src/upnp.cpp +++ b/modules/calib3d/src/upnp.cpp @@ -114,6 +114,7 @@ double upnp::compute_pose(Mat& R, Mat& t) SVD::compute(MtM, D, Ut, Vt, SVD::MODIFY_A | SVD::FULL_UV); Mat(Ut.t()).copyTo(Ut); M->release(); + delete M; double l_6x12[6 * 12], rho[6]; Mat L_6x12 = Mat(6, 12, CV_64F, l_6x12); @@ -589,7 +590,16 @@ void upnp::gauss_newton(const Mat * L_6x12, const Mat * Rho, double betas[4], do } if (f[0] < 0) f[0] = -f[0]; - fu = fv = f[0]; + fu = fv = f[0]; + + A->release(); + delete A; + + B->release(); + delete B; + + X->release(); + delete X; } diff --git a/modules/calib3d/test/test_fisheye.cpp b/modules/calib3d/test/test_fisheye.cpp index ba05f1ee26..4e7bf53634 100644 --- a/modules/calib3d/test/test_fisheye.cpp +++ 
b/modules/calib3d/test/test_fisheye.cpp @@ -43,6 +43,7 @@ #include "test_precomp.hpp" #include #include "../src/fisheye.hpp" +#include "opencv2/videoio.hpp" class fisheyeTest : public ::testing::Test { diff --git a/modules/calib3d/test/test_stereomatching.cpp b/modules/calib3d/test/test_stereomatching.cpp index 41290a1c3c..0aee42acee 100644 --- a/modules/calib3d/test/test_stereomatching.cpp +++ b/modules/calib3d/test/test_stereomatching.cpp @@ -742,7 +742,7 @@ protected: { int ndisp; int winSize; - bool fullDP; + int mode; }; vector caseRunParams; @@ -757,7 +757,7 @@ protected: RunParams params; String ndisp = fn[i+2]; params.ndisp = atoi(ndisp.c_str()); String winSize = fn[i+3]; params.winSize = atoi(winSize.c_str()); - String fullDP = fn[i+4]; params.fullDP = atoi(fullDP.c_str()) == 0 ? false : true; + String mode = fn[i+4]; params.mode = atoi(mode.c_str()); caseNames.push_back( caseName ); caseDatasets.push_back( datasetName ); caseRunParams.push_back( params ); @@ -773,8 +773,7 @@ protected: Ptr sgbm = StereoSGBM::create( 0, params.ndisp, params.winSize, 10*params.winSize*params.winSize, 40*params.winSize*params.winSize, - 1, 63, 10, 100, 32, params.fullDP ? - StereoSGBM::MODE_HH : StereoSGBM::MODE_SGBM ); + 1, 63, 10, 100, 32, params.mode ); sgbm->compute( leftImg, rightImg, leftDisp ); CV_Assert( leftDisp.type() == CV_16SC1 ); leftDisp/=16; diff --git a/modules/core/CMakeLists.txt b/modules/core/CMakeLists.txt index c45760c654..dbedc5b065 100644 --- a/modules/core/CMakeLists.txt +++ b/modules/core/CMakeLists.txt @@ -1,7 +1,9 @@ set(the_description "The Core Functionality") -ocv_add_module(core PRIVATE_REQUIRED ${ZLIB_LIBRARIES} "${OPENCL_LIBRARIES}" +ocv_add_module(core + opencv_hal + PRIVATE_REQUIRED ${ZLIB_LIBRARIES} "${OPENCL_LIBRARIES}" OPTIONAL opencv_cudev - WRAP java python) + WRAP java python) set(extra_libs "") diff --git a/modules/core/include/opencv2/core/base.hpp b/modules/core/include/opencv2/core/base.hpp index 83cc311c42..3d440dabe0 100644 --- a/modules/core/include/opencv2/core/base.hpp +++ b/modules/core/include/opencv2/core/base.hpp @@ -305,6 +305,7 @@ enum BorderTypes { #define CV_SUPPRESS_DEPRECATED_START #define CV_SUPPRESS_DEPRECATED_END #endif +#define CV_UNUSED(name) (void)name //! @endcond /*! @brief Signals an error and raises the exception. diff --git a/modules/core/include/opencv2/core/cvstd.inl.hpp b/modules/core/include/opencv2/core/cvstd.inl.hpp index 03bac3729c..ad154061ef 100644 --- a/modules/core/include/opencv2/core/cvstd.inl.hpp +++ b/modules/core/include/opencv2/core/cvstd.inl.hpp @@ -87,7 +87,7 @@ String::String(const std::string& str, size_t pos, size_t len) : cstr_(0), len_(0) { size_t strlen = str.size(); - pos = max(pos, strlen); + pos = min(pos, strlen); len = min(strlen - pos, len); if (!len) return; memcpy(allocate(len), str.c_str() + pos, len); diff --git a/modules/core/include/opencv2/core/directx.hpp b/modules/core/include/opencv2/core/directx.hpp index 837548e51b..bb6167511f 100644 --- a/modules/core/include/opencv2/core/directx.hpp +++ b/modules/core/include/opencv2/core/directx.hpp @@ -68,12 +68,38 @@ namespace ocl { using namespace cv::ocl; //! @addtogroup core_directx +// This section describes OpenCL and DirectX interoperability. +// +// To enable DirectX support, configure OpenCV using CMake with WITH_DIRECTX=ON . Note, DirectX is +// supported only on Windows. +// +// To use OpenCL functionality you should first initialize OpenCL context from DirectX resource. +// //! @{ // TODO static functions in the Context class +//! 
@brief Creates OpenCL context from D3D11 device +// +//! @param pD3D11Device - pointer to D3D11 device +//! @return Returns reference to OpenCL Context CV_EXPORTS Context& initializeContextFromD3D11Device(ID3D11Device* pD3D11Device); + +//! @brief Creates OpenCL context from D3D10 device +// +//! @param pD3D10Device - pointer to D3D10 device +//! @return Returns reference to OpenCL Context CV_EXPORTS Context& initializeContextFromD3D10Device(ID3D10Device* pD3D10Device); + +//! @brief Creates OpenCL context from Direct3DDevice9Ex device +// +//! @param pDirect3DDevice9Ex - pointer to Direct3DDevice9Ex device +//! @return Returns reference to OpenCL Context CV_EXPORTS Context& initializeContextFromDirect3DDevice9Ex(IDirect3DDevice9Ex* pDirect3DDevice9Ex); + +//! @brief Creates OpenCL context from Direct3DDevice9 device +// +//! @param pDirect3DDevice9 - pointer to Direct3Device9 device +//! @return Returns reference to OpenCL Context CV_EXPORTS Context& initializeContextFromDirect3DDevice9(IDirect3DDevice9* pDirect3DDevice9); //! @} @@ -83,19 +109,70 @@ CV_EXPORTS Context& initializeContextFromDirect3DDevice9(IDirect3DDevice9* pDire //! @addtogroup core_directx //! @{ +//! @brief Converts InputArray to ID3D11Texture2D +// +//! @note Note: function does memory copy from src to +//! pD3D11Texture2D +// +//! @param src - source InputArray +//! @param pD3D11Texture2D - destination D3D11 texture CV_EXPORTS void convertToD3D11Texture2D(InputArray src, ID3D11Texture2D* pD3D11Texture2D); + +//! @brief Converts ID3D11Texture2D to OutputArray +// +//! @note Note: function does memory copy from pD3D11Texture2D +//! to dst +// +//! @param pD3D11Texture2D - source D3D11 texture +//! @param dst - destination OutputArray CV_EXPORTS void convertFromD3D11Texture2D(ID3D11Texture2D* pD3D11Texture2D, OutputArray dst); +//! @brief Converts InputArray to ID3D10Texture2D +// +//! @note Note: function does memory copy from src to +//! pD3D10Texture2D +// +//! @param src - source InputArray +//! @param pD3D10Texture2D - destination D3D10 texture CV_EXPORTS void convertToD3D10Texture2D(InputArray src, ID3D10Texture2D* pD3D10Texture2D); + +//! @brief Converts ID3D10Texture2D to OutputArray +// +//! @note Note: function does memory copy from pD3D10Texture2D +//! to dst +// +//! @param pD3D10Texture2D - source D3D10 texture +//! @param dst - destination OutputArray CV_EXPORTS void convertFromD3D10Texture2D(ID3D10Texture2D* pD3D10Texture2D, OutputArray dst); +//! @brief Converts InputArray to IDirect3DSurface9 +// +//! @note Note: function does memory copy from src to +//! pDirect3DSurface9 +// +//! @param src - source InputArray +//! @param pDirect3DSurface9 - destination D3D10 texture +//! @param surfaceSharedHandle - shared handle CV_EXPORTS void convertToDirect3DSurface9(InputArray src, IDirect3DSurface9* pDirect3DSurface9, void* surfaceSharedHandle = NULL); + +//! @brief Converts IDirect3DSurface9 to OutputArray +// +//! @note Note: function does memory copy from pDirect3DSurface9 +//! to dst +// +//! @param pDirect3DSurface9 - source D3D10 texture +//! @param dst - destination OutputArray +//! @param surfaceSharedHandle - shared handle CV_EXPORTS void convertFromDirect3DSurface9(IDirect3DSurface9* pDirect3DSurface9, OutputArray dst, void* surfaceSharedHandle = NULL); -// Get OpenCV type from DirectX type, return -1 if there is no equivalent +//! @brief Get OpenCV type from DirectX type +//! @param iDXGI_FORMAT - enum DXGI_FORMAT for D3D10/D3D11 +//! 
@return OpenCV type or -1 if there is no equivalent CV_EXPORTS int getTypeFromDXGI_FORMAT(const int iDXGI_FORMAT); // enum DXGI_FORMAT for D3D10/D3D11 -// Get OpenCV type from DirectX type, return -1 if there is no equivalent +//! @brief Get OpenCV type from DirectX type +//! @param iD3DFORMAT - enum D3DTYPE for D3D9 +//! @return OpenCV type or -1 if there is no equivalent CV_EXPORTS int getTypeFromD3DFORMAT(const int iD3DFORMAT); // enum D3DTYPE for D3D9 //! @} diff --git a/modules/core/include/opencv2/core/mat.hpp b/modules/core/include/opencv2/core/mat.hpp index 45f3cef0ce..7f609dbb1d 100644 --- a/modules/core/include/opencv2/core/mat.hpp +++ b/modules/core/include/opencv2/core/mat.hpp @@ -222,6 +222,7 @@ public: bool isMatVector() const; bool isUMatVector() const; bool isMatx() const; + bool isVector() const; ~_InputArray(); @@ -1796,7 +1797,7 @@ public: /** @brief Invoke with arguments functor, and runs the functor over all matrix element. - The methos runs operation in parallel. Operation is passed by arguments. Operation have to be a + The methods runs operation in parallel. Operation is passed by arguments. Operation have to be a function pointer, a function object or a lambda(C++11). All of below operation is equal. Put 0xFF to first channel of all matrix elements: @@ -3240,7 +3241,7 @@ Here are examples of matrix expressions: // sharpen image using "unsharp mask" algorithm Mat blurred; double sigma = 1, threshold = 5, amount = 1; GaussianBlur(img, blurred, Size(), sigma, sigma); - Mat lowConstrastMask = abs(img - blurred) < threshold; + Mat lowContrastMask = abs(img - blurred) < threshold; Mat sharpened = img*(1+amount) + blurred*(-amount); img.copyTo(sharpened, lowContrastMask); @endcode diff --git a/modules/core/include/opencv2/core/mat.inl.hpp b/modules/core/include/opencv2/core/mat.inl.hpp index 3779b83f82..ba52efde1d 100644 --- a/modules/core/include/opencv2/core/mat.inl.hpp +++ b/modules/core/include/opencv2/core/mat.inl.hpp @@ -130,6 +130,7 @@ inline bool _InputArray::isUMat() const { return kind() == _InputArray::UMAT; } inline bool _InputArray::isMatVector() const { return kind() == _InputArray::STD_VECTOR_MAT; } inline bool _InputArray::isUMatVector() const { return kind() == _InputArray::STD_VECTOR_UMAT; } inline bool _InputArray::isMatx() const { return kind() == _InputArray::MATX; } +inline bool _InputArray::isVector() const { return kind() == _InputArray::STD_VECTOR || kind() == _InputArray::STD_BOOL_VECTOR; } //////////////////////////////////////////////////////////////////////////////////////// @@ -396,6 +397,8 @@ Mat::Mat(int _rows, int _cols, int _type, void* _data, size_t _step) data((uchar*)_data), datastart((uchar*)_data), dataend(0), datalimit(0), allocator(0), u(0), size(&rows) { + CV_Assert(total() == 0 || data != NULL); + size_t esz = CV_ELEM_SIZE(_type), esz1 = CV_ELEM_SIZE1(_type); size_t minstep = cols * esz; if( _step == AUTO_STEP ) @@ -427,6 +430,8 @@ Mat::Mat(Size _sz, int _type, void* _data, size_t _step) data((uchar*)_data), datastart((uchar*)_data), dataend(0), datalimit(0), allocator(0), u(0), size(&rows) { + CV_Assert(total() == 0 || data != NULL); + size_t esz = CV_ELEM_SIZE(_type), esz1 = CV_ELEM_SIZE1(_type); size_t minstep = cols*esz; if( _step == AUTO_STEP ) @@ -1096,6 +1101,12 @@ void Mat::push_back(const Mat_<_Tp>& m) push_back((const Mat&)m); } +template<> inline +void Mat::push_back(const MatExpr& expr) +{ + push_back(static_cast(expr)); +} + ///////////////////////////// MatSize //////////////////////////// inline @@ -1586,7 +1597,13 
@@ template template inline Mat_<_Tp>::operator Vec::channel_type, n>() const { CV_Assert(n % DataType<_Tp>::channels == 0); + +#if defined _MSC_VER + const Mat* pMat = (const Mat*)this; // workaround for MSVS <= 2012 compiler bugs (but GCC 4.6 dislikes this workaround) + return pMat->operator Vec::channel_type, n>(); +#else return this->Mat::operator Vec::channel_type, n>(); +#endif } template template inline @@ -1594,8 +1611,14 @@ Mat_<_Tp>::operator Matx::channel_type, m, n>() const { CV_Assert(n % DataType<_Tp>::channels == 0); +#if defined _MSC_VER + const Mat* pMat = (const Mat*)this; // workaround for MSVS <= 2012 compiler bugs (but GCC 4.6 dislikes this workaround) + Matx::channel_type, m, n> res = pMat->operator Matx::channel_type, m, n>(); + return res; +#else Matx::channel_type, m, n> res = this->Mat::operator Matx::channel_type, m, n>(); return res; +#endif } template inline diff --git a/modules/core/include/opencv2/core/ocl.hpp b/modules/core/include/opencv2/core/ocl.hpp index 173722f61f..bc989a3285 100644 --- a/modules/core/include/opencv2/core/ocl.hpp +++ b/modules/core/include/opencv2/core/ocl.hpp @@ -276,6 +276,58 @@ protected: Impl* p; }; +/* +//! @brief Attaches OpenCL context to OpenCV +// +//! @note Note: +// OpenCV will check if available OpenCL platform has platformName name, +// then assign context to OpenCV and call clRetainContext function. +// The deviceID device will be used as target device and new command queue +// will be created. +// +// Params: +//! @param platformName - name of OpenCL platform to attach, +//! this string is used to check if platform is available +//! to OpenCV at runtime +//! @param platfromID - ID of platform attached context was created for +//! @param context - OpenCL context to be attached to OpenCV +//! @param deviceID - ID of device, must be created from attached context +*/ +CV_EXPORTS void attachContext(const String& platformName, void* platformID, void* context, void* deviceID); + +/* +//! @brief Convert OpenCL buffer to UMat +// +//! @note Note: +// OpenCL buffer (cl_mem_buffer) should contain 2D image data, compatible with OpenCV. +// Memory content is not copied from clBuffer to UMat. Instead, buffer handle assigned +// to UMat and clRetainMemObject is called. +// +// Params: +//! @param cl_mem_buffer - source clBuffer handle +//! @param step - num of bytes in single row +//! @param rows - number of rows +//! @param cols - number of cols +//! @param type - OpenCV type of image +//! @param dst - destination UMat +*/ +CV_EXPORTS void convertFromBuffer(void* cl_mem_buffer, size_t step, int rows, int cols, int type, UMat& dst); + +/* +//! @brief Convert OpenCL image2d_t to UMat +// +//! @note Note: +// OpenCL image2d_t (cl_mem_image), should be compatible with OpenCV +// UMat formats. +// Memory content is copied from image to UMat with +// clEnqueueCopyImageToBuffer function. +// +// Params: +//! @param cl_mem_image - source image2d_t handle +//! 
@param dst - destination UMat +*/ +CV_EXPORTS void convertFromImage(void* cl_mem_image, UMat& dst); + // TODO Move to internal header void initializeContextFromHandle(Context& ctx, void* platform, void* context, void* device); diff --git a/modules/core/include/opencv2/core/opencl/runtime/autogenerated/opencl_gl.hpp b/modules/core/include/opencv2/core/opencl/runtime/autogenerated/opencl_gl.hpp new file mode 100644 index 0000000000..f37ad158e1 --- /dev/null +++ b/modules/core/include/opencv2/core/opencl/runtime/autogenerated/opencl_gl.hpp @@ -0,0 +1,58 @@ +// +// AUTOGENERATED, DO NOT EDIT +// +#ifndef __OPENCV_CORE_OCL_RUNTIME_OPENCL_GL_HPP__ +#error "Invalid usage" +#endif + +// generated by parser_cl.py +#define clCreateFromGLBuffer clCreateFromGLBuffer_ +#define clCreateFromGLRenderbuffer clCreateFromGLRenderbuffer_ +#define clCreateFromGLTexture clCreateFromGLTexture_ +#define clCreateFromGLTexture2D clCreateFromGLTexture2D_ +#define clCreateFromGLTexture3D clCreateFromGLTexture3D_ +#define clEnqueueAcquireGLObjects clEnqueueAcquireGLObjects_ +#define clEnqueueReleaseGLObjects clEnqueueReleaseGLObjects_ +#define clGetGLContextInfoKHR clGetGLContextInfoKHR_ +#define clGetGLObjectInfo clGetGLObjectInfo_ +#define clGetGLTextureInfo clGetGLTextureInfo_ + +#if defined __APPLE__ +#include +#else +#include +#endif + +// generated by parser_cl.py +#undef clCreateFromGLBuffer +#define clCreateFromGLBuffer clCreateFromGLBuffer_pfn +#undef clCreateFromGLRenderbuffer +#define clCreateFromGLRenderbuffer clCreateFromGLRenderbuffer_pfn +#undef clCreateFromGLTexture +#define clCreateFromGLTexture clCreateFromGLTexture_pfn +#undef clCreateFromGLTexture2D +#define clCreateFromGLTexture2D clCreateFromGLTexture2D_pfn +#undef clCreateFromGLTexture3D +#define clCreateFromGLTexture3D clCreateFromGLTexture3D_pfn +#undef clEnqueueAcquireGLObjects +#define clEnqueueAcquireGLObjects clEnqueueAcquireGLObjects_pfn +#undef clEnqueueReleaseGLObjects +#define clEnqueueReleaseGLObjects clEnqueueReleaseGLObjects_pfn +#undef clGetGLContextInfoKHR +#define clGetGLContextInfoKHR clGetGLContextInfoKHR_pfn +#undef clGetGLObjectInfo +#define clGetGLObjectInfo clGetGLObjectInfo_pfn +#undef clGetGLTextureInfo +#define clGetGLTextureInfo clGetGLTextureInfo_pfn + +// generated by parser_cl.py +extern CL_RUNTIME_EXPORT cl_mem (CL_API_CALL*clCreateFromGLBuffer)(cl_context, cl_mem_flags, cl_GLuint, int*); +extern CL_RUNTIME_EXPORT cl_mem (CL_API_CALL*clCreateFromGLRenderbuffer)(cl_context, cl_mem_flags, cl_GLuint, cl_int*); +extern CL_RUNTIME_EXPORT cl_mem (CL_API_CALL*clCreateFromGLTexture)(cl_context, cl_mem_flags, cl_GLenum, cl_GLint, cl_GLuint, cl_int*); +extern CL_RUNTIME_EXPORT cl_mem (CL_API_CALL*clCreateFromGLTexture2D)(cl_context, cl_mem_flags, cl_GLenum, cl_GLint, cl_GLuint, cl_int*); +extern CL_RUNTIME_EXPORT cl_mem (CL_API_CALL*clCreateFromGLTexture3D)(cl_context, cl_mem_flags, cl_GLenum, cl_GLint, cl_GLuint, cl_int*); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL*clEnqueueAcquireGLObjects)(cl_command_queue, cl_uint, const cl_mem*, cl_uint, const cl_event*, cl_event*); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL*clEnqueueReleaseGLObjects)(cl_command_queue, cl_uint, const cl_mem*, cl_uint, const cl_event*, cl_event*); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL*clGetGLContextInfoKHR)(const cl_context_properties*, cl_gl_context_info, size_t, void*, size_t*); +extern CL_RUNTIME_EXPORT cl_int (CL_API_CALL*clGetGLObjectInfo)(cl_mem, cl_gl_object_type*, cl_GLuint*); +extern CL_RUNTIME_EXPORT cl_int 
(CL_API_CALL*clGetGLTextureInfo)(cl_mem, cl_gl_texture_info, size_t, void*, size_t*); diff --git a/modules/core/include/opencv2/core/opencl/runtime/autogenerated/opencl_gl_wrappers.hpp b/modules/core/include/opencv2/core/opencl/runtime/autogenerated/opencl_gl_wrappers.hpp new file mode 100644 index 0000000000..105867f47a --- /dev/null +++ b/modules/core/include/opencv2/core/opencl/runtime/autogenerated/opencl_gl_wrappers.hpp @@ -0,0 +1,38 @@ +// +// AUTOGENERATED, DO NOT EDIT +// +#ifndef __OPENCV_CORE_OCL_RUNTIME_OPENCL_GL_WRAPPERS_HPP__ +#error "Invalid usage" +#endif + +// generated by parser_cl.py +#undef clCreateFromGLBuffer +#define clCreateFromGLBuffer clCreateFromGLBuffer_fn +inline cl_mem clCreateFromGLBuffer(cl_context p0, cl_mem_flags p1, cl_GLuint p2, int* p3) { return clCreateFromGLBuffer_pfn(p0, p1, p2, p3); } +#undef clCreateFromGLRenderbuffer +#define clCreateFromGLRenderbuffer clCreateFromGLRenderbuffer_fn +inline cl_mem clCreateFromGLRenderbuffer(cl_context p0, cl_mem_flags p1, cl_GLuint p2, cl_int* p3) { return clCreateFromGLRenderbuffer_pfn(p0, p1, p2, p3); } +#undef clCreateFromGLTexture +#define clCreateFromGLTexture clCreateFromGLTexture_fn +inline cl_mem clCreateFromGLTexture(cl_context p0, cl_mem_flags p1, cl_GLenum p2, cl_GLint p3, cl_GLuint p4, cl_int* p5) { return clCreateFromGLTexture_pfn(p0, p1, p2, p3, p4, p5); } +#undef clCreateFromGLTexture2D +#define clCreateFromGLTexture2D clCreateFromGLTexture2D_fn +inline cl_mem clCreateFromGLTexture2D(cl_context p0, cl_mem_flags p1, cl_GLenum p2, cl_GLint p3, cl_GLuint p4, cl_int* p5) { return clCreateFromGLTexture2D_pfn(p0, p1, p2, p3, p4, p5); } +#undef clCreateFromGLTexture3D +#define clCreateFromGLTexture3D clCreateFromGLTexture3D_fn +inline cl_mem clCreateFromGLTexture3D(cl_context p0, cl_mem_flags p1, cl_GLenum p2, cl_GLint p3, cl_GLuint p4, cl_int* p5) { return clCreateFromGLTexture3D_pfn(p0, p1, p2, p3, p4, p5); } +#undef clEnqueueAcquireGLObjects +#define clEnqueueAcquireGLObjects clEnqueueAcquireGLObjects_fn +inline cl_int clEnqueueAcquireGLObjects(cl_command_queue p0, cl_uint p1, const cl_mem* p2, cl_uint p3, const cl_event* p4, cl_event* p5) { return clEnqueueAcquireGLObjects_pfn(p0, p1, p2, p3, p4, p5); } +#undef clEnqueueReleaseGLObjects +#define clEnqueueReleaseGLObjects clEnqueueReleaseGLObjects_fn +inline cl_int clEnqueueReleaseGLObjects(cl_command_queue p0, cl_uint p1, const cl_mem* p2, cl_uint p3, const cl_event* p4, cl_event* p5) { return clEnqueueReleaseGLObjects_pfn(p0, p1, p2, p3, p4, p5); } +#undef clGetGLContextInfoKHR +#define clGetGLContextInfoKHR clGetGLContextInfoKHR_fn +inline cl_int clGetGLContextInfoKHR(const cl_context_properties* p0, cl_gl_context_info p1, size_t p2, void* p3, size_t* p4) { return clGetGLContextInfoKHR_pfn(p0, p1, p2, p3, p4); } +#undef clGetGLObjectInfo +#define clGetGLObjectInfo clGetGLObjectInfo_fn +inline cl_int clGetGLObjectInfo(cl_mem p0, cl_gl_object_type* p1, cl_GLuint* p2) { return clGetGLObjectInfo_pfn(p0, p1, p2); } +#undef clGetGLTextureInfo +#define clGetGLTextureInfo clGetGLTextureInfo_fn +inline cl_int clGetGLTextureInfo(cl_mem p0, cl_gl_texture_info p1, size_t p2, void* p3, size_t* p4) { return clGetGLTextureInfo_pfn(p0, p1, p2, p3, p4); } diff --git a/modules/core/include/opencv2/core/opencl/runtime/opencl_gl.hpp b/modules/core/include/opencv2/core/opencl/runtime/opencl_gl.hpp new file mode 100644 index 0000000000..7c7a82e9ea --- /dev/null +++ b/modules/core/include/opencv2/core/opencl/runtime/opencl_gl.hpp @@ -0,0 +1,65 @@ 
+/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2010-2013, Advanced Micro Devices, Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the OpenCV Foundation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_CORE_OCL_RUNTIME_OPENCL_GL_HPP__ +#define __OPENCV_CORE_OCL_RUNTIME_OPENCL_GL_HPP__ + +#if defined HAVE_OPENCL && defined HAVE_OPENGL + +#include "opencl_core.hpp" + +#if defined(HAVE_OPENCL_STATIC) + +#if defined __APPLE__ +#include +#else +#include +#endif + +#else // HAVE_OPENCL_STATIC + +#include "autogenerated/opencl_gl.hpp" + +#endif // HAVE_OPENCL_STATIC + +#endif // defined HAVE_OPENCL && defined HAVE_OPENGL + +#endif // __OPENCV_CORE_OCL_RUNTIME_OPENCL_GL_HPP__ diff --git a/modules/core/include/opencv2/core/opencl/runtime/opencl_gl_wrappers.hpp b/modules/core/include/opencv2/core/opencl/runtime/opencl_gl_wrappers.hpp new file mode 100644 index 0000000000..9327d2ede6 --- /dev/null +++ b/modules/core/include/opencv2/core/opencl/runtime/opencl_gl_wrappers.hpp @@ -0,0 +1,47 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2010-2013, Advanced Micro Devices, Inc., all rights reserved. +// Third party copyrights are property of their respective owners. 
+// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the OpenCV Foundation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_CORE_OCL_RUNTIME_OPENCL_GL_WRAPPERS_HPP__ +#define __OPENCV_CORE_OCL_RUNTIME_OPENCL_GL_WRAPPERS_HPP__ + +#include "autogenerated/opencl_gl_wrappers.hpp" + +#endif // __OPENCV_CORE_OCL_RUNTIME_OPENCL_GL_WRAPPERS_HPP__ diff --git a/modules/core/include/opencv2/core/opengl.hpp b/modules/core/include/opencv2/core/opengl.hpp index 15c635c880..fd47c520e2 100644 --- a/modules/core/include/opencv2/core/opengl.hpp +++ b/modules/core/include/opencv2/core/opengl.hpp @@ -48,6 +48,7 @@ #endif #include "opencv2/core.hpp" +#include "ocl.hpp" namespace cv { namespace ogl { @@ -511,7 +512,51 @@ CV_EXPORTS void render(const Arrays& arr, int mode = POINTS, Scalar color = Scal */ CV_EXPORTS void render(const Arrays& arr, InputArray indices, int mode = POINTS, Scalar color = Scalar::all(255)); -//! @} core_opengl +/////////////////// CL-GL Interoperability Functions /////////////////// + +namespace ocl { +using namespace cv::ocl; + +// TODO static functions in the Context class +/** @brief Creates OpenCL context from GL. +@return Returns reference to OpenCL Context + */ +CV_EXPORTS Context& initializeContextFromGL(); + +} // namespace cv::ogl::ocl + +/** @brief Converts InputArray to Texture2D object. +@param src - source InputArray. +@param texture - destination Texture2D object. + */ +CV_EXPORTS void convertToGLTexture2D(InputArray src, Texture2D& texture); + +/** @brief Converts Texture2D object to OutputArray. +@param texture - source Texture2D object. +@param dst - destination OutputArray. + */ +CV_EXPORTS void convertFromGLTexture2D(const Texture2D& texture, OutputArray dst); + +/** @brief Maps Buffer object to process on CL side (convert to UMat). + +Function creates CL buffer from GL one, and then constructs UMat that can be used +to process buffer data with OpenCV functions. Note that in current implementation +UMat constructed this way doesn't own corresponding GL buffer object, so it is +the user responsibility to close down CL/GL buffers relationships by explicitly +calling unmapGLBuffer() function. 
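A minimal usage sketch of the mapGLBuffer()/unmapGLBuffer() pair described above, under the assumption that an OpenGL context is current and cv::ogl::ocl::initializeContextFromGL() has already succeeded. The buffer size and the in-place processing step are placeholders for illustration, not code taken from this patch:

    // Wrap an existing GL buffer as a UMat, process it on the CL side, then release the mapping.
    cv::ogl::Buffer glBuf(480, 640, CV_8UC4);  // GL buffer object holding image data (size is arbitrary here)
    cv::UMat u = cv::ogl::mapGLBuffer(glBuf, cv::ACCESS_READ | cv::ACCESS_WRITE);
    cv::bitwise_not(u, u);                     // any UMat-based processing; elementwise op chosen as a safe in-place example
    cv::ogl::unmapGLBuffer(u);                 // mandatory: the UMat does not own the underlying CL/GL buffer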
+@param buffer - source Buffer object. +@param accessFlags - data access flags (ACCESS_READ|ACCESS_WRITE). +@return Returns UMat object + */ +CV_EXPORTS UMat mapGLBuffer(const Buffer& buffer, int accessFlags = ACCESS_READ|ACCESS_WRITE); + +/** @brief Unmaps Buffer object (releases UMat, previously mapped from Buffer). + +Function must be called explicitly by the user for each UMat previously constructed +by the call to mapGLBuffer() function. +@param u - source UMat, created by mapGLBuffer(). + */ +CV_EXPORTS void unmapGLBuffer(UMat& u); }} // namespace cv::ogl diff --git a/modules/core/include/opencv2/core/private.hpp b/modules/core/include/opencv2/core/private.hpp index 4f9f487778..0e8765be1f 100644 --- a/modules/core/include/opencv2/core/private.hpp +++ b/modules/core/include/opencv2/core/private.hpp @@ -195,7 +195,10 @@ CV_EXPORTS void scalarToRawData(const cv::Scalar& s, void* buf, int type, int un # define IPP_VERSION_X100 (IPP_VERSION_MAJOR * 100 + IPP_VERSION_MINOR) -#define IPP_ALIGN 32 // required for AVX optimization +#ifdef CV_MALLOC_ALIGN +#undef CV_MALLOC_ALIGN +#endif +#define CV_MALLOC_ALIGN 32 // required for AVX optimization #define setIppErrorStatus() cv::ipp::setIppStatus(-1, CV_Func, __FILE__, __LINE__) @@ -235,9 +238,67 @@ static inline IppDataType ippiGetDataType(int depth) # define IPP_VERSION_X100 0 #endif +#ifdef HAVE_IPP_ICV_ONLY +#define HAVE_ICV 1 +#else +#define HAVE_ICV 0 +#endif + + #define CV_IPP_CHECK_COND (cv::ipp::useIPP()) #define CV_IPP_CHECK() if(CV_IPP_CHECK_COND) +#ifdef HAVE_IPP + +#ifdef CV_IPP_RUN_VERBOSE +#define CV_IPP_RUN_(condition, func, ...) \ + { \ + if (cv::ipp::useIPP() && (condition) && func) \ + { \ + printf("%s: IPP implementation is running\n", CV_Func); \ + fflush(stdout); \ + CV_IMPL_ADD(CV_IMPL_IPP); \ + return __VA_ARGS__; \ + } \ + else \ + { \ + printf("%s: Plain implementation is running\n", CV_Func); \ + fflush(stdout); \ + } \ + } +#elif defined CV_IPP_RUN_ASSERT +#define CV_IPP_RUN_(condition, func, ...) \ + { \ + if (cv::ipp::useIPP() && (condition)) \ + { \ + if(func) \ + { \ + CV_IMPL_ADD(CV_IMPL_IPP); \ + } \ + else \ + { \ + setIppErrorStatus(); \ + CV_Error(cv::Error::StsAssert, #func); \ + } \ + return __VA_ARGS__; \ + } \ + } +#else +#define CV_IPP_RUN_(condition, func, ...) \ + if (cv::ipp::useIPP() && (condition) && func) \ + { \ + CV_IMPL_ADD(CV_IMPL_IPP); \ + return __VA_ARGS__; \ + } +#endif + +#else +#define CV_IPP_RUN_(condition, func, ...) +#endif + +#define CV_IPP_RUN(condition, func, ...) CV_IPP_RUN_(condition, func, __VA_ARGS__) + + #ifndef IPPI_CALL # define IPPI_CALL(func) CV_Assert((func) >= 0) #endif diff --git a/modules/core/include/opencv2/core/utility.hpp b/modules/core/include/opencv2/core/utility.hpp index 3ec066045b..f0b473fac3 100644 --- a/modules/core/include/opencv2/core/utility.hpp +++ b/modules/core/include/opencv2/core/utility.hpp @@ -201,7 +201,7 @@ framework: @param nthreads Number of threads used by OpenCV. @sa getNumThreads, getThreadNum */ -CV_EXPORTS void setNumThreads(int nthreads); +CV_EXPORTS_W void setNumThreads(int nthreads); /** @brief Returns the number of threads used by OpenCV for parallel regions. @@ -219,7 +219,7 @@ The exact meaning of return value depends on the threading framework used by Ope available for the process. @sa setNumThreads, getThreadNum */ -CV_EXPORTS int getNumThreads(); +CV_EXPORTS_W int getNumThreads(); /** @brief Returns the index of the currently executed thread within the current parallel region. Always returns 0 if called outside of parallel region. 
@@ -233,7 +233,7 @@ The exact meaning of return value depends on the threading framework used by Ope - `C=` – The index of the current parallel task. @sa setNumThreads, getNumThreads */ -CV_EXPORTS int getThreadNum(); +CV_EXPORTS_W int getThreadNum(); /** @brief Returns full configuration time cmake output. diff --git a/modules/core/src/convert.cpp b/modules/core/src/convert.cpp index 090acf5508..fd07a7fc53 100644 --- a/modules/core/src/convert.cpp +++ b/modules/core/src/convert.cpp @@ -5194,18 +5194,7 @@ dtype* dst, size_t dstep, Size size, double* scale) \ static void cvt##suffix( const stype* src, size_t sstep, const uchar*, size_t, \ dtype* dst, size_t dstep, Size size, double*) \ { \ - CV_IPP_CHECK()\ - {\ - if (src && dst)\ - {\ - if (ippiConvert_##ippFavor(src, (int)sstep, dst, (int)dstep, ippiSize(size.width, size.height)) >= 0) \ - {\ - CV_IMPL_ADD(CV_IMPL_IPP)\ - return; \ - }\ - setIppErrorStatus(); \ - }\ - }\ + CV_IPP_RUN(src && dst, ippiConvert_##ippFavor(src, (int)sstep, dst, (int)dstep, ippiSize(size.width, size.height)) >= 0)\ cvt_(src, sstep, dst, dstep, size); \ } @@ -5213,18 +5202,7 @@ static void cvt##suffix( const stype* src, size_t sstep, const uchar*, size_t, \ static void cvt##suffix( const stype* src, size_t sstep, const uchar*, size_t, \ dtype* dst, size_t dstep, Size size, double*) \ { \ - CV_IPP_CHECK()\ - {\ - if (src && dst)\ - {\ - if (ippiConvert_##ippFavor(src, (int)sstep, dst, (int)dstep, ippiSize(size.width, size.height), ippRndFinancial, 0) >= 0) \ - {\ - CV_IMPL_ADD(CV_IMPL_IPP)\ - return; \ - }\ - setIppErrorStatus(); \ - }\ - }\ + CV_IPP_RUN(src && dst, ippiConvert_##ippFavor(src, (int)sstep, dst, (int)dstep, ippiSize(size.width, size.height), ippRndFinancial, 0) >= 0)\ cvt_(src, sstep, dst, dstep, size); \ } #else @@ -5860,6 +5838,45 @@ private: IppLUTParallelBody_LUTCN& operator=(const IppLUTParallelBody_LUTCN&); }; } // namespace ipp + +static bool ipp_lut(Mat &src, Mat &lut, Mat &dst) +{ + int lutcn = lut.channels(); + + if(src.dims > 2) + return false; + + bool ok = false; + Ptr body; + + size_t elemSize1 = CV_ELEM_SIZE1(dst.depth()); +#if 0 // there are no performance benefits (PR #2653) + if (lutcn == 1) + { + ParallelLoopBody* p = new ipp::IppLUTParallelBody_LUTC1(src, lut, dst, &ok); + body.reset(p); + } + else +#endif + if ((lutcn == 3 || lutcn == 4) && elemSize1 == 1) + { + ParallelLoopBody* p = new ipp::IppLUTParallelBody_LUTCN(src, lut, dst, &ok); + body.reset(p); + } + + if (body != NULL && ok) + { + Range all(0, dst.rows); + if (dst.total()>>18) + parallel_for_(all, *body, (double)std::max((size_t)1, dst.total()>>16)); + else + (*body)(all); + if (ok) + return true; + } + + return false; +} #endif // IPP class LUTParallelBody : public ParallelLoopBody @@ -5923,29 +5940,13 @@ void cv::LUT( InputArray _src, InputArray _lut, OutputArray _dst ) _dst.create(src.dims, src.size, CV_MAKETYPE(_lut.depth(), cn)); Mat dst = _dst.getMat(); + CV_IPP_RUN(_src.dims() <= 2, ipp_lut(src, lut, dst)); + if (_src.dims() <= 2) { bool ok = false; Ptr body; -#if defined(HAVE_IPP) - CV_IPP_CHECK() - { - size_t elemSize1 = CV_ELEM_SIZE1(dst.depth()); -#if 0 // there are no performance benefits (PR #2653) - if (lutcn == 1) - { - ParallelLoopBody* p = new ipp::IppLUTParallelBody_LUTC1(src, lut, dst, &ok); - body.reset(p); - } - else -#endif - if ((lutcn == 3 || lutcn == 4) && elemSize1 == 1) - { - ParallelLoopBody* p = new ipp::IppLUTParallelBody_LUTCN(src, lut, dst, &ok); - body.reset(p); - } - } -#endif + if (body == NULL || ok == false) { ok = false; diff 
--git a/modules/core/src/copy.cpp b/modules/core/src/copy.cpp index aa6eb2fb5a..22b18710d8 100644 --- a/modules/core/src/copy.cpp +++ b/modules/core/src/copy.cpp @@ -82,17 +82,7 @@ copyMask_(const uchar* _src, size_t sstep, const uchar* mask, size_t mstep, ucha template<> void copyMask_(const uchar* _src, size_t sstep, const uchar* mask, size_t mstep, uchar* _dst, size_t dstep, Size size) { -#if defined HAVE_IPP - CV_IPP_CHECK() - { - if (ippiCopy_8u_C1MR(_src, (int)sstep, _dst, (int)dstep, ippiSize(size), mask, (int)mstep) >= 0) - { - CV_IMPL_ADD(CV_IMPL_IPP); - return; - } - setIppErrorStatus(); - } -#endif + CV_IPP_RUN(true, ippiCopy_8u_C1MR(_src, (int)sstep, _dst, (int)dstep, ippiSize(size), mask, (int)mstep) >= 0) for( ; size.height--; mask += mstep, _src += sstep, _dst += dstep ) { @@ -132,17 +122,7 @@ copyMask_(const uchar* _src, size_t sstep, const uchar* mask, size_t mste template<> void copyMask_(const uchar* _src, size_t sstep, const uchar* mask, size_t mstep, uchar* _dst, size_t dstep, Size size) { -#if defined HAVE_IPP - CV_IPP_CHECK() - { - if (ippiCopy_16u_C1MR((const Ipp16u *)_src, (int)sstep, (Ipp16u *)_dst, (int)dstep, ippiSize(size), mask, (int)mstep) >= 0) - { - CV_IMPL_ADD(CV_IMPL_IPP); - return; - } - setIppErrorStatus(); - } -#endif + CV_IPP_RUN(true, ippiCopy_16u_C1MR((const Ipp16u *)_src, (int)sstep, (Ipp16u *)_dst, (int)dstep, ippiSize(size), mask, (int)mstep) >= 0) for( ; size.height--; mask += mstep, _src += sstep, _dst += dstep ) { @@ -214,15 +194,7 @@ static void copyMask##suffix(const uchar* src, size_t sstep, const uchar* mask, static void copyMask##suffix(const uchar* src, size_t sstep, const uchar* mask, size_t mstep, \ uchar* dst, size_t dstep, Size size, void*) \ { \ - CV_IPP_CHECK()\ - {\ - if (ippiCopy_##ippfavor((const ipptype *)src, (int)sstep, (ipptype *)dst, (int)dstep, ippiSize(size), (const Ipp8u *)mask, (int)mstep) >= 0) \ - {\ - CV_IMPL_ADD(CV_IMPL_IPP);\ - return;\ - }\ - setIppErrorStatus(); \ - }\ + CV_IPP_RUN(true, ippiCopy_##ippfavor((const ipptype *)src, (int)sstep, (ipptype *)dst, (int)dstep, ippiSize(size), (const Ipp8u *)mask, (int)mstep) >= 0)\ copyMask_(src, sstep, mask, mstep, dst, dstep, size); \ } #else @@ -313,24 +285,25 @@ void Mat::copyTo( OutputArray _dst ) const if( rows > 0 && cols > 0 ) { + // For some cases (with vector) dst.size != src.size, so force to column-based form + // It prevents memory corruption in case of column-based src + if (_dst.isVector()) + dst = dst.reshape(0, (int)dst.total()); + const uchar* sptr = data; uchar* dptr = dst.data; + CV_IPP_RUN( + (size_t)cols*elemSize() <= (size_t)INT_MAX && + (size_t)step <= (size_t)INT_MAX && + (size_t)dst.step <= (size_t)INT_MAX + , + ippiCopy_8u_C1R(sptr, (int)step, dptr, (int)dst.step, ippiSize((int)(cols*elemSize()), rows)) >= 0 + ) + Size sz = getContinuousSize(*this, dst); size_t len = sz.width*elemSize(); -#if defined HAVE_IPP - CV_IPP_CHECK() - { - if (ippiCopy_8u_C1R(sptr, (int)step, dptr, (int)dst.step, ippiSize((int)len, sz.height)) >= 0) - { - CV_IMPL_ADD(CV_IMPL_IPP) - return; - } - setIppErrorStatus(); - } -#endif - for( ; sz.height--; sptr += step, dptr += dst.step ) memcpy( dptr, sptr, len ); } @@ -461,6 +434,86 @@ Mat& Mat::operator = (const Scalar& s) return *this; } +#if defined HAVE_IPP +static bool ipp_Mat_setTo(Mat *src, Mat &value, Mat &mask) +{ + int cn = src->channels(), depth0 = src->depth(); + + if (!mask.empty() && (src->dims <= 2 || (src->isContinuous() && mask.isContinuous())) && + (/*depth0 == CV_8U ||*/ depth0 == CV_16U || depth0 == 
CV_16S || depth0 == CV_32S || depth0 == CV_32F) && + (cn == 1 || cn == 3 || cn == 4)) + { + uchar _buf[32]; + void * buf = _buf; + convertAndUnrollScalar( value, src->type(), _buf, 1 ); + + IppStatus status = (IppStatus)-1; + IppiSize roisize = { src->cols, src->rows }; + int mstep = (int)mask.step[0], dstep = (int)src->step[0]; + + if (src->isContinuous() && mask.isContinuous()) + { + roisize.width = (int)src->total(); + roisize.height = 1; + } + + if (cn == 1) + { + /*if (depth0 == CV_8U) + status = ippiSet_8u_C1MR(*(Ipp8u *)buf, (Ipp8u *)data, dstep, roisize, mask.data, mstep); + else*/ if (depth0 == CV_16U) + status = ippiSet_16u_C1MR(*(Ipp16u *)buf, (Ipp16u *)src->data, dstep, roisize, mask.data, mstep); + else if (depth0 == CV_16S) + status = ippiSet_16s_C1MR(*(Ipp16s *)buf, (Ipp16s *)src->data, dstep, roisize, mask.data, mstep); + else if (depth0 == CV_32S) + status = ippiSet_32s_C1MR(*(Ipp32s *)buf, (Ipp32s *)src->data, dstep, roisize, mask.data, mstep); + else if (depth0 == CV_32F) + status = ippiSet_32f_C1MR(*(Ipp32f *)buf, (Ipp32f *)src->data, dstep, roisize, mask.data, mstep); + } + else if (cn == 3 || cn == 4) + { + +#define IPP_SET(ippfavor, ippcn) \ + do \ + { \ + typedef Ipp##ippfavor ipptype; \ + ipptype ippvalue[4] = { ((ipptype *)buf)[0], ((ipptype *)buf)[1], ((ipptype *)buf)[2], ((ipptype *)buf)[3] }; \ + status = ippiSet_##ippfavor##_C##ippcn##MR(ippvalue, (ipptype *)src->data, dstep, roisize, mask.data, mstep); \ + } while ((void)0, 0) + +#define IPP_SET_CN(ippcn) \ + do \ + { \ + if (cn == ippcn) \ + { \ + /*if (depth0 == CV_8U) \ + IPP_SET(8u, ippcn); \ + else*/ if (depth0 == CV_16U) \ + IPP_SET(16u, ippcn); \ + else if (depth0 == CV_16S) \ + IPP_SET(16s, ippcn); \ + else if (depth0 == CV_32S) \ + IPP_SET(32s, ippcn); \ + else if (depth0 == CV_32F) \ + IPP_SET(32f, ippcn); \ + } \ + } while ((void)0, 0) + + IPP_SET_CN(3); + IPP_SET_CN(4); + +#undef IPP_SET_CN +#undef IPP_SET + } + + if (status >= 0) + return true; + } + + return false; +} +#endif + Mat& Mat::setTo(InputArray _value, InputArray _mask) { @@ -472,86 +525,7 @@ Mat& Mat::setTo(InputArray _value, InputArray _mask) CV_Assert( checkScalar(value, type(), _value.kind(), _InputArray::MAT )); CV_Assert( mask.empty() || (mask.type() == CV_8U && size == mask.size) ); -#if defined HAVE_IPP - CV_IPP_CHECK() - { - int cn = channels(), depth0 = depth(); - - if (!mask.empty() && (dims <= 2 || (isContinuous() && mask.isContinuous())) && - (/*depth0 == CV_8U ||*/ depth0 == CV_16U || depth0 == CV_16S || depth0 == CV_32S || depth0 == CV_32F) && - (cn == 1 || cn == 3 || cn == 4)) - { - uchar _buf[32]; - void * buf = _buf; - convertAndUnrollScalar( value, type(), _buf, 1 ); - - IppStatus status = (IppStatus)-1; - IppiSize roisize = { cols, rows }; - int mstep = (int)mask.step[0], dstep = (int)step[0]; - - if (isContinuous() && mask.isContinuous()) - { - roisize.width = (int)total(); - roisize.height = 1; - } - - if (cn == 1) - { - /*if (depth0 == CV_8U) - status = ippiSet_8u_C1MR(*(Ipp8u *)buf, (Ipp8u *)data, dstep, roisize, mask.data, mstep); - else*/ if (depth0 == CV_16U) - status = ippiSet_16u_C1MR(*(Ipp16u *)buf, (Ipp16u *)data, dstep, roisize, mask.data, mstep); - else if (depth0 == CV_16S) - status = ippiSet_16s_C1MR(*(Ipp16s *)buf, (Ipp16s *)data, dstep, roisize, mask.data, mstep); - else if (depth0 == CV_32S) - status = ippiSet_32s_C1MR(*(Ipp32s *)buf, (Ipp32s *)data, dstep, roisize, mask.data, mstep); - else if (depth0 == CV_32F) - status = ippiSet_32f_C1MR(*(Ipp32f *)buf, (Ipp32f *)data, dstep, roisize, mask.data, 
mstep); - } - else if (cn == 3 || cn == 4) - { -#define IPP_SET(ippfavor, ippcn) \ - do \ - { \ - typedef Ipp##ippfavor ipptype; \ - ipptype ippvalue[4] = { ((ipptype *)buf)[0], ((ipptype *)buf)[1], ((ipptype *)buf)[2], ((ipptype *)buf)[3] }; \ - status = ippiSet_##ippfavor##_C##ippcn##MR(ippvalue, (ipptype *)data, dstep, roisize, mask.data, mstep); \ - } while ((void)0, 0) - -#define IPP_SET_CN(ippcn) \ - do \ - { \ - if (cn == ippcn) \ - { \ - /*if (depth0 == CV_8U) \ - IPP_SET(8u, ippcn); \ - else*/ if (depth0 == CV_16U) \ - IPP_SET(16u, ippcn); \ - else if (depth0 == CV_16S) \ - IPP_SET(16s, ippcn); \ - else if (depth0 == CV_32S) \ - IPP_SET(32s, ippcn); \ - else if (depth0 == CV_32F) \ - IPP_SET(32f, ippcn); \ - } \ - } while ((void)0, 0) - - IPP_SET_CN(3); - IPP_SET_CN(4); - -#undef IPP_SET_CN -#undef IPP_SET - } - - if (status >= 0) - { - CV_IMPL_ADD(CV_IMPL_IPP); - return *this; - } - setIppErrorStatus(); - } - } -#endif + CV_IPP_RUN(true, ipp_Mat_setTo((cv::Mat*)this, value, mask), *this) size_t esz = elemSize(); BinaryFunc copymask = getCopyMaskFunc(esz); @@ -725,6 +699,76 @@ static bool ocl_flip(InputArray _src, OutputArray _dst, int flipCode ) #endif +#if defined HAVE_IPP +static bool ipp_flip( Mat &src, Mat &dst, int flip_mode ) +{ + int type = src.type(); + + typedef IppStatus (CV_STDCALL * ippiMirror)(const void * pSrc, int srcStep, void * pDst, int dstStep, IppiSize roiSize, IppiAxis flip); + typedef IppStatus (CV_STDCALL * ippiMirrorI)(const void * pSrcDst, int srcDstStep, IppiSize roiSize, IppiAxis flip); + ippiMirror ippFunc = 0; + ippiMirrorI ippFuncI = 0; + + if (src.data == dst.data) + { + CV_SUPPRESS_DEPRECATED_START + ippFuncI = + type == CV_8UC1 ? (ippiMirrorI)ippiMirror_8u_C1IR : + type == CV_8UC3 ? (ippiMirrorI)ippiMirror_8u_C3IR : + type == CV_8UC4 ? (ippiMirrorI)ippiMirror_8u_C4IR : + type == CV_16UC1 ? (ippiMirrorI)ippiMirror_16u_C1IR : + type == CV_16UC3 ? (ippiMirrorI)ippiMirror_16u_C3IR : + type == CV_16UC4 ? (ippiMirrorI)ippiMirror_16u_C4IR : + type == CV_16SC1 ? (ippiMirrorI)ippiMirror_16s_C1IR : + type == CV_16SC3 ? (ippiMirrorI)ippiMirror_16s_C3IR : + type == CV_16SC4 ? (ippiMirrorI)ippiMirror_16s_C4IR : + type == CV_32SC1 ? (ippiMirrorI)ippiMirror_32s_C1IR : + type == CV_32SC3 ? (ippiMirrorI)ippiMirror_32s_C3IR : + type == CV_32SC4 ? (ippiMirrorI)ippiMirror_32s_C4IR : + type == CV_32FC1 ? (ippiMirrorI)ippiMirror_32f_C1IR : + type == CV_32FC3 ? (ippiMirrorI)ippiMirror_32f_C3IR : + type == CV_32FC4 ? (ippiMirrorI)ippiMirror_32f_C4IR : 0; + CV_SUPPRESS_DEPRECATED_END + } + else + { + ippFunc = + type == CV_8UC1 ? (ippiMirror)ippiMirror_8u_C1R : + type == CV_8UC3 ? (ippiMirror)ippiMirror_8u_C3R : + type == CV_8UC4 ? (ippiMirror)ippiMirror_8u_C4R : + type == CV_16UC1 ? (ippiMirror)ippiMirror_16u_C1R : + type == CV_16UC3 ? (ippiMirror)ippiMirror_16u_C3R : + type == CV_16UC4 ? (ippiMirror)ippiMirror_16u_C4R : + type == CV_16SC1 ? (ippiMirror)ippiMirror_16s_C1R : + type == CV_16SC3 ? (ippiMirror)ippiMirror_16s_C3R : + type == CV_16SC4 ? (ippiMirror)ippiMirror_16s_C4R : + type == CV_32SC1 ? (ippiMirror)ippiMirror_32s_C1R : + type == CV_32SC3 ? (ippiMirror)ippiMirror_32s_C3R : + type == CV_32SC4 ? (ippiMirror)ippiMirror_32s_C4R : + type == CV_32FC1 ? (ippiMirror)ippiMirror_32f_C1R : + type == CV_32FC3 ? (ippiMirror)ippiMirror_32f_C3R : + type == CV_32FC4 ? (ippiMirror)ippiMirror_32f_C4R : 0; + } + IppiAxis axis = flip_mode == 0 ? ippAxsHorizontal : + flip_mode > 0 ? 
ippAxsVertical : ippAxsBoth; + IppiSize roisize = { dst.cols, dst.rows }; + + if (ippFunc != 0) + { + if (ippFunc(src.ptr(), (int)src.step, dst.ptr(), (int)dst.step, ippiSize(src.cols, src.rows), axis) >= 0) + return true; + } + else if (ippFuncI != 0) + { + if (ippFuncI(dst.ptr(), (int)dst.step, roisize, axis) >= 0) + return true; + } + + return false; +} +#endif + + void flip( InputArray _src, OutputArray _dst, int flip_mode ) { CV_Assert( _src.dims() <= 2 ); @@ -751,81 +795,11 @@ void flip( InputArray _src, OutputArray _dst, int flip_mode ) int type = src.type(); _dst.create( size, type ); Mat dst = _dst.getMat(); + + CV_IPP_RUN(true, ipp_flip(src, dst, flip_mode)); + size_t esz = CV_ELEM_SIZE(type); -#if defined HAVE_IPP - CV_IPP_CHECK() - { - typedef IppStatus (CV_STDCALL * ippiMirror)(const void * pSrc, int srcStep, void * pDst, int dstStep, IppiSize roiSize, IppiAxis flip); - typedef IppStatus (CV_STDCALL * ippiMirrorI)(const void * pSrcDst, int srcDstStep, IppiSize roiSize, IppiAxis flip); - ippiMirror ippFunc = 0; - ippiMirrorI ippFuncI = 0; - - if (src.data == dst.data) - { - CV_SUPPRESS_DEPRECATED_START - ippFuncI = - type == CV_8UC1 ? (ippiMirrorI)ippiMirror_8u_C1IR : - type == CV_8UC3 ? (ippiMirrorI)ippiMirror_8u_C3IR : - type == CV_8UC4 ? (ippiMirrorI)ippiMirror_8u_C4IR : - type == CV_16UC1 ? (ippiMirrorI)ippiMirror_16u_C1IR : - type == CV_16UC3 ? (ippiMirrorI)ippiMirror_16u_C3IR : - type == CV_16UC4 ? (ippiMirrorI)ippiMirror_16u_C4IR : - type == CV_16SC1 ? (ippiMirrorI)ippiMirror_16s_C1IR : - type == CV_16SC3 ? (ippiMirrorI)ippiMirror_16s_C3IR : - type == CV_16SC4 ? (ippiMirrorI)ippiMirror_16s_C4IR : - type == CV_32SC1 ? (ippiMirrorI)ippiMirror_32s_C1IR : - type == CV_32SC3 ? (ippiMirrorI)ippiMirror_32s_C3IR : - type == CV_32SC4 ? (ippiMirrorI)ippiMirror_32s_C4IR : - type == CV_32FC1 ? (ippiMirrorI)ippiMirror_32f_C1IR : - type == CV_32FC3 ? (ippiMirrorI)ippiMirror_32f_C3IR : - type == CV_32FC4 ? (ippiMirrorI)ippiMirror_32f_C4IR : 0; - CV_SUPPRESS_DEPRECATED_END - } - else - { - ippFunc = - type == CV_8UC1 ? (ippiMirror)ippiMirror_8u_C1R : - type == CV_8UC3 ? (ippiMirror)ippiMirror_8u_C3R : - type == CV_8UC4 ? (ippiMirror)ippiMirror_8u_C4R : - type == CV_16UC1 ? (ippiMirror)ippiMirror_16u_C1R : - type == CV_16UC3 ? (ippiMirror)ippiMirror_16u_C3R : - type == CV_16UC4 ? (ippiMirror)ippiMirror_16u_C4R : - type == CV_16SC1 ? (ippiMirror)ippiMirror_16s_C1R : - type == CV_16SC3 ? (ippiMirror)ippiMirror_16s_C3R : - type == CV_16SC4 ? (ippiMirror)ippiMirror_16s_C4R : - type == CV_32SC1 ? (ippiMirror)ippiMirror_32s_C1R : - type == CV_32SC3 ? (ippiMirror)ippiMirror_32s_C3R : - type == CV_32SC4 ? (ippiMirror)ippiMirror_32s_C4R : - type == CV_32FC1 ? (ippiMirror)ippiMirror_32f_C1R : - type == CV_32FC3 ? (ippiMirror)ippiMirror_32f_C3R : - type == CV_32FC4 ? (ippiMirror)ippiMirror_32f_C4R : 0; - } - IppiAxis axis = flip_mode == 0 ? ippAxsHorizontal : - flip_mode > 0 ? 
ippAxsVertical : ippAxsBoth; - IppiSize roisize = { dst.cols, dst.rows }; - - if (ippFunc != 0) - { - if (ippFunc(src.ptr(), (int)src.step, dst.ptr(), (int)dst.step, ippiSize(src.cols, src.rows), axis) >= 0) - { - CV_IMPL_ADD(CV_IMPL_IPP); - return; - } - setIppErrorStatus(); - } - else if (ippFuncI != 0) - { - if (ippFuncI(dst.ptr(), (int)dst.step, roisize, axis) >= 0) - { - CV_IMPL_ADD(CV_IMPL_IPP); - return; - } - setIppErrorStatus(); - } - } -#endif - if( flip_mode <= 0 ) flipVert( src.ptr(), src.step, dst.ptr(), dst.step, src.size(), esz ); else diff --git a/modules/core/src/dxt.cpp b/modules/core/src/dxt.cpp index e3fd0e4c40..ef01c2139f 100644 --- a/modules/core/src/dxt.cpp +++ b/modules/core/src/dxt.cpp @@ -2026,8 +2026,7 @@ class OCL_FftPlanCache public: static OCL_FftPlanCache & getInstance() { - static OCL_FftPlanCache planCache; - return planCache; + CV_SINGLETON_LAZY_INIT_REF(OCL_FftPlanCache, new OCL_FftPlanCache()) } Ptr getFftPlan(int dft_size, int depth) @@ -2291,8 +2290,7 @@ class PlanCache public: static PlanCache & getInstance() { - static PlanCache planCache; - return planCache; + CV_SINGLETON_LAZY_INIT_REF(PlanCache, new PlanCache()) } clAmdFftPlanHandle getPlanHandle(const Size & dft_size, int src_step, int dst_step, bool doubleFP, diff --git a/modules/core/src/matop.cpp b/modules/core/src/matop.cpp index a0ee4316d9..30933e08a9 100644 --- a/modules/core/src/matop.cpp +++ b/modules/core/src/matop.cpp @@ -205,8 +205,7 @@ public: static MatOp_Initializer* getGlobalMatOpInitializer() { - static MatOp_Initializer initializer; - return &initializer; + CV_SINGLETON_LAZY_INIT(MatOp_Initializer, new MatOp_Initializer()) } static inline bool isIdentity(const MatExpr& e) { return e.op == &g_MatOp_Identity; } @@ -1584,12 +1583,12 @@ void MatOp_Initializer::multiply(const MatExpr& e, double s, MatExpr& res) const inline void MatOp_Initializer::makeExpr(MatExpr& res, int method, Size sz, int type, double alpha) { - res = MatExpr(getGlobalMatOpInitializer(), method, Mat(sz, type, (void*)0), Mat(), Mat(), alpha, 0); + res = MatExpr(getGlobalMatOpInitializer(), method, Mat(sz, type, (void*)0xEEEEEEEE), Mat(), Mat(), alpha, 0); } inline void MatOp_Initializer::makeExpr(MatExpr& res, int method, int ndims, const int* sizes, int type, double alpha) { - res = MatExpr(getGlobalMatOpInitializer(), method, Mat(ndims, sizes, type, (void*)0), Mat(), Mat(), alpha, 0); + res = MatExpr(getGlobalMatOpInitializer(), method, Mat(ndims, sizes, type, (void*)0xEEEEEEEE), Mat(), Mat(), alpha, 0); } /////////////////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/modules/core/src/matrix.cpp b/modules/core/src/matrix.cpp index b273c8a7d8..8161efc57d 100644 --- a/modules/core/src/matrix.cpp +++ b/modules/core/src/matrix.cpp @@ -224,8 +224,7 @@ public: MatAllocator* Mat::getStdAllocator() { - static StdMatAllocator allocator; - return &allocator; + CV_SINGLETON_LAZY_INIT(MatAllocator, new StdMatAllocator()) } void swap( Mat& a, Mat& b ) @@ -3088,7 +3087,73 @@ static bool ocl_transpose( InputArray _src, OutputArray _dst ) #endif +#ifdef HAVE_IPP +static bool ipp_transpose( Mat &src, Mat &dst ) +{ + int type = src.type(); + typedef IppStatus (CV_STDCALL * ippiTranspose)(const void * pSrc, int srcStep, void * pDst, int dstStep, IppiSize roiSize); + typedef IppStatus (CV_STDCALL * ippiTransposeI)(const void * pSrcDst, int srcDstStep, IppiSize roiSize); + ippiTranspose ippFunc = 0; + ippiTransposeI ippFuncI = 0; + + if (dst.data == src.data && dst.cols == 
dst.rows) + { + CV_SUPPRESS_DEPRECATED_START + ippFuncI = + type == CV_8UC1 ? (ippiTransposeI)ippiTranspose_8u_C1IR : + type == CV_8UC3 ? (ippiTransposeI)ippiTranspose_8u_C3IR : + type == CV_8UC4 ? (ippiTransposeI)ippiTranspose_8u_C4IR : + type == CV_16UC1 ? (ippiTransposeI)ippiTranspose_16u_C1IR : + type == CV_16UC3 ? (ippiTransposeI)ippiTranspose_16u_C3IR : + type == CV_16UC4 ? (ippiTransposeI)ippiTranspose_16u_C4IR : + type == CV_16SC1 ? (ippiTransposeI)ippiTranspose_16s_C1IR : + type == CV_16SC3 ? (ippiTransposeI)ippiTranspose_16s_C3IR : + type == CV_16SC4 ? (ippiTransposeI)ippiTranspose_16s_C4IR : + type == CV_32SC1 ? (ippiTransposeI)ippiTranspose_32s_C1IR : + type == CV_32SC3 ? (ippiTransposeI)ippiTranspose_32s_C3IR : + type == CV_32SC4 ? (ippiTransposeI)ippiTranspose_32s_C4IR : + type == CV_32FC1 ? (ippiTransposeI)ippiTranspose_32f_C1IR : + type == CV_32FC3 ? (ippiTransposeI)ippiTranspose_32f_C3IR : + type == CV_32FC4 ? (ippiTransposeI)ippiTranspose_32f_C4IR : 0; + CV_SUPPRESS_DEPRECATED_END + } + else + { + ippFunc = + type == CV_8UC1 ? (ippiTranspose)ippiTranspose_8u_C1R : + type == CV_8UC3 ? (ippiTranspose)ippiTranspose_8u_C3R : + type == CV_8UC4 ? (ippiTranspose)ippiTranspose_8u_C4R : + type == CV_16UC1 ? (ippiTranspose)ippiTranspose_16u_C1R : + type == CV_16UC3 ? (ippiTranspose)ippiTranspose_16u_C3R : + type == CV_16UC4 ? (ippiTranspose)ippiTranspose_16u_C4R : + type == CV_16SC1 ? (ippiTranspose)ippiTranspose_16s_C1R : + type == CV_16SC3 ? (ippiTranspose)ippiTranspose_16s_C3R : + type == CV_16SC4 ? (ippiTranspose)ippiTranspose_16s_C4R : + type == CV_32SC1 ? (ippiTranspose)ippiTranspose_32s_C1R : + type == CV_32SC3 ? (ippiTranspose)ippiTranspose_32s_C3R : + type == CV_32SC4 ? (ippiTranspose)ippiTranspose_32s_C4R : + type == CV_32FC1 ? (ippiTranspose)ippiTranspose_32f_C1R : + type == CV_32FC3 ? (ippiTranspose)ippiTranspose_32f_C3R : + type == CV_32FC4 ? (ippiTranspose)ippiTranspose_32f_C4R : 0; + } + + IppiSize roiSize = { src.cols, src.rows }; + if (ippFunc != 0) + { + if (ippFunc(src.ptr(), (int)src.step, dst.ptr(), (int)dst.step, roiSize) >= 0) + return true; + } + else if (ippFuncI != 0) + { + if (ippFuncI(dst.ptr(), (int)dst.step, roiSize) >= 0) + return true; + } + return false; } +#endif + +} + void cv::transpose( InputArray _src, OutputArray _dst ) { @@ -3116,76 +3181,7 @@ void cv::transpose( InputArray _src, OutputArray _dst ) return; } -#if defined HAVE_IPP - CV_IPP_CHECK() - { - typedef IppStatus (CV_STDCALL * ippiTranspose)(const void * pSrc, int srcStep, void * pDst, int dstStep, IppiSize roiSize); - typedef IppStatus (CV_STDCALL * ippiTransposeI)(const void * pSrcDst, int srcDstStep, IppiSize roiSize); - ippiTranspose ippFunc = 0; - ippiTransposeI ippFuncI = 0; - - if (dst.data == src.data && dst.cols == dst.rows) - { - CV_SUPPRESS_DEPRECATED_START - ippFuncI = - type == CV_8UC1 ? (ippiTransposeI)ippiTranspose_8u_C1IR : - type == CV_8UC3 ? (ippiTransposeI)ippiTranspose_8u_C3IR : - type == CV_8UC4 ? (ippiTransposeI)ippiTranspose_8u_C4IR : - type == CV_16UC1 ? (ippiTransposeI)ippiTranspose_16u_C1IR : - type == CV_16UC3 ? (ippiTransposeI)ippiTranspose_16u_C3IR : - type == CV_16UC4 ? (ippiTransposeI)ippiTranspose_16u_C4IR : - type == CV_16SC1 ? (ippiTransposeI)ippiTranspose_16s_C1IR : - type == CV_16SC3 ? (ippiTransposeI)ippiTranspose_16s_C3IR : - type == CV_16SC4 ? (ippiTransposeI)ippiTranspose_16s_C4IR : - type == CV_32SC1 ? (ippiTransposeI)ippiTranspose_32s_C1IR : - type == CV_32SC3 ? (ippiTransposeI)ippiTranspose_32s_C3IR : - type == CV_32SC4 ? 
(ippiTransposeI)ippiTranspose_32s_C4IR : - type == CV_32FC1 ? (ippiTransposeI)ippiTranspose_32f_C1IR : - type == CV_32FC3 ? (ippiTransposeI)ippiTranspose_32f_C3IR : - type == CV_32FC4 ? (ippiTransposeI)ippiTranspose_32f_C4IR : 0; - CV_SUPPRESS_DEPRECATED_END - } - else - { - ippFunc = - type == CV_8UC1 ? (ippiTranspose)ippiTranspose_8u_C1R : - type == CV_8UC3 ? (ippiTranspose)ippiTranspose_8u_C3R : - type == CV_8UC4 ? (ippiTranspose)ippiTranspose_8u_C4R : - type == CV_16UC1 ? (ippiTranspose)ippiTranspose_16u_C1R : - type == CV_16UC3 ? (ippiTranspose)ippiTranspose_16u_C3R : - type == CV_16UC4 ? (ippiTranspose)ippiTranspose_16u_C4R : - type == CV_16SC1 ? (ippiTranspose)ippiTranspose_16s_C1R : - type == CV_16SC3 ? (ippiTranspose)ippiTranspose_16s_C3R : - type == CV_16SC4 ? (ippiTranspose)ippiTranspose_16s_C4R : - type == CV_32SC1 ? (ippiTranspose)ippiTranspose_32s_C1R : - type == CV_32SC3 ? (ippiTranspose)ippiTranspose_32s_C3R : - type == CV_32SC4 ? (ippiTranspose)ippiTranspose_32s_C4R : - type == CV_32FC1 ? (ippiTranspose)ippiTranspose_32f_C1R : - type == CV_32FC3 ? (ippiTranspose)ippiTranspose_32f_C3R : - type == CV_32FC4 ? (ippiTranspose)ippiTranspose_32f_C4R : 0; - } - - IppiSize roiSize = { src.cols, src.rows }; - if (ippFunc != 0) - { - if (ippFunc(src.ptr(), (int)src.step, dst.ptr(), (int)dst.step, roiSize) >= 0) - { - CV_IMPL_ADD(CV_IMPL_IPP); - return; - } - setIppErrorStatus(); - } - else if (ippFuncI != 0) - { - if (ippFuncI(dst.ptr(), (int)dst.step, roiSize) >= 0) - { - CV_IMPL_ADD(CV_IMPL_IPP); - return; - } - setIppErrorStatus(); - } - } -#endif + CV_IPP_RUN(true, ipp_transpose(src, dst)) if( dst.data == src.data ) { diff --git a/modules/core/src/ocl.cpp b/modules/core/src/ocl.cpp index 5d68a36832..0378b66c04 100644 --- a/modules/core/src/ocl.cpp +++ b/modules/core/src/ocl.cpp @@ -858,9 +858,9 @@ OCL_FUNC_P(cl_context, clCreateContext, OCL_FUNC(cl_int, clReleaseContext, (cl_context context), (context)) -/* -OCL_FUNC(cl_int, clRetainContext, (cl_context context), (context)) +OCL_FUNC(cl_int, clRetainContext, (cl_context context), (context)) +/* OCL_FUNC_P(cl_context, clCreateContextFromType, (const cl_context_properties * properties, cl_device_type device_type, @@ -945,7 +945,6 @@ OCL_FUNC(cl_int, clGetSupportedImageFormats, (context, flags, image_type, num_entries, image_formats, num_image_formats)) -/* OCL_FUNC(cl_int, clGetMemObjectInfo, (cl_mem memobj, cl_mem_info param_name, @@ -962,6 +961,7 @@ OCL_FUNC(cl_int, clGetImageInfo, size_t * param_value_size_ret), (image, param_name, param_value_size, param_value, param_value_size_ret)) +/* OCL_FUNC(cl_int, clCreateKernelsInProgram, (cl_program program, cl_uint num_kernels, @@ -1038,20 +1038,20 @@ OCL_FUNC(cl_int, clEnqueueCopyImage, cl_event * event), (command_queue, src_image, dst_image, src_origin, dst_origin, region, num_events_in_wait_list, event_wait_list, event)) +*/ OCL_FUNC(cl_int, clEnqueueCopyImageToBuffer, (cl_command_queue command_queue, cl_mem src_image, cl_mem dst_buffer, - const size_t * src_origin[3], - const size_t * region[3], + const size_t * src_origin, + const size_t * region, size_t dst_offset, cl_uint num_events_in_wait_list, const cl_event * event_wait_list, cl_event * event), (command_queue, src_image, dst_buffer, src_origin, region, dst_offset, num_events_in_wait_list, event_wait_list, event)) -*/ OCL_FUNC(cl_int, clEnqueueCopyBufferToImage, (cl_command_queue command_queue, @@ -1100,10 +1100,10 @@ OCL_FUNC(cl_int, clGetKernelInfo, size_t * param_value_size_ret), (kernel, param_name, param_value_size, 
param_value, param_value_size_ret)) -OCL_FUNC(cl_int, clRetainMemObject, (cl_mem memobj), (memobj)) - */ +OCL_FUNC(cl_int, clRetainMemObject, (cl_mem memobj), (memobj)) + OCL_FUNC(cl_int, clReleaseMemObject, (cl_mem memobj), (memobj)) @@ -1348,7 +1348,7 @@ OCL_FUNC(cl_int, clReleaseEvent, (cl_event event), (event)) #define CL_VERSION_1_2 #endif -#endif +#endif // HAVE_OPENCL #ifdef _DEBUG #define CV_OclDbgAssert CV_DbgAssert @@ -1510,8 +1510,7 @@ class AmdBlasHelper public: static AmdBlasHelper & getInstance() { - static AmdBlasHelper amdBlas; - return amdBlas; + CV_SINGLETON_LAZY_INIT_REF(AmdBlasHelper, new AmdBlasHelper()) } bool isAvailable() const @@ -1533,35 +1532,36 @@ protected: { if (!g_isAmdBlasInitialized) { - AutoLock lock(m); + AutoLock lock(getInitializationMutex()); - if (!g_isAmdBlasInitialized && haveOpenCL()) + if (!g_isAmdBlasInitialized) { - try + if (haveOpenCL()) { - g_isAmdBlasAvailable = clAmdBlasSetup() == clAmdBlasSuccess; + try + { + g_isAmdBlasAvailable = clAmdBlasSetup() == clAmdBlasSuccess; + } + catch (...) + { + g_isAmdBlasAvailable = false; + } } - catch (...) - { + else g_isAmdBlasAvailable = false; - } - } - else - g_isAmdBlasAvailable = false; - g_isAmdBlasInitialized = true; + g_isAmdBlasInitialized = true; + } } } private: - static Mutex m; static bool g_isAmdBlasInitialized; static bool g_isAmdBlasAvailable; }; bool AmdBlasHelper::g_isAmdBlasAvailable = false; bool AmdBlasHelper::g_isAmdBlasInitialized = false; -Mutex AmdBlasHelper::m; bool haveAmdBlas() { @@ -1584,8 +1584,7 @@ class AmdFftHelper public: static AmdFftHelper & getInstance() { - static AmdFftHelper amdFft; - return amdFft; + CV_SINGLETON_LAZY_INIT_REF(AmdFftHelper, new AmdFftHelper()) } bool isAvailable() const @@ -1607,34 +1606,36 @@ protected: { if (!g_isAmdFftInitialized) { - AutoLock lock(m); + AutoLock lock(getInitializationMutex()); - if (!g_isAmdFftInitialized && haveOpenCL()) + if (!g_isAmdFftInitialized) { - try + if (haveOpenCL()) { - cl_uint major, minor, patch; - CV_Assert(clAmdFftInitSetupData(&setupData) == CLFFT_SUCCESS); + try + { + cl_uint major, minor, patch; + CV_Assert(clAmdFftInitSetupData(&setupData) == CLFFT_SUCCESS); - // it throws exception in case AmdFft binaries are not found - CV_Assert(clAmdFftGetVersion(&major, &minor, &patch) == CLFFT_SUCCESS); - g_isAmdFftAvailable = true; + // it throws exception in case AmdFft binaries are not found + CV_Assert(clAmdFftGetVersion(&major, &minor, &patch) == CLFFT_SUCCESS); + g_isAmdFftAvailable = true; + } + catch (const Exception &) + { + g_isAmdFftAvailable = false; + } } - catch (const Exception &) - { + else g_isAmdFftAvailable = false; - } - } - else - g_isAmdFftAvailable = false; - g_isAmdFftInitialized = true; + g_isAmdFftInitialized = true; + } } } private: static clAmdFftSetupData setupData; - static Mutex m; static bool g_isAmdFftInitialized; static bool g_isAmdFftAvailable; }; @@ -1642,7 +1643,6 @@ private: clAmdFftSetupData AmdFftHelper::setupData; bool AmdFftHelper::g_isAmdFftAvailable = false; bool AmdFftHelper::g_isAmdFftInitialized = false; -Mutex AmdFftHelper::m; bool haveAmdFft() { @@ -2925,6 +2925,83 @@ CV_EXPORTS bool useSVM(UMatUsageFlags usageFlags) #endif // HAVE_OPENCL_SVM +static void get_platform_name(cl_platform_id id, String& name) +{ + // get platform name string length + size_t sz = 0; + if (CL_SUCCESS != clGetPlatformInfo(id, CL_PLATFORM_NAME, 0, 0, &sz)) + CV_ErrorNoReturn(cv::Error::OpenCLApiCallError, "clGetPlatformInfo failed!"); + + // get platform name string + AutoBuffer buf(sz + 1); 
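+    // Note: this is the usual two-step OpenCL query idiom -- the first
+    // clGetPlatformInfo() call above only reports the required string size,
+    // the second call below fills the buffer, and the extra byte is reserved
+    // for the explicit terminator written afterwards.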
+ if (CL_SUCCESS != clGetPlatformInfo(id, CL_PLATFORM_NAME, sz, buf, 0)) + CV_ErrorNoReturn(cv::Error::OpenCLApiCallError, "clGetPlatformInfo failed!"); + + // just in case, ensure trailing zero for ASCIIZ string + buf[sz] = 0; + + name = (const char*)buf; +} + +/* +// Attaches OpenCL context to OpenCV +*/ +void attachContext(const String& platformName, void* platformID, void* context, void* deviceID) +{ + cl_uint cnt = 0; + + if(CL_SUCCESS != clGetPlatformIDs(0, 0, &cnt)) + CV_ErrorNoReturn(cv::Error::OpenCLApiCallError, "clGetPlatformIDs failed!"); + + if (cnt == 0) + CV_ErrorNoReturn(cv::Error::OpenCLApiCallError, "no OpenCL platform available!"); + + std::vector platforms(cnt); + + if(CL_SUCCESS != clGetPlatformIDs(cnt, &platforms[0], 0)) + CV_ErrorNoReturn(cv::Error::OpenCLApiCallError, "clGetPlatformIDs failed!"); + + bool platformAvailable = false; + + // check if external platformName contained in list of available platforms in OpenCV + for (unsigned int i = 0; i < cnt; i++) + { + String availablePlatformName; + get_platform_name(platforms[i], availablePlatformName); + // external platform is found in the list of available platforms + if (platformName == availablePlatformName) + { + platformAvailable = true; + break; + } + } + + if (!platformAvailable) + CV_ErrorNoReturn(cv::Error::OpenCLApiCallError, "No matched platforms available!"); + + // check if platformID corresponds to platformName + String actualPlatformName; + get_platform_name((cl_platform_id)platformID, actualPlatformName); + if (platformName != actualPlatformName) + CV_ErrorNoReturn(cv::Error::OpenCLApiCallError, "No matched platforms available!"); + + // do not initialize OpenCL context + Context ctx = Context::getDefault(false); + + // attach supplied context to OpenCV + initializeContextFromHandle(ctx, platformID, context, deviceID); + + if(CL_SUCCESS != clRetainContext((cl_context)context)) + CV_ErrorNoReturn(cv::Error::OpenCLApiCallError, "clRetainContext failed!"); + + // clear command queue, if any + getCoreTlsData().get()->oclQueue.finish(); + Queue q; + getCoreTlsData().get()->oclQueue = q; + + return; +} // attachContext() + void initializeContextFromHandle(Context& ctx, void* platform, void* _context, void* _device) { @@ -3150,10 +3227,10 @@ struct Kernel::Impl bool haveTempDstUMats; }; -}} +}} // namespace cv::ocl + +extern "C" { -extern "C" -{ static void CL_CALLBACK oclCleanupCallback(cl_event, cl_int, void *p) { ((cv::ocl::Kernel::Impl*)p)->finit(); @@ -5162,10 +5239,170 @@ public: MatAllocator* getOpenCLAllocator() { - static MatAllocator * allocator = new OpenCLAllocator(); - return allocator; + CV_SINGLETON_LAZY_INIT(MatAllocator, new OpenCLAllocator()) } +}} // namespace cv::ocl + + +namespace cv { + +// three funcs below are implemented in umatrix.cpp +void setSize( UMat& m, int _dims, const int* _sz, const size_t* _steps, + bool autoSteps = false ); + +void updateContinuityFlag(UMat& m); +void finalizeHdr(UMat& m); + +} // namespace cv + + +namespace cv { namespace ocl { + +/* +// Convert OpenCL buffer memory to UMat +*/ +void convertFromBuffer(void* cl_mem_buffer, size_t step, int rows, int cols, int type, UMat& dst) +{ + int d = 2; + int sizes[] = { rows, cols }; + + CV_Assert(0 <= d && d <= CV_MAX_DIM); + + dst.release(); + + dst.flags = (type & Mat::TYPE_MASK) | Mat::MAGIC_VAL; + dst.usageFlags = USAGE_DEFAULT; + + setSize(dst, d, sizes, 0, true); + dst.offset = 0; + + cl_mem memobj = (cl_mem)cl_mem_buffer; + cl_mem_object_type mem_type = 0; + + CV_Assert(clGetMemObjectInfo(memobj, 
CL_MEM_TYPE, sizeof(cl_mem_object_type), &mem_type, 0) == CL_SUCCESS); + + CV_Assert(CL_MEM_OBJECT_BUFFER == mem_type); + + size_t total = 0; + CV_Assert(clGetMemObjectInfo(memobj, CL_MEM_SIZE, sizeof(size_t), &total, 0) == CL_SUCCESS); + + CV_Assert(clRetainMemObject(memobj) == CL_SUCCESS); + + CV_Assert((int)step >= cols * CV_ELEM_SIZE(type)); + CV_Assert(total >= rows * step); + + // attach clBuffer to UMatData + dst.u = new UMatData(getOpenCLAllocator()); + dst.u->data = 0; + dst.u->allocatorFlags_ = 0; // not allocated from any OpenCV buffer pool + dst.u->flags = 0; + dst.u->handle = cl_mem_buffer; + dst.u->origdata = 0; + dst.u->prevAllocator = 0; + dst.u->size = total; + + finalizeHdr(dst); + dst.addref(); + + return; +} // convertFromBuffer() + + +/* +// Convert OpenCL image2d_t memory to UMat +*/ +void convertFromImage(void* cl_mem_image, UMat& dst) +{ + cl_mem clImage = (cl_mem)cl_mem_image; + cl_mem_object_type mem_type = 0; + + CV_Assert(clGetMemObjectInfo(clImage, CL_MEM_TYPE, sizeof(cl_mem_object_type), &mem_type, 0) == CL_SUCCESS); + + CV_Assert(CL_MEM_OBJECT_IMAGE2D == mem_type); + + cl_image_format fmt = { 0, 0 }; + CV_Assert(clGetImageInfo(clImage, CL_IMAGE_FORMAT, sizeof(cl_image_format), &fmt, 0) == CL_SUCCESS); + + int depth = CV_8U; + switch (fmt.image_channel_data_type) + { + case CL_UNORM_INT8: + case CL_UNSIGNED_INT8: + depth = CV_8U; + break; + + case CL_SNORM_INT8: + case CL_SIGNED_INT8: + depth = CV_8S; + break; + + case CL_UNORM_INT16: + case CL_UNSIGNED_INT16: + depth = CV_16U; + break; + + case CL_SNORM_INT16: + case CL_SIGNED_INT16: + depth = CV_16S; + break; + + case CL_SIGNED_INT32: + depth = CV_32S; + break; + + case CL_FLOAT: + depth = CV_32F; + break; + + default: + CV_Error(cv::Error::OpenCLApiCallError, "Not supported image_channel_data_type"); + } + + int type = CV_8UC1; + switch (fmt.image_channel_order) + { + case CL_R: + type = CV_MAKE_TYPE(depth, 1); + break; + + case CL_RGBA: + case CL_BGRA: + case CL_ARGB: + type = CV_MAKE_TYPE(depth, 4); + break; + + default: + CV_Error(cv::Error::OpenCLApiCallError, "Not supported image_channel_order"); + break; + } + + size_t step = 0; + CV_Assert(clGetImageInfo(clImage, CL_IMAGE_ROW_PITCH, sizeof(size_t), &step, 0) == CL_SUCCESS); + + size_t w = 0; + CV_Assert(clGetImageInfo(clImage, CL_IMAGE_WIDTH, sizeof(size_t), &w, 0) == CL_SUCCESS); + + size_t h = 0; + CV_Assert(clGetImageInfo(clImage, CL_IMAGE_HEIGHT, sizeof(size_t), &h, 0) == CL_SUCCESS); + + dst.create((int)h, (int)w, type); + + cl_mem clBuffer = (cl_mem)dst.handle(ACCESS_READ); + + cl_command_queue q = (cl_command_queue)Queue::getDefault().ptr(); + + size_t offset = 0; + size_t src_origin[3] = { 0, 0, 0 }; + size_t region[3] = { w, h, 1 }; + CV_Assert(clEnqueueCopyImageToBuffer(q, clImage, clBuffer, src_origin, region, offset, 0, NULL, NULL) == CL_SUCCESS); + + CV_Assert(clFinish(q) == CL_SUCCESS); + + return; +} // convertFromImage() + + ///////////////////////////////////////////// Utility functions ///////////////////////////////////////////////// static void getDevices(std::vector& devices, cl_platform_id platform) diff --git a/modules/core/src/opencl/convert.cl b/modules/core/src/opencl/convert.cl index e0e7bd83a8..e869d6d743 100644 --- a/modules/core/src/opencl/convert.cl +++ b/modules/core/src/opencl/convert.cl @@ -53,7 +53,10 @@ __kernel void convertTo(__global const uchar * srcptr, int src_step, int src_offset, __global uchar * dstptr, int dst_step, int dst_offset, int dst_rows, int dst_cols, - WT alpha, WT beta, int rowsPerWI) +#ifndef 
NO_SCALE + WT alpha, WT beta, +#endif + int rowsPerWI) { int x = get_global_id(0); int y0 = get_global_id(1) * rowsPerWI; @@ -68,7 +71,11 @@ __kernel void convertTo(__global const uchar * srcptr, int src_step, int src_off __global const srcT * src = (__global const srcT *)(srcptr + src_index); __global dstT * dst = (__global dstT *)(dstptr + dst_index); +#ifdef NO_SCALE + dst[0] = convertToDT(src[0]); +#else dst[0] = convertToDT(fma(convertToWT(src[0]), alpha, beta)); +#endif } } } diff --git a/modules/core/src/opencl/minmaxloc.cl b/modules/core/src/opencl/minmaxloc.cl index bd026c5c83..cabc720596 100644 --- a/modules/core/src/opencl/minmaxloc.cl +++ b/modules/core/src/opencl/minmaxloc.cl @@ -13,6 +13,11 @@ #endif #endif +static inline int align(int pos) +{ + return (pos + (MINMAX_STRUCT_ALIGNMENT - 1)) & (~(MINMAX_STRUCT_ALIGNMENT - 1)); +} + #ifdef DEPTH_0 #define MIN_VAL 0 #define MAX_VAL UCHAR_MAX @@ -366,19 +371,23 @@ __kernel void minmaxloc(__global const uchar * srcptr, int src_step, int src_off #ifdef NEED_MINVAL *(__global dstT1 *)(dstptr + mad24(gid, (int)sizeof(dstT1), pos)) = localmem_min[0]; pos = mad24(groupnum, (int)sizeof(dstT1), pos); + pos = align(pos); #endif #ifdef NEED_MAXVAL *(__global dstT1 *)(dstptr + mad24(gid, (int)sizeof(dstT1), pos)) = localmem_max[0]; pos = mad24(groupnum, (int)sizeof(dstT1), pos); + pos = align(pos); #endif #ifdef NEED_MINLOC *(__global uint *)(dstptr + mad24(gid, (int)sizeof(uint), pos)) = localmem_minloc[0]; pos = mad24(groupnum, (int)sizeof(uint), pos); + pos = align(pos); #endif #ifdef NEED_MAXLOC *(__global uint *)(dstptr + mad24(gid, (int)sizeof(uint), pos)) = localmem_maxloc[0]; #ifdef OP_CALC2 pos = mad24(groupnum, (int)sizeof(uint), pos); + pos = align(pos); #endif #endif #ifdef OP_CALC2 diff --git a/modules/core/src/opencl/runtime/autogenerated/opencl_gl_impl.hpp b/modules/core/src/opencl/runtime/autogenerated/opencl_gl_impl.hpp new file mode 100644 index 0000000000..6d97180508 --- /dev/null +++ b/modules/core/src/opencl/runtime/autogenerated/opencl_gl_impl.hpp @@ -0,0 +1,198 @@ +// +// AUTOGENERATED, DO NOT EDIT +// +// generated by parser_cl.py +enum OPENCL_GL_FN_ID { + OPENCL_GL_FN_clCreateFromGLBuffer = 0, + OPENCL_GL_FN_clCreateFromGLRenderbuffer = 1, + OPENCL_GL_FN_clCreateFromGLTexture = 2, + OPENCL_GL_FN_clCreateFromGLTexture2D = 3, + OPENCL_GL_FN_clCreateFromGLTexture3D = 4, + OPENCL_GL_FN_clEnqueueAcquireGLObjects = 5, + OPENCL_GL_FN_clEnqueueReleaseGLObjects = 6, + OPENCL_GL_FN_clGetGLContextInfoKHR = 7, + OPENCL_GL_FN_clGetGLObjectInfo = 8, + OPENCL_GL_FN_clGetGLTextureInfo = 9, +}; + +namespace { +// generated by parser_cl.py +template +struct opencl_gl_fn0 +{ + typedef _R (CL_API_CALL*FN)(); + static _R CL_API_CALL switch_fn() + { return ((FN)opencl_gl_check_fn(ID))(); } +}; + +template +struct opencl_gl_fn1 +{ + typedef _R (CL_API_CALL*FN)(_T1); + static _R CL_API_CALL switch_fn(_T1 p1) + { return ((FN)opencl_gl_check_fn(ID))(p1); } +}; + +template +struct opencl_gl_fn2 +{ + typedef _R (CL_API_CALL*FN)(_T1, _T2); + static _R CL_API_CALL switch_fn(_T1 p1, _T2 p2) + { return ((FN)opencl_gl_check_fn(ID))(p1, p2); } +}; + +template +struct opencl_gl_fn3 +{ + typedef _R (CL_API_CALL*FN)(_T1, _T2, _T3); + static _R CL_API_CALL switch_fn(_T1 p1, _T2 p2, _T3 p3) + { return ((FN)opencl_gl_check_fn(ID))(p1, p2, p3); } +}; + +template +struct opencl_gl_fn4 +{ + typedef _R (CL_API_CALL*FN)(_T1, _T2, _T3, _T4); + static _R CL_API_CALL switch_fn(_T1 p1, _T2 p2, _T3 p3, _T4 p4) + { return ((FN)opencl_gl_check_fn(ID))(p1, p2, p3, p4); 
} +}; + +template +struct opencl_gl_fn5 +{ + typedef _R (CL_API_CALL*FN)(_T1, _T2, _T3, _T4, _T5); + static _R CL_API_CALL switch_fn(_T1 p1, _T2 p2, _T3 p3, _T4 p4, _T5 p5) + { return ((FN)opencl_gl_check_fn(ID))(p1, p2, p3, p4, p5); } +}; + +template +struct opencl_gl_fn6 +{ + typedef _R (CL_API_CALL*FN)(_T1, _T2, _T3, _T4, _T5, _T6); + static _R CL_API_CALL switch_fn(_T1 p1, _T2 p2, _T3 p3, _T4 p4, _T5 p5, _T6 p6) + { return ((FN)opencl_gl_check_fn(ID))(p1, p2, p3, p4, p5, p6); } +}; + +template +struct opencl_gl_fn7 +{ + typedef _R (CL_API_CALL*FN)(_T1, _T2, _T3, _T4, _T5, _T6, _T7); + static _R CL_API_CALL switch_fn(_T1 p1, _T2 p2, _T3 p3, _T4 p4, _T5 p5, _T6 p6, _T7 p7) + { return ((FN)opencl_gl_check_fn(ID))(p1, p2, p3, p4, p5, p6, p7); } +}; + +template +struct opencl_gl_fn8 +{ + typedef _R (CL_API_CALL*FN)(_T1, _T2, _T3, _T4, _T5, _T6, _T7, _T8); + static _R CL_API_CALL switch_fn(_T1 p1, _T2 p2, _T3 p3, _T4 p4, _T5 p5, _T6 p6, _T7 p7, _T8 p8) + { return ((FN)opencl_gl_check_fn(ID))(p1, p2, p3, p4, p5, p6, p7, p8); } +}; + +template +struct opencl_gl_fn9 +{ + typedef _R (CL_API_CALL*FN)(_T1, _T2, _T3, _T4, _T5, _T6, _T7, _T8, _T9); + static _R CL_API_CALL switch_fn(_T1 p1, _T2 p2, _T3 p3, _T4 p4, _T5 p5, _T6 p6, _T7 p7, _T8 p8, _T9 p9) + { return ((FN)opencl_gl_check_fn(ID))(p1, p2, p3, p4, p5, p6, p7, p8, p9); } +}; + +template +struct opencl_gl_fn10 +{ + typedef _R (CL_API_CALL*FN)(_T1, _T2, _T3, _T4, _T5, _T6, _T7, _T8, _T9, _T10); + static _R CL_API_CALL switch_fn(_T1 p1, _T2 p2, _T3 p3, _T4 p4, _T5 p5, _T6 p6, _T7 p7, _T8 p8, _T9 p9, _T10 p10) + { return ((FN)opencl_gl_check_fn(ID))(p1, p2, p3, p4, p5, p6, p7, p8, p9, p10); } +}; + +template +struct opencl_gl_fn11 +{ + typedef _R (CL_API_CALL*FN)(_T1, _T2, _T3, _T4, _T5, _T6, _T7, _T8, _T9, _T10, _T11); + static _R CL_API_CALL switch_fn(_T1 p1, _T2 p2, _T3 p3, _T4 p4, _T5 p5, _T6 p6, _T7 p7, _T8 p8, _T9 p9, _T10 p10, _T11 p11) + { return ((FN)opencl_gl_check_fn(ID))(p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11); } +}; + +template +struct opencl_gl_fn12 +{ + typedef _R (CL_API_CALL*FN)(_T1, _T2, _T3, _T4, _T5, _T6, _T7, _T8, _T9, _T10, _T11, _T12); + static _R CL_API_CALL switch_fn(_T1 p1, _T2 p2, _T3 p3, _T4 p4, _T5 p5, _T6 p6, _T7 p7, _T8 p8, _T9 p9, _T10 p10, _T11 p11, _T12 p12) + { return ((FN)opencl_gl_check_fn(ID))(p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12); } +}; + +template +struct opencl_gl_fn13 +{ + typedef _R (CL_API_CALL*FN)(_T1, _T2, _T3, _T4, _T5, _T6, _T7, _T8, _T9, _T10, _T11, _T12, _T13); + static _R CL_API_CALL switch_fn(_T1 p1, _T2 p2, _T3 p3, _T4 p4, _T5 p5, _T6 p6, _T7 p7, _T8 p8, _T9 p9, _T10 p10, _T11 p11, _T12 p12, _T13 p13) + { return ((FN)opencl_gl_check_fn(ID))(p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12, p13); } +}; + +template +struct opencl_gl_fn14 +{ + typedef _R (CL_API_CALL*FN)(_T1, _T2, _T3, _T4, _T5, _T6, _T7, _T8, _T9, _T10, _T11, _T12, _T13, _T14); + static _R CL_API_CALL switch_fn(_T1 p1, _T2 p2, _T3 p3, _T4 p4, _T5 p5, _T6 p6, _T7 p7, _T8 p8, _T9 p9, _T10 p10, _T11 p11, _T12 p12, _T13 p13, _T14 p14) + { return ((FN)opencl_gl_check_fn(ID))(p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12, p13, p14); } +}; + +} // anonymous namespace + +// generated by parser_cl.py +cl_mem (CL_API_CALL*clCreateFromGLBuffer)(cl_context, cl_mem_flags, cl_GLuint, int*) = + opencl_gl_fn4::switch_fn; +static const struct DynamicFnEntry clCreateFromGLBuffer_definition = { "clCreateFromGLBuffer", (void**)&clCreateFromGLBuffer}; + +cl_mem (CL_API_CALL*clCreateFromGLRenderbuffer)(cl_context, 
cl_mem_flags, cl_GLuint, cl_int*) = + opencl_gl_fn4::switch_fn; +static const struct DynamicFnEntry clCreateFromGLRenderbuffer_definition = { "clCreateFromGLRenderbuffer", (void**)&clCreateFromGLRenderbuffer}; + +cl_mem (CL_API_CALL*clCreateFromGLTexture)(cl_context, cl_mem_flags, cl_GLenum, cl_GLint, cl_GLuint, cl_int*) = + opencl_gl_fn6::switch_fn; +static const struct DynamicFnEntry clCreateFromGLTexture_definition = { "clCreateFromGLTexture", (void**)&clCreateFromGLTexture}; + +cl_mem (CL_API_CALL*clCreateFromGLTexture2D)(cl_context, cl_mem_flags, cl_GLenum, cl_GLint, cl_GLuint, cl_int*) = + opencl_gl_fn6::switch_fn; +static const struct DynamicFnEntry clCreateFromGLTexture2D_definition = { "clCreateFromGLTexture2D", (void**)&clCreateFromGLTexture2D}; + +cl_mem (CL_API_CALL*clCreateFromGLTexture3D)(cl_context, cl_mem_flags, cl_GLenum, cl_GLint, cl_GLuint, cl_int*) = + opencl_gl_fn6::switch_fn; +static const struct DynamicFnEntry clCreateFromGLTexture3D_definition = { "clCreateFromGLTexture3D", (void**)&clCreateFromGLTexture3D}; + +cl_int (CL_API_CALL*clEnqueueAcquireGLObjects)(cl_command_queue, cl_uint, const cl_mem*, cl_uint, const cl_event*, cl_event*) = + opencl_gl_fn6::switch_fn; +static const struct DynamicFnEntry clEnqueueAcquireGLObjects_definition = { "clEnqueueAcquireGLObjects", (void**)&clEnqueueAcquireGLObjects}; + +cl_int (CL_API_CALL*clEnqueueReleaseGLObjects)(cl_command_queue, cl_uint, const cl_mem*, cl_uint, const cl_event*, cl_event*) = + opencl_gl_fn6::switch_fn; +static const struct DynamicFnEntry clEnqueueReleaseGLObjects_definition = { "clEnqueueReleaseGLObjects", (void**)&clEnqueueReleaseGLObjects}; + +cl_int (CL_API_CALL*clGetGLContextInfoKHR)(const cl_context_properties*, cl_gl_context_info, size_t, void*, size_t*) = + opencl_gl_fn5::switch_fn; +static const struct DynamicFnEntry clGetGLContextInfoKHR_definition = { "clGetGLContextInfoKHR", (void**)&clGetGLContextInfoKHR}; + +cl_int (CL_API_CALL*clGetGLObjectInfo)(cl_mem, cl_gl_object_type*, cl_GLuint*) = + opencl_gl_fn3::switch_fn; +static const struct DynamicFnEntry clGetGLObjectInfo_definition = { "clGetGLObjectInfo", (void**)&clGetGLObjectInfo}; + +cl_int (CL_API_CALL*clGetGLTextureInfo)(cl_mem, cl_gl_texture_info, size_t, void*, size_t*) = + opencl_gl_fn5::switch_fn; +static const struct DynamicFnEntry clGetGLTextureInfo_definition = { "clGetGLTextureInfo", (void**)&clGetGLTextureInfo}; + + +// generated by parser_cl.py +static const struct DynamicFnEntry* opencl_gl_fn_list[] = { + &clCreateFromGLBuffer_definition, + &clCreateFromGLRenderbuffer_definition, + &clCreateFromGLTexture_definition, + &clCreateFromGLTexture2D_definition, + &clCreateFromGLTexture3D_definition, + &clEnqueueAcquireGLObjects_definition, + &clEnqueueReleaseGLObjects_definition, + &clGetGLContextInfoKHR_definition, + &clGetGLObjectInfo_definition, + &clGetGLTextureInfo_definition, +}; + +// number of enabled functions: 10 diff --git a/modules/core/src/opencl/runtime/generator/filter/opencl_gl_functions.list b/modules/core/src/opencl/runtime/generator/filter/opencl_gl_functions.list new file mode 100644 index 0000000000..b2f9e621aa --- /dev/null +++ b/modules/core/src/opencl/runtime/generator/filter/opencl_gl_functions.list @@ -0,0 +1,11 @@ +clCreateFromGLBuffer +clCreateFromGLRenderbuffer +clCreateFromGLTexture +clCreateFromGLTexture2D +clCreateFromGLTexture3D +clEnqueueAcquireGLObjects +clEnqueueReleaseGLObjects +clGetGLContextInfoKHR +clGetGLObjectInfo +clGetGLTextureInfo +#total 10 diff --git 
a/modules/core/src/opencl/runtime/generator/generate.sh b/modules/core/src/opencl/runtime/generator/generate.sh index 8649e99843..d9d6f0e3e8 100644 --- a/modules/core/src/opencl/runtime/generator/generate.sh +++ b/modules/core/src/opencl/runtime/generator/generate.sh @@ -3,4 +3,6 @@ echo "Generate files for CL runtime..." python parser_cl.py opencl_core < sources/cl.h python parser_clamdblas.py < sources/clAmdBlas.h python parser_clamdfft.py < sources/clAmdFft.h + +python parser_cl.py opencl_gl < sources/cl_gl.h echo "Generate files for CL runtime... Done" diff --git a/modules/core/src/opencl/runtime/generator/parser_cl.py b/modules/core/src/opencl/runtime/generator/parser_cl.py index 87eeb27236..e6c738bef7 100644 --- a/modules/core/src/opencl/runtime/generator/parser_cl.py +++ b/modules/core/src/opencl/runtime/generator/parser_cl.py @@ -8,9 +8,10 @@ from common import remove_comments, getTokens, getParameters, postProcessParamet try: if len(sys.argv) > 1: - outfile = open('../../../../include/opencv2/core/opencl/runtime/autogenerated/' + sys.argv[1] + '.hpp', 'wb') - outfile_impl = open('../autogenerated/' + sys.argv[1] + '_impl.hpp', 'wb') - outfile_wrappers = open('../../../../include/opencv2/core/opencl/runtime/autogenerated/' + sys.argv[1] + '_wrappers.hpp', 'wb') + module_name = sys.argv[1] + outfile = open('../../../../include/opencv2/core/opencl/runtime/autogenerated/%s.hpp' % module_name, 'wb') + outfile_impl = open('../autogenerated/%s_impl.hpp' % module_name, 'wb') + outfile_wrappers = open('../../../../include/opencv2/core/opencl/runtime/autogenerated/%s_wrappers.hpp' % module_name, 'wb') if len(sys.argv) > 2: f = open(sys.argv[2], "r") else: @@ -95,7 +96,7 @@ pprint(fns) from common import * -filterFileName='./filter/opencl_core_functions.list' +filterFileName = './filter/%s_functions.list' % module_name numEnabled = readFunctionFilter(fns, filterFileName) functionsFilter = generateFilterNames(fns) @@ -108,18 +109,27 @@ ctx['CL_REMAP_DYNAMIC'] = generateRemapDynamic(fns) ctx['CL_FN_DECLARATIONS'] = generateFnDeclaration(fns) sys.stdout = outfile -ProcessTemplate('template/opencl_core.hpp.in', ctx) +ProcessTemplate('template/%s.hpp.in' % module_name, ctx) ctx['CL_FN_INLINE_WRAPPERS'] = generateInlineWrappers(fns) sys.stdout = outfile_wrappers -ProcessTemplate('template/opencl_core_wrappers.hpp.in', ctx) +ProcessTemplate('template/%s_wrappers.hpp.in' % module_name, ctx) -ctx['CL_FN_ENTRY_DEFINITIONS'] = generateStructDefinitions(fns) -ctx['CL_FN_ENTRY_LIST'] = generateListOfDefinitions(fns) -ctx['CL_FN_ENUMS'] = generateEnums(fns) -ctx['CL_FN_SWITCH'] = generateTemplates(15, 'opencl_fn', 'opencl_check_fn', 'CL_API_CALL') +if module_name == 'opencl_core': + ctx['CL_FN_ENTRY_DEFINITIONS'] = generateStructDefinitions(fns) + ctx['CL_FN_ENTRY_LIST'] = generateListOfDefinitions(fns) + ctx['CL_FN_ENUMS'] = generateEnums(fns) + ctx['CL_FN_SWITCH'] = generateTemplates(15, 'opencl_fn', 'opencl_check_fn', 'CL_API_CALL') +else: + lprefix = module_name + '_fn' + enumprefix = module_name.upper() + '_FN' + fn_list_name = module_name + '_fn_list' + ctx['CL_FN_ENTRY_DEFINITIONS'] = generateStructDefinitions(fns, lprefix=lprefix, enumprefix=enumprefix) + ctx['CL_FN_ENTRY_LIST'] = generateListOfDefinitions(fns, fn_list_name) + ctx['CL_FN_ENUMS'] = generateEnums(fns, prefix=enumprefix) + ctx['CL_FN_SWITCH'] = generateTemplates(15, lprefix, '%s_check_fn' % module_name, 'CL_API_CALL') ctx['CL_NUMBER_OF_ENABLED_FUNCTIONS'] = '// number of enabled functions: %d' % (numEnabled) sys.stdout = outfile_impl 
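+# The template files are selected by module name as well: a new module needs its own
+# template/<module>.hpp.in, template/<module>_impl.hpp.in, template/<module>_wrappers.hpp.in
+# and filter/<module>_functions.list (see the opencl_gl files added in this patch).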
-ProcessTemplate('template/opencl_core_impl.hpp.in', ctx) +ProcessTemplate('template/%s_impl.hpp.in' % module_name, ctx) diff --git a/modules/core/src/opencl/runtime/generator/template/opencl_gl.hpp.in b/modules/core/src/opencl/runtime/generator/template/opencl_gl.hpp.in new file mode 100644 index 0000000000..24434d2de7 --- /dev/null +++ b/modules/core/src/opencl/runtime/generator/template/opencl_gl.hpp.in @@ -0,0 +1,15 @@ +#ifndef __OPENCV_CORE_OCL_RUNTIME_OPENCL_GL_HPP__ +#error "Invalid usage" +#endif + +@CL_REMAP_ORIGIN@ + +#if defined __APPLE__ +#include +#else +#include +#endif + +@CL_REMAP_DYNAMIC@ + +@CL_FN_DECLARATIONS@ diff --git a/modules/core/src/opencl/runtime/generator/template/opencl_gl_impl.hpp.in b/modules/core/src/opencl/runtime/generator/template/opencl_gl_impl.hpp.in new file mode 100644 index 0000000000..14586017a4 --- /dev/null +++ b/modules/core/src/opencl/runtime/generator/template/opencl_gl_impl.hpp.in @@ -0,0 +1,11 @@ +@CL_FN_ENUMS@ + +namespace { +@CL_FN_SWITCH@ +} // anonymous namespace + +@CL_FN_ENTRY_DEFINITIONS@ + +@CL_FN_ENTRY_LIST@ + +@CL_NUMBER_OF_ENABLED_FUNCTIONS@ diff --git a/modules/core/src/opencl/runtime/generator/template/opencl_gl_wrappers.hpp.in b/modules/core/src/opencl/runtime/generator/template/opencl_gl_wrappers.hpp.in new file mode 100644 index 0000000000..0aeefb4f44 --- /dev/null +++ b/modules/core/src/opencl/runtime/generator/template/opencl_gl_wrappers.hpp.in @@ -0,0 +1,5 @@ +#ifndef __OPENCV_CORE_OCL_RUNTIME_OPENCL_GL_WRAPPERS_HPP__ +#error "Invalid usage" +#endif + +@CL_FN_INLINE_WRAPPERS@ diff --git a/modules/core/src/opencl/runtime/opencl_core.cpp b/modules/core/src/opencl/runtime/opencl_core.cpp index 43f6b13b6e..971c0770e5 100644 --- a/modules/core/src/opencl/runtime/opencl_core.cpp +++ b/modules/core/src/opencl/runtime/opencl_core.cpp @@ -279,4 +279,30 @@ static void* opencl_check_fn(int ID) return func; } +#ifdef HAVE_OPENGL + +#include "opencv2/core/opencl/runtime/opencl_gl.hpp" + +static void* opencl_gl_check_fn(int ID); + +#include "autogenerated/opencl_gl_impl.hpp" + +static void* opencl_gl_check_fn(int ID) +{ + const struct DynamicFnEntry* e = NULL; + assert(ID >= 0 && ID < (int)(sizeof(opencl_gl_fn_list)/sizeof(opencl_gl_fn_list[0]))); + e = opencl_gl_fn_list[ID]; + void* func = CV_CL_GET_PROC_ADDRESS(e->fnName); + if (!func) + { + throw cv::Exception(cv::Error::OpenCLApiCallError, + cv::format("OpenCL function is not available: [%s]", e->fnName), + CV_Func, __FILE__, __LINE__); + } + *(e->ppFn) = func; + return func; +} + +#endif // HAVE_OPENGL + #endif diff --git a/modules/core/src/opengl.cpp b/modules/core/src/opengl.cpp index 00a7f66662..3bbc0f8e9f 100644 --- a/modules/core/src/opengl.cpp +++ b/modules/core/src/opengl.cpp @@ -47,7 +47,9 @@ # ifdef HAVE_CUDA # include # endif -#endif +#else // HAVE_OPENGL +# define NO_OPENGL_SUPPORT_ERROR CV_ErrorNoReturn(cv::Error::StsBadFunc, "OpenCV was build without OpenGL support") +#endif // HAVE_OPENGL using namespace cv; using namespace cv::cuda; @@ -1572,3 +1574,314 @@ void cv::ogl::render(const ogl::Arrays& arr, InputArray indices, int mode, Scala } #endif } + +//////////////////////////////////////////////////////////////////////// +// CL-GL Interoperability + +#ifdef HAVE_OPENCL +# include "opencv2/core/opencl/runtime/opencl_gl.hpp" +#else // HAVE_OPENCL +# define NO_OPENCL_SUPPORT_ERROR CV_ErrorNoReturn(cv::Error::StsBadFunc, "OpenCV was build without OpenCL support") +#endif // HAVE_OPENCL + +#if defined(HAVE_OPENGL) +# if defined(ANDROID) +# include +# elif 
defined(__linux__) +# include +# endif +#endif // HAVE_OPENGL + +namespace cv { namespace ogl { + +namespace ocl { + +Context& initializeContextFromGL() +{ +#if !defined(HAVE_OPENGL) + NO_OPENGL_SUPPORT_ERROR; +#elif !defined(HAVE_OPENCL) + NO_OPENCL_SUPPORT_ERROR; +#else + cl_uint numPlatforms; + cl_int status = clGetPlatformIDs(0, NULL, &numPlatforms); + if (status != CL_SUCCESS) + CV_Error(cv::Error::OpenCLInitError, "OpenCL: Can't get number of platforms"); + if (numPlatforms == 0) + CV_Error(cv::Error::OpenCLInitError, "OpenCL: No available platforms"); + + std::vector platforms(numPlatforms); + status = clGetPlatformIDs(numPlatforms, &platforms[0], NULL); + if (status != CL_SUCCESS) + CV_Error(cv::Error::OpenCLInitError, "OpenCL: Can't get number of platforms"); + + // TODO Filter platforms by name from OPENCV_OPENCL_DEVICE + + int found = -1; + cl_device_id device = NULL; + cl_context context = NULL; + + for (int i = 0; i < (int)numPlatforms; i++) + { + // query platform extension: presence of "cl_khr_gl_sharing" extension is requred + { + AutoBuffer extensionStr; + + size_t extensionSize; + status = clGetPlatformInfo(platforms[i], CL_PLATFORM_EXTENSIONS, 0, NULL, &extensionSize); + if (status == CL_SUCCESS) + { + extensionStr.allocate(extensionSize+1); + status = clGetPlatformInfo(platforms[i], CL_PLATFORM_EXTENSIONS, extensionSize, (char*)extensionStr, NULL); + } + if (status != CL_SUCCESS) + CV_Error(cv::Error::OpenCLInitError, "OpenCL: Can't get platform extension string"); + + if (!strstr((const char*)extensionStr, "cl_khr_gl_sharing")) + continue; + } + + clGetGLContextInfoKHR_fn clGetGLContextInfoKHR = (clGetGLContextInfoKHR_fn) + clGetExtensionFunctionAddressForPlatform(platforms[i], "clGetGLContextInfoKHR"); + if (!clGetGLContextInfoKHR) + continue; + + cl_context_properties properties[] = + { +#if defined(WIN32) || defined(_WIN32) + CL_CONTEXT_PLATFORM, (cl_context_properties)platforms[i], + CL_GL_CONTEXT_KHR, (cl_context_properties)wglGetCurrentContext(), + CL_WGL_HDC_KHR, (cl_context_properties)wglGetCurrentDC(), +#elif defined(ANDROID) + CL_CONTEXT_PLATFORM, (cl_context_properties)platforms[i], + CL_GL_CONTEXT_KHR, (cl_context_properties)eglGetCurrentContext(), + CL_EGL_DISPLAY_KHR, (cl_context_properties)eglGetCurrentDisplay(), +#elif defined(__linux__) + CL_CONTEXT_PLATFORM, (cl_context_properties)platforms[i], + CL_GL_CONTEXT_KHR, (cl_context_properties)glXGetCurrentContext(), + CL_GLX_DISPLAY_KHR, (cl_context_properties)glXGetCurrentDisplay(), +#endif + 0 + }; + + // query device + device = NULL; + status = clGetGLContextInfoKHR(properties, CL_CURRENT_DEVICE_FOR_GL_CONTEXT_KHR, sizeof(cl_device_id), (void*)&device, NULL); + if (status != CL_SUCCESS) + continue; + + // create context + context = clCreateContext(properties, 1, &device, NULL, NULL, &status); + if (status != CL_SUCCESS) + { + clReleaseDevice(device); + } + else + { + found = i; + break; + } + } + + if (found < 0) + CV_Error(cv::Error::OpenCLInitError, "OpenCL: Can't create context for OpenGL interop"); + + Context& ctx = Context::getDefault(false); + initializeContextFromHandle(ctx, platforms[found], context, device); + return ctx; +#endif +} + +} // namespace cv::ogl::ocl + +void convertToGLTexture2D(InputArray src, Texture2D& texture) +{ + (void)src; (void)texture; +#if !defined(HAVE_OPENGL) + NO_OPENGL_SUPPORT_ERROR; +#elif !defined(HAVE_OPENCL) + NO_OPENCL_SUPPORT_ERROR; +#else + Size srcSize = src.size(); + CV_Assert(srcSize.width == (int)texture.cols() && srcSize.height == (int)texture.rows()); + 
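+    // What follows is the standard CL/GL interop sequence: wrap the GL texture
+    // in a cl_mem via clCreateFromGLTexture(), acquire it for OpenCL with
+    // clEnqueueAcquireGLObjects(), copy the UMat's device buffer into the image,
+    // release the GL object again, and synchronize with clFinish() before the
+    // temporary cl_mem is released.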
+ using namespace cv::ocl; + Context& ctx = Context::getDefault(); + cl_context context = (cl_context)ctx.ptr(); + + UMat u = src.getUMat(); + + // TODO Add support for roi + CV_Assert(u.offset == 0); + CV_Assert(u.isContinuous()); + + cl_int status = 0; + cl_mem clImage = clCreateFromGLTexture(context, CL_MEM_WRITE_ONLY, gl::TEXTURE_2D, 0, texture.texId(), &status); + if (status != CL_SUCCESS) + CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clCreateFromGLTexture failed"); + + cl_mem clBuffer = (cl_mem)u.handle(ACCESS_READ); + + cl_command_queue q = (cl_command_queue)Queue::getDefault().ptr(); + status = clEnqueueAcquireGLObjects(q, 1, &clImage, 0, NULL, NULL); + if (status != CL_SUCCESS) + CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueAcquireGLObjects failed"); + size_t offset = 0; // TODO + size_t dst_origin[3] = {0, 0, 0}; + size_t region[3] = {u.cols, u.rows, 1}; + status = clEnqueueCopyBufferToImage(q, clBuffer, clImage, offset, dst_origin, region, 0, NULL, NULL); + if (status != CL_SUCCESS) + CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueCopyBufferToImage failed"); + status = clEnqueueReleaseGLObjects(q, 1, &clImage, 0, NULL, NULL); + if (status != CL_SUCCESS) + CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueReleaseGLObjects failed"); + + status = clFinish(q); // TODO Use events + if (status != CL_SUCCESS) + CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clFinish failed"); + + status = clReleaseMemObject(clImage); // TODO RAII + if (status != CL_SUCCESS) + CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clReleaseMemObject failed"); +#endif +} + +void convertFromGLTexture2D(const Texture2D& texture, OutputArray dst) +{ + (void)texture; (void)dst; +#if !defined(HAVE_OPENGL) + NO_OPENGL_SUPPORT_ERROR; +#elif !defined(HAVE_OPENCL) + NO_OPENCL_SUPPORT_ERROR; +#else + // check texture format + const int dtype = CV_8UC4; + CV_Assert(texture.format() == Texture2D::RGBA); + + int textureType = dtype; + CV_Assert(textureType >= 0); + + using namespace cv::ocl; + Context& ctx = Context::getDefault(); + cl_context context = (cl_context)ctx.ptr(); + + // TODO Need to specify ACCESS_WRITE here somehow to prevent useless data copying! 
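+    // (Requesting the handle below with ACCESS_READ may force the allocator to
+    // make the freshly created -- and still uninitialized -- buffer contents
+    // valid on the device, which is wasted work because the image copy
+    // overwrites everything anyway.)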
+ dst.create(texture.size(), textureType); + UMat u = dst.getUMat(); + + // TODO Add support for roi + CV_Assert(u.offset == 0); + CV_Assert(u.isContinuous()); + + cl_int status = 0; + cl_mem clImage = clCreateFromGLTexture(context, CL_MEM_READ_ONLY, gl::TEXTURE_2D, 0, texture.texId(), &status); + if (status != CL_SUCCESS) + CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clCreateFromGLTexture failed"); + + cl_mem clBuffer = (cl_mem)u.handle(ACCESS_READ); + + cl_command_queue q = (cl_command_queue)Queue::getDefault().ptr(); + status = clEnqueueAcquireGLObjects(q, 1, &clImage, 0, NULL, NULL); + if (status != CL_SUCCESS) + CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueAcquireGLObjects failed"); + size_t offset = 0; // TODO + size_t src_origin[3] = {0, 0, 0}; + size_t region[3] = {u.cols, u.rows, 1}; + status = clEnqueueCopyImageToBuffer(q, clImage, clBuffer, src_origin, region, offset, 0, NULL, NULL); + if (status != CL_SUCCESS) + CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueCopyImageToBuffer failed"); + status = clEnqueueReleaseGLObjects(q, 1, &clImage, 0, NULL, NULL); + if (status != CL_SUCCESS) + CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueReleaseGLObjects failed"); + + status = clFinish(q); // TODO Use events + if (status != CL_SUCCESS) + CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clFinish failed"); + + status = clReleaseMemObject(clImage); // TODO RAII + if (status != CL_SUCCESS) + CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clReleaseMemObject failed"); +#endif +} + +//void mapGLBuffer(const Buffer& buffer, UMat& dst, int accessFlags) +UMat mapGLBuffer(const Buffer& buffer, int accessFlags) +{ + (void)buffer; (void)accessFlags; +#if !defined(HAVE_OPENGL) + NO_OPENGL_SUPPORT_ERROR; +#elif !defined(HAVE_OPENCL) + NO_OPENCL_SUPPORT_ERROR; +#else + using namespace cv::ocl; + Context& ctx = Context::getDefault(); + cl_context context = (cl_context)ctx.ptr(); + cl_command_queue clQueue = (cl_command_queue)Queue::getDefault().ptr(); + + int clAccessFlags = 0; + switch (accessFlags & (ACCESS_READ|ACCESS_WRITE)) + { + default: + case ACCESS_READ|ACCESS_WRITE: + clAccessFlags = CL_MEM_READ_WRITE; + break; + case ACCESS_READ: + clAccessFlags = CL_MEM_READ_ONLY; + break; + case ACCESS_WRITE: + clAccessFlags = CL_MEM_WRITE_ONLY; + break; + } + + cl_int status = 0; + cl_mem clBuffer = clCreateFromGLBuffer(context, clAccessFlags, buffer.bufId(), &status); + if (status != CL_SUCCESS) + CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clCreateFromGLBuffer failed"); + + gl::Finish(); + + status = clEnqueueAcquireGLObjects(clQueue, 1, &clBuffer, 0, NULL, NULL); + if (status != CL_SUCCESS) + CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueAcquireGLObjects failed"); + + size_t step = buffer.cols() * buffer.elemSize(); + int rows = buffer.rows(); + int cols = buffer.cols(); + int type = buffer.type(); + + UMat u; + convertFromBuffer(clBuffer, step, rows, cols, type, u); + return u; +#endif +} + +void unmapGLBuffer(UMat& u) +{ + (void)u; +#if !defined(HAVE_OPENGL) + NO_OPENGL_SUPPORT_ERROR; +#elif !defined(HAVE_OPENCL) + NO_OPENCL_SUPPORT_ERROR; +#else + using namespace cv::ocl; + cl_command_queue clQueue = (cl_command_queue)Queue::getDefault().ptr(); + + cl_mem clBuffer = (cl_mem)u.handle(ACCESS_READ); + + u.release(); + + cl_int status = clEnqueueReleaseGLObjects(clQueue, 1, &clBuffer, 0, NULL, NULL); + if (status != CL_SUCCESS) + CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueReleaseGLObjects failed"); + + status = clFinish(clQueue); + 
if (status != CL_SUCCESS) + CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clFinish failed"); + + status = clReleaseMemObject(clBuffer); + if (status != CL_SUCCESS) + CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clReleaseMemObject failed"); +#endif +} + +}} // namespace cv::ogl diff --git a/modules/core/src/parallel.cpp b/modules/core/src/parallel.cpp index b1e7567818..caa81299d7 100644 --- a/modules/core/src/parallel.cpp +++ b/modules/core/src/parallel.cpp @@ -80,6 +80,7 @@ 4. HAVE_GCD - system wide, used automatically (APPLE only) 5. WINRT - system wide, used automatically (Windows RT only) 6. HAVE_CONCURRENCY - part of runtime, used automatically (Windows only - MSVS 10, MSVS 11) + 7. HAVE_PTHREADS_PF - pthreads if available */ #if defined HAVE_TBB @@ -125,15 +126,21 @@ # define CV_PARALLEL_FRAMEWORK "winrt-concurrency" #elif defined HAVE_CONCURRENCY # define CV_PARALLEL_FRAMEWORK "ms-concurrency" -#elif defined HAVE_PTHREADS +#elif defined HAVE_PTHREADS_PF # define CV_PARALLEL_FRAMEWORK "pthreads" #endif namespace cv { ParallelLoopBody::~ParallelLoopBody() {} +#ifdef HAVE_PTHREADS_PF + void parallel_for_pthreads(const cv::Range& range, const cv::ParallelLoopBody& body, double nstripes); + size_t parallel_pthreads_get_threads_num(); + void parallel_pthreads_set_threads_num(int num); +#endif } + namespace { #ifdef CV_PARALLEL_FRAMEWORK @@ -300,8 +307,8 @@ void cv::parallel_for_(const cv::Range& range, const cv::ParallelLoopBody& body, Concurrency::CurrentScheduler::Detach(); } -#elif defined HAVE_PTHREADS - void parallel_for_pthreads(const Range& range, const ParallelLoopBody& body, double nstripes); +#elif defined HAVE_PTHREADS_PF + parallel_for_pthreads(range, body, nstripes); #else @@ -359,9 +366,7 @@ int cv::getNumThreads(void) ? Concurrency::CurrentScheduler::Get()->GetNumberOfVirtualProcessors() : pplScheduler->GetNumberOfVirtualProcessors()); -#elif defined HAVE_PTHREADS - - size_t parallel_pthreads_get_threads_num(); +#elif defined HAVE_PTHREADS_PF return parallel_pthreads_get_threads_num(); @@ -422,9 +427,7 @@ void cv::setNumThreads( int threads ) Concurrency::MaxConcurrency, threads-1)); } -#elif defined HAVE_PTHREADS - - void parallel_pthreads_set_threads_num(int num); +#elif defined HAVE_PTHREADS_PF parallel_pthreads_set_threads_num(threads); @@ -450,6 +453,8 @@ int cv::getThreadNum(void) return 0; #elif defined HAVE_CONCURRENCY return std::max(0, (int)Concurrency::Context::VirtualProcessorId()); // zero for master thread, unique number for others but not necessary 1,2,3,... 
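+    // Note for the pthreads branch below: the value is derived from pthread_self(),
+    // so it is stable for the lifetime of the thread but is not a small zero-based
+    // index and should not be used as an array subscript.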
+#elif defined HAVE_PTHREADS_PF + return (int)(size_t)(void*)pthread_self(); // no zero-based indexing #else return 0; #endif diff --git a/modules/core/src/parallel_pthreads.cpp b/modules/core/src/parallel_pthreads.cpp index 8c34959783..df99f18bf2 100644 --- a/modules/core/src/parallel_pthreads.cpp +++ b/modules/core/src/parallel_pthreads.cpp @@ -42,7 +42,7 @@ #include "precomp.hpp" -#if defined HAVE_PTHREADS && HAVE_PTHREADS +#ifdef HAVE_PTHREADS_PF #include #include @@ -80,25 +80,31 @@ struct work_load set(range, body, nstripes); } - void set(const cv::Range& range, const cv::ParallelLoopBody& body, int nstripes) + void set(const cv::Range& range, const cv::ParallelLoopBody& body, unsigned int nstripes) { m_body = &body; m_range = ⦥ - m_nstripes = nstripes; - m_blocks_count = ((m_range->end - m_range->start - 1)/m_nstripes) + 1; + + //ensure that nstripes not larger than range length + m_nstripes = std::min( unsigned(m_range->end - m_range->start) , nstripes); + + m_block_size = ((m_range->end - m_range->start - 1)/m_nstripes) + 1; + + //ensure that nstripes not larger than blocks count, so we would never go out of range + m_nstripes = std::min(m_nstripes, unsigned(((m_range->end - m_range->start - 1)/m_block_size) + 1) ); } const cv::ParallelLoopBody* m_body; const cv::Range* m_range; - int m_nstripes; - unsigned int m_blocks_count; + unsigned int m_nstripes; + int m_block_size; void clear() { m_body = 0; m_range = 0; m_nstripes = 0; - m_blocks_count = 0; + m_block_size = 0; } }; @@ -331,10 +337,10 @@ void ForThread::execute() work_load& load = m_parent->m_work_load; - while(m_current_pos < load.m_blocks_count) + while(m_current_pos < load.m_nstripes) { - int start = load.m_range->start + m_current_pos*load.m_nstripes; - int end = std::min(start + load.m_nstripes, load.m_range->end); + int start = load.m_range->start + m_current_pos*load.m_block_size; + int end = std::min(start + load.m_block_size, load.m_range->end); load.m_body->operator()(cv::Range(start, end)); @@ -417,9 +423,11 @@ void ThreadManager::run(const cv::Range& range, const cv::ParallelLoopBody& body { if(initPool()) { - double min_stripes = double(range.end - range.start)/(4*m_threads.size()); + if(nstripes < 1) nstripes = 4*m_threads.size(); - nstripes = std::max(nstripes, min_stripes); + double max_stripes = 4*m_threads.size(); + + nstripes = std::min(nstripes, max_stripes); pthread_mutex_lock(&m_manager_task_mutex); @@ -429,7 +437,7 @@ void ThreadManager::run(const cv::Range& range, const cv::ParallelLoopBody& body m_task_complete = false; - m_work_load.set(range, body, std::ceil(nstripes)); + m_work_load.set(range, body, cvCeil(nstripes)); for(size_t i = 0; i < m_threads.size(); ++i) { diff --git a/modules/core/src/precomp.hpp b/modules/core/src/precomp.hpp index d463126368..d8d7e007ee 100644 --- a/modules/core/src/precomp.hpp +++ b/modules/core/src/precomp.hpp @@ -292,15 +292,25 @@ TLSData& getCoreTlsData(); #define CL_RUNTIME_EXPORT #endif -#ifndef HAVE_PTHREADS -#if !(defined WIN32 || defined _WIN32 || defined WINCE || defined HAVE_WINRT) -#define HAVE_PTHREADS 1 -#endif -#endif - extern bool __termination; // skip some cleanups, because process is terminating // (for example, if ExitProcess() was already called) +cv::Mutex& getInitializationMutex(); + +// TODO Memory barriers? 
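+// CV_SINGLETON_LAZY_INIT(_REF) implements double-checked locking: the instance
+// pointer is tested once without the lock and re-tested under
+// getInitializationMutex() before construction, so at most one object is ever
+// created, and it is never destroyed (it lives for the process lifetime).
+// Typical use (see Mat::getStdAllocator() in matrix.cpp within this patch):
+//
+//     MatAllocator* Mat::getStdAllocator()
+//     {
+//         CV_SINGLETON_LAZY_INIT(MatAllocator, new StdMatAllocator())
+//     }
+//
+// The _REF variant returns *instance for accessors that expose the singleton
+// by reference (e.g. AmdBlasHelper::getInstance()).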
+#define CV_SINGLETON_LAZY_INIT_(TYPE, INITIALIZER, RET_VALUE) \ + static TYPE* volatile instance = NULL; \ + if (instance == NULL) \ + { \ + cv::AutoLock lock(cv::getInitializationMutex()); \ + if (instance == NULL) \ + instance = INITIALIZER; \ + } \ + return RET_VALUE; + +#define CV_SINGLETON_LAZY_INIT(TYPE, INITIALIZER) CV_SINGLETON_LAZY_INIT_(TYPE, INITIALIZER, instance) +#define CV_SINGLETON_LAZY_INIT_REF(TYPE, INITIALIZER) CV_SINGLETON_LAZY_INIT_(TYPE, INITIALIZER, *instance) + } #include "opencv2/hal/intrin.hpp" diff --git a/modules/core/src/stat.cpp b/modules/core/src/stat.cpp index 81f9a2484e..0a4c0415cb 100644 --- a/modules/core/src/stat.cpp +++ b/modules/core/src/stat.cpp @@ -1138,68 +1138,75 @@ static bool ocl_sum( InputArray _src, Scalar & res, int sum_op, InputArray _mask #endif +#ifdef HAVE_IPP +static bool ipp_sum(Mat &src, Scalar &_res) +{ +#if IPP_VERSION_MAJOR >= 7 + int cn = src.channels(); + size_t total_size = src.total(); + int rows = src.size[0], cols = rows ? (int)(total_size/rows) : 0; + if( src.dims == 2 || (src.isContinuous() && cols > 0 && (size_t)rows*cols == total_size) ) + { + IppiSize sz = { cols, rows }; + int type = src.type(); + typedef IppStatus (CV_STDCALL* ippiSumFuncHint)(const void*, int, IppiSize, double *, IppHintAlgorithm); + typedef IppStatus (CV_STDCALL* ippiSumFuncNoHint)(const void*, int, IppiSize, double *); + ippiSumFuncHint ippFuncHint = + type == CV_32FC1 ? (ippiSumFuncHint)ippiSum_32f_C1R : + type == CV_32FC3 ? (ippiSumFuncHint)ippiSum_32f_C3R : + type == CV_32FC4 ? (ippiSumFuncHint)ippiSum_32f_C4R : + 0; + ippiSumFuncNoHint ippFuncNoHint = + type == CV_8UC1 ? (ippiSumFuncNoHint)ippiSum_8u_C1R : + type == CV_8UC3 ? (ippiSumFuncNoHint)ippiSum_8u_C3R : + type == CV_8UC4 ? (ippiSumFuncNoHint)ippiSum_8u_C4R : + type == CV_16UC1 ? (ippiSumFuncNoHint)ippiSum_16u_C1R : + type == CV_16UC3 ? (ippiSumFuncNoHint)ippiSum_16u_C3R : + type == CV_16UC4 ? (ippiSumFuncNoHint)ippiSum_16u_C4R : + type == CV_16SC1 ? (ippiSumFuncNoHint)ippiSum_16s_C1R : + type == CV_16SC3 ? (ippiSumFuncNoHint)ippiSum_16s_C3R : + type == CV_16SC4 ? (ippiSumFuncNoHint)ippiSum_16s_C4R : + 0; + CV_Assert(!ippFuncHint || !ippFuncNoHint); + if( ippFuncHint || ippFuncNoHint ) + { + Ipp64f res[4]; + IppStatus ret = ippFuncHint ? ippFuncHint(src.ptr(), (int)src.step[0], sz, res, ippAlgHintAccurate) : + ippFuncNoHint(src.ptr(), (int)src.step[0], sz, res); + if( ret >= 0 ) + { + for( int i = 0; i < cn; i++ ) + _res[i] = res[i]; + return true; + } + } + } +#else + CV_UNUSED(src); CV_UNUSED(_res); +#endif + return false; +} +#endif + } cv::Scalar cv::sum( InputArray _src ) { -#ifdef HAVE_OPENCL +#if defined HAVE_OPENCL || defined HAVE_IPP Scalar _res; +#endif + +#ifdef HAVE_OPENCL CV_OCL_RUN_(OCL_PERFORMANCE_CHECK(_src.isUMat()) && _src.dims() <= 2, ocl_sum(_src, _res, OCL_OP_SUM), _res) #endif Mat src = _src.getMat(); + CV_IPP_RUN(IPP_VERSION_MAJOR >= 7, ipp_sum(src, _res), _res); + int k, cn = src.channels(), depth = src.depth(); - -#if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7) - CV_IPP_CHECK() - { - size_t total_size = src.total(); - int rows = src.size[0], cols = rows ? 
(int)(total_size/rows) : 0; - if( src.dims == 2 || (src.isContinuous() && cols > 0 && (size_t)rows*cols == total_size) ) - { - IppiSize sz = { cols, rows }; - int type = src.type(); - typedef IppStatus (CV_STDCALL* ippiSumFuncHint)(const void*, int, IppiSize, double *, IppHintAlgorithm); - typedef IppStatus (CV_STDCALL* ippiSumFuncNoHint)(const void*, int, IppiSize, double *); - ippiSumFuncHint ippFuncHint = - type == CV_32FC1 ? (ippiSumFuncHint)ippiSum_32f_C1R : - type == CV_32FC3 ? (ippiSumFuncHint)ippiSum_32f_C3R : - type == CV_32FC4 ? (ippiSumFuncHint)ippiSum_32f_C4R : - 0; - ippiSumFuncNoHint ippFuncNoHint = - type == CV_8UC1 ? (ippiSumFuncNoHint)ippiSum_8u_C1R : - type == CV_8UC3 ? (ippiSumFuncNoHint)ippiSum_8u_C3R : - type == CV_8UC4 ? (ippiSumFuncNoHint)ippiSum_8u_C4R : - type == CV_16UC1 ? (ippiSumFuncNoHint)ippiSum_16u_C1R : - type == CV_16UC3 ? (ippiSumFuncNoHint)ippiSum_16u_C3R : - type == CV_16UC4 ? (ippiSumFuncNoHint)ippiSum_16u_C4R : - type == CV_16SC1 ? (ippiSumFuncNoHint)ippiSum_16s_C1R : - type == CV_16SC3 ? (ippiSumFuncNoHint)ippiSum_16s_C3R : - type == CV_16SC4 ? (ippiSumFuncNoHint)ippiSum_16s_C4R : - 0; - CV_Assert(!ippFuncHint || !ippFuncNoHint); - if( ippFuncHint || ippFuncNoHint ) - { - Ipp64f res[4]; - IppStatus ret = ippFuncHint ? ippFuncHint(src.ptr(), (int)src.step[0], sz, res, ippAlgHintAccurate) : - ippFuncNoHint(src.ptr(), (int)src.step[0], sz, res); - if( ret >= 0 ) - { - Scalar sc; - for( int i = 0; i < cn; i++ ) - sc[i] = res[i]; - CV_IMPL_ADD(CV_IMPL_IPP); - return sc; - } - setIppErrorStatus(); - } - } - } -#endif SumFunc func = getSumFunc(depth); - CV_Assert( cn <= 4 && func != 0 ); const Mat* arrays[] = {&src, 0}; @@ -1291,51 +1298,61 @@ static bool ocl_countNonZero( InputArray _src, int & res ) #endif +#if defined HAVE_IPP +namespace cv { + +static bool ipp_countNonZero( Mat &src, int &res ) +{ +#if !defined HAVE_IPP_ICV_ONLY + Ipp32s count = 0; + IppStatus status = ippStsNoErr; + + int type = src.type(), depth = CV_MAT_DEPTH(type); + IppiSize roiSize = { src.cols, src.rows }; + Ipp32s srcstep = (Ipp32s)src.step; + if (src.isContinuous()) + { + roiSize.width = (Ipp32s)src.total(); + roiSize.height = 1; + srcstep = (Ipp32s)src.total() * CV_ELEM_SIZE(type); + } + + if (depth == CV_8U) + status = ippiCountInRange_8u_C1R((const Ipp8u *)src.data, srcstep, roiSize, &count, 0, 0); + else if (depth == CV_32F) + status = ippiCountInRange_32f_C1R((const Ipp32f *)src.data, srcstep, roiSize, &count, 0, 0); + + if (status >= 0) + { + res = ((Ipp32s)src.total() - count); + return true; + } +#else + CV_UNUSED(src); CV_UNUSED(res); +#endif + return false; +} +} +#endif + + int cv::countNonZero( InputArray _src ) { int type = _src.type(), cn = CV_MAT_CN(type); CV_Assert( cn == 1 ); -#ifdef HAVE_OPENCL +#if defined HAVE_OPENCL || defined HAVE_IPP int res = -1; +#endif + +#ifdef HAVE_OPENCL CV_OCL_RUN_(OCL_PERFORMANCE_CHECK(_src.isUMat()) && _src.dims() <= 2, ocl_countNonZero(_src, res), res) #endif Mat src = _src.getMat(); - -#if defined HAVE_IPP && !defined HAVE_IPP_ICV_ONLY && 0 - CV_IPP_CHECK() - { - if (src.dims <= 2 || src.isContinuous()) - { - IppiSize roiSize = { src.cols, src.rows }; - Ipp32s count = 0, srcstep = (Ipp32s)src.step; - IppStatus status = (IppStatus)-1; - - if (src.isContinuous()) - { - roiSize.width = (Ipp32s)src.total(); - roiSize.height = 1; - srcstep = (Ipp32s)src.total() * CV_ELEM_SIZE(type); - } - - int depth = CV_MAT_DEPTH(type); - if (depth == CV_8U) - status = ippiCountInRange_8u_C1R((const Ipp8u *)src.data, srcstep, roiSize, &count, 0, 
0); - else if (depth == CV_32F) - status = ippiCountInRange_32f_C1R((const Ipp32f *)src.data, srcstep, roiSize, &count, 0, 0); - - if (status >= 0) - { - CV_IMPL_ADD(CV_IMPL_IPP); - return (Ipp32s)src.total() - count; - } - setIppErrorStatus(); - } - } -#endif + CV_IPP_RUN(0 && (_src.dims() <= 2 || _src.isContinuous()), ipp_countNonZero(src, res), res); CountNonZeroFunc func = getCountNonZeroTab(src.depth()); CV_Assert( func != 0 ); @@ -1618,6 +1635,119 @@ static bool ocl_meanStdDev( InputArray _src, OutputArray _mean, OutputArray _sdv #endif +#ifdef HAVE_IPP +namespace cv +{ +static bool ipp_meanStdDev(Mat& src, OutputArray _mean, OutputArray _sdv, Mat& mask) +{ +#if IPP_VERSION_MAJOR >= 7 + int cn = src.channels(); + size_t total_size = src.total(); + int rows = src.size[0], cols = rows ? (int)(total_size/rows) : 0; + if( src.dims == 2 || (src.isContinuous() && mask.isContinuous() && cols > 0 && (size_t)rows*cols == total_size) ) + { + Ipp64f mean_temp[3]; + Ipp64f stddev_temp[3]; + Ipp64f *pmean = &mean_temp[0]; + Ipp64f *pstddev = &stddev_temp[0]; + Mat mean, stddev; + int dcn_mean = -1; + if( _mean.needed() ) + { + if( !_mean.fixedSize() ) + _mean.create(cn, 1, CV_64F, -1, true); + mean = _mean.getMat(); + dcn_mean = (int)mean.total(); + pmean = mean.ptr(); + } + int dcn_stddev = -1; + if( _sdv.needed() ) + { + if( !_sdv.fixedSize() ) + _sdv.create(cn, 1, CV_64F, -1, true); + stddev = _sdv.getMat(); + dcn_stddev = (int)stddev.total(); + pstddev = stddev.ptr(); + } + for( int c = cn; c < dcn_mean; c++ ) + pmean[c] = 0; + for( int c = cn; c < dcn_stddev; c++ ) + pstddev[c] = 0; + IppiSize sz = { cols, rows }; + int type = src.type(); + if( !mask.empty() ) + { + typedef IppStatus (CV_STDCALL* ippiMaskMeanStdDevFuncC1)(const void *, int, const void *, int, IppiSize, Ipp64f *, Ipp64f *); + ippiMaskMeanStdDevFuncC1 ippFuncC1 = + type == CV_8UC1 ? (ippiMaskMeanStdDevFuncC1)ippiMean_StdDev_8u_C1MR : + type == CV_16UC1 ? (ippiMaskMeanStdDevFuncC1)ippiMean_StdDev_16u_C1MR : + type == CV_32FC1 ? (ippiMaskMeanStdDevFuncC1)ippiMean_StdDev_32f_C1MR : + 0; + if( ippFuncC1 ) + { + if( ippFuncC1(src.ptr(), (int)src.step[0], mask.ptr(), (int)mask.step[0], sz, pmean, pstddev) >= 0 ) + { + return true; + } + } + typedef IppStatus (CV_STDCALL* ippiMaskMeanStdDevFuncC3)(const void *, int, const void *, int, IppiSize, int, Ipp64f *, Ipp64f *); + ippiMaskMeanStdDevFuncC3 ippFuncC3 = + type == CV_8UC3 ? (ippiMaskMeanStdDevFuncC3)ippiMean_StdDev_8u_C3CMR : + type == CV_16UC3 ? (ippiMaskMeanStdDevFuncC3)ippiMean_StdDev_16u_C3CMR : + type == CV_32FC3 ? (ippiMaskMeanStdDevFuncC3)ippiMean_StdDev_32f_C3CMR : + 0; + if( ippFuncC3 ) + { + if( ippFuncC3(src.ptr(), (int)src.step[0], mask.ptr(), (int)mask.step[0], sz, 1, &pmean[0], &pstddev[0]) >= 0 && + ippFuncC3(src.ptr(), (int)src.step[0], mask.ptr(), (int)mask.step[0], sz, 2, &pmean[1], &pstddev[1]) >= 0 && + ippFuncC3(src.ptr(), (int)src.step[0], mask.ptr(), (int)mask.step[0], sz, 3, &pmean[2], &pstddev[2]) >= 0 ) + { + return true; + } + } + } + else + { + typedef IppStatus (CV_STDCALL* ippiMeanStdDevFuncC1)(const void *, int, IppiSize, Ipp64f *, Ipp64f *); + ippiMeanStdDevFuncC1 ippFuncC1 = + type == CV_8UC1 ? (ippiMeanStdDevFuncC1)ippiMean_StdDev_8u_C1R : + type == CV_16UC1 ? (ippiMeanStdDevFuncC1)ippiMean_StdDev_16u_C1R : +#if (IPP_VERSION_X100 >= 801) + type == CV_32FC1 ? 
(ippiMeanStdDevFuncC1)ippiMean_StdDev_32f_C1R ://Aug 2013: bug in IPP 7.1, 8.0 +#endif + 0; + if( ippFuncC1 ) + { + if( ippFuncC1(src.ptr(), (int)src.step[0], sz, pmean, pstddev) >= 0 ) + { + return true; + } + } + typedef IppStatus (CV_STDCALL* ippiMeanStdDevFuncC3)(const void *, int, IppiSize, int, Ipp64f *, Ipp64f *); + ippiMeanStdDevFuncC3 ippFuncC3 = + type == CV_8UC3 ? (ippiMeanStdDevFuncC3)ippiMean_StdDev_8u_C3CR : + type == CV_16UC3 ? (ippiMeanStdDevFuncC3)ippiMean_StdDev_16u_C3CR : + type == CV_32FC3 ? (ippiMeanStdDevFuncC3)ippiMean_StdDev_32f_C3CR : + 0; + if( ippFuncC3 ) + { + if( ippFuncC3(src.ptr(), (int)src.step[0], sz, 1, &pmean[0], &pstddev[0]) >= 0 && + ippFuncC3(src.ptr(), (int)src.step[0], sz, 2, &pmean[1], &pstddev[1]) >= 0 && + ippFuncC3(src.ptr(), (int)src.step[0], sz, 3, &pmean[2], &pstddev[2]) >= 0 ) + { + return true; + } + } + } + } +#else + CV_UNUSED(src); CV_UNUSED(_mean); CV_UNUSED(_sdv); CV_UNUSED(mask); +#endif + return false; +} +} +#endif + void cv::meanStdDev( InputArray _src, OutputArray _mean, OutputArray _sdv, InputArray _mask ) { CV_OCL_RUN(OCL_PERFORMANCE_CHECK(_src.isUMat()) && _src.dims() <= 2, @@ -1626,121 +1756,10 @@ void cv::meanStdDev( InputArray _src, OutputArray _mean, OutputArray _sdv, Input Mat src = _src.getMat(), mask = _mask.getMat(); CV_Assert( mask.empty() || mask.type() == CV_8UC1 ); + CV_IPP_RUN(IPP_VERSION_MAJOR >= 7, ipp_meanStdDev(src, _mean, _sdv, mask)); + int k, cn = src.channels(), depth = src.depth(); -#if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7) - CV_IPP_CHECK() - { - size_t total_size = src.total(); - int rows = src.size[0], cols = rows ? (int)(total_size/rows) : 0; - if( src.dims == 2 || (src.isContinuous() && mask.isContinuous() && cols > 0 && (size_t)rows*cols == total_size) ) - { - Ipp64f mean_temp[3]; - Ipp64f stddev_temp[3]; - Ipp64f *pmean = &mean_temp[0]; - Ipp64f *pstddev = &stddev_temp[0]; - Mat mean, stddev; - int dcn_mean = -1; - if( _mean.needed() ) - { - if( !_mean.fixedSize() ) - _mean.create(cn, 1, CV_64F, -1, true); - mean = _mean.getMat(); - dcn_mean = (int)mean.total(); - pmean = mean.ptr(); - } - int dcn_stddev = -1; - if( _sdv.needed() ) - { - if( !_sdv.fixedSize() ) - _sdv.create(cn, 1, CV_64F, -1, true); - stddev = _sdv.getMat(); - dcn_stddev = (int)stddev.total(); - pstddev = stddev.ptr(); - } - for( int c = cn; c < dcn_mean; c++ ) - pmean[c] = 0; - for( int c = cn; c < dcn_stddev; c++ ) - pstddev[c] = 0; - IppiSize sz = { cols, rows }; - int type = src.type(); - if( !mask.empty() ) - { - typedef IppStatus (CV_STDCALL* ippiMaskMeanStdDevFuncC1)(const void *, int, const void *, int, IppiSize, Ipp64f *, Ipp64f *); - ippiMaskMeanStdDevFuncC1 ippFuncC1 = - type == CV_8UC1 ? (ippiMaskMeanStdDevFuncC1)ippiMean_StdDev_8u_C1MR : - type == CV_16UC1 ? (ippiMaskMeanStdDevFuncC1)ippiMean_StdDev_16u_C1MR : - type == CV_32FC1 ? (ippiMaskMeanStdDevFuncC1)ippiMean_StdDev_32f_C1MR : - 0; - if( ippFuncC1 ) - { - if( ippFuncC1(src.ptr(), (int)src.step[0], mask.ptr(), (int)mask.step[0], sz, pmean, pstddev) >= 0 ) - { - CV_IMPL_ADD(CV_IMPL_IPP); - return; - } - setIppErrorStatus(); - } - typedef IppStatus (CV_STDCALL* ippiMaskMeanStdDevFuncC3)(const void *, int, const void *, int, IppiSize, int, Ipp64f *, Ipp64f *); - ippiMaskMeanStdDevFuncC3 ippFuncC3 = - type == CV_8UC3 ? (ippiMaskMeanStdDevFuncC3)ippiMean_StdDev_8u_C3CMR : - type == CV_16UC3 ? (ippiMaskMeanStdDevFuncC3)ippiMean_StdDev_16u_C3CMR : - type == CV_32FC3 ? 
(ippiMaskMeanStdDevFuncC3)ippiMean_StdDev_32f_C3CMR : - 0; - if( ippFuncC3 ) - { - if( ippFuncC3(src.ptr(), (int)src.step[0], mask.ptr(), (int)mask.step[0], sz, 1, &pmean[0], &pstddev[0]) >= 0 && - ippFuncC3(src.ptr(), (int)src.step[0], mask.ptr(), (int)mask.step[0], sz, 2, &pmean[1], &pstddev[1]) >= 0 && - ippFuncC3(src.ptr(), (int)src.step[0], mask.ptr(), (int)mask.step[0], sz, 3, &pmean[2], &pstddev[2]) >= 0 ) - { - CV_IMPL_ADD(CV_IMPL_IPP); - return; - } - setIppErrorStatus(); - } - } - else - { - typedef IppStatus (CV_STDCALL* ippiMeanStdDevFuncC1)(const void *, int, IppiSize, Ipp64f *, Ipp64f *); - ippiMeanStdDevFuncC1 ippFuncC1 = - type == CV_8UC1 ? (ippiMeanStdDevFuncC1)ippiMean_StdDev_8u_C1R : - type == CV_16UC1 ? (ippiMeanStdDevFuncC1)ippiMean_StdDev_16u_C1R : -#if (IPP_VERSION_X100 >= 801) - type == CV_32FC1 ? (ippiMeanStdDevFuncC1)ippiMean_StdDev_32f_C1R ://Aug 2013: bug in IPP 7.1, 8.0 -#endif - 0; - if( ippFuncC1 ) - { - if( ippFuncC1(src.ptr(), (int)src.step[0], sz, pmean, pstddev) >= 0 ) - { - CV_IMPL_ADD(CV_IMPL_IPP); - return; - } - setIppErrorStatus(); - } - typedef IppStatus (CV_STDCALL* ippiMeanStdDevFuncC3)(const void *, int, IppiSize, int, Ipp64f *, Ipp64f *); - ippiMeanStdDevFuncC3 ippFuncC3 = - type == CV_8UC3 ? (ippiMeanStdDevFuncC3)ippiMean_StdDev_8u_C3CR : - type == CV_16UC3 ? (ippiMeanStdDevFuncC3)ippiMean_StdDev_16u_C3CR : - type == CV_32FC3 ? (ippiMeanStdDevFuncC3)ippiMean_StdDev_32f_C3CR : - 0; - if( ippFuncC3 ) - { - if( ippFuncC3(src.ptr(), (int)src.step[0], sz, 1, &pmean[0], &pstddev[0]) >= 0 && - ippFuncC3(src.ptr(), (int)src.step[0], sz, 2, &pmean[1], &pstddev[1]) >= 0 && - ippFuncC3(src.ptr(), (int)src.step[0], sz, 3, &pmean[2], &pstddev[2]) >= 0 ) - { - CV_IMPL_ADD(CV_IMPL_IPP); - return; - } - setIppErrorStatus(); - } - } - } - } -#endif - - SumSqrFunc func = getSumSqrTab(depth); CV_Assert( func != 0 ); @@ -1951,6 +1970,8 @@ static void ofs2idx(const Mat& a, size_t ofs, int* idx) #ifdef HAVE_OPENCL +#define MINMAX_STRUCT_ALIGNMENT 8 // sizeof double + template void getMinMaxRes(const Mat & db, double * minVal, double * maxVal, int* minLoc, int* maxLoc, @@ -1961,28 +1982,32 @@ void getMinMaxRes(const Mat & db, double * minVal, double * maxVal, T maxval = std::numeric_limits::min() > 0 ? 
-std::numeric_limits::max() : std::numeric_limits::min(), maxval2 = maxval; uint minloc = index_max, maxloc = index_max; - int index = 0; + size_t index = 0; const T * minptr = NULL, * maxptr = NULL, * maxptr2 = NULL; const uint * minlocptr = NULL, * maxlocptr = NULL; if (minVal || minLoc) { minptr = db.ptr(); index += sizeof(T) * groupnum; + index = alignSize(index, MINMAX_STRUCT_ALIGNMENT); } if (maxVal || maxLoc) { maxptr = (const T *)(db.ptr() + index); index += sizeof(T) * groupnum; + index = alignSize(index, MINMAX_STRUCT_ALIGNMENT); } if (minLoc) { minlocptr = (const uint *)(db.ptr() + index); index += sizeof(uint) * groupnum; + index = alignSize(index, MINMAX_STRUCT_ALIGNMENT); } if (maxLoc) { maxlocptr = (const uint *)(db.ptr() + index); index += sizeof(uint) * groupnum; + index = alignSize(index, MINMAX_STRUCT_ALIGNMENT); } if (maxVal2) maxptr2 = (const T *)(db.ptr() + index); @@ -2102,7 +2127,8 @@ static bool ocl_minMaxIdx( InputArray _src, double* minVal, double* maxVal, int* char cvt[2][40]; String opts = format("-D DEPTH_%d -D srcT1=%s%s -D WGS=%d -D srcT=%s" " -D WGS2_ALIGNED=%d%s%s%s -D kercn=%d%s%s%s%s" - " -D dstT1=%s -D dstT=%s -D convertToDT=%s%s%s%s%s -D wdepth=%d -D convertFromU=%s", + " -D dstT1=%s -D dstT=%s -D convertToDT=%s%s%s%s%s -D wdepth=%d -D convertFromU=%s" + " -D MINMAX_STRUCT_ALIGNMENT=%d", depth, ocl::typeToStr(depth), haveMask ? " -D HAVE_MASK" : "", (int)wgs, ocl::typeToStr(CV_MAKE_TYPE(depth, kercn)), wgs2_aligned, doubleSupport ? " -D DOUBLE_SUPPORT" : "", @@ -2115,7 +2141,8 @@ static bool ocl_minMaxIdx( InputArray _src, double* minVal, double* maxVal, int* absValues ? " -D OP_ABS" : "", haveSrc2 ? " -D HAVE_SRC2" : "", maxVal2 ? " -D OP_CALC2" : "", haveSrc2 && _src2.isContinuous() ? " -D HAVE_SRC2_CONT" : "", ddepth, - depth <= CV_32S && ddepth == CV_32S ? ocl::convertTypeStr(CV_8U, ddepth, kercn, cvt[1]) : "noconvert"); + depth <= CV_32S && ddepth == CV_32S ? ocl::convertTypeStr(CV_8U, ddepth, kercn, cvt[1]) : "noconvert", + MINMAX_STRUCT_ALIGNMENT); ocl::Kernel k("minmaxloc", ocl::core::minmaxloc_oclsrc, opts); if (k.empty()) @@ -2124,7 +2151,8 @@ static bool ocl_minMaxIdx( InputArray _src, double* minVal, double* maxVal, int* int esz = CV_ELEM_SIZE(ddepth), esz32s = CV_ELEM_SIZE1(CV_32S), dbsize = groupnum * ((needMinVal ? esz : 0) + (needMaxVal ? esz : 0) + (needMinLoc ? esz32s : 0) + (needMaxLoc ? esz32s : 0) + - (maxVal2 ? esz : 0)); + (maxVal2 ? esz : 0)) + + 5 * MINMAX_STRUCT_ALIGNMENT; UMat src = _src.getUMat(), src2 = _src2.getUMat(), db(1, dbsize, CV_8UC1), mask = _mask.getUMat(); if (cn > 1 && !haveMask) @@ -2181,6 +2209,103 @@ static bool ocl_minMaxIdx( InputArray _src, double* minVal, double* maxVal, int* #endif +#ifdef HAVE_IPP +static bool ipp_minMaxIdx( Mat &src, double* minVal, double* maxVal, int* minIdx, int* maxIdx, Mat &mask) +{ +#if IPP_VERSION_MAJOR >= 7 + int type = src.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type); + size_t total_size = src.total(); + int rows = src.size[0], cols = rows ? (int)(total_size/rows) : 0; + if( src.dims == 2 || (src.isContinuous() && mask.isContinuous() && cols > 0 && (size_t)rows*cols == total_size) ) + { + IppiSize sz = { cols * cn, rows }; + + if( !mask.empty() ) + { + typedef IppStatus (CV_STDCALL* ippiMaskMinMaxIndxFuncC1)(const void *, int, const void *, int, + IppiSize, Ipp32f *, Ipp32f *, IppiPoint *, IppiPoint *); + + CV_SUPPRESS_DEPRECATED_START + ippiMaskMinMaxIndxFuncC1 ippFuncC1 = + type == CV_8UC1 ? (ippiMaskMinMaxIndxFuncC1)ippiMinMaxIndx_8u_C1MR : + type == CV_8SC1 ? 
(ippiMaskMinMaxIndxFuncC1)ippiMinMaxIndx_8s_C1MR : + type == CV_16UC1 ? (ippiMaskMinMaxIndxFuncC1)ippiMinMaxIndx_16u_C1MR : + type == CV_32FC1 ? (ippiMaskMinMaxIndxFuncC1)ippiMinMaxIndx_32f_C1MR : 0; + CV_SUPPRESS_DEPRECATED_END + + if( ippFuncC1 ) + { + Ipp32f min, max; + IppiPoint minp, maxp; + if( ippFuncC1(src.ptr(), (int)src.step[0], mask.ptr(), (int)mask.step[0], sz, &min, &max, &minp, &maxp) >= 0 ) + { + if( minVal ) + *minVal = (double)min; + if( maxVal ) + *maxVal = (double)max; + if( !minp.x && !minp.y && !maxp.x && !maxp.y && !mask.ptr()[0] ) + minp.x = maxp.x = -1; + if( minIdx ) + { + size_t minidx = minp.y * cols + minp.x + 1; + ofs2idx(src, minidx, minIdx); + } + if( maxIdx ) + { + size_t maxidx = maxp.y * cols + maxp.x + 1; + ofs2idx(src, maxidx, maxIdx); + } + return true; + } + } + } + else + { + typedef IppStatus (CV_STDCALL* ippiMinMaxIndxFuncC1)(const void *, int, IppiSize, Ipp32f *, Ipp32f *, IppiPoint *, IppiPoint *); + + CV_SUPPRESS_DEPRECATED_START + ippiMinMaxIndxFuncC1 ippFuncC1 = + depth == CV_8U ? (ippiMinMaxIndxFuncC1)ippiMinMaxIndx_8u_C1R : + depth == CV_8S ? (ippiMinMaxIndxFuncC1)ippiMinMaxIndx_8s_C1R : + depth == CV_16U ? (ippiMinMaxIndxFuncC1)ippiMinMaxIndx_16u_C1R : +#if !((defined _MSC_VER && defined _M_IX86) || defined __i386__) + depth == CV_32F ? (ippiMinMaxIndxFuncC1)ippiMinMaxIndx_32f_C1R : +#endif + 0; + CV_SUPPRESS_DEPRECATED_END + + if( ippFuncC1 ) + { + Ipp32f min, max; + IppiPoint minp, maxp; + if( ippFuncC1(src.ptr(), (int)src.step[0], sz, &min, &max, &minp, &maxp) >= 0 ) + { + if( minVal ) + *minVal = (double)min; + if( maxVal ) + *maxVal = (double)max; + if( minIdx ) + { + size_t minidx = minp.y * cols + minp.x + 1; + ofs2idx(src, minidx, minIdx); + } + if( maxIdx ) + { + size_t maxidx = maxp.y * cols + maxp.x + 1; + ofs2idx(src, maxidx, maxIdx); + } + return true; + } + } + } + } +#else +#endif + CV_UNUSED(src); CV_UNUSED(minVal); CV_UNUSED(maxVal); CV_UNUSED(minIdx); CV_UNUSED(maxIdx); CV_UNUSED(mask); + return false; +} +#endif + } void cv::minMaxIdx(InputArray _src, double* minVal, @@ -2195,101 +2320,7 @@ void cv::minMaxIdx(InputArray _src, double* minVal, ocl_minMaxIdx(_src, minVal, maxVal, minIdx, maxIdx, _mask)) Mat src = _src.getMat(), mask = _mask.getMat(); - -#if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7) - CV_IPP_CHECK() - { - size_t total_size = src.total(); - int rows = src.size[0], cols = rows ? (int)(total_size/rows) : 0; - if( src.dims == 2 || (src.isContinuous() && mask.isContinuous() && cols > 0 && (size_t)rows*cols == total_size) ) - { - IppiSize sz = { cols * cn, rows }; - - if( !mask.empty() ) - { - typedef IppStatus (CV_STDCALL* ippiMaskMinMaxIndxFuncC1)(const void *, int, const void *, int, - IppiSize, Ipp32f *, Ipp32f *, IppiPoint *, IppiPoint *); - - CV_SUPPRESS_DEPRECATED_START - ippiMaskMinMaxIndxFuncC1 ippFuncC1 = - type == CV_8UC1 ? (ippiMaskMinMaxIndxFuncC1)ippiMinMaxIndx_8u_C1MR : - type == CV_8SC1 ? (ippiMaskMinMaxIndxFuncC1)ippiMinMaxIndx_8s_C1MR : - type == CV_16UC1 ? (ippiMaskMinMaxIndxFuncC1)ippiMinMaxIndx_16u_C1MR : - type == CV_32FC1 ? 
(ippiMaskMinMaxIndxFuncC1)ippiMinMaxIndx_32f_C1MR : 0; - CV_SUPPRESS_DEPRECATED_END - - if( ippFuncC1 ) - { - Ipp32f min, max; - IppiPoint minp, maxp; - if( ippFuncC1(src.ptr(), (int)src.step[0], mask.ptr(), (int)mask.step[0], sz, &min, &max, &minp, &maxp) >= 0 ) - { - if( minVal ) - *minVal = (double)min; - if( maxVal ) - *maxVal = (double)max; - if( !minp.x && !minp.y && !maxp.x && !maxp.y && !mask.ptr()[0] ) - minp.x = maxp.x = -1; - if( minIdx ) - { - size_t minidx = minp.y * cols + minp.x + 1; - ofs2idx(src, minidx, minIdx); - } - if( maxIdx ) - { - size_t maxidx = maxp.y * cols + maxp.x + 1; - ofs2idx(src, maxidx, maxIdx); - } - CV_IMPL_ADD(CV_IMPL_IPP); - return; - } - setIppErrorStatus(); - } - } - else - { - typedef IppStatus (CV_STDCALL* ippiMinMaxIndxFuncC1)(const void *, int, IppiSize, Ipp32f *, Ipp32f *, IppiPoint *, IppiPoint *); - - CV_SUPPRESS_DEPRECATED_START - ippiMinMaxIndxFuncC1 ippFuncC1 = - depth == CV_8U ? (ippiMinMaxIndxFuncC1)ippiMinMaxIndx_8u_C1R : - depth == CV_8S ? (ippiMinMaxIndxFuncC1)ippiMinMaxIndx_8s_C1R : - depth == CV_16U ? (ippiMinMaxIndxFuncC1)ippiMinMaxIndx_16u_C1R : - #if !((defined _MSC_VER && defined _M_IX86) || defined __i386__) - depth == CV_32F ? (ippiMinMaxIndxFuncC1)ippiMinMaxIndx_32f_C1R : - #endif - 0; - CV_SUPPRESS_DEPRECATED_END - - if( ippFuncC1 ) - { - Ipp32f min, max; - IppiPoint minp, maxp; - if( ippFuncC1(src.ptr(), (int)src.step[0], sz, &min, &max, &minp, &maxp) >= 0 ) - { - if( minVal ) - *minVal = (double)min; - if( maxVal ) - *maxVal = (double)max; - if( minIdx ) - { - size_t minidx = minp.y * cols + minp.x + 1; - ofs2idx(src, minidx, minIdx); - } - if( maxIdx ) - { - size_t maxidx = maxp.y * cols + maxp.x + 1; - ofs2idx(src, maxidx, maxIdx); - } - CV_IMPL_ADD(CV_IMPL_IPP); - return; - } - setIppErrorStatus(); - } - } - } - } -#endif + CV_IPP_RUN(IPP_VERSION_MAJOR >= 7, ipp_minMaxIdx(src, minVal, maxVal, minIdx, maxIdx, mask)) MinMaxIdxFunc func = getMinmaxTab(depth); CV_Assert( func != 0 ); @@ -2611,6 +2642,173 @@ static bool ocl_norm( InputArray _src, int normType, InputArray _mask, double & #endif +#ifdef HAVE_IPP +static bool ipp_norm(Mat &src, int normType, Mat &mask, double &result) +{ +#if IPP_VERSION_MAJOR >= 7 + int cn = src.channels(); + size_t total_size = src.total(); + int rows = src.size[0], cols = rows ? (int)(total_size/rows) : 0; + + if( (src.dims == 2 || (src.isContinuous() && mask.isContinuous())) + && cols > 0 && (size_t)rows*cols == total_size + && (normType == NORM_INF || normType == NORM_L1 || + normType == NORM_L2 || normType == NORM_L2SQR) ) + { + IppiSize sz = { cols, rows }; + int type = src.type(); + if( !mask.empty() ) + { + typedef IppStatus (CV_STDCALL* ippiMaskNormFuncC1)(const void *, int, const void *, int, IppiSize, Ipp64f *); + ippiMaskNormFuncC1 ippFuncC1 = + normType == NORM_INF ? + (type == CV_8UC1 ? (ippiMaskNormFuncC1)ippiNorm_Inf_8u_C1MR : + type == CV_8SC1 ? (ippiMaskNormFuncC1)ippiNorm_Inf_8s_C1MR : +// type == CV_16UC1 ? (ippiMaskNormFuncC1)ippiNorm_Inf_16u_C1MR : + type == CV_32FC1 ? (ippiMaskNormFuncC1)ippiNorm_Inf_32f_C1MR : + 0) : + normType == NORM_L1 ? + (type == CV_8UC1 ? (ippiMaskNormFuncC1)ippiNorm_L1_8u_C1MR : + type == CV_8SC1 ? (ippiMaskNormFuncC1)ippiNorm_L1_8s_C1MR : + type == CV_16UC1 ? (ippiMaskNormFuncC1)ippiNorm_L1_16u_C1MR : + type == CV_32FC1 ? (ippiMaskNormFuncC1)ippiNorm_L1_32f_C1MR : + 0) : + normType == NORM_L2 || normType == NORM_L2SQR ? + (type == CV_8UC1 ? (ippiMaskNormFuncC1)ippiNorm_L2_8u_C1MR : + type == CV_8SC1 ? 
(ippiMaskNormFuncC1)ippiNorm_L2_8s_C1MR : + type == CV_16UC1 ? (ippiMaskNormFuncC1)ippiNorm_L2_16u_C1MR : + type == CV_32FC1 ? (ippiMaskNormFuncC1)ippiNorm_L2_32f_C1MR : + 0) : 0; + if( ippFuncC1 ) + { + Ipp64f norm; + if( ippFuncC1(src.ptr(), (int)src.step[0], mask.ptr(), (int)mask.step[0], sz, &norm) >= 0 ) + { + result = (normType == NORM_L2SQR ? (double)(norm * norm) : (double)norm); + return true; + } + } + /*typedef IppStatus (CV_STDCALL* ippiMaskNormFuncC3)(const void *, int, const void *, int, IppiSize, int, Ipp64f *); + ippiMaskNormFuncC3 ippFuncC3 = + normType == NORM_INF ? + (type == CV_8UC3 ? (ippiMaskNormFuncC3)ippiNorm_Inf_8u_C3CMR : + type == CV_8SC3 ? (ippiMaskNormFuncC3)ippiNorm_Inf_8s_C3CMR : + type == CV_16UC3 ? (ippiMaskNormFuncC3)ippiNorm_Inf_16u_C3CMR : + type == CV_32FC3 ? (ippiMaskNormFuncC3)ippiNorm_Inf_32f_C3CMR : + 0) : + normType == NORM_L1 ? + (type == CV_8UC3 ? (ippiMaskNormFuncC3)ippiNorm_L1_8u_C3CMR : + type == CV_8SC3 ? (ippiMaskNormFuncC3)ippiNorm_L1_8s_C3CMR : + type == CV_16UC3 ? (ippiMaskNormFuncC3)ippiNorm_L1_16u_C3CMR : + type == CV_32FC3 ? (ippiMaskNormFuncC3)ippiNorm_L1_32f_C3CMR : + 0) : + normType == NORM_L2 || normType == NORM_L2SQR ? + (type == CV_8UC3 ? (ippiMaskNormFuncC3)ippiNorm_L2_8u_C3CMR : + type == CV_8SC3 ? (ippiMaskNormFuncC3)ippiNorm_L2_8s_C3CMR : + type == CV_16UC3 ? (ippiMaskNormFuncC3)ippiNorm_L2_16u_C3CMR : + type == CV_32FC3 ? (ippiMaskNormFuncC3)ippiNorm_L2_32f_C3CMR : + 0) : 0; + if( ippFuncC3 ) + { + Ipp64f norm1, norm2, norm3; + if( ippFuncC3(src.data, (int)src.step[0], mask.data, (int)mask.step[0], sz, 1, &norm1) >= 0 && + ippFuncC3(src.data, (int)src.step[0], mask.data, (int)mask.step[0], sz, 2, &norm2) >= 0 && + ippFuncC3(src.data, (int)src.step[0], mask.data, (int)mask.step[0], sz, 3, &norm3) >= 0) + { + Ipp64f norm = + normType == NORM_INF ? std::max(std::max(norm1, norm2), norm3) : + normType == NORM_L1 ? norm1 + norm2 + norm3 : + normType == NORM_L2 || normType == NORM_L2SQR ? std::sqrt(norm1 * norm1 + norm2 * norm2 + norm3 * norm3) : + 0; + result = (normType == NORM_L2SQR ? (double)(norm * norm) : (double)norm); + return true; + } + }*/ + } + else + { + typedef IppStatus (CV_STDCALL* ippiNormFuncHint)(const void *, int, IppiSize, Ipp64f *, IppHintAlgorithm hint); + typedef IppStatus (CV_STDCALL* ippiNormFuncNoHint)(const void *, int, IppiSize, Ipp64f *); + ippiNormFuncHint ippFuncHint = + normType == NORM_L1 ? + (type == CV_32FC1 ? (ippiNormFuncHint)ippiNorm_L1_32f_C1R : + type == CV_32FC3 ? (ippiNormFuncHint)ippiNorm_L1_32f_C3R : + type == CV_32FC4 ? (ippiNormFuncHint)ippiNorm_L1_32f_C4R : + 0) : + normType == NORM_L2 || normType == NORM_L2SQR ? + (type == CV_32FC1 ? (ippiNormFuncHint)ippiNorm_L2_32f_C1R : + type == CV_32FC3 ? (ippiNormFuncHint)ippiNorm_L2_32f_C3R : + type == CV_32FC4 ? (ippiNormFuncHint)ippiNorm_L2_32f_C4R : + 0) : 0; + ippiNormFuncNoHint ippFuncNoHint = + normType == NORM_INF ? + (type == CV_8UC1 ? (ippiNormFuncNoHint)ippiNorm_Inf_8u_C1R : + type == CV_8UC3 ? (ippiNormFuncNoHint)ippiNorm_Inf_8u_C3R : + type == CV_8UC4 ? (ippiNormFuncNoHint)ippiNorm_Inf_8u_C4R : + type == CV_16UC1 ? (ippiNormFuncNoHint)ippiNorm_Inf_16u_C1R : + type == CV_16UC3 ? (ippiNormFuncNoHint)ippiNorm_Inf_16u_C3R : + type == CV_16UC4 ? (ippiNormFuncNoHint)ippiNorm_Inf_16u_C4R : + type == CV_16SC1 ? (ippiNormFuncNoHint)ippiNorm_Inf_16s_C1R : +#if (IPP_VERSION_X100 >= 801) + type == CV_16SC3 ? (ippiNormFuncNoHint)ippiNorm_Inf_16s_C3R : //Aug 2013: problem in IPP 7.1, 8.0 : -32768 + type == CV_16SC4 ? 
(ippiNormFuncNoHint)ippiNorm_Inf_16s_C4R : //Aug 2013: problem in IPP 7.1, 8.0 : -32768 +#endif + type == CV_32FC1 ? (ippiNormFuncNoHint)ippiNorm_Inf_32f_C1R : + type == CV_32FC3 ? (ippiNormFuncNoHint)ippiNorm_Inf_32f_C3R : + type == CV_32FC4 ? (ippiNormFuncNoHint)ippiNorm_Inf_32f_C4R : + 0) : + normType == NORM_L1 ? + (type == CV_8UC1 ? (ippiNormFuncNoHint)ippiNorm_L1_8u_C1R : + type == CV_8UC3 ? (ippiNormFuncNoHint)ippiNorm_L1_8u_C3R : + type == CV_8UC4 ? (ippiNormFuncNoHint)ippiNorm_L1_8u_C4R : + type == CV_16UC1 ? (ippiNormFuncNoHint)ippiNorm_L1_16u_C1R : + type == CV_16UC3 ? (ippiNormFuncNoHint)ippiNorm_L1_16u_C3R : + type == CV_16UC4 ? (ippiNormFuncNoHint)ippiNorm_L1_16u_C4R : + type == CV_16SC1 ? (ippiNormFuncNoHint)ippiNorm_L1_16s_C1R : + type == CV_16SC3 ? (ippiNormFuncNoHint)ippiNorm_L1_16s_C3R : + type == CV_16SC4 ? (ippiNormFuncNoHint)ippiNorm_L1_16s_C4R : + 0) : + normType == NORM_L2 || normType == NORM_L2SQR ? + (type == CV_8UC1 ? (ippiNormFuncNoHint)ippiNorm_L2_8u_C1R : + type == CV_8UC3 ? (ippiNormFuncNoHint)ippiNorm_L2_8u_C3R : + type == CV_8UC4 ? (ippiNormFuncNoHint)ippiNorm_L2_8u_C4R : + type == CV_16UC1 ? (ippiNormFuncNoHint)ippiNorm_L2_16u_C1R : + type == CV_16UC3 ? (ippiNormFuncNoHint)ippiNorm_L2_16u_C3R : + type == CV_16UC4 ? (ippiNormFuncNoHint)ippiNorm_L2_16u_C4R : + type == CV_16SC1 ? (ippiNormFuncNoHint)ippiNorm_L2_16s_C1R : + type == CV_16SC3 ? (ippiNormFuncNoHint)ippiNorm_L2_16s_C3R : + type == CV_16SC4 ? (ippiNormFuncNoHint)ippiNorm_L2_16s_C4R : + 0) : 0; + // Make sure only zero or one version of the function pointer is valid + CV_Assert(!ippFuncHint || !ippFuncNoHint); + if( ippFuncHint || ippFuncNoHint ) + { + Ipp64f norm_array[4]; + IppStatus ret = ippFuncHint ? ippFuncHint(src.ptr(), (int)src.step[0], sz, norm_array, ippAlgHintAccurate) : + ippFuncNoHint(src.ptr(), (int)src.step[0], sz, norm_array); + if( ret >= 0 ) + { + Ipp64f norm = (normType == NORM_L2 || normType == NORM_L2SQR) ? norm_array[0] * norm_array[0] : norm_array[0]; + for( int i = 1; i < cn; i++ ) + { + norm = + normType == NORM_INF ? std::max(norm, norm_array[i]) : + normType == NORM_L1 ? norm + norm_array[i] : + normType == NORM_L2 || normType == NORM_L2SQR ? norm + norm_array[i] * norm_array[i] : + 0; + } + result = (normType == NORM_L2 ? (double)std::sqrt(norm) : (double)norm); + return true; + } + } + } + } +#else + CV_UNUSED(src); CV_UNUSED(normType); CV_UNUSED(mask); CV_UNUSED(result); +#endif + return false; +} +#endif } double cv::norm( InputArray _src, int normType, InputArray _mask ) @@ -2620,182 +2818,20 @@ double cv::norm( InputArray _src, int normType, InputArray _mask ) normType == NORM_L2 || normType == NORM_L2SQR || ((normType == NORM_HAMMING || normType == NORM_HAMMING2) && _src.type() == CV_8U) ); -#ifdef HAVE_OPENCL +#if defined HAVE_OPENCL || defined HAVE_IPP double _result = 0; +#endif + +#ifdef HAVE_OPENCL CV_OCL_RUN_(OCL_PERFORMANCE_CHECK(_src.isUMat()) && _src.dims() <= 2, ocl_norm(_src, normType, _mask, _result), _result) #endif Mat src = _src.getMat(), mask = _mask.getMat(); + CV_IPP_RUN(IPP_VERSION_MAJOR >= 7, ipp_norm(src, normType, mask, _result), _result); + int depth = src.depth(), cn = src.channels(); - -#if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7) - CV_IPP_CHECK() - { - size_t total_size = src.total(); - int rows = src.size[0], cols = rows ? 
(int)(total_size/rows) : 0; - - if( (src.dims == 2 || (src.isContinuous() && mask.isContinuous())) - && cols > 0 && (size_t)rows*cols == total_size - && (normType == NORM_INF || normType == NORM_L1 || - normType == NORM_L2 || normType == NORM_L2SQR) ) - { - IppiSize sz = { cols, rows }; - int type = src.type(); - if( !mask.empty() ) - { - typedef IppStatus (CV_STDCALL* ippiMaskNormFuncC1)(const void *, int, const void *, int, IppiSize, Ipp64f *); - ippiMaskNormFuncC1 ippFuncC1 = - normType == NORM_INF ? - (type == CV_8UC1 ? (ippiMaskNormFuncC1)ippiNorm_Inf_8u_C1MR : - type == CV_8SC1 ? (ippiMaskNormFuncC1)ippiNorm_Inf_8s_C1MR : - // type == CV_16UC1 ? (ippiMaskNormFuncC1)ippiNorm_Inf_16u_C1MR : - type == CV_32FC1 ? (ippiMaskNormFuncC1)ippiNorm_Inf_32f_C1MR : - 0) : - normType == NORM_L1 ? - (type == CV_8UC1 ? (ippiMaskNormFuncC1)ippiNorm_L1_8u_C1MR : - type == CV_8SC1 ? (ippiMaskNormFuncC1)ippiNorm_L1_8s_C1MR : - type == CV_16UC1 ? (ippiMaskNormFuncC1)ippiNorm_L1_16u_C1MR : - type == CV_32FC1 ? (ippiMaskNormFuncC1)ippiNorm_L1_32f_C1MR : - 0) : - normType == NORM_L2 || normType == NORM_L2SQR ? - (type == CV_8UC1 ? (ippiMaskNormFuncC1)ippiNorm_L2_8u_C1MR : - type == CV_8SC1 ? (ippiMaskNormFuncC1)ippiNorm_L2_8s_C1MR : - type == CV_16UC1 ? (ippiMaskNormFuncC1)ippiNorm_L2_16u_C1MR : - type == CV_32FC1 ? (ippiMaskNormFuncC1)ippiNorm_L2_32f_C1MR : - 0) : 0; - if( ippFuncC1 ) - { - Ipp64f norm; - if( ippFuncC1(src.ptr(), (int)src.step[0], mask.ptr(), (int)mask.step[0], sz, &norm) >= 0 ) - { - CV_IMPL_ADD(CV_IMPL_IPP); - return normType == NORM_L2SQR ? (double)(norm * norm) : (double)norm; - } - - setIppErrorStatus(); - } - /*typedef IppStatus (CV_STDCALL* ippiMaskNormFuncC3)(const void *, int, const void *, int, IppiSize, int, Ipp64f *); - ippiMaskNormFuncC3 ippFuncC3 = - normType == NORM_INF ? - (type == CV_8UC3 ? (ippiMaskNormFuncC3)ippiNorm_Inf_8u_C3CMR : - type == CV_8SC3 ? (ippiMaskNormFuncC3)ippiNorm_Inf_8s_C3CMR : - type == CV_16UC3 ? (ippiMaskNormFuncC3)ippiNorm_Inf_16u_C3CMR : - type == CV_32FC3 ? (ippiMaskNormFuncC3)ippiNorm_Inf_32f_C3CMR : - 0) : - normType == NORM_L1 ? - (type == CV_8UC3 ? (ippiMaskNormFuncC3)ippiNorm_L1_8u_C3CMR : - type == CV_8SC3 ? (ippiMaskNormFuncC3)ippiNorm_L1_8s_C3CMR : - type == CV_16UC3 ? (ippiMaskNormFuncC3)ippiNorm_L1_16u_C3CMR : - type == CV_32FC3 ? (ippiMaskNormFuncC3)ippiNorm_L1_32f_C3CMR : - 0) : - normType == NORM_L2 || normType == NORM_L2SQR ? - (type == CV_8UC3 ? (ippiMaskNormFuncC3)ippiNorm_L2_8u_C3CMR : - type == CV_8SC3 ? (ippiMaskNormFuncC3)ippiNorm_L2_8s_C3CMR : - type == CV_16UC3 ? (ippiMaskNormFuncC3)ippiNorm_L2_16u_C3CMR : - type == CV_32FC3 ? (ippiMaskNormFuncC3)ippiNorm_L2_32f_C3CMR : - 0) : 0; - if( ippFuncC3 ) - { - Ipp64f norm1, norm2, norm3; - if( ippFuncC3(src.data, (int)src.step[0], mask.data, (int)mask.step[0], sz, 1, &norm1) >= 0 && - ippFuncC3(src.data, (int)src.step[0], mask.data, (int)mask.step[0], sz, 2, &norm2) >= 0 && - ippFuncC3(src.data, (int)src.step[0], mask.data, (int)mask.step[0], sz, 3, &norm3) >= 0) - { - Ipp64f norm = - normType == NORM_INF ? std::max(std::max(norm1, norm2), norm3) : - normType == NORM_L1 ? norm1 + norm2 + norm3 : - normType == NORM_L2 || normType == NORM_L2SQR ? std::sqrt(norm1 * norm1 + norm2 * norm2 + norm3 * norm3) : - 0; - CV_IMPL_ADD(CV_IMPL_IPP); - return normType == NORM_L2SQR ? 
(double)(norm * norm) : (double)norm; - } - setIppErrorStatus(); - }*/ - } - else - { - typedef IppStatus (CV_STDCALL* ippiNormFuncHint)(const void *, int, IppiSize, Ipp64f *, IppHintAlgorithm hint); - typedef IppStatus (CV_STDCALL* ippiNormFuncNoHint)(const void *, int, IppiSize, Ipp64f *); - ippiNormFuncHint ippFuncHint = - normType == NORM_L1 ? - (type == CV_32FC1 ? (ippiNormFuncHint)ippiNorm_L1_32f_C1R : - type == CV_32FC3 ? (ippiNormFuncHint)ippiNorm_L1_32f_C3R : - type == CV_32FC4 ? (ippiNormFuncHint)ippiNorm_L1_32f_C4R : - 0) : - normType == NORM_L2 || normType == NORM_L2SQR ? - (type == CV_32FC1 ? (ippiNormFuncHint)ippiNorm_L2_32f_C1R : - type == CV_32FC3 ? (ippiNormFuncHint)ippiNorm_L2_32f_C3R : - type == CV_32FC4 ? (ippiNormFuncHint)ippiNorm_L2_32f_C4R : - 0) : 0; - ippiNormFuncNoHint ippFuncNoHint = - normType == NORM_INF ? - (type == CV_8UC1 ? (ippiNormFuncNoHint)ippiNorm_Inf_8u_C1R : - type == CV_8UC3 ? (ippiNormFuncNoHint)ippiNorm_Inf_8u_C3R : - type == CV_8UC4 ? (ippiNormFuncNoHint)ippiNorm_Inf_8u_C4R : - type == CV_16UC1 ? (ippiNormFuncNoHint)ippiNorm_Inf_16u_C1R : - type == CV_16UC3 ? (ippiNormFuncNoHint)ippiNorm_Inf_16u_C3R : - type == CV_16UC4 ? (ippiNormFuncNoHint)ippiNorm_Inf_16u_C4R : - type == CV_16SC1 ? (ippiNormFuncNoHint)ippiNorm_Inf_16s_C1R : -#if (IPP_VERSION_X100 >= 801) - type == CV_16SC3 ? (ippiNormFuncNoHint)ippiNorm_Inf_16s_C3R : //Aug 2013: problem in IPP 7.1, 8.0 : -32768 - type == CV_16SC4 ? (ippiNormFuncNoHint)ippiNorm_Inf_16s_C4R : //Aug 2013: problem in IPP 7.1, 8.0 : -32768 -#endif - type == CV_32FC1 ? (ippiNormFuncNoHint)ippiNorm_Inf_32f_C1R : - type == CV_32FC3 ? (ippiNormFuncNoHint)ippiNorm_Inf_32f_C3R : - type == CV_32FC4 ? (ippiNormFuncNoHint)ippiNorm_Inf_32f_C4R : - 0) : - normType == NORM_L1 ? - (type == CV_8UC1 ? (ippiNormFuncNoHint)ippiNorm_L1_8u_C1R : - type == CV_8UC3 ? (ippiNormFuncNoHint)ippiNorm_L1_8u_C3R : - type == CV_8UC4 ? (ippiNormFuncNoHint)ippiNorm_L1_8u_C4R : - type == CV_16UC1 ? (ippiNormFuncNoHint)ippiNorm_L1_16u_C1R : - type == CV_16UC3 ? (ippiNormFuncNoHint)ippiNorm_L1_16u_C3R : - type == CV_16UC4 ? (ippiNormFuncNoHint)ippiNorm_L1_16u_C4R : - type == CV_16SC1 ? (ippiNormFuncNoHint)ippiNorm_L1_16s_C1R : - type == CV_16SC3 ? (ippiNormFuncNoHint)ippiNorm_L1_16s_C3R : - type == CV_16SC4 ? (ippiNormFuncNoHint)ippiNorm_L1_16s_C4R : - 0) : - normType == NORM_L2 || normType == NORM_L2SQR ? - (type == CV_8UC1 ? (ippiNormFuncNoHint)ippiNorm_L2_8u_C1R : - type == CV_8UC3 ? (ippiNormFuncNoHint)ippiNorm_L2_8u_C3R : - type == CV_8UC4 ? (ippiNormFuncNoHint)ippiNorm_L2_8u_C4R : - type == CV_16UC1 ? (ippiNormFuncNoHint)ippiNorm_L2_16u_C1R : - type == CV_16UC3 ? (ippiNormFuncNoHint)ippiNorm_L2_16u_C3R : - type == CV_16UC4 ? (ippiNormFuncNoHint)ippiNorm_L2_16u_C4R : - type == CV_16SC1 ? (ippiNormFuncNoHint)ippiNorm_L2_16s_C1R : - type == CV_16SC3 ? (ippiNormFuncNoHint)ippiNorm_L2_16s_C3R : - type == CV_16SC4 ? (ippiNormFuncNoHint)ippiNorm_L2_16s_C4R : - 0) : 0; - // Make sure only zero or one version of the function pointer is valid - CV_Assert(!ippFuncHint || !ippFuncNoHint); - if( ippFuncHint || ippFuncNoHint ) - { - Ipp64f norm_array[4]; - IppStatus ret = ippFuncHint ? ippFuncHint(src.ptr(), (int)src.step[0], sz, norm_array, ippAlgHintAccurate) : - ippFuncNoHint(src.ptr(), (int)src.step[0], sz, norm_array); - if( ret >= 0 ) - { - Ipp64f norm = (normType == NORM_L2 || normType == NORM_L2SQR) ? norm_array[0] * norm_array[0] : norm_array[0]; - for( int i = 1; i < cn; i++ ) - { - norm = - normType == NORM_INF ? 
std::max(norm, norm_array[i]) : - normType == NORM_L1 ? norm + norm_array[i] : - normType == NORM_L2 || normType == NORM_L2SQR ? norm + norm_array[i] * norm_array[i] : - 0; - } - CV_IMPL_ADD(CV_IMPL_IPP); - return normType == NORM_L2 ? (double)std::sqrt(norm) : (double)norm; - } - setIppErrorStatus(); - } - } - } - } -#endif - if( src.isContinuous() && mask.empty() ) { size_t len = src.total()*cn; @@ -2992,139 +3028,19 @@ static bool ocl_norm( InputArray _src1, InputArray _src2, int normType, InputArr #endif -double cv::norm( InputArray _src1, InputArray _src2, int normType, InputArray _mask ) +#ifdef HAVE_IPP +namespace cv { - CV_Assert( _src1.sameSize(_src2) && _src1.type() == _src2.type() ); - -#ifdef HAVE_OPENCL - double _result = 0; - CV_OCL_RUN_(OCL_PERFORMANCE_CHECK(_src1.isUMat()), - ocl_norm(_src1, _src2, normType, _mask, _result), - _result) -#endif +static bool ipp_norm(InputArray _src1, InputArray _src2, int normType, InputArray _mask, double &result) +{ +#if IPP_VERSION_MAJOR >= 7 + Mat src1 = _src1.getMat(), src2 = _src2.getMat(), mask = _mask.getMat(); if( normType & CV_RELATIVE ) { -#if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7) - CV_IPP_CHECK() - { - Mat src1 = _src1.getMat(), src2 = _src2.getMat(), mask = _mask.getMat(); - - normType &= NORM_TYPE_MASK; - CV_Assert( normType == NORM_INF || normType == NORM_L1 || normType == NORM_L2 || normType == NORM_L2SQR || - ((normType == NORM_HAMMING || normType == NORM_HAMMING2) && src1.type() == CV_8U) ); - size_t total_size = src1.total(); - int rows = src1.size[0], cols = rows ? (int)(total_size/rows) : 0; - if( (src1.dims == 2 || (src1.isContinuous() && src2.isContinuous() && mask.isContinuous())) - && cols > 0 && (size_t)rows*cols == total_size - && (normType == NORM_INF || normType == NORM_L1 || - normType == NORM_L2 || normType == NORM_L2SQR) ) - { - IppiSize sz = { cols, rows }; - int type = src1.type(); - if( !mask.empty() ) - { - typedef IppStatus (CV_STDCALL* ippiMaskNormRelFuncC1)(const void *, int, const void *, int, const void *, int, IppiSize, Ipp64f *); - ippiMaskNormRelFuncC1 ippFuncC1 = - normType == NORM_INF ? - (type == CV_8UC1 ? (ippiMaskNormRelFuncC1)ippiNormRel_Inf_8u_C1MR : -#ifndef __APPLE__ - type == CV_8SC1 ? (ippiMaskNormRelFuncC1)ippiNormRel_Inf_8s_C1MR : -#endif - type == CV_16UC1 ? (ippiMaskNormRelFuncC1)ippiNormRel_Inf_16u_C1MR : - type == CV_32FC1 ? (ippiMaskNormRelFuncC1)ippiNormRel_Inf_32f_C1MR : - 0) : - normType == NORM_L1 ? - (type == CV_8UC1 ? (ippiMaskNormRelFuncC1)ippiNormRel_L1_8u_C1MR : -#ifndef __APPLE__ - type == CV_8SC1 ? (ippiMaskNormRelFuncC1)ippiNormRel_L1_8s_C1MR : -#endif - type == CV_16UC1 ? (ippiMaskNormRelFuncC1)ippiNormRel_L1_16u_C1MR : - type == CV_32FC1 ? (ippiMaskNormRelFuncC1)ippiNormRel_L1_32f_C1MR : - 0) : - normType == NORM_L2 || normType == NORM_L2SQR ? - (type == CV_8UC1 ? (ippiMaskNormRelFuncC1)ippiNormRel_L2_8u_C1MR : - type == CV_8SC1 ? (ippiMaskNormRelFuncC1)ippiNormRel_L2_8s_C1MR : - type == CV_16UC1 ? (ippiMaskNormRelFuncC1)ippiNormRel_L2_16u_C1MR : - type == CV_32FC1 ? (ippiMaskNormRelFuncC1)ippiNormRel_L2_32f_C1MR : - 0) : 0; - if( ippFuncC1 ) - { - Ipp64f norm; - if( ippFuncC1(src1.ptr(), (int)src1.step[0], src2.ptr(), (int)src2.step[0], mask.ptr(), (int)mask.step[0], sz, &norm) >= 0 ) - { - CV_IMPL_ADD(CV_IMPL_IPP); - return normType == NORM_L2SQR ? 
(double)(norm * norm) : (double)norm; - } - setIppErrorStatus(); - } - } - else - { - typedef IppStatus (CV_STDCALL* ippiNormRelFuncNoHint)(const void *, int, const void *, int, IppiSize, Ipp64f *); - typedef IppStatus (CV_STDCALL* ippiNormRelFuncHint)(const void *, int, const void *, int, IppiSize, Ipp64f *, IppHintAlgorithm hint); - ippiNormRelFuncNoHint ippFuncNoHint = - normType == NORM_INF ? - (type == CV_8UC1 ? (ippiNormRelFuncNoHint)ippiNormRel_Inf_8u_C1R : - type == CV_16UC1 ? (ippiNormRelFuncNoHint)ippiNormRel_Inf_16u_C1R : - type == CV_16SC1 ? (ippiNormRelFuncNoHint)ippiNormRel_Inf_16s_C1R : - type == CV_32FC1 ? (ippiNormRelFuncNoHint)ippiNormRel_Inf_32f_C1R : - 0) : - normType == NORM_L1 ? - (type == CV_8UC1 ? (ippiNormRelFuncNoHint)ippiNormRel_L1_8u_C1R : - type == CV_16UC1 ? (ippiNormRelFuncNoHint)ippiNormRel_L1_16u_C1R : - type == CV_16SC1 ? (ippiNormRelFuncNoHint)ippiNormRel_L1_16s_C1R : - 0) : - normType == NORM_L2 || normType == NORM_L2SQR ? - (type == CV_8UC1 ? (ippiNormRelFuncNoHint)ippiNormRel_L2_8u_C1R : - type == CV_16UC1 ? (ippiNormRelFuncNoHint)ippiNormRel_L2_16u_C1R : - type == CV_16SC1 ? (ippiNormRelFuncNoHint)ippiNormRel_L2_16s_C1R : - 0) : 0; - ippiNormRelFuncHint ippFuncHint = - normType == NORM_L1 ? - (type == CV_32FC1 ? (ippiNormRelFuncHint)ippiNormRel_L1_32f_C1R : - 0) : - normType == NORM_L2 || normType == NORM_L2SQR ? - (type == CV_32FC1 ? (ippiNormRelFuncHint)ippiNormRel_L2_32f_C1R : - 0) : 0; - if (ippFuncNoHint) - { - Ipp64f norm; - if( ippFuncNoHint(src1.ptr(), (int)src1.step[0], src2.ptr(), (int)src2.step[0], sz, &norm) >= 0 ) - { - CV_IMPL_ADD(CV_IMPL_IPP); - return (double)norm; - } - setIppErrorStatus(); - } - if (ippFuncHint) - { - Ipp64f norm; - if( ippFuncHint(src1.ptr(), (int)src1.step[0], src2.ptr(), (int)src2.step[0], sz, &norm, ippAlgHintAccurate) >= 0 ) - { - CV_IMPL_ADD(CV_IMPL_IPP); - return (double)norm; - } - setIppErrorStatus(); - } - } - } - } -#endif - return norm(_src1, _src2, normType & ~CV_RELATIVE, _mask)/(norm(_src2, normType, _mask) + DBL_EPSILON); - } - - Mat src1 = _src1.getMat(), src2 = _src2.getMat(), mask = _mask.getMat(); - int depth = src1.depth(), cn = src1.channels(); - - normType &= 7; - CV_Assert( normType == NORM_INF || normType == NORM_L1 || - normType == NORM_L2 || normType == NORM_L2SQR || - ((normType == NORM_HAMMING || normType == NORM_HAMMING2) && src1.type() == CV_8U) ); - -#if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7) - CV_IPP_CHECK() - { + normType &= NORM_TYPE_MASK; + CV_Assert( normType == NORM_INF || normType == NORM_L1 || normType == NORM_L2 || normType == NORM_L2SQR || + ((normType == NORM_HAMMING || normType == NORM_HAMMING2) && src1.type() == CV_8U) ); size_t total_size = src1.total(); int rows = src1.size[0], cols = rows ? (int)(total_size/rows) : 0; if( (src1.dims == 2 || (src1.isContinuous() && src2.isContinuous() && mask.isContinuous())) @@ -3136,161 +3052,294 @@ double cv::norm( InputArray _src1, InputArray _src2, int normType, InputArray _m int type = src1.type(); if( !mask.empty() ) { - typedef IppStatus (CV_STDCALL* ippiMaskNormDiffFuncC1)(const void *, int, const void *, int, const void *, int, IppiSize, Ipp64f *); - ippiMaskNormDiffFuncC1 ippFuncC1 = + typedef IppStatus (CV_STDCALL* ippiMaskNormRelFuncC1)(const void *, int, const void *, int, const void *, int, IppiSize, Ipp64f *); + ippiMaskNormRelFuncC1 ippFuncC1 = normType == NORM_INF ? - (type == CV_8UC1 ? (ippiMaskNormDiffFuncC1)ippiNormDiff_Inf_8u_C1MR : - type == CV_8SC1 ? 
(ippiMaskNormDiffFuncC1)ippiNormDiff_Inf_8s_C1MR : - type == CV_16UC1 ? (ippiMaskNormDiffFuncC1)ippiNormDiff_Inf_16u_C1MR : - type == CV_32FC1 ? (ippiMaskNormDiffFuncC1)ippiNormDiff_Inf_32f_C1MR : + (type == CV_8UC1 ? (ippiMaskNormRelFuncC1)ippiNormRel_Inf_8u_C1MR : +#ifndef __APPLE__ + type == CV_8SC1 ? (ippiMaskNormRelFuncC1)ippiNormRel_Inf_8s_C1MR : +#endif + type == CV_16UC1 ? (ippiMaskNormRelFuncC1)ippiNormRel_Inf_16u_C1MR : + type == CV_32FC1 ? (ippiMaskNormRelFuncC1)ippiNormRel_Inf_32f_C1MR : 0) : normType == NORM_L1 ? - (type == CV_8UC1 ? (ippiMaskNormDiffFuncC1)ippiNormDiff_L1_8u_C1MR : + (type == CV_8UC1 ? (ippiMaskNormRelFuncC1)ippiNormRel_L1_8u_C1MR : #ifndef __APPLE__ - type == CV_8SC1 ? (ippiMaskNormDiffFuncC1)ippiNormDiff_L1_8s_C1MR : + type == CV_8SC1 ? (ippiMaskNormRelFuncC1)ippiNormRel_L1_8s_C1MR : #endif - type == CV_16UC1 ? (ippiMaskNormDiffFuncC1)ippiNormDiff_L1_16u_C1MR : - type == CV_32FC1 ? (ippiMaskNormDiffFuncC1)ippiNormDiff_L1_32f_C1MR : + type == CV_16UC1 ? (ippiMaskNormRelFuncC1)ippiNormRel_L1_16u_C1MR : + type == CV_32FC1 ? (ippiMaskNormRelFuncC1)ippiNormRel_L1_32f_C1MR : 0) : normType == NORM_L2 || normType == NORM_L2SQR ? - (type == CV_8UC1 ? (ippiMaskNormDiffFuncC1)ippiNormDiff_L2_8u_C1MR : - type == CV_8SC1 ? (ippiMaskNormDiffFuncC1)ippiNormDiff_L2_8s_C1MR : - type == CV_16UC1 ? (ippiMaskNormDiffFuncC1)ippiNormDiff_L2_16u_C1MR : - type == CV_32FC1 ? (ippiMaskNormDiffFuncC1)ippiNormDiff_L2_32f_C1MR : + (type == CV_8UC1 ? (ippiMaskNormRelFuncC1)ippiNormRel_L2_8u_C1MR : + type == CV_8SC1 ? (ippiMaskNormRelFuncC1)ippiNormRel_L2_8s_C1MR : + type == CV_16UC1 ? (ippiMaskNormRelFuncC1)ippiNormRel_L2_16u_C1MR : + type == CV_32FC1 ? (ippiMaskNormRelFuncC1)ippiNormRel_L2_32f_C1MR : 0) : 0; if( ippFuncC1 ) { Ipp64f norm; if( ippFuncC1(src1.ptr(), (int)src1.step[0], src2.ptr(), (int)src2.step[0], mask.ptr(), (int)mask.step[0], sz, &norm) >= 0 ) { - CV_IMPL_ADD(CV_IMPL_IPP); - return normType == NORM_L2SQR ? (double)(norm * norm) : (double)norm; + result = (normType == NORM_L2SQR ? (double)(norm * norm) : (double)norm); + return true; } - setIppErrorStatus(); } -#ifndef __APPLE__ - typedef IppStatus (CV_STDCALL* ippiMaskNormDiffFuncC3)(const void *, int, const void *, int, const void *, int, IppiSize, int, Ipp64f *); - ippiMaskNormDiffFuncC3 ippFuncC3 = - normType == NORM_INF ? - (type == CV_8UC3 ? (ippiMaskNormDiffFuncC3)ippiNormDiff_Inf_8u_C3CMR : - type == CV_8SC3 ? (ippiMaskNormDiffFuncC3)ippiNormDiff_Inf_8s_C3CMR : - type == CV_16UC3 ? (ippiMaskNormDiffFuncC3)ippiNormDiff_Inf_16u_C3CMR : - type == CV_32FC3 ? (ippiMaskNormDiffFuncC3)ippiNormDiff_Inf_32f_C3CMR : - 0) : - normType == NORM_L1 ? - (type == CV_8UC3 ? (ippiMaskNormDiffFuncC3)ippiNormDiff_L1_8u_C3CMR : - type == CV_8SC3 ? (ippiMaskNormDiffFuncC3)ippiNormDiff_L1_8s_C3CMR : - type == CV_16UC3 ? (ippiMaskNormDiffFuncC3)ippiNormDiff_L1_16u_C3CMR : - type == CV_32FC3 ? (ippiMaskNormDiffFuncC3)ippiNormDiff_L1_32f_C3CMR : - 0) : - normType == NORM_L2 || normType == NORM_L2SQR ? - (type == CV_8UC3 ? (ippiMaskNormDiffFuncC3)ippiNormDiff_L2_8u_C3CMR : - type == CV_8SC3 ? (ippiMaskNormDiffFuncC3)ippiNormDiff_L2_8s_C3CMR : - type == CV_16UC3 ? (ippiMaskNormDiffFuncC3)ippiNormDiff_L2_16u_C3CMR : - type == CV_32FC3 ? 
(ippiMaskNormDiffFuncC3)ippiNormDiff_L2_32f_C3CMR : - 0) : 0; - if( ippFuncC3 ) - { - Ipp64f norm1, norm2, norm3; - if( ippFuncC3(src1.data, (int)src1.step[0], src2.data, (int)src2.step[0], mask.data, (int)mask.step[0], sz, 1, &norm1) >= 0 && - ippFuncC3(src1.data, (int)src1.step[0], src2.data, (int)src2.step[0], mask.data, (int)mask.step[0], sz, 2, &norm2) >= 0 && - ippFuncC3(src1.data, (int)src1.step[0], src2.data, (int)src2.step[0], mask.data, (int)mask.step[0], sz, 3, &norm3) >= 0) - { - Ipp64f norm = - normType == NORM_INF ? std::max(std::max(norm1, norm2), norm3) : - normType == NORM_L1 ? norm1 + norm2 + norm3 : - normType == NORM_L2 || normType == NORM_L2SQR ? std::sqrt(norm1 * norm1 + norm2 * norm2 + norm3 * norm3) : - 0; - CV_IMPL_ADD(CV_IMPL_IPP); - return normType == NORM_L2SQR ? (double)(norm * norm) : (double)norm; - } - setIppErrorStatus(); - } -#endif } else { - typedef IppStatus (CV_STDCALL* ippiNormDiffFuncHint)(const void *, int, const void *, int, IppiSize, Ipp64f *, IppHintAlgorithm hint); - typedef IppStatus (CV_STDCALL* ippiNormDiffFuncNoHint)(const void *, int, const void *, int, IppiSize, Ipp64f *); - ippiNormDiffFuncHint ippFuncHint = - normType == NORM_L1 ? - (type == CV_32FC1 ? (ippiNormDiffFuncHint)ippiNormDiff_L1_32f_C1R : - type == CV_32FC3 ? (ippiNormDiffFuncHint)ippiNormDiff_L1_32f_C3R : - type == CV_32FC4 ? (ippiNormDiffFuncHint)ippiNormDiff_L1_32f_C4R : - 0) : - normType == NORM_L2 || normType == NORM_L2SQR ? - (type == CV_32FC1 ? (ippiNormDiffFuncHint)ippiNormDiff_L2_32f_C1R : - type == CV_32FC3 ? (ippiNormDiffFuncHint)ippiNormDiff_L2_32f_C3R : - type == CV_32FC4 ? (ippiNormDiffFuncHint)ippiNormDiff_L2_32f_C4R : - 0) : 0; - ippiNormDiffFuncNoHint ippFuncNoHint = + typedef IppStatus (CV_STDCALL* ippiNormRelFuncNoHint)(const void *, int, const void *, int, IppiSize, Ipp64f *); + typedef IppStatus (CV_STDCALL* ippiNormRelFuncHint)(const void *, int, const void *, int, IppiSize, Ipp64f *, IppHintAlgorithm hint); + ippiNormRelFuncNoHint ippFuncNoHint = normType == NORM_INF ? - (type == CV_8UC1 ? (ippiNormDiffFuncNoHint)ippiNormDiff_Inf_8u_C1R : - type == CV_8UC3 ? (ippiNormDiffFuncNoHint)ippiNormDiff_Inf_8u_C3R : - type == CV_8UC4 ? (ippiNormDiffFuncNoHint)ippiNormDiff_Inf_8u_C4R : - type == CV_16UC1 ? (ippiNormDiffFuncNoHint)ippiNormDiff_Inf_16u_C1R : - type == CV_16UC3 ? (ippiNormDiffFuncNoHint)ippiNormDiff_Inf_16u_C3R : - type == CV_16UC4 ? (ippiNormDiffFuncNoHint)ippiNormDiff_Inf_16u_C4R : - type == CV_16SC1 ? (ippiNormDiffFuncNoHint)ippiNormDiff_Inf_16s_C1R : -#if (IPP_VERSION_X100 >= 801) - type == CV_16SC3 ? (ippiNormDiffFuncNoHint)ippiNormDiff_Inf_16s_C3R : //Aug 2013: problem in IPP 7.1, 8.0 : -32768 - type == CV_16SC4 ? (ippiNormDiffFuncNoHint)ippiNormDiff_Inf_16s_C4R : //Aug 2013: problem in IPP 7.1, 8.0 : -32768 -#endif - type == CV_32FC1 ? (ippiNormDiffFuncNoHint)ippiNormDiff_Inf_32f_C1R : - type == CV_32FC3 ? (ippiNormDiffFuncNoHint)ippiNormDiff_Inf_32f_C3R : - type == CV_32FC4 ? (ippiNormDiffFuncNoHint)ippiNormDiff_Inf_32f_C4R : + (type == CV_8UC1 ? (ippiNormRelFuncNoHint)ippiNormRel_Inf_8u_C1R : + type == CV_16UC1 ? (ippiNormRelFuncNoHint)ippiNormRel_Inf_16u_C1R : + type == CV_16SC1 ? (ippiNormRelFuncNoHint)ippiNormRel_Inf_16s_C1R : + type == CV_32FC1 ? (ippiNormRelFuncNoHint)ippiNormRel_Inf_32f_C1R : 0) : normType == NORM_L1 ? - (type == CV_8UC1 ? (ippiNormDiffFuncNoHint)ippiNormDiff_L1_8u_C1R : - type == CV_8UC3 ? (ippiNormDiffFuncNoHint)ippiNormDiff_L1_8u_C3R : - type == CV_8UC4 ? 
(ippiNormDiffFuncNoHint)ippiNormDiff_L1_8u_C4R : - type == CV_16UC1 ? (ippiNormDiffFuncNoHint)ippiNormDiff_L1_16u_C1R : - type == CV_16UC3 ? (ippiNormDiffFuncNoHint)ippiNormDiff_L1_16u_C3R : - type == CV_16UC4 ? (ippiNormDiffFuncNoHint)ippiNormDiff_L1_16u_C4R : -#if !(IPP_VERSION_X100 == 802 && (!defined(IPP_VERSION_UPDATE) || IPP_VERSION_UPDATE <= 1)) // Oct 2014: Accuracy issue with IPP 8.2 / 8.2.1 - type == CV_16SC1 ? (ippiNormDiffFuncNoHint)ippiNormDiff_L1_16s_C1R : -#endif - type == CV_16SC3 ? (ippiNormDiffFuncNoHint)ippiNormDiff_L1_16s_C3R : - type == CV_16SC4 ? (ippiNormDiffFuncNoHint)ippiNormDiff_L1_16s_C4R : + (type == CV_8UC1 ? (ippiNormRelFuncNoHint)ippiNormRel_L1_8u_C1R : + type == CV_16UC1 ? (ippiNormRelFuncNoHint)ippiNormRel_L1_16u_C1R : + type == CV_16SC1 ? (ippiNormRelFuncNoHint)ippiNormRel_L1_16s_C1R : 0) : normType == NORM_L2 || normType == NORM_L2SQR ? - (type == CV_8UC1 ? (ippiNormDiffFuncNoHint)ippiNormDiff_L2_8u_C1R : - type == CV_8UC3 ? (ippiNormDiffFuncNoHint)ippiNormDiff_L2_8u_C3R : - type == CV_8UC4 ? (ippiNormDiffFuncNoHint)ippiNormDiff_L2_8u_C4R : - type == CV_16UC1 ? (ippiNormDiffFuncNoHint)ippiNormDiff_L2_16u_C1R : - type == CV_16UC3 ? (ippiNormDiffFuncNoHint)ippiNormDiff_L2_16u_C3R : - type == CV_16UC4 ? (ippiNormDiffFuncNoHint)ippiNormDiff_L2_16u_C4R : - type == CV_16SC1 ? (ippiNormDiffFuncNoHint)ippiNormDiff_L2_16s_C1R : - type == CV_16SC3 ? (ippiNormDiffFuncNoHint)ippiNormDiff_L2_16s_C3R : - type == CV_16SC4 ? (ippiNormDiffFuncNoHint)ippiNormDiff_L2_16s_C4R : + (type == CV_8UC1 ? (ippiNormRelFuncNoHint)ippiNormRel_L2_8u_C1R : + type == CV_16UC1 ? (ippiNormRelFuncNoHint)ippiNormRel_L2_16u_C1R : + type == CV_16SC1 ? (ippiNormRelFuncNoHint)ippiNormRel_L2_16s_C1R : 0) : 0; - // Make sure only zero or one version of the function pointer is valid - CV_Assert(!ippFuncHint || !ippFuncNoHint); - if( ippFuncHint || ippFuncNoHint ) + ippiNormRelFuncHint ippFuncHint = + normType == NORM_L1 ? + (type == CV_32FC1 ? (ippiNormRelFuncHint)ippiNormRel_L1_32f_C1R : + 0) : + normType == NORM_L2 || normType == NORM_L2SQR ? + (type == CV_32FC1 ? (ippiNormRelFuncHint)ippiNormRel_L2_32f_C1R : + 0) : 0; + if (ippFuncNoHint) { - Ipp64f norm_array[4]; - IppStatus ret = ippFuncHint ? ippFuncHint(src1.ptr(), (int)src1.step[0], src2.ptr(), (int)src2.step[0], sz, norm_array, ippAlgHintAccurate) : - ippFuncNoHint(src1.ptr(), (int)src1.step[0], src2.ptr(), (int)src2.step[0], sz, norm_array); - if( ret >= 0 ) + Ipp64f norm; + if( ippFuncNoHint(src1.ptr(), (int)src1.step[0], src2.ptr(), (int)src2.step[0], sz, &norm) >= 0 ) { - Ipp64f norm = (normType == NORM_L2 || normType == NORM_L2SQR) ? norm_array[0] * norm_array[0] : norm_array[0]; - for( int i = 1; i < src1.channels(); i++ ) - { - norm = - normType == NORM_INF ? std::max(norm, norm_array[i]) : - normType == NORM_L1 ? norm + norm_array[i] : - normType == NORM_L2 || normType == NORM_L2SQR ? norm + norm_array[i] * norm_array[i] : - 0; - } - CV_IMPL_ADD(CV_IMPL_IPP); - return normType == NORM_L2 ? 
(double)std::sqrt(norm) : (double)norm; + result = (double)norm; + return true; } - setIppErrorStatus(); + } + if (ippFuncHint) + { + Ipp64f norm; + if( ippFuncHint(src1.ptr(), (int)src1.step[0], src2.ptr(), (int)src2.step[0], sz, &norm, ippAlgHintAccurate) >= 0 ) + { + result = (double)norm; + return true; + } + } + } + } + return false; + } + + normType &= 7; + CV_Assert( normType == NORM_INF || normType == NORM_L1 || + normType == NORM_L2 || normType == NORM_L2SQR || + ((normType == NORM_HAMMING || normType == NORM_HAMMING2) && src1.type() == CV_8U) ); + + size_t total_size = src1.total(); + int rows = src1.size[0], cols = rows ? (int)(total_size/rows) : 0; + if( (src1.dims == 2 || (src1.isContinuous() && src2.isContinuous() && mask.isContinuous())) + && cols > 0 && (size_t)rows*cols == total_size + && (normType == NORM_INF || normType == NORM_L1 || + normType == NORM_L2 || normType == NORM_L2SQR) ) + { + IppiSize sz = { cols, rows }; + int type = src1.type(); + if( !mask.empty() ) + { + typedef IppStatus (CV_STDCALL* ippiMaskNormDiffFuncC1)(const void *, int, const void *, int, const void *, int, IppiSize, Ipp64f *); + ippiMaskNormDiffFuncC1 ippFuncC1 = + normType == NORM_INF ? + (type == CV_8UC1 ? (ippiMaskNormDiffFuncC1)ippiNormDiff_Inf_8u_C1MR : + type == CV_8SC1 ? (ippiMaskNormDiffFuncC1)ippiNormDiff_Inf_8s_C1MR : + type == CV_16UC1 ? (ippiMaskNormDiffFuncC1)ippiNormDiff_Inf_16u_C1MR : + type == CV_32FC1 ? (ippiMaskNormDiffFuncC1)ippiNormDiff_Inf_32f_C1MR : + 0) : + normType == NORM_L1 ? + (type == CV_8UC1 ? (ippiMaskNormDiffFuncC1)ippiNormDiff_L1_8u_C1MR : +#ifndef __APPLE__ + type == CV_8SC1 ? (ippiMaskNormDiffFuncC1)ippiNormDiff_L1_8s_C1MR : +#endif + type == CV_16UC1 ? (ippiMaskNormDiffFuncC1)ippiNormDiff_L1_16u_C1MR : + type == CV_32FC1 ? (ippiMaskNormDiffFuncC1)ippiNormDiff_L1_32f_C1MR : + 0) : + normType == NORM_L2 || normType == NORM_L2SQR ? + (type == CV_8UC1 ? (ippiMaskNormDiffFuncC1)ippiNormDiff_L2_8u_C1MR : + type == CV_8SC1 ? (ippiMaskNormDiffFuncC1)ippiNormDiff_L2_8s_C1MR : + type == CV_16UC1 ? (ippiMaskNormDiffFuncC1)ippiNormDiff_L2_16u_C1MR : + type == CV_32FC1 ? (ippiMaskNormDiffFuncC1)ippiNormDiff_L2_32f_C1MR : + 0) : 0; + if( ippFuncC1 ) + { + Ipp64f norm; + if( ippFuncC1(src1.ptr(), (int)src1.step[0], src2.ptr(), (int)src2.step[0], mask.ptr(), (int)mask.step[0], sz, &norm) >= 0 ) + { + result = (normType == NORM_L2SQR ? (double)(norm * norm) : (double)norm); + return true; + } + } +#ifndef __APPLE__ + typedef IppStatus (CV_STDCALL* ippiMaskNormDiffFuncC3)(const void *, int, const void *, int, const void *, int, IppiSize, int, Ipp64f *); + ippiMaskNormDiffFuncC3 ippFuncC3 = + normType == NORM_INF ? + (type == CV_8UC3 ? (ippiMaskNormDiffFuncC3)ippiNormDiff_Inf_8u_C3CMR : + type == CV_8SC3 ? (ippiMaskNormDiffFuncC3)ippiNormDiff_Inf_8s_C3CMR : + type == CV_16UC3 ? (ippiMaskNormDiffFuncC3)ippiNormDiff_Inf_16u_C3CMR : + type == CV_32FC3 ? (ippiMaskNormDiffFuncC3)ippiNormDiff_Inf_32f_C3CMR : + 0) : + normType == NORM_L1 ? + (type == CV_8UC3 ? (ippiMaskNormDiffFuncC3)ippiNormDiff_L1_8u_C3CMR : + type == CV_8SC3 ? (ippiMaskNormDiffFuncC3)ippiNormDiff_L1_8s_C3CMR : + type == CV_16UC3 ? (ippiMaskNormDiffFuncC3)ippiNormDiff_L1_16u_C3CMR : + type == CV_32FC3 ? (ippiMaskNormDiffFuncC3)ippiNormDiff_L1_32f_C3CMR : + 0) : + normType == NORM_L2 || normType == NORM_L2SQR ? + (type == CV_8UC3 ? (ippiMaskNormDiffFuncC3)ippiNormDiff_L2_8u_C3CMR : + type == CV_8SC3 ? (ippiMaskNormDiffFuncC3)ippiNormDiff_L2_8s_C3CMR : + type == CV_16UC3 ? 
(ippiMaskNormDiffFuncC3)ippiNormDiff_L2_16u_C3CMR : + type == CV_32FC3 ? (ippiMaskNormDiffFuncC3)ippiNormDiff_L2_32f_C3CMR : + 0) : 0; + if( ippFuncC3 ) + { + Ipp64f norm1, norm2, norm3; + if( ippFuncC3(src1.data, (int)src1.step[0], src2.data, (int)src2.step[0], mask.data, (int)mask.step[0], sz, 1, &norm1) >= 0 && + ippFuncC3(src1.data, (int)src1.step[0], src2.data, (int)src2.step[0], mask.data, (int)mask.step[0], sz, 2, &norm2) >= 0 && + ippFuncC3(src1.data, (int)src1.step[0], src2.data, (int)src2.step[0], mask.data, (int)mask.step[0], sz, 3, &norm3) >= 0) + { + Ipp64f norm = + normType == NORM_INF ? std::max(std::max(norm1, norm2), norm3) : + normType == NORM_L1 ? norm1 + norm2 + norm3 : + normType == NORM_L2 || normType == NORM_L2SQR ? std::sqrt(norm1 * norm1 + norm2 * norm2 + norm3 * norm3) : + 0; + result = (normType == NORM_L2SQR ? (double)(norm * norm) : (double)norm); + return true; + } + } +#endif + } + else + { + typedef IppStatus (CV_STDCALL* ippiNormDiffFuncHint)(const void *, int, const void *, int, IppiSize, Ipp64f *, IppHintAlgorithm hint); + typedef IppStatus (CV_STDCALL* ippiNormDiffFuncNoHint)(const void *, int, const void *, int, IppiSize, Ipp64f *); + ippiNormDiffFuncHint ippFuncHint = + normType == NORM_L1 ? + (type == CV_32FC1 ? (ippiNormDiffFuncHint)ippiNormDiff_L1_32f_C1R : + type == CV_32FC3 ? (ippiNormDiffFuncHint)ippiNormDiff_L1_32f_C3R : + type == CV_32FC4 ? (ippiNormDiffFuncHint)ippiNormDiff_L1_32f_C4R : + 0) : + normType == NORM_L2 || normType == NORM_L2SQR ? + (type == CV_32FC1 ? (ippiNormDiffFuncHint)ippiNormDiff_L2_32f_C1R : + type == CV_32FC3 ? (ippiNormDiffFuncHint)ippiNormDiff_L2_32f_C3R : + type == CV_32FC4 ? (ippiNormDiffFuncHint)ippiNormDiff_L2_32f_C4R : + 0) : 0; + ippiNormDiffFuncNoHint ippFuncNoHint = + normType == NORM_INF ? + (type == CV_8UC1 ? (ippiNormDiffFuncNoHint)ippiNormDiff_Inf_8u_C1R : + type == CV_8UC3 ? (ippiNormDiffFuncNoHint)ippiNormDiff_Inf_8u_C3R : + type == CV_8UC4 ? (ippiNormDiffFuncNoHint)ippiNormDiff_Inf_8u_C4R : + type == CV_16UC1 ? (ippiNormDiffFuncNoHint)ippiNormDiff_Inf_16u_C1R : + type == CV_16UC3 ? (ippiNormDiffFuncNoHint)ippiNormDiff_Inf_16u_C3R : + type == CV_16UC4 ? (ippiNormDiffFuncNoHint)ippiNormDiff_Inf_16u_C4R : + type == CV_16SC1 ? (ippiNormDiffFuncNoHint)ippiNormDiff_Inf_16s_C1R : +#if (IPP_VERSION_X100 >= 801) + type == CV_16SC3 ? (ippiNormDiffFuncNoHint)ippiNormDiff_Inf_16s_C3R : //Aug 2013: problem in IPP 7.1, 8.0 : -32768 + type == CV_16SC4 ? (ippiNormDiffFuncNoHint)ippiNormDiff_Inf_16s_C4R : //Aug 2013: problem in IPP 7.1, 8.0 : -32768 +#endif + type == CV_32FC1 ? (ippiNormDiffFuncNoHint)ippiNormDiff_Inf_32f_C1R : + type == CV_32FC3 ? (ippiNormDiffFuncNoHint)ippiNormDiff_Inf_32f_C3R : + type == CV_32FC4 ? (ippiNormDiffFuncNoHint)ippiNormDiff_Inf_32f_C4R : + 0) : + normType == NORM_L1 ? + (type == CV_8UC1 ? (ippiNormDiffFuncNoHint)ippiNormDiff_L1_8u_C1R : + type == CV_8UC3 ? (ippiNormDiffFuncNoHint)ippiNormDiff_L1_8u_C3R : + type == CV_8UC4 ? (ippiNormDiffFuncNoHint)ippiNormDiff_L1_8u_C4R : + type == CV_16UC1 ? (ippiNormDiffFuncNoHint)ippiNormDiff_L1_16u_C1R : + type == CV_16UC3 ? (ippiNormDiffFuncNoHint)ippiNormDiff_L1_16u_C3R : + type == CV_16UC4 ? (ippiNormDiffFuncNoHint)ippiNormDiff_L1_16u_C4R : +#if !(IPP_VERSION_X100 == 802 && (!defined(IPP_VERSION_UPDATE) || IPP_VERSION_UPDATE <= 1)) // Oct 2014: Accuracy issue with IPP 8.2 / 8.2.1 + type == CV_16SC1 ? (ippiNormDiffFuncNoHint)ippiNormDiff_L1_16s_C1R : +#endif + type == CV_16SC3 ? (ippiNormDiffFuncNoHint)ippiNormDiff_L1_16s_C3R : + type == CV_16SC4 ? 
(ippiNormDiffFuncNoHint)ippiNormDiff_L1_16s_C4R : + 0) : + normType == NORM_L2 || normType == NORM_L2SQR ? + (type == CV_8UC1 ? (ippiNormDiffFuncNoHint)ippiNormDiff_L2_8u_C1R : + type == CV_8UC3 ? (ippiNormDiffFuncNoHint)ippiNormDiff_L2_8u_C3R : + type == CV_8UC4 ? (ippiNormDiffFuncNoHint)ippiNormDiff_L2_8u_C4R : + type == CV_16UC1 ? (ippiNormDiffFuncNoHint)ippiNormDiff_L2_16u_C1R : + type == CV_16UC3 ? (ippiNormDiffFuncNoHint)ippiNormDiff_L2_16u_C3R : + type == CV_16UC4 ? (ippiNormDiffFuncNoHint)ippiNormDiff_L2_16u_C4R : + type == CV_16SC1 ? (ippiNormDiffFuncNoHint)ippiNormDiff_L2_16s_C1R : + type == CV_16SC3 ? (ippiNormDiffFuncNoHint)ippiNormDiff_L2_16s_C3R : + type == CV_16SC4 ? (ippiNormDiffFuncNoHint)ippiNormDiff_L2_16s_C4R : + 0) : 0; + // Make sure only zero or one version of the function pointer is valid + CV_Assert(!ippFuncHint || !ippFuncNoHint); + if( ippFuncHint || ippFuncNoHint ) + { + Ipp64f norm_array[4]; + IppStatus ret = ippFuncHint ? ippFuncHint(src1.ptr(), (int)src1.step[0], src2.ptr(), (int)src2.step[0], sz, norm_array, ippAlgHintAccurate) : + ippFuncNoHint(src1.ptr(), (int)src1.step[0], src2.ptr(), (int)src2.step[0], sz, norm_array); + if( ret >= 0 ) + { + Ipp64f norm = (normType == NORM_L2 || normType == NORM_L2SQR) ? norm_array[0] * norm_array[0] : norm_array[0]; + for( int i = 1; i < src1.channels(); i++ ) + { + norm = + normType == NORM_INF ? std::max(norm, norm_array[i]) : + normType == NORM_L1 ? norm + norm_array[i] : + normType == NORM_L2 || normType == NORM_L2SQR ? norm + norm_array[i] * norm_array[i] : + 0; + } + result = (normType == NORM_L2 ? (double)std::sqrt(norm) : (double)norm); + return true; } } } } +#else + CV_UNUSED(_src1); CV_UNUSED(_src2); CV_UNUSED(normType); CV_UNUSED(_mask); CV_UNUSED(result); #endif + return false; +} +} +#endif + + +double cv::norm( InputArray _src1, InputArray _src2, int normType, InputArray _mask ) +{ + CV_Assert( _src1.sameSize(_src2) && _src1.type() == _src2.type() ); + +#if defined HAVE_OPENCL || defined HAVE_IPP + double _result = 0; +#endif + +#ifdef HAVE_OPENCL + CV_OCL_RUN_(OCL_PERFORMANCE_CHECK(_src1.isUMat()), + ocl_norm(_src1, _src2, normType, _mask, _result), + _result) +#endif + + CV_IPP_RUN(IPP_VERSION_MAJOR >= 7, ipp_norm(_src1, _src2, normType, _mask, _result), _result); + + if( normType & CV_RELATIVE ) + { + return norm(_src1, _src2, normType & ~CV_RELATIVE, _mask)/(norm(_src2, normType, _mask) + DBL_EPSILON); + } + + Mat src1 = _src1.getMat(), src2 = _src2.getMat(), mask = _mask.getMat(); + int depth = src1.depth(), cn = src1.channels(); + + normType &= 7; + CV_Assert( normType == NORM_INF || normType == NORM_L1 || + normType == NORM_L2 || normType == NORM_L2SQR || + ((normType == NORM_HAMMING || normType == NORM_HAMMING2) && src1.type() == CV_8U) ); if( src1.isContinuous() && src2.isContinuous() && mask.empty() ) { diff --git a/modules/core/src/system.cpp b/modules/core/src/system.cpp index 46f41dcca9..6a85c40ee3 100644 --- a/modules/core/src/system.cpp +++ b/modules/core/src/system.cpp @@ -43,6 +43,20 @@ #include "precomp.hpp" +namespace cv { + +static Mutex* __initialization_mutex = NULL; +Mutex& getInitializationMutex() +{ + if (__initialization_mutex == NULL) + __initialization_mutex = new Mutex(); + return *__initialization_mutex; +} +// force initialization (single-threaded environment) +Mutex* __initialization_mutex_initializer = &getInitializationMutex(); + +} // namespace cv + #ifdef _MSC_VER # if _MSC_VER >= 1700 # pragma warning(disable:4447) // Disable warning 'main' signature found without 
threading model @@ -1108,8 +1122,7 @@ public: // For more information: http://www.parashift.com/c++-faq/static-init-order-on-first-use.html static TLSContainerStorage& getTLSContainerStorage() { - static TLSContainerStorage *tlsContainerStorage = new TLSContainerStorage(); - return *tlsContainerStorage; + CV_SINGLETON_LAZY_INIT_REF(TLSContainerStorage, new TLSContainerStorage()) } TLSDataContainer::TLSDataContainer() @@ -1153,20 +1166,16 @@ TLSStorage::~TLSStorage() } - TLSData& getCoreTlsData() { - static TLSData *value = new TLSData(); - return *value; + CV_SINGLETON_LAZY_INIT_REF(TLSData, new TLSData()) } - #ifdef CV_COLLECT_IMPL_DATA ImplCollector& getImplData() { - static ImplCollector *value = new ImplCollector(); - return *value; + CV_SINGLETON_LAZY_INIT_REF(ImplCollector, new ImplCollector()) } void setImpl(int flags) diff --git a/modules/core/src/umatrix.cpp b/modules/core/src/umatrix.cpp index 1b42f1ee1e..9a991d7f30 100644 --- a/modules/core/src/umatrix.cpp +++ b/modules/core/src/umatrix.cpp @@ -46,6 +46,13 @@ namespace cv { +// forward decls, implementation is below in this file +void setSize(UMat& m, int _dims, const int* _sz, const size_t* _steps, + bool autoSteps = false); + +void updateContinuityFlag(UMat& m); +void finalizeHdr(UMat& m); + // it should be a prime number for the best hash function enum { UMAT_NLOCKS = 31 }; static Mutex umatLocks[UMAT_NLOCKS]; @@ -123,8 +130,8 @@ void swap( UMat& a, UMat& b ) } -static inline void setSize( UMat& m, int _dims, const int* _sz, - const size_t* _steps, bool autoSteps=false ) +void setSize( UMat& m, int _dims, const int* _sz, + const size_t* _steps, bool autoSteps ) { CV_Assert( 0 <= _dims && _dims <= CV_MAX_DIM ); if( m.dims != _dims ) @@ -176,7 +183,8 @@ static inline void setSize( UMat& m, int _dims, const int* _sz, } } -static void updateContinuityFlag(UMat& m) + +void updateContinuityFlag(UMat& m) { int i, j; for( i = 0; i < m.dims; i++ ) @@ -199,7 +207,7 @@ static void updateContinuityFlag(UMat& m) } -static void finalizeHdr(UMat& m) +void finalizeHdr(UMat& m) { updateContinuityFlag(m); int d = m.dims; @@ -207,6 +215,7 @@ static void finalizeHdr(UMat& m) m.rows = m.cols = -1; } + UMat Mat::getUMat(int accessFlags, UMatUsageFlags usageFlags) const { UMat hdr; @@ -737,7 +746,7 @@ void UMat::convertTo(OutputArray _dst, int _type, double alpha, double beta) con ocl::typeToStr(sdepth), ocl::typeToStr(wdepth), ocl::typeToStr(ddepth), ocl::convertTypeStr(sdepth, wdepth, 1, cvt[0]), ocl::convertTypeStr(wdepth, ddepth, 1, cvt[1]), - doubleSupport ? " -D DOUBLE_SUPPORT" : "")); + doubleSupport ? " -D DOUBLE_SUPPORT" : "", noScale ? 
" -D NO_SCALE" : "")); if (!k.empty()) { UMat src = *this; @@ -748,7 +757,9 @@ void UMat::convertTo(OutputArray _dst, int _type, double alpha, double beta) con ocl::KernelArg srcarg = ocl::KernelArg::ReadOnlyNoSize(src), dstarg = ocl::KernelArg::WriteOnly(dst, cn); - if (wdepth == CV_32F) + if (noScale) + k.args(srcarg, dstarg, rowsPerWI); + else if (wdepth == CV_32F) k.args(srcarg, dstarg, alphaf, betaf, rowsPerWI); else k.args(srcarg, dstarg, alpha, beta, rowsPerWI); diff --git a/modules/core/test/test_mat.cpp b/modules/core/test/test_mat.cpp index 897ac40a43..e2d4e8a4bf 100644 --- a/modules/core/test/test_mat.cpp +++ b/modules/core/test/test_mat.cpp @@ -1178,6 +1178,11 @@ TEST(Core_IOArray, submat_create) EXPECT_THROW( OutputArray_create2(A.row(0)), cv::Exception ); } +TEST(Core_Mat, issue4457_pass_null_ptr) +{ + ASSERT_ANY_THROW(cv::Mat mask(45, 45, CV_32F, 0)); +} + TEST(Core_Mat, reshape_1942) { cv::Mat A = (cv::Mat_(2,3) << 3.4884074, 1.4159607, 0.78737736, 2.3456569, -0.88010466, 0.3009364); @@ -1189,6 +1194,52 @@ TEST(Core_Mat, reshape_1942) ASSERT_EQ(1, cn); } +TEST(Core_Mat, push_back) +{ + Mat a = (Mat_(1,2) << 3.4884074f, 1.4159607f); + Mat b = (Mat_(1,2) << 0.78737736f, 2.3456569f); + + a.push_back(b); + + ASSERT_EQ(2, a.cols); + ASSERT_EQ(2, a.rows); + + ASSERT_FLOAT_EQ(3.4884074f, a.at(0, 0)); + ASSERT_FLOAT_EQ(1.4159607f, a.at(0, 1)); + ASSERT_FLOAT_EQ(0.78737736f, a.at(1, 0)); + ASSERT_FLOAT_EQ(2.3456569f, a.at(1, 1)); + + Mat c = (Mat_(2,2) << -0.88010466f, 0.3009364f, 2.22399974f, -5.45933905f); + + ASSERT_EQ(c.rows, a.cols); + + a.push_back(c.t()); + + ASSERT_EQ(2, a.cols); + ASSERT_EQ(4, a.rows); + + ASSERT_FLOAT_EQ(3.4884074f, a.at(0, 0)); + ASSERT_FLOAT_EQ(1.4159607f, a.at(0, 1)); + ASSERT_FLOAT_EQ(0.78737736f, a.at(1, 0)); + ASSERT_FLOAT_EQ(2.3456569f, a.at(1, 1)); + ASSERT_FLOAT_EQ(-0.88010466f, a.at(2, 0)); + ASSERT_FLOAT_EQ(2.22399974f, a.at(2, 1)); + ASSERT_FLOAT_EQ(0.3009364f, a.at(3, 0)); + ASSERT_FLOAT_EQ(-5.45933905f, a.at(3, 1)); + + a.push_back(Mat::ones(2, 2, CV_32FC1)); + + ASSERT_EQ(6, a.rows); + + for(int row=4; row(row, col)); + } + } +} + TEST(Core_Mat, copyNx1ToVector) { cv::Mat_ src(5, 1); @@ -1214,7 +1265,7 @@ TEST(Core_Matx, fromMat_) { Mat_ a = (Mat_(2,2) << 10, 11, 12, 13); Matx22d b(a); - ASSERT_EQ( norm(a, b, NORM_INF), 0.); + ASSERT_EQ( cvtest::norm(a, b, NORM_INF), 0.); } TEST(Core_InputArray, empty) @@ -1272,3 +1323,85 @@ TEST(Core_SparseMat, footprint) ASSERT_LE((int)m.hdr->nodeSize, 32); ASSERT_LE(dataSize1, threshold); } + + +// Can't fix without duty hacks or broken user code (PR #4159) +TEST(Core_Mat_vector, DISABLED_OutputArray_create_getMat) +{ + cv::Mat_ src_base(5, 1); + std::vector dst8; + + src_base << 1, 2, 3, 4, 5; + + Mat src(src_base); + OutputArray _dst(dst8); + { + _dst.create(src.rows, src.cols, src.type()); + Mat dst = _dst.getMat(); + EXPECT_EQ(src.dims, dst.dims); + EXPECT_EQ(src.cols, dst.cols); + EXPECT_EQ(src.rows, dst.rows); + } +} + +TEST(Core_Mat_vector, copyTo_roi_column) +{ + cv::Mat_ src_base(5, 2); + std::vector dst1; + + src_base << 1, 2, 3, 4, 5, 6, 7, 8, 9, 10; + + Mat src_full(src_base); + Mat src(src_full.col(0)); +#if 0 // Can't fix without duty hacks or broken user code (PR #4159) + OutputArray _dst(dst1); + { + _dst.create(src.rows, src.cols, src.type()); + Mat dst = _dst.getMat(); + EXPECT_EQ(src.dims, dst.dims); + EXPECT_EQ(src.cols, dst.cols); + EXPECT_EQ(src.rows, dst.rows); + } +#endif + + std::vector dst2; + src.copyTo(dst2); + std::cout << "src = " << src << std::endl; + std::cout << "dst = " 
<< Mat(dst2) << std::endl; + EXPECT_EQ((size_t)5, dst2.size()); + EXPECT_EQ(1, (int)dst2[0]); + EXPECT_EQ(3, (int)dst2[1]); + EXPECT_EQ(5, (int)dst2[2]); + EXPECT_EQ(7, (int)dst2[3]); + EXPECT_EQ(9, (int)dst2[4]); +} + +TEST(Core_Mat_vector, copyTo_roi_row) +{ + cv::Mat_ src_base(2, 5); + std::vector dst1; + + src_base << 1, 2, 3, 4, 5, 6, 7, 8, 9, 10; + + Mat src_full(src_base); + Mat src(src_full.row(0)); + OutputArray _dst(dst1); + { + _dst.create(src.rows, src.cols, src.type()); + Mat dst = _dst.getMat(); + EXPECT_EQ(src.dims, dst.dims); + EXPECT_EQ(src.cols, dst.cols); + EXPECT_EQ(src.rows, dst.rows); + } + + std::vector dst2; + src.copyTo(dst2); + std::cout << "src = " << src << std::endl; + std::cout << "dst = " << Mat(dst2) << std::endl; + EXPECT_EQ((size_t)5, dst2.size()); + EXPECT_EQ(1, (int)dst2[0]); + EXPECT_EQ(2, (int)dst2[1]); + EXPECT_EQ(3, (int)dst2[2]); + EXPECT_EQ(4, (int)dst2[3]); + EXPECT_EQ(5, (int)dst2[4]); +} diff --git a/modules/cudev/CMakeLists.txt b/modules/cudev/CMakeLists.txt index c5520b1e69..257572951e 100644 --- a/modules/cudev/CMakeLists.txt +++ b/modules/cudev/CMakeLists.txt @@ -8,7 +8,7 @@ ocv_warnings_disable(CMAKE_CXX_FLAGS /wd4189 /wd4505 -Wundef -Wmissing-declarati ocv_add_module(cudev) -ocv_module_include_directories(opencv_core) +ocv_module_include_directories(opencv_core opencv_hal) file(GLOB_RECURSE lib_hdrs "include/opencv2/*.hpp") file(GLOB lib_srcs "src/*.cpp") diff --git a/modules/cudev/test/CMakeLists.txt b/modules/cudev/test/CMakeLists.txt index 89213e236d..b3474168e3 100644 --- a/modules/cudev/test/CMakeLists.txt +++ b/modules/cudev/test/CMakeLists.txt @@ -45,4 +45,8 @@ if(OCV_DEPENDENCIES_FOUND) enable_testing() get_target_property(LOC ${the_target} LOCATION) add_test(${the_target} "${LOC}") + + if(INSTALL_TESTS) + install(TARGETS ${the_target} RUNTIME DESTINATION ${OPENCV_TEST_INSTALL_PATH} COMPONENT tests) + endif() endif() diff --git a/modules/features2d/test/test_detectors_regression.cpp b/modules/features2d/test/test_detectors_regression.cpp index 58c0b6dbf0..a235065172 100644 --- a/modules/features2d/test/test_detectors_regression.cpp +++ b/modules/features2d/test/test_detectors_regression.cpp @@ -40,7 +40,6 @@ //M*/ #include "test_precomp.hpp" -#include "opencv2/highgui.hpp" using namespace std; using namespace cv; diff --git a/modules/features2d/test/test_keypoints.cpp b/modules/features2d/test/test_keypoints.cpp index 4f5f8a0836..fb25514060 100644 --- a/modules/features2d/test/test_keypoints.cpp +++ b/modules/features2d/test/test_keypoints.cpp @@ -40,7 +40,6 @@ //M*/ #include "test_precomp.hpp" -#include "opencv2/highgui.hpp" #include "opencv2/core/core_c.h" using namespace std; diff --git a/modules/features2d/test/test_matchers_algorithmic.cpp b/modules/features2d/test/test_matchers_algorithmic.cpp index 8f66648599..0e3f2ffd55 100644 --- a/modules/features2d/test/test_matchers_algorithmic.cpp +++ b/modules/features2d/test/test_matchers_algorithmic.cpp @@ -40,7 +40,6 @@ //M*/ #include "test_precomp.hpp" -#include "opencv2/highgui.hpp" using namespace std; using namespace cv; diff --git a/modules/features2d/test/test_orb.cpp b/modules/features2d/test/test_orb.cpp index b7f854ba80..c02ea010cc 100644 --- a/modules/features2d/test/test_orb.cpp +++ b/modules/features2d/test/test_orb.cpp @@ -40,7 +40,6 @@ //M*/ #include "test_precomp.hpp" -#include "opencv2/highgui.hpp" using namespace std; using namespace cv; diff --git a/modules/features2d/test/test_rotation_and_scale_invariance.cpp 
b/modules/features2d/test/test_rotation_and_scale_invariance.cpp index f03fa14477..93ded0b6af 100644 --- a/modules/features2d/test/test_rotation_and_scale_invariance.cpp +++ b/modules/features2d/test/test_rotation_and_scale_invariance.cpp @@ -40,7 +40,6 @@ //M*/ #include "test_precomp.hpp" -#include "opencv2/highgui.hpp" using namespace std; using namespace cv; diff --git a/modules/hal/CMakeLists.txt b/modules/hal/CMakeLists.txt index b5b2abb81e..b04e96b9e7 100644 --- a/modules/hal/CMakeLists.txt +++ b/modules/hal/CMakeLists.txt @@ -1,7 +1,6 @@ set(the_description "The Hardware Acceleration Layer (HAL) module") set(OPENCV_MODULE_TYPE STATIC) -# set(OPENCV_MODULE_IS_PART_OF_WORLD FALSE) if(UNIX) if(CMAKE_COMPILER_IS_GNUCXX OR CV_ICC) diff --git a/modules/hal/include/opencv2/hal/defs.h b/modules/hal/include/opencv2/hal/defs.h index 197533993b..1c30073a07 100644 --- a/modules/hal/include/opencv2/hal/defs.h +++ b/modules/hal/include/opencv2/hal/defs.h @@ -179,7 +179,7 @@ # define CV_NEON 1 #endif -#if defined __GNUC__ && defined __arm__ && (defined __ARM_PCS_VFP || defined __ARM_VFPV3__) +#if defined __GNUC__ && defined __arm__ && (defined __ARM_PCS_VFP || defined __ARM_VFPV3__ || defined __ARM_NEON__) && !defined __SOFTFP__ # define CV_VFP 1 #endif diff --git a/modules/hal/include/opencv2/hal/intrin_cpp.hpp b/modules/hal/include/opencv2/hal/intrin_cpp.hpp index e0140a8632..683305cc22 100644 --- a/modules/hal/include/opencv2/hal/intrin_cpp.hpp +++ b/modules/hal/include/opencv2/hal/intrin_cpp.hpp @@ -566,6 +566,7 @@ inline v_reg<_Tp, n> v_combine_low(const v_reg<_Tp, n>& a, const v_reg<_Tp, n>& c.s[i] = a.s[i]; c.s[i+(n/2)] = b.s[i]; } + return c; } template @@ -577,6 +578,7 @@ inline v_reg<_Tp, n> v_combine_high(const v_reg<_Tp, n>& a, const v_reg<_Tp, n>& c.s[i] = a.s[i+(n/2)]; c.s[i+(n/2)] = b.s[i+(n/2)]; } + return c; } template @@ -592,6 +594,18 @@ inline void v_recombine(const v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b, } } +template +inline v_reg<_Tp, n> v_extract(const v_reg<_Tp, n>& a, const v_reg<_Tp, n>& b) +{ + v_reg<_Tp, n> r; + int i = 0; + for (; i < s; ++i) + r.s[i] = a.s[i+n-s]; + for (; i < n; ++i) + r.s[i] = b.s[i-s]; + return r; +} + template inline v_reg v_round(const v_reg& a) { v_reg c; diff --git a/modules/hal/include/opencv2/hal/intrin_neon.hpp b/modules/hal/include/opencv2/hal/intrin_neon.hpp index 2418566e62..e326696d63 100644 --- a/modules/hal/include/opencv2/hal/intrin_neon.hpp +++ b/modules/hal/include/opencv2/hal/intrin_neon.hpp @@ -557,6 +557,8 @@ OPENCV_HAL_IMPL_NEON_LOADSTORE_OP(v_uint16x8, ushort, u16) OPENCV_HAL_IMPL_NEON_LOADSTORE_OP(v_int16x8, short, s16) OPENCV_HAL_IMPL_NEON_LOADSTORE_OP(v_uint32x4, unsigned, u32) OPENCV_HAL_IMPL_NEON_LOADSTORE_OP(v_int32x4, int, s32) +OPENCV_HAL_IMPL_NEON_LOADSTORE_OP(v_uint64x2, uint64, u64) +OPENCV_HAL_IMPL_NEON_LOADSTORE_OP(v_int64x2, int64, s64) OPENCV_HAL_IMPL_NEON_LOADSTORE_OP(v_float32x4, float, f32) #define OPENCV_HAL_IMPL_NEON_REDUCE_OP_4(_Tpvec, scalartype, func, scalar_func) \ @@ -720,6 +722,23 @@ OPENCV_HAL_IMPL_NEON_UNPACKS(uint32x4, u32) OPENCV_HAL_IMPL_NEON_UNPACKS(int32x4, s32) OPENCV_HAL_IMPL_NEON_UNPACKS(float32x4, f32) +#define OPENCV_HAL_IMPL_NEON_EXTRACT(_Tpvec, suffix) \ +template \ +inline v_##_Tpvec v_extract(const v_##_Tpvec& a, const v_##_Tpvec& b) \ +{ \ + return v_##_Tpvec(vextq_##suffix(a.val, b.val, s)); \ +} + +OPENCV_HAL_IMPL_NEON_EXTRACT(uint8x16, u8) +OPENCV_HAL_IMPL_NEON_EXTRACT(int8x16, s8) +OPENCV_HAL_IMPL_NEON_EXTRACT(uint16x8, u16) +OPENCV_HAL_IMPL_NEON_EXTRACT(int16x8, s16) 
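Editorial aside on the new universal intrinsic appearing in this hunk: v_extract<s>(a, b) is meant to return n consecutive lanes of the concatenation {a, b} starting at lane s, which is what the NEON vextq_* wrappers listed here and the SSE shift-and-or version that follows compute. If that reading is right, the generic intrin_cpp.hpp fallback added earlier in this patch (which fills the first s lanes from the top of a) orders the lanes differently and may be worth double-checking. A scalar reference sketch of the intended lane pattern; the helper name and test values below are illustrative only, not part of the patch:

    #include <cstdio>

    // result = n consecutive lanes of the concatenation {a, b}, starting at lane s of a
    template <int s, typename T, int n>
    void extract_ref(const T (&a)[n], const T (&b)[n], T (&r)[n])
    {
        for (int i = 0; i < n; ++i)
            r[i] = (i + s < n) ? a[i + s] : b[i + s - n];   // {a[s..n-1], b[0..s-1]}
    }

    int main()
    {
        int a[4] = {0, 1, 2, 3}, b[4] = {4, 5, 6, 7}, r[4];
        extract_ref<1>(a, b, r);                            // expected: 1 2 3 4
        std::printf("%d %d %d %d\n", r[0], r[1], r[2], r[3]);
        return 0;
    }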
+OPENCV_HAL_IMPL_NEON_EXTRACT(uint32x4, u32) +OPENCV_HAL_IMPL_NEON_EXTRACT(int32x4, s32) +OPENCV_HAL_IMPL_NEON_EXTRACT(uint64x2, u64) +OPENCV_HAL_IMPL_NEON_EXTRACT(int64x2, s64) +OPENCV_HAL_IMPL_NEON_EXTRACT(float32x4, f32) + inline v_int32x4 v_round(const v_float32x4& a) { static const int32x4_t v_sign = vdupq_n_s32(1 << 31), @@ -747,7 +766,7 @@ inline v_int32x4 v_trunc(const v_float32x4& a) { return v_int32x4(vcvtq_s32_f32(a.val)); } #define OPENCV_HAL_IMPL_NEON_TRANSPOSE4x4(_Tpvec, suffix) \ -inline void transpose4x4(const v_##_Tpvec& a0, const v_##_Tpvec& a1, \ +inline void v_transpose4x4(const v_##_Tpvec& a0, const v_##_Tpvec& a1, \ const v_##_Tpvec& a2, const v_##_Tpvec& a3, \ v_##_Tpvec& b0, v_##_Tpvec& b1, \ v_##_Tpvec& b2, v_##_Tpvec& b3) \ diff --git a/modules/hal/include/opencv2/hal/intrin_sse.hpp b/modules/hal/include/opencv2/hal/intrin_sse.hpp index 69171e2516..0c30f7d5b6 100644 --- a/modules/hal/include/opencv2/hal/intrin_sse.hpp +++ b/modules/hal/include/opencv2/hal/intrin_sse.hpp @@ -1149,6 +1149,17 @@ OPENCV_HAL_IMPL_SSE_UNPACKS(v_int32x4, epi32, OPENCV_HAL_NOP, OPENCV_HAL_NOP) OPENCV_HAL_IMPL_SSE_UNPACKS(v_float32x4, ps, _mm_castps_si128, _mm_castsi128_ps) OPENCV_HAL_IMPL_SSE_UNPACKS(v_float64x2, pd, _mm_castpd_si128, _mm_castsi128_pd) +template +inline _Tpvec v_extract(const _Tpvec& a, const _Tpvec& b) +{ + const int w = sizeof(typename _Tpvec::lane_type); + const int n = _Tpvec::nlanes; + __m128i ra, rb; + ra = _mm_srli_si128(a.val, s*w); + rb = _mm_slli_si128(b.val, (n-s)*w); + return _Tpvec(_mm_or_si128(ra, rb)); +} + inline v_int32x4 v_round(const v_float32x4& a) { return v_int32x4(_mm_cvtps_epi32(a.val)); } diff --git a/modules/highgui/CMakeLists.txt b/modules/highgui/CMakeLists.txt index 0b7f8fe526..9d1745e7b7 100644 --- a/modules/highgui/CMakeLists.txt +++ b/modules/highgui/CMakeLists.txt @@ -1,7 +1,3 @@ -if (WINRT) - ocv_module_disable(highgui) -endif() - set(the_description "High-level GUI and Media I/O") ocv_add_module(highgui opencv_imgproc opencv_imgcodecs opencv_videoio WRAP python) @@ -11,8 +7,7 @@ ocv_add_module(highgui opencv_imgproc opencv_imgcodecs opencv_videoio WRAP pytho # Jose Luis Blanco, 2008 # ---------------------------------------------------------------------------- -# Compilation with /ZW is not allowed for *.c files -if(HAVE_WINRT_CX AND NOT WINRT) +if(WINRT_8_1) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /ZW") endif() @@ -34,6 +29,9 @@ file(GLOB highgui_ext_hdrs "${CMAKE_CURRENT_LIST_DIR}/include/opencv2/${name}/*.hpp" "${CMAKE_CURRENT_LIST_DIR}/include/opencv2/${name}/*.h") +# Removing WinRT API headers by default +list(REMOVE_ITEM highgui_ext_hdrs "${CMAKE_CURRENT_LIST_DIR}/include/opencv2/${name}/highgui_winrt.hpp") + if(HAVE_QT5) set(CMAKE_AUTOMOC ON) set(CMAKE_INCLUDE_CURRENT_DIR ON) @@ -71,6 +69,40 @@ elseif(HAVE_QT) if(${_have_flag}) set_source_files_properties(${_RCC_OUTFILES} PROPERTIES COMPILE_FLAGS -Wno-missing-declarations) endif() +elseif(WINRT) + if(NOT WINRT_8_0) + # Dependencies used by the implementation referenced + # below are not available on WinRT 8.0. + # Enabling it for WiRT 8.1+ only. + + # WinRT 8.1+ detected. Adding WinRT API header. + message(STATUS " ${name}: WinRT detected. 
Adding WinRT API header") + list(APPEND highgui_ext_hdrs "${CMAKE_CURRENT_LIST_DIR}/include/opencv2/${name}/highgui_winrt.hpp") + + + list(APPEND highgui_srcs + ${CMAKE_CURRENT_LIST_DIR}/src/window_winrt.cpp + ${CMAKE_CURRENT_LIST_DIR}/src/window_winrt_bridge.cpp) + list(APPEND highgui_hdrs + ${CMAKE_CURRENT_LIST_DIR}/src/window_winrt_bridge.hpp) + endif() + + # libraries below are neither available nor required + # on ARM devices and/or Windows Phone + if(WINRT_PHONE OR (OpenCV_ARCH STREQUAL "ARM")) + list(REMOVE_ITEM HIGHGUI_LIBRARIES "comctl32" "gdi32" "ole32" "setupapi") + if(WINRT_PHONE) + message(STATUS " ${name}: Windows Phone detected") + elseif(OpenCV_ARCH STREQUAL "ARM") + message(STATUS " ${name}: ARM detected") + if(WINRT_STORE) + list(REMOVE_ITEM HIGHGUI_LIBRARIES "ws2_32") + message(STATUS " ${name}: Removing 'ws2_32.lib'") + endif() + endif() + message(STATUS " ${name}: Removing 'comctl32.lib, gdi32.lib, ole32.lib, setupapi.lib'") + message(STATUS " ${name}: Leaving '${HIGHGUI_LIBRARIES}'") + endif() elseif(HAVE_WIN32UI) list(APPEND highgui_srcs ${CMAKE_CURRENT_LIST_DIR}/src/window_w32.cpp) elseif(HAVE_GTK OR HAVE_GTK3) diff --git a/modules/highgui/include/opencv2/highgui.hpp b/modules/highgui/include/opencv2/highgui.hpp index 9275ae7c22..f3afceef0c 100644 --- a/modules/highgui/include/opencv2/highgui.hpp +++ b/modules/highgui/include/opencv2/highgui.hpp @@ -79,7 +79,7 @@ It provides easy interface to: attached to the control panel is a trackbar, or the control panel is empty, a new buttonbar is created. Then, a new button is attached to it. - See below the example used to generate the figure: : + See below the example used to generate the figure: @code int main(int argc, char *argv[]) int value = 50; @@ -122,6 +122,45 @@ It provides easy interface to: } @endcode + + @defgroup highgui_winrt WinRT support + + This figure explains new functionality implemented with WinRT GUI. The new GUI provides an Image control, + and a slider panel. Slider panel holds trackbars attached to it. + + Sliders are attached below the image control. Every new slider is added below the previous one. + + See below the example used to generate the figure: + @code + void sample_app::MainPage::ShowWindow() + { + static cv::String windowName("sample"); + cv::winrt_initContainer(this->cvContainer); + cv::namedWindow(windowName); // not required + + cv::Mat image = cv::imread("Assets/sample.jpg"); + cv::Mat converted = cv::Mat(image.rows, image.cols, CV_8UC4); + cvtColor(image, converted, CV_BGR2BGRA); + cv::imshow(windowName, converted); // this will create window if it hasn't been created before + + int state = 42; + cv::TrackbarCallback callback = [](int pos, void* userdata) + { + if (pos == 0) { + cv::destroyWindow(windowName); + } + }; + cv::TrackbarCallback callbackTwin = [](int pos, void* userdata) + { + if (pos >= 70) { + cv::destroyAllWindows(); + } + }; + cv::createTrackbar("Sample trackbar", windowName, &state, 100, callback); + cv::createTrackbar("Twin brother", windowName, &state, 100, callbackTwin); + } + @endcode + @defgroup highgui_c C API @} */ diff --git a/modules/highgui/include/opencv2/highgui/highgui_winrt.hpp b/modules/highgui/include/opencv2/highgui/highgui_winrt.hpp new file mode 100644 index 0000000000..f4147f3908 --- /dev/null +++ b/modules/highgui/include/opencv2/highgui/highgui_winrt.hpp @@ -0,0 +1,48 @@ +// highgui (UX) support for Windows Runtime + +// Copyright (c) Microsoft Open Technologies, Inc. +// All rights reserved. 
+// +// (3 - clause BSD License) +// +// Redistribution and use in source and binary forms, with or without modification, are permitted provided that +// the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the +// following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +// following disclaimer in the documentation and/or other materials provided with the distribution. +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or +// promote products derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED +// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +// PARTICULAR PURPOSE ARE DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY +// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT(INCLUDING +// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. + +using namespace Windows::UI::Xaml::Controls; + +namespace cv +{ + +//! @addtogroup highgui_winrt +//! @{ + +/********************************** WinRT Specific API *************************************************/ + +/** @brief Initializes container component that will be used to hold generated window content. + +@param container Container (Panel^) reference that will be used to hold generated window content: controls and image. + +@note + Must be called to assign WinRT container that will hold created windows content. +*/ + CV_EXPORTS void winrt_initContainer(::Windows::UI::Xaml::Controls::Panel^ container); + +//! @} videoio_winrt + +} // cv \ No newline at end of file diff --git a/modules/highgui/src/agile_wrl.h b/modules/highgui/src/agile_wrl.h deleted file mode 100644 index 99fbf41856..0000000000 --- a/modules/highgui/src/agile_wrl.h +++ /dev/null @@ -1,568 +0,0 @@ -// -// Copyright (C) Microsoft Corporation -// All rights reserved. 
-// Modified for native C++ WRL support by Gregory Morse -// -// Code in Details namespace is for internal usage within the library code -// - -#ifndef _PLATFORM_AGILE_H_ -#define _PLATFORM_AGILE_H_ - -#ifdef _MSC_VER -#pragma once -#endif // _MSC_VER - -#include -#include - -template class Agile; - -template -struct UnwrapAgile -{ - static const bool _IsAgile = false; -}; -template -struct UnwrapAgile> -{ - static const bool _IsAgile = true; -}; -template -struct UnwrapAgile> -{ - static const bool _IsAgile = true; -}; - -#define IS_AGILE(T) UnwrapAgile::_IsAgile - -#define __is_winrt_agile(T) (std::is_same::value || std::is_base_of::value || std::is_base_of::value) //derived from Microsoft::WRL::FtmBase or IAgileObject - -#define __is_win_interface(T) (std::is_base_of::value || std::is_base_of::value) //derived from IUnknown or IInspectable - -#define __is_win_class(T) (std::is_same::value || std::is_base_of::value) //derived from Microsoft::WRL::RuntimeClass or HSTRING - - namespace Details - { - IUnknown* __stdcall GetObjectContext(); - HRESULT __stdcall GetProxyImpl(IUnknown*, REFIID, IUnknown*, IUnknown**); - HRESULT __stdcall ReleaseInContextImpl(IUnknown*, IUnknown*); - - template -#if _MSC_VER >= 1800 - __declspec(no_refcount) inline HRESULT GetProxy(T *ObjectIn, IUnknown *ContextCallBack, T **Proxy) -#else - inline HRESULT GetProxy(T *ObjectIn, IUnknown *ContextCallBack, T **Proxy) -#endif - { -#if _MSC_VER >= 1800 - return GetProxyImpl(*reinterpret_cast(&ObjectIn), __uuidof(T*), ContextCallBack, reinterpret_cast(Proxy)); -#else - return GetProxyImpl(*reinterpret_cast(&const_cast(ObjectIn)), __uuidof(T*), ContextCallBack, reinterpret_cast(Proxy)); -#endif - } - - template - inline HRESULT ReleaseInContext(T *ObjectIn, IUnknown *ContextCallBack) - { - return ReleaseInContextImpl(ObjectIn, ContextCallBack); - } - - template - class AgileHelper - { - __abi_IUnknown* _p; - bool _release; - public: - AgileHelper(__abi_IUnknown* p, bool release = true) : _p(p), _release(release) - { - } - AgileHelper(AgileHelper&& other) : _p(other._p), _release(other._release) - { - _other._p = nullptr; - _other._release = true; - } - AgileHelper operator=(AgileHelper&& other) - { - _p = other._p; - _release = other._release; - _other._p = nullptr; - _other._release = true; - return *this; - } - - ~AgileHelper() - { - if (_release && _p) - { - _p->__abi_Release(); - } - } - - __declspec(no_refcount) __declspec(no_release_return) - T* operator->() - { - return reinterpret_cast(_p); - } - - __declspec(no_refcount) __declspec(no_release_return) - operator T * () - { - return reinterpret_cast(_p); - } - private: - AgileHelper(const AgileHelper&); - AgileHelper operator=(const AgileHelper&); - }; - template - struct __remove_hat - { - typedef T type; - }; - template - struct __remove_hat - { - typedef T type; - }; - template - struct AgileTypeHelper - { - typename typedef __remove_hat::type type; - typename typedef __remove_hat::type* agileMemberType; - }; - } // namespace Details - -#pragma warning(push) -#pragma warning(disable: 4451) // Usage of ref class inside this context can lead to invalid marshaling of object across contexts - - template < - typename T, - bool TIsNotAgile = (__is_win_class(typename Details::AgileTypeHelper::type) && !__is_winrt_agile(typename Details::AgileTypeHelper::type)) || - __is_win_interface(typename Details::AgileTypeHelper::type) - > - class Agile - { - static_assert(__is_win_class(typename Details::AgileTypeHelper::type) || __is_win_interface(typename 
Details::AgileTypeHelper::type), "Agile can only be used with ref class or interface class types"); - typename typedef Details::AgileTypeHelper::agileMemberType TypeT; - TypeT _object; - ::Microsoft::WRL::ComPtr _contextCallback; - ULONG_PTR _contextToken; - -#if _MSC_VER >= 1800 - enum class AgileState - { - NonAgilePointer = 0, - AgilePointer = 1, - Unknown = 2 - }; - AgileState _agileState; -#endif - - void CaptureContext() - { - _contextCallback = Details::GetObjectContext(); - __abi_ThrowIfFailed(CoGetContextToken(&_contextToken)); - } - - void SetObject(TypeT object) - { - // Capture context before setting the pointer - // If context capture fails then nothing to cleanup - Release(); - if (object != nullptr) - { - ::Microsoft::WRL::ComPtr checkIfAgile; - HRESULT hr = reinterpret_cast(object)->QueryInterface(__uuidof(IAgileObject), &checkIfAgile); - // Don't Capture context if object is agile - if (hr != S_OK) - { -#if _MSC_VER >= 1800 - _agileState = AgileState::NonAgilePointer; -#endif - CaptureContext(); - } -#if _MSC_VER >= 1800 - else - { - _agileState = AgileState::AgilePointer; - } -#endif - } - _object = object; - } - - public: - Agile() throw() : _object(nullptr), _contextToken(0) -#if _MSC_VER >= 1800 - , _agileState(AgileState::Unknown) -#endif - { - } - - Agile(nullptr_t) throw() : _object(nullptr), _contextToken(0) -#if _MSC_VER >= 1800 - , _agileState(AgileState::Unknown) -#endif - { - } - - explicit Agile(TypeT object) throw() : _object(nullptr), _contextToken(0) -#if _MSC_VER >= 1800 - , _agileState(AgileState::Unknown) -#endif - { - // Assumes that the source object is from the current context - SetObject(object); - } - - Agile(const Agile& object) throw() : _object(nullptr), _contextToken(0) -#if _MSC_VER >= 1800 - , _agileState(AgileState::Unknown) -#endif - { - // Get returns pointer valid for current context - SetObject(object.Get()); - } - - Agile(Agile&& object) throw() : _object(nullptr), _contextToken(0) -#if _MSC_VER >= 1800 - , _agileState(AgileState::Unknown) -#endif - { - // Assumes that the source object is from the current context - Swap(object); - } - - ~Agile() throw() - { - Release(); - } - - TypeT Get() const - { - // Agile object, no proxy required -#if _MSC_VER >= 1800 - if (_agileState == AgileState::AgilePointer || _object == nullptr) -#else - if (_contextToken == 0 || _contextCallback == nullptr || _object == nullptr) -#endif - { - return _object; - } - - // Do the check for same context - ULONG_PTR currentContextToken; - __abi_ThrowIfFailed(CoGetContextToken(¤tContextToken)); - if (currentContextToken == _contextToken) - { - return _object; - } - -#if _MSC_VER >= 1800 - // Different context and holding on to a non agile object - // Do the costly work of getting a proxy - TypeT localObject; - __abi_ThrowIfFailed(Details::GetProxy(_object, _contextCallback.Get(), &localObject)); - - if (_agileState == AgileState::Unknown) -#else - // Object is agile if it implements IAgileObject - // GetAddressOf captures the context with out knowing the type of object that it will hold - if (_object != nullptr) -#endif - { -#if _MSC_VER >= 1800 - // Object is agile if it implements IAgileObject - // GetAddressOf captures the context with out knowing the type of object that it will hold - ::Microsoft::WRL::ComPtr checkIfAgile; - HRESULT hr = reinterpret_cast(localObject)->QueryInterface(__uuidof(IAgileObject), &checkIfAgile); -#else - ::Microsoft::WRL::ComPtr checkIfAgile; - HRESULT hr = reinterpret_cast(_object)->QueryInterface(__uuidof(IAgileObject), 
&checkIfAgile); -#endif - if (hr == S_OK) - { - auto pThis = const_cast(this); -#if _MSC_VER >= 1800 - pThis->_agileState = AgileState::AgilePointer; -#endif - pThis->_contextToken = 0; - pThis->_contextCallback = nullptr; - return _object; - } -#if _MSC_VER >= 1800 - else - { - auto pThis = const_cast(this); - pThis->_agileState = AgileState::NonAgilePointer; - } -#endif - } - -#if _MSC_VER < 1800 - // Different context and holding on to a non agile object - // Do the costly work of getting a proxy - TypeT localObject; - __abi_ThrowIfFailed(Details::GetProxy(_object, _contextCallback.Get(), &localObject)); -#endif - return localObject; - } - - TypeT* GetAddressOf() throw() - { - Release(); - CaptureContext(); - return &_object; - } - - TypeT* GetAddressOfForInOut() throw() - { - CaptureContext(); - return &_object; - } - - TypeT operator->() const throw() - { - return Get(); - } - - Agile& operator=(nullptr_t) throw() - { - Release(); - return *this; - } - - Agile& operator=(TypeT object) throw() - { - Agile(object).Swap(*this); - return *this; - } - - Agile& operator=(Agile object) throw() - { - // parameter is by copy which gets pointer valid for current context - object.Swap(*this); - return *this; - } - -#if _MSC_VER < 1800 - Agile& operator=(IUnknown* lp) throw() - { - // bump ref count - ::Microsoft::WRL::ComPtr spObject(lp); - - // put it into Platform Object - Platform::Object object; - *(IUnknown**)(&object) = spObject.Detach(); - - SetObject(object); - return *this; - } -#endif - - void Swap(Agile& object) - { - std::swap(_object, object._object); - std::swap(_contextCallback, object._contextCallback); - std::swap(_contextToken, object._contextToken); -#if _MSC_VER >= 1800 - std::swap(_agileState, object._agileState); -#endif - } - - // Release the interface and set to NULL - void Release() throw() - { - if (_object) - { - // Cast to IInspectable (no QI) - IUnknown* pObject = *(IUnknown**)(&_object); - // Set * to null without release - *(IUnknown**)(&_object) = nullptr; - - ULONG_PTR currentContextToken; - __abi_ThrowIfFailed(CoGetContextToken(¤tContextToken)); - if (_contextToken == 0 || _contextCallback == nullptr || _contextToken == currentContextToken) - { - pObject->Release(); - } - else - { - Details::ReleaseInContext(pObject, _contextCallback.Get()); - } - _contextCallback = nullptr; - _contextToken = 0; -#if _MSC_VER >= 1800 - _agileState = AgileState::Unknown; -#endif - } - } - - bool operator==(nullptr_t) const throw() - { - return _object == nullptr; - } - - bool operator==(const Agile& other) const throw() - { - return _object == other._object && _contextToken == other._contextToken; - } - - bool operator<(const Agile& other) const throw() - { - if (reinterpret_cast(_object) < reinterpret_cast(other._object)) - { - return true; - } - - return _object == other._object && _contextToken < other._contextToken; - } - }; - - template - class Agile - { - static_assert(__is_win_class(typename Details::AgileTypeHelper::type) || __is_win_interface(typename Details::AgileTypeHelper::type), "Agile can only be used with ref class or interface class types"); - typename typedef Details::AgileTypeHelper::agileMemberType TypeT; - TypeT _object; - - public: - Agile() throw() : _object(nullptr) - { - } - - Agile(nullptr_t) throw() : _object(nullptr) - { - } - - explicit Agile(TypeT object) throw() : _object(object) - { - } - - Agile(const Agile& object) throw() : _object(object._object) - { - } - - Agile(Agile&& object) throw() : _object(nullptr) - { - Swap(object); - } - - ~Agile() 
throw() - { - Release(); - } - - TypeT Get() const - { - return _object; - } - - TypeT* GetAddressOf() throw() - { - Release(); - return &_object; - } - - TypeT* GetAddressOfForInOut() throw() - { - return &_object; - } - - TypeT operator->() const throw() - { - return Get(); - } - - Agile& operator=(nullptr_t) throw() - { - Release(); - return *this; - } - - Agile& operator=(TypeT object) throw() - { - if (_object != object) - { - _object = object; - } - return *this; - } - - Agile& operator=(Agile object) throw() - { - object.Swap(*this); - return *this; - } - -#if _MSC_VER < 1800 - Agile& operator=(IUnknown* lp) throw() - { - Release(); - // bump ref count - ::Microsoft::WRL::ComPtr spObject(lp); - - // put it into Platform Object - Platform::Object object; - *(IUnknown**)(&object) = spObject.Detach(); - - _object = object; - return *this; - } -#endif - - // Release the interface and set to NULL - void Release() throw() - { - _object = nullptr; - } - - void Swap(Agile& object) - { - std::swap(_object, object._object); - } - - bool operator==(nullptr_t) const throw() - { - return _object == nullptr; - } - - bool operator==(const Agile& other) const throw() - { - return _object == other._object; - } - - bool operator<(const Agile& other) const throw() - { - return reinterpret_cast(_object) < reinterpret_cast(other._object); - } - }; - -#pragma warning(pop) - - template - bool operator==(nullptr_t, const Agile& a) throw() - { - return a == nullptr; - } - - template - bool operator!=(const Agile& a, nullptr_t) throw() - { - return !(a == nullptr); - } - - template - bool operator!=(nullptr_t, const Agile& a) throw() - { - return !(a == nullptr); - } - - template - bool operator!=(const Agile& a, const Agile& b) throw() - { - return !(a == b); - } - - -#endif // _PLATFORM_AGILE_H_ diff --git a/modules/highgui/src/ppltasks_winrt.h b/modules/highgui/src/ppltasks_winrt.h deleted file mode 100644 index c9867d8a56..0000000000 --- a/modules/highgui/src/ppltasks_winrt.h +++ /dev/null @@ -1,9466 +0,0 @@ -/*** -* ==++== -* -* Copyright (c) Microsoft Corporation. All rights reserved. 
-* -* Modified for native C++ WRL support by Gregory Morse -* -* ==--== -* =+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+ -* -* ppltasks_winrt.h -* -* Parallel Patterns Library - PPL Tasks -* -* =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- -****/ - -#pragma once - -#ifndef _PPLTASKS_WINRT_H -#define _PPLTASKS_WINRT_H - -#include -#include -#if _MSC_VER >= 1800 -#include - -// Cannot build using a compiler that is older than dev10 SP1 -#ifdef _MSC_VER -#if _MSC_FULL_VER < 160040219 /*IFSTRIP=IGN*/ -#error ERROR: Visual Studio 2010 SP1 or later is required to build ppltasks -#endif /*IFSTRIP=IGN*/ -#endif -#else -#include -#endif -#include -#include -#include -#include -#if _MSC_VER >= 1800 -#include -#endif - -#ifndef __cplusplus_winrt - -#include -#include -#if _MSC_VER >= 1800 -#include "agile_wrl.h" -#endif -#include -#include - -#ifndef _UITHREADCTXT_SUPPORT - -#ifdef WINAPI_FAMILY /*IFSTRIP=IGN*/ - -// It is safe to include winapifamily as WINAPI_FAMILY was defined by the user -#include - -#if WINAPI_FAMILY == WINAPI_FAMILY_APP /*IFSTRIP=IGN*/ - // UI thread context support is not required for desktop and Windows Store apps - #define _UITHREADCTXT_SUPPORT 0 -#elif WINAPI_FAMILY == WINAPI_FAMILY_DESKTOP_APP /*IFSTRIP=IGN*/ - // UI thread context support is not required for desktop and Windows Store apps - #define _UITHREADCTXT_SUPPORT 0 -#else /* WINAPI_FAMILY == WINAPI_FAMILY_DESKTOP_APP */ - #define _UITHREADCTXT_SUPPORT 1 -#endif /* WINAPI_FAMILY == WINAPI_FAMILY_DESKTOP_APP */ - -#else /* WINAPI_FAMILY */ - // Not supported without a WINAPI_FAMILY setting. - #define _UITHREADCTXT_SUPPORT 0 -#endif /* WINAPI_FAMILY */ - -#endif /* _UITHREADCTXT_SUPPORT */ - -#if _UITHREADCTXT_SUPPORT -#include -#endif /* _UITHREADCTXT_SUPPORT */ - -#pragma detect_mismatch("_PPLTASKS_WITH_WINRT", "0") - -#ifdef _DEBUG -#define _DBG_ONLY(X) X -#else -#define _DBG_ONLY(X) -#endif // #ifdef _DEBUG - -// std::copy_exception changed to std::make_exception_ptr from VS 2010 to VS 11. -#ifdef _MSC_VER -#if _MSC_VER < 1700 /*IFSTRIP=IGN*/ -namespace std -{ - template exception_ptr make_exception_ptr(_E _Except) - { - return copy_exception(_Except); - } -} -#endif -#ifndef _PPLTASK_ASYNC_LOGGING -#if _MSC_VER >= 1800 && defined(__cplusplus_winrt) -#define _PPLTASK_ASYNC_LOGGING 1 // Only enable async logging under dev12 winrt -#else -#define _PPLTASK_ASYNC_LOGGING 0 -#endif -#endif -#endif - -#pragma pack(push,_CRT_PACKING) - -#pragma warning(push) -#pragma warning(disable: 28197) -#pragma warning(disable: 4100) // Unreferenced formal parameter - needed for document generation -#if _MSC_VER >= 1800 -#pragma warning(disable: 4127) // constant express in if condition - we use it for meta programming -#else -#pragma warning(disable: 4702) // Unreachable code - it is caused by user lambda throw exceptions -#endif - -// All CRT public header files are required to be protected from the macro new -#pragma push_macro("new") -#undef new - -// stuff ported from Dev11 CRT -// NOTE: this doesn't actually match std::declval. it behaves differently for void! -// so don't blindly change it to std::declval. -namespace stdx -{ - template - _T&& declval(); -} - -/// -/// The Concurrency_winrt namespace provides classes and functions that give you access to the Concurrency Runtime, -/// a concurrent programming framework for C++. For more information, see . 
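Editorial aside: the header being deleted here is a WRL-flavoured port of the PPL tasks API, and the namespace comment above describes it as access to the Concurrency Runtime. For orientation only, this is roughly how the underlying API reads when used through MSVC's standard <ppltasks.h>; the sample is illustrative, assumes an MSVC toolchain, and is not part of the patch:

    #include <ppltasks.h>
    #include <cstdio>

    int main()
    {
        // Create a task, chain a continuation, and wait for the chain to finish.
        auto t = concurrency::create_task([] { return 6 * 7; })
                     .then([](int v) { std::printf("result = %d\n", v); });
        t.wait();
        return 0;
    }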
-/// -/**/ -namespace Concurrency_winrt -{ - // In debug builds, default to 10 frames, unless this is overridden prior to #includ'ing ppltasks.h. In retail builds, default to only one frame. -#ifndef PPL_TASK_SAVE_FRAME_COUNT -#ifdef _DEBUG -#define PPL_TASK_SAVE_FRAME_COUNT 10 -#else -#define PPL_TASK_SAVE_FRAME_COUNT 1 -#endif -#endif - - /// - /// Helper macro to determine how many stack frames need to be saved. When any number less or equal to 1 is specified, - /// only one frame is captured and no stackwalk will be involved. Otherwise, the number of callstack frames will be captured. - /// - /// - /// This needs to be defined as a macro rather than a function so that if we're only gathering one frame, _ReturnAddress() - /// will evaluate to client code, rather than a helper function inside of _TaskCreationCallstack, itself. - /// -#ifdef _CAPTURE_CALLSTACK -#undef _CAPTURE_CALLSTACK -#endif -#if PPL_TASK_SAVE_FRAME_COUNT > 1 -#if !defined(_DEBUG) -#pragma message ("WARNING: Redefinning PPL_TASK_SAVE_FRAME_COUNT under Release build for non-desktop applications is not supported; only one frame will be captured!") -#define _CAPTURE_CALLSTACK() ::Concurrency_winrt::details::_TaskCreationCallstack::_CaptureSingleFrameCallstack(_ReturnAddress()) -#else -#define _CAPTURE_CALLSTACK() ::Concurrency_winrt::details::_TaskCreationCallstack::_CaptureMultiFramesCallstack(PPL_TASK_SAVE_FRAME_COUNT) -#endif -#else -#define _CAPTURE_CALLSTACK() ::Concurrency_winrt::details::_TaskCreationCallstack::_CaptureSingleFrameCallstack(_ReturnAddress()) -#endif -/// - -/// A type that represents the terminal state of a task. Valid values are completed and canceled. -/// -/// -/**/ -typedef Concurrency::task_group_status task_status; - -template class task; -template <> class task; - -/// -/// Returns an indication of whether the task that is currently executing has received a request to cancel its -/// execution. Cancellation is requested on a task if the task was created with a cancellation token, and -/// the token source associated with that token is canceled. -/// -/// -/// true if the currently executing task has received a request for cancellation, false otherwise. -/// -/// -/// If you call this method in the body of a task and it returns true, you must respond with a call to -/// cancel_current_task to acknowledge the cancellation request, -/// after performing any cleanup you need. This will abort the execution of the task and cause it to enter into -/// the canceled state. If you do not respond and continue execution, or return instead of calling -/// cancel_current_task, the task will enter the completed state when it is done. -/// state. -/// A task is not cancellable if it was created without a cancellation token. -/// -/// -/// -/// -/// -/**/ -#if _MSC_VER >= 1800 -inline bool __cdecl is_task_cancellation_requested() -{ - return ::Concurrency::details::_TaskCollection_t::_Is_cancellation_requested(); -} -#else -inline bool __cdecl is_task_cancellation_requested() -{ - // ConcRT scheduler under the hood is using TaskCollection, which is same as task_group - return ::Concurrency::is_current_task_group_canceling(); -} -#endif - -/// -/// Cancels the currently executing task. This function can be called from within the body of a task to abort the -/// task's execution and cause it to enter the canceled state. While it may be used in response to -/// the is_task_cancellation_requested function, you may -/// also use it by itself, to initiate cancellation of the task that is currently executing. 
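Editorial aside: the two helpers documented in this deleted block, is_task_cancellation_requested() and cancel_current_task(), implement PPL's cooperative cancellation protocol described above (poll for a pending request, clean up, then acknowledge it to move the task into the canceled state). A hedged sketch of that protocol using the equivalent names from MSVC's standard concurrency namespace; illustrative only, assumes <ppltasks.h>:

    #include <ppltasks.h>
    #include <cstdio>

    int main()
    {
        concurrency::cancellation_token_source cts;

        auto t = concurrency::create_task([]
        {
            for (;;)
            {
                // Poll for a pending cancellation request...
                if (concurrency::is_task_cancellation_requested())
                {
                    // ...and acknowledge it after any cleanup; this aborts the
                    // task and moves it into the canceled state.
                    concurrency::cancel_current_task();
                }
                // ...otherwise do one unit of work here.
            }
        }, cts.get_token());

        cts.cancel();                               // request cancellation from outside
        if (t.wait() == concurrency::canceled)      // wait() reports the terminal state
            std::printf("task was canceled cooperatively\n");
        return 0;
    }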
-/// It is not a supported scenario to call this function if you are not within the body of a task. -/// Doing so will result in undefined behavior such as a crash or a hang in your application. -/// -/// -/// -/**/ -//#if _MSC_VER >= 1800 -inline __declspec(noreturn) void __cdecl cancel_current_task() -{ - throw Concurrency::task_canceled(); -} -//#else -//_CRTIMP2 __declspec(noreturn) void __cdecl cancel_current_task(); -//#endif - -namespace details -{ -#if _MSC_VER >= 1800 - /// - /// Callstack container, which is used to capture and preserve callstacks in ppltasks. - /// Members of this class is examined by vc debugger, thus there will be no public access methods. - /// Please note that names of this class should be kept stable for debugger examining. - /// - class _TaskCreationCallstack - { - private: - // If _M_SingleFrame != nullptr, there will be only one frame of callstacks, which is stored in _M_SingleFrame; - // otherwise, _M_Frame will store all the callstack frames. - void* _M_SingleFrame; - std::vector _M_frames; - public: - _TaskCreationCallstack() - { - _M_SingleFrame = nullptr; - } - - // Store one frame of callstack. This function works for both Debug / Release CRT. - static _TaskCreationCallstack _CaptureSingleFrameCallstack(void *_SingleFrame) - { - _TaskCreationCallstack _csc; - _csc._M_SingleFrame = _SingleFrame; - return _csc; - } - - // Capture _CaptureFrames number of callstack frames. This function only work properly for Desktop or Debug CRT. - __declspec(noinline) - static _TaskCreationCallstack _CaptureMultiFramesCallstack(size_t _CaptureFrames) - { - _TaskCreationCallstack _csc; - _csc._M_frames.resize(_CaptureFrames); - // skip 2 frames to make sure callstack starts from user code - _csc._M_frames.resize(::Concurrency::details::platform::CaptureCallstack(&_csc._M_frames[0], 2, _CaptureFrames)); - return _csc; - } - }; -#endif - typedef UINT32 _Unit_type; - - struct _TypeSelectorNoAsync {}; - struct _TypeSelectorAsyncOperationOrTask {}; - struct _TypeSelectorAsyncOperation : public _TypeSelectorAsyncOperationOrTask { }; - struct _TypeSelectorAsyncTask : public _TypeSelectorAsyncOperationOrTask { }; - struct _TypeSelectorAsyncAction {}; - struct _TypeSelectorAsyncActionWithProgress {}; - struct _TypeSelectorAsyncOperationWithProgress {}; - - template - struct _NormalizeVoidToUnitType - { - typedef _Ty _Type; - }; - - template<> - struct _NormalizeVoidToUnitType - { - typedef _Unit_type _Type; - }; - - template - struct _IsUnwrappedAsyncSelector - { - static const bool _Value = true; - }; - - template<> - struct _IsUnwrappedAsyncSelector<_TypeSelectorNoAsync> - { - static const bool _Value = false; - }; - - template - struct _UnwrapTaskType - { - typedef _Ty _Type; - }; - - template - struct _UnwrapTaskType> - { - typedef _Ty _Type; - }; - - template - _TypeSelectorAsyncTask _AsyncOperationKindSelector(task<_T>); - - _TypeSelectorNoAsync _AsyncOperationKindSelector(...); - - template - struct _Unhat - { - typedef _Type _Value; - }; - - template - struct _Unhat<_Type*> - { - typedef _Type _Value; - }; - - //struct _NonUserType { public: int _Dummy; }; - - template - struct _ValueTypeOrRefType - { - typedef _Unit_type _Value; - }; - - template - struct _ValueTypeOrRefType<_Type, true> - { - typedef _Type _Value; - }; - - template - _Ty _UnwrapAsyncActionWithProgressSelector(ABI::Windows::Foundation::IAsyncActionWithProgress_impl<_Ty>*); - - template - _Ty _UnwrapAsyncActionWithProgressSelector(...); - - template - _Progress 
_UnwrapAsyncOperationWithProgressProgressSelector(ABI::Windows::Foundation::IAsyncOperationWithProgress_impl<_Ty, _Progress>*); - - template - _Progress _UnwrapAsyncOperationWithProgressProgressSelector(...); - - template - _T2 _ProgressTypeSelector(ABI::Windows::Foundation::IAsyncOperationWithProgress<_T1, _T2>*); - - template - _T1 _ProgressTypeSelector(ABI::Windows::Foundation::IAsyncActionWithProgress<_T1>*); - - template - struct _GetProgressType - { - typedef decltype(_ProgressTypeSelector(stdx::declval<_Type>())) _Value; - }; - - template - _TypeSelectorAsyncOperation _AsyncOperationKindSelector(ABI::Windows::Foundation::IAsyncOperation<_T>*); - - _TypeSelectorAsyncAction _AsyncOperationKindSelector(ABI::Windows::Foundation::IAsyncAction*); - - template - _TypeSelectorAsyncOperationWithProgress _AsyncOperationKindSelector(ABI::Windows::Foundation::IAsyncOperationWithProgress<_T1, _T2>*); - - template - _TypeSelectorAsyncActionWithProgress _AsyncOperationKindSelector(ABI::Windows::Foundation::IAsyncActionWithProgress<_T>*); - - template - struct _IsIAsyncInfo - { - static const bool _Value = std::is_base_of::_Value>::value || - std::is_same<_TypeSelectorAsyncAction, decltype(details::_AsyncOperationKindSelector(stdx::declval<_Type>()))>::value || - std::is_same<_TypeSelectorAsyncOperation, decltype(details::_AsyncOperationKindSelector(stdx::declval<_Type>()))>::value || - std::is_same<_TypeSelectorAsyncOperationWithProgress, decltype(details::_AsyncOperationKindSelector(stdx::declval<_Type>()))>::value || - std::is_same<_TypeSelectorAsyncActionWithProgress, decltype(details::_AsyncOperationKindSelector(stdx::declval<_Type>()))>::value; - }; - - template <> - struct _IsIAsyncInfo - { - static const bool _Value = false; - }; - - template - _Ty _UnwrapAsyncOperationSelector(ABI::Windows::Foundation::IAsyncOperation_impl<_Ty>*); - - template - _Ty _UnwrapAsyncOperationSelector(...); - - template - _Ty _UnwrapAsyncOperationWithProgressSelector(ABI::Windows::Foundation::IAsyncOperationWithProgress_impl<_Ty, _Progress>*); - - template - _Ty _UnwrapAsyncOperationWithProgressSelector(...); - - // Unwrap functions for asyncOperations - template - auto _GetUnwrappedType(ABI::Windows::Foundation::IAsyncOperation<_Ty>*) -> typename ABI::Windows::Foundation::Internal::GetAbiType*>()))>::type; - - void _GetUnwrappedType(ABI::Windows::Foundation::IAsyncAction*); - - template - auto _GetUnwrappedType(ABI::Windows::Foundation::IAsyncOperationWithProgress<_Ty, _Progress>*) -> typename ABI::Windows::Foundation::Internal::GetAbiType*>()))>::type; - - template - void _GetUnwrappedType(ABI::Windows::Foundation::IAsyncActionWithProgress<_Progress>*); - - template - _T _ReturnAsyncOperationKindSelector(ABI::Windows::Foundation::IAsyncOperation<_T>*); - - void _ReturnAsyncOperationKindSelector(ABI::Windows::Foundation::IAsyncAction*); - - template - _T1 _ReturnAsyncOperationKindSelector(ABI::Windows::Foundation::IAsyncOperationWithProgress<_T1, _T2>*); - - template - void _ReturnAsyncOperationKindSelector(ABI::Windows::Foundation::IAsyncActionWithProgress<_T>*); - - class _ProgressReporterCtorArgType{}; - - template ::_Value> - struct _TaskTypeTraits - { - typedef typename details::_UnwrapTaskType<_Type>::_Type _TaskRetType; - typedef _TaskRetType _TaskRetType_abi; - typedef decltype(_AsyncOperationKindSelector(stdx::declval<_Type>())) _AsyncKind; - typedef typename details::_NormalizeVoidToUnitType<_TaskRetType>::_Type _NormalizedTaskRetType; - - static const bool _IsAsyncTask = _IsAsync; - static const bool 
_IsUnwrappedTaskOrAsync = details::_IsUnwrappedAsyncSelector<_AsyncKind>::_Value; - }; - - template - struct _TaskTypeTraits<_Type, true> - { - typedef decltype(_ReturnAsyncOperationKindSelector(stdx::declval<_Type>())) _TaskRetType; - typedef decltype(_GetUnwrappedType(stdx::declval<_Type>())) _TaskRetType_abi; - typedef _TaskRetType _NormalizedTaskRetType; - typedef decltype(_AsyncOperationKindSelector(stdx::declval<_Type>())) _AsyncKind; - - static const bool _IsAsyncTask = true; - static const bool _IsUnwrappedTaskOrAsync = details::_IsUnwrappedAsyncSelector<_AsyncKind>::_Value; - }; - - template auto _IsCallable(_Function _Func, int, int, int) -> decltype(_Func(stdx::declval*>()), std::true_type()) { (void)_Func; return std::true_type(); } - template auto _IsCallable(_Function _Func, int, int, ...) -> decltype(_Func(stdx::declval<_ReturnType*>()), std::true_type()) { (void)_Func; return std::true_type(); } - template auto _IsCallable(_Function _Func, int, ...) -> decltype(_Func(), std::true_type()) { (void)_Func; return std::true_type(); } - template std::false_type _IsCallable(_Function, ...) { return std::false_type(); } - - template <> - struct _TaskTypeTraits - { - typedef void _TaskRetType; - typedef void _TaskRetType_abi; - typedef _TypeSelectorNoAsync _AsyncKind; - typedef _Unit_type _NormalizedTaskRetType; - - static const bool _IsAsyncTask = false; - static const bool _IsUnwrappedTaskOrAsync = false; - }; - - // *************************************************************************** - // Template type traits and helpers for async production APIs: - // - - struct _ZeroArgumentFunctor { }; - struct _OneArgumentFunctor { }; - struct _TwoArgumentFunctor { }; - struct _ThreeArgumentFunctor { }; - - // **************************************** - // CLASS TYPES: - - // mutable functions - // ******************** - // THREE ARGUMENTS: - - // non-void arg: - template - _Arg1 _Arg1ClassHelperThunk(_ReturnType(_Class::*)(_Arg1, _Arg2, _Arg3)); - - // non-void arg: - template - _Arg2 _Arg2ClassHelperThunk(_ReturnType(_Class::*)(_Arg1, _Arg2, _Arg3)); - - // non-void arg: - template - _Arg3 _Arg3ClassHelperThunk(_ReturnType(_Class::*)(_Arg1, _Arg2, _Arg3)); - - template - _ReturnType _ReturnTypeClassHelperThunk(_ReturnType(_Class::*)(_Arg1, _Arg2, _Arg3)); - - template - _ThreeArgumentFunctor _ArgumentCountHelper(_ReturnType(_Class::*)(_Arg1, _Arg2, _Arg3)); - - // ******************** - // TWO ARGUMENTS: - - // non-void arg: - template - _Arg1 _Arg1ClassHelperThunk(_ReturnType(_Class::*)(_Arg1, _Arg2)); - - // non-void arg: - template - _Arg2 _Arg2ClassHelperThunk(_ReturnType(_Class::*)(_Arg1, _Arg2)); - - // non-void arg: - template - void _Arg3ClassHelperThunk(_ReturnType(_Class::*)(_Arg1, _Arg2)); - - template - _ReturnType _ReturnTypeClassHelperThunk(_ReturnType(_Class::*)(_Arg1, _Arg2)); - - template - _TwoArgumentFunctor _ArgumentCountHelper(_ReturnType(_Class::*)(_Arg1, _Arg2)); - - // ******************** - // ONE ARGUMENT: - - // non-void arg: - template - _Arg1 _Arg1ClassHelperThunk(_ReturnType(_Class::*)(_Arg1)); - - // non-void arg: - template - void _Arg2ClassHelperThunk(_ReturnType(_Class::*)(_Arg1)); - - // non-void arg: - template - void _Arg3ClassHelperThunk(_ReturnType(_Class::*)(_Arg1)); - - template - _ReturnType _ReturnTypeClassHelperThunk(_ReturnType(_Class::*)(_Arg1)); - - template - _OneArgumentFunctor _ArgumentCountHelper(_ReturnType(_Class::*)(_Arg1)); - - // ******************** - // ZERO ARGUMENT: - - // void arg: - template - void 
_Arg1ClassHelperThunk(_ReturnType(_Class::*)()); - - // void arg: - template - void _Arg2ClassHelperThunk(_ReturnType(_Class::*)()); - - // void arg: - template - void _Arg3ClassHelperThunk(_ReturnType(_Class::*)()); - - // void arg: - template - _ReturnType _ReturnTypeClassHelperThunk(_ReturnType(_Class::*)()); - - template - _ZeroArgumentFunctor _ArgumentCountHelper(_ReturnType(_Class::*)()); - - // ******************** - // THREE ARGUMENTS: - - // non-void arg: - template - _Arg1 _Arg1ClassHelperThunk(_ReturnType(_Class::*)(_Arg1, _Arg2, _Arg3) const); - - // non-void arg: - template - _Arg2 _Arg2ClassHelperThunk(_ReturnType(_Class::*)(_Arg1, _Arg2, _Arg3) const); - - // non-void arg: - template - _Arg3 _Arg3ClassHelperThunk(_ReturnType(_Class::*)(_Arg1, _Arg2, _Arg3) const); - - template - _ReturnType _ReturnTypeClassHelperThunk(_ReturnType(_Class::*)(_Arg1, _Arg2, _Arg3) const); - - template - _ThreeArgumentFunctor _ArgumentCountHelper(_ReturnType(_Class::*)(_Arg1, _Arg2, _Arg3) const); - - // ******************** - // TWO ARGUMENTS: - - // non-void arg: - template - _Arg1 _Arg1ClassHelperThunk(_ReturnType(_Class::*)(_Arg1, _Arg2) const); - - // non-void arg: - template - _Arg2 _Arg2ClassHelperThunk(_ReturnType(_Class::*)(_Arg1, _Arg2) const); - - // non-void arg: - template - void _Arg3ClassHelperThunk(_ReturnType(_Class::*)(_Arg1, _Arg2) const); - - template - _ReturnType _ReturnTypeClassHelperThunk(_ReturnType(_Class::*)(_Arg1, _Arg2) const); - - template - _TwoArgumentFunctor _ArgumentCountHelper(_ReturnType(_Class::*)(_Arg1, _Arg2) const); - - // ******************** - // ONE ARGUMENT: - - // non-void arg: - template - _Arg1 _Arg1ClassHelperThunk(_ReturnType(_Class::*)(_Arg1) const); - - // non-void arg: - template - void _Arg2ClassHelperThunk(_ReturnType(_Class::*)(_Arg1) const); - - // non-void arg: - template - void _Arg3ClassHelperThunk(_ReturnType(_Class::*)(_Arg1) const); - - template - _ReturnType _ReturnTypeClassHelperThunk(_ReturnType(_Class::*)(_Arg1) const); - - template - _OneArgumentFunctor _ArgumentCountHelper(_ReturnType(_Class::*)(_Arg1) const); - - // ******************** - // ZERO ARGUMENT: - - // void arg: - template - void _Arg1ClassHelperThunk(_ReturnType(_Class::*)() const); - - // void arg: - template - void _Arg2ClassHelperThunk(_ReturnType(_Class::*)() const); - - // void arg: - template - void _Arg3ClassHelperThunk(_ReturnType(_Class::*)() const); - - // void arg: - template - _ReturnType _ReturnTypeClassHelperThunk(_ReturnType(_Class::*)() const); - - template - _ZeroArgumentFunctor _ArgumentCountHelper(_ReturnType(_Class::*)() const); - - // **************************************** - // POINTER TYPES: - - // ******************** - // THREE ARGUMENTS: - - template - _Arg1 _Arg1PFNHelperThunk(_ReturnType(__cdecl *)(_Arg1, _Arg2, _Arg3)); - - template - _Arg2 _Arg2PFNHelperThunk(_ReturnType(__cdecl *)(_Arg1, _Arg2, _Arg3)); - - template - _Arg3 _Arg3PFNHelperThunk(_ReturnType(__cdecl *)(_Arg1, _Arg2, _Arg3)); - - template - _ReturnType _ReturnTypePFNHelperThunk(_ReturnType(__cdecl *)(_Arg1, _Arg2, _Arg3)); - - template - _ThreeArgumentFunctor _ArgumentCountHelper(_ReturnType(__cdecl *)(_Arg1, _Arg2, _Arg3)); - - template - _Arg1 _Arg1PFNHelperThunk(_ReturnType(__stdcall *)(_Arg1, _Arg2, _Arg3)); - - template - _Arg2 _Arg2PFNHelperThunk(_ReturnType(__stdcall *)(_Arg1, _Arg2, _Arg3)); - - template - _Arg3 _Arg3PFNHelperThunk(_ReturnType(__stdcall *)(_Arg1, _Arg2, _Arg3)); - - template - _ReturnType _ReturnTypePFNHelperThunk(_ReturnType(__stdcall *)(_Arg1, 
_Arg2, _Arg3)); - - template - _ThreeArgumentFunctor _ArgumentCountHelper(_ReturnType(__stdcall *)(_Arg1, _Arg2, _Arg3)); - - template - _Arg1 _Arg1PFNHelperThunk(_ReturnType(__fastcall *)(_Arg1, _Arg2, _Arg3)); - - template - _Arg2 _Arg2PFNHelperThunk(_ReturnType(__fastcall *)(_Arg1, _Arg2, _Arg3)); - - template - _Arg3 _Arg3PFNHelperThunk(_ReturnType(__fastcall *)(_Arg1, _Arg2, _Arg3)); - - template - _ReturnType _ReturnTypePFNHelperThunk(_ReturnType(__fastcall *)(_Arg1, _Arg2, _Arg3)); - - template - _ThreeArgumentFunctor _ArgumentCountHelper(_ReturnType(__fastcall *)(_Arg1, _Arg2, _Arg3)); - - // ******************** - // TWO ARGUMENTS: - - template - _Arg1 _Arg1PFNHelperThunk(_ReturnType(__cdecl *)(_Arg1, _Arg2)); - - template - _Arg2 _Arg2PFNHelperThunk(_ReturnType(__cdecl *)(_Arg1, _Arg2)); - - template - void _Arg3PFNHelperThunk(_ReturnType(__cdecl *)(_Arg1, _Arg2)); - - template - _ReturnType _ReturnTypePFNHelperThunk(_ReturnType(__cdecl *)(_Arg1, _Arg2)); - - template - _TwoArgumentFunctor _ArgumentCountHelper(_ReturnType(__cdecl *)(_Arg1, _Arg2)); - - template - _Arg1 _Arg1PFNHelperThunk(_ReturnType(__stdcall *)(_Arg1, _Arg2)); - - template - _Arg2 _Arg2PFNHelperThunk(_ReturnType(__stdcall *)(_Arg1, _Arg2)); - - template - void _Arg3PFNHelperThunk(_ReturnType(__stdcall *)(_Arg1, _Arg2)); - - template - _ReturnType _ReturnTypePFNHelperThunk(_ReturnType(__stdcall *)(_Arg1, _Arg2)); - - template - _TwoArgumentFunctor _ArgumentCountHelper(_ReturnType(__stdcall *)(_Arg1, _Arg2)); - - template - _Arg1 _Arg1PFNHelperThunk(_ReturnType(__fastcall *)(_Arg1, _Arg2)); - - template - _Arg2 _Arg2PFNHelperThunk(_ReturnType(__fastcall *)(_Arg1, _Arg2)); - - template - void _Arg3PFNHelperThunk(_ReturnType(__fastcall *)(_Arg1, _Arg2)); - - template - _ReturnType _ReturnTypePFNHelperThunk(_ReturnType(__fastcall *)(_Arg1, _Arg2)); - - template - _TwoArgumentFunctor _ArgumentCountHelper(_ReturnType(__fastcall *)(_Arg1, _Arg2)); - - // ******************** - // ONE ARGUMENT: - - template - _Arg1 _Arg1PFNHelperThunk(_ReturnType(__cdecl *)(_Arg1)); - - template - void _Arg2PFNHelperThunk(_ReturnType(__cdecl *)(_Arg1)); - - template - void _Arg3PFNHelperThunk(_ReturnType(__cdecl *)(_Arg1)); - - template - _ReturnType _ReturnTypePFNHelperThunk(_ReturnType(__cdecl *)(_Arg1)); - - template - _OneArgumentFunctor _ArgumentCountHelper(_ReturnType(__cdecl *)(_Arg1)); - - template - _Arg1 _Arg1PFNHelperThunk(_ReturnType(__stdcall *)(_Arg1)); - - template - void _Arg2PFNHelperThunk(_ReturnType(__stdcall *)(_Arg1)); - - template - void _Arg3PFNHelperThunk(_ReturnType(__stdcall *)(_Arg1)); - - template - _ReturnType _ReturnTypePFNHelperThunk(_ReturnType(__stdcall *)(_Arg1)); - - template - _OneArgumentFunctor _ArgumentCountHelper(_ReturnType(__stdcall *)(_Arg1)); - - template - _Arg1 _Arg1PFNHelperThunk(_ReturnType(__fastcall *)(_Arg1)); - - template - void _Arg2PFNHelperThunk(_ReturnType(__fastcall *)(_Arg1)); - - template - void _Arg3PFNHelperThunk(_ReturnType(__fastcall *)(_Arg1)); - - template - _ReturnType _ReturnTypePFNHelperThunk(_ReturnType(__fastcall *)(_Arg1)); - - template - _OneArgumentFunctor _ArgumentCountHelper(_ReturnType(__fastcall *)(_Arg1)); - - // ******************** - // ZERO ARGUMENT: - - template - void _Arg1PFNHelperThunk(_ReturnType(__cdecl *)()); - - template - void _Arg2PFNHelperThunk(_ReturnType(__cdecl *)()); - - template - void _Arg3PFNHelperThunk(_ReturnType(__cdecl *)()); - - template - _ReturnType _ReturnTypePFNHelperThunk(_ReturnType(__cdecl *)()); - - template - 
_ZeroArgumentFunctor _ArgumentCountHelper(_ReturnType(__cdecl *)()); - - template - void _Arg1PFNHelperThunk(_ReturnType(__stdcall *)()); - - template - void _Arg2PFNHelperThunk(_ReturnType(__stdcall *)()); - - template - void _Arg3PFNHelperThunk(_ReturnType(__stdcall *)()); - - template - _ReturnType _ReturnTypePFNHelperThunk(_ReturnType(__stdcall *)()); - - template - _ZeroArgumentFunctor _ArgumentCountHelper(_ReturnType(__stdcall *)()); - - template - void _Arg1PFNHelperThunk(_ReturnType(__fastcall *)()); - - template - void _Arg2PFNHelperThunk(_ReturnType(__fastcall *)()); - - template - void _Arg3PFNHelperThunk(_ReturnType(__fastcall *)()); - - template - _ReturnType _ReturnTypePFNHelperThunk(_ReturnType(__fastcall *)()); - - template - _ZeroArgumentFunctor _ArgumentCountHelper(_ReturnType(__fastcall *)()); - - template - struct _FunctorArguments - { - static const size_t _Count = 0; - }; - - template<> - struct _FunctorArguments<_OneArgumentFunctor> - { - static const size_t _Count = 1; - }; - - template<> - struct _FunctorArguments<_TwoArgumentFunctor> - { - static const size_t _Count = 2; - }; - - template<> - struct _FunctorArguments<_ThreeArgumentFunctor> - { - static const size_t _Count = 3; - }; - - template - struct _FunctorTypeTraits - { - typedef decltype(_ArgumentCountHelper(&(_T::operator()))) _ArgumentCountType; - static const size_t _ArgumentCount = _FunctorArguments<_ArgumentCountType>::_Count; - - typedef decltype(_ReturnTypeClassHelperThunk(&(_T::operator()))) _ReturnType; - typedef decltype(_Arg1ClassHelperThunk(&(_T::operator()))) _Argument1Type; - typedef decltype(_Arg2ClassHelperThunk(&(_T::operator()))) _Argument2Type; - typedef decltype(_Arg3ClassHelperThunk(&(_T::operator()))) _Argument3Type; - }; - - template - struct _FunctorTypeTraits<_T *> - { - typedef decltype(_ArgumentCountHelper(stdx::declval<_T*>())) _ArgumentCountType; - static const size_t _ArgumentCount = _FunctorArguments<_ArgumentCountType>::_Count; - - typedef decltype(_ReturnTypePFNHelperThunk(stdx::declval<_T*>())) _ReturnType; - typedef decltype(_Arg1PFNHelperThunk(stdx::declval<_T*>())) _Argument1Type; - typedef decltype(_Arg2PFNHelperThunk(stdx::declval<_T*>())) _Argument2Type; - typedef decltype(_Arg3PFNHelperThunk(stdx::declval<_T*>())) _Argument3Type; - }; - - task _To_task(); - - template auto _IsVoidConversionHelper(_Function _Func, int) -> typename decltype(_Func(_To_task()), std::true_type()); - template std::false_type _IsVoidConversionHelper(_Function _Func, ...); - - template std::true_type _VoidIsTaskHelper(task _Arg, int); - template std::false_type _VoidIsTaskHelper(T _Arg, ...); - - template(), 0)), std::true_type>::value, const size_t _Count = _FunctorTypeTraits<_Function>::_ArgumentCount> - struct _FunctionTypeTraits - { - typedef typename _Unhat::_Argument2Type>::_Value _FuncRetType; - static_assert(std::is_same::_Argument1Type, _ExpectedParameterType>::value || - std::is_same::_Argument1Type, task<_ExpectedParameterType>>::value, "incorrect parameter type for the callable object in 'then'; consider _ExpectedParameterType or task<_ExpectedParameterType> (see below)"); - - typedef decltype(_VoidIsTaskHelper(stdx::declval<_FunctorTypeTraits<_Function>::_Argument1Type>(), 0)) _Takes_task; - }; - - //if there is a continuation parameter, then must use void/no return value - template - struct _FunctionTypeTraits<_Function, _ExpectedParameterType, _IsVoidConversion, 1> - { - typedef void _FuncRetType; - static_assert(std::is_same::_Argument1Type, _ExpectedParameterType>::value || 
- std::is_same::_Argument1Type, task<_ExpectedParameterType>>::value, "incorrect parameter type for the callable object in 'then'; consider _ExpectedParameterType or task<_ExpectedParameterType> (see below)"); - - typedef decltype(_VoidIsTaskHelper(stdx::declval<_FunctorTypeTraits<_Function>::_Argument1Type>(), 0)) _Takes_task; - }; - - template - struct _FunctionTypeTraits<_Function, void, true, 1> - { - typedef void _FuncRetType; - static_assert(std::is_same::_Argument1Type, decltype(_To_task())>::value, "incorrect parameter type for the callable object in 'then'; consider _ExpectedParameterType or task<_ExpectedParameterType> (see below)"); - - typedef decltype(_VoidIsTaskHelper(stdx::declval<_FunctorTypeTraits<_Function>::_Argument1Type>(), 0)) _Takes_task; - }; - - template - struct _FunctionTypeTraits<_Function, void, false, 1> - { - typedef typename _Unhat::_Argument1Type>::_Value _FuncRetType; - - typedef std::false_type _Takes_task; - }; - - template - struct _FunctionTypeTraits<_Function, _ExpectedParameterType, _IsVoidConversion, 0> - { - typedef void _FuncRetType; - - typedef std::false_type _Takes_task; - }; - - template - struct _ContinuationTypeTraits - { - typedef typename task::_FuncRetType>::_TaskRetType_abi> _TaskOfType; - }; - - // _InitFunctorTypeTraits is used to decide whether a task constructed with a lambda should be unwrapped. Depending on how the variable is - // declared, the constructor may or may not perform unwrapping. For eg. - // - // This declaration SHOULD NOT cause unwrapping - // task> t1([]() -> task { - // task t2([]() {}); - // return t2; - // }); - // - // This declaration SHOULD cause unwrapping - // task> t1([]() -> task { - // task t2([]() {}); - // return t2; - // }); - // If the type of the task is the same as the return type of the function, no unwrapping should take place. Else normal rules apply. - template - struct _InitFunctorTypeTraits - { - typedef typename _TaskTypeTraits<_FuncRetType>::_AsyncKind _AsyncKind; - static const bool _IsAsyncTask = _TaskTypeTraits<_FuncRetType>::_IsAsyncTask; - static const bool _IsUnwrappedTaskOrAsync = _TaskTypeTraits<_FuncRetType>::_IsUnwrappedTaskOrAsync; - }; - - template - struct _InitFunctorTypeTraits - { - typedef _TypeSelectorNoAsync _AsyncKind; - static const bool _IsAsyncTask = false; - static const bool _IsUnwrappedTaskOrAsync = false; - }; - /// - /// Helper object used for LWT invocation. - /// - struct _TaskProcThunk - { - _TaskProcThunk(const std::function & _Callback) : - _M_func(_Callback) - { - } - - static void __cdecl _Bridge(void *_PData) - { - _TaskProcThunk *_PThunk = reinterpret_cast<_TaskProcThunk *>(_PData); -#if _MSC_VER >= 1800 - _Holder _ThunkHolder(_PThunk); -#endif - _PThunk->_M_func(); -#if _MSC_VER < 1800 - delete _PThunk; -#endif - } - private: -#if _MSC_VER >= 1800 - // RAII holder - struct _Holder - { - _Holder(_TaskProcThunk * _PThunk) : _M_pThunk(_PThunk) - { - } - - ~_Holder() - { - delete _M_pThunk; - } - - _TaskProcThunk * _M_pThunk; - - private: - _Holder& operator=(const _Holder&); - }; -#endif - std::function _M_func; - _TaskProcThunk& operator=(const _TaskProcThunk&); - }; - - /// - /// Schedule a functor with automatic inlining. Note that this is "fire and forget" scheduling, which cannot be - /// waited on or canceled after scheduling. - /// This schedule method will perform automatic inlining base on . - /// - /// - /// The user functor need to be scheduled. - /// - /// - /// The inlining scheduling policy for current functor. 
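The argument-count and argument-type thunks defined earlier feed _FunctorTypeTraits: a helper declaration is selected by overload resolution against &T::operator(), and decltype recovers the lambda's signature without any C++14 machinery. A condensed, self-contained sketch of that same technique follows; the names (FirstArgHelper, SimpleFunctorTraits, ...) are illustrative and not from this header, and only the one-argument const-call-operator case is shown.

    #include <type_traits>

    // Helper declarations are never defined: they are only used inside decltype,
    // i.e. in an unevaluated context.
    template <typename R, typename C, typename A1>
    A1 FirstArgHelper(R (C::*)(A1) const);   // non-mutable lambdas have a const operator()
    template <typename R, typename C, typename A1>
    R ReturnHelper(R (C::*)(A1) const);

    template <typename F>
    struct SimpleFunctorTraits
    {
        typedef decltype(FirstArgHelper(&F::operator())) Argument1Type;
        typedef decltype(ReturnHelper(&F::operator()))   ReturnType;
    };

    inline void SimpleFunctorTraitsSelfTest()
    {
        auto f = [](int x) { return x * 2.0; };
        static_assert(std::is_same<SimpleFunctorTraits<decltype(f)>::Argument1Type, int>::value, "argument type");
        static_assert(std::is_same<SimpleFunctorTraits<decltype(f)>::ReturnType, double>::value, "return type");
        (void)f;
    }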
- /// -#if _MSC_VER >= 1800 - typedef Concurrency::details::_TaskInliningMode_t _TaskInliningMode; -#else - typedef Concurrency::details::_TaskInliningMode _TaskInliningMode; -#endif - static void _ScheduleFuncWithAutoInline(const std::function & _Func, _TaskInliningMode _InliningMode) - { -#if _MSC_VER >= 1800 - Concurrency::details::_TaskCollection_t::_RunTask(&_TaskProcThunk::_Bridge, new _TaskProcThunk(_Func), _InliningMode); -#else - Concurrency::details::_StackGuard _Guard; - if (_Guard._ShouldInline(_InliningMode)) - { - _Func(); - } - else - { - Concurrency::details::_CurrentScheduler::_ScheduleTask(reinterpret_cast(&_TaskProcThunk::_Bridge), new _TaskProcThunk(_Func)); - } -#endif - } - class _ContextCallback - { - typedef std::function _CallbackFunction; - - public: - - static _ContextCallback _CaptureCurrent() - { - _ContextCallback _Context; - _Context._Capture(); - return _Context; - } - - ~_ContextCallback() - { - _Reset(); - } - - _ContextCallback(bool _DeferCapture = false) - { - if (_DeferCapture) - { - _M_context._M_captureMethod = _S_captureDeferred; - } - else - { - _M_context._M_pContextCallback = nullptr; - } - } - - // Resolves a context that was created as _S_captureDeferred based on the environment (ancestor, current context). - void _Resolve(bool _CaptureCurrent) - { - if (_M_context._M_captureMethod == _S_captureDeferred) - { - _M_context._M_pContextCallback = nullptr; - - if (_CaptureCurrent) - { - if (_IsCurrentOriginSTA()) - { - _Capture(); - } -#if _UITHREADCTXT_SUPPORT - else - { - // This method will fail if not called from the UI thread. - HRESULT _Hr = CaptureUiThreadContext(&_M_context._M_pContextCallback); - if (FAILED(_Hr)) - { - _M_context._M_pContextCallback = nullptr; - } - } -#endif // _UITHREADCTXT_SUPPORT - } - } - } - - void _Capture() - { - HRESULT _Hr = CoGetObjectContext(IID_IContextCallback, reinterpret_cast(&_M_context._M_pContextCallback)); - if (FAILED(_Hr)) - { - _M_context._M_pContextCallback = nullptr; - } - } - - _ContextCallback(const _ContextCallback& _Src) - { - _Assign(_Src._M_context._M_pContextCallback); - } - - _ContextCallback(_ContextCallback&& _Src) - { - _M_context._M_pContextCallback = _Src._M_context._M_pContextCallback; - _Src._M_context._M_pContextCallback = nullptr; - } - - _ContextCallback& operator=(const _ContextCallback& _Src) - { - if (this != &_Src) - { - _Reset(); - _Assign(_Src._M_context._M_pContextCallback); - } - return *this; - } - - _ContextCallback& operator=(_ContextCallback&& _Src) - { - if (this != &_Src) - { - _M_context._M_pContextCallback = _Src._M_context._M_pContextCallback; - _Src._M_context._M_pContextCallback = nullptr; - } - return *this; - } - - bool _HasCapturedContext() const - { - _CONCRT_ASSERT(_M_context._M_captureMethod != _S_captureDeferred); - return (_M_context._M_pContextCallback != nullptr); - } - - HRESULT _CallInContext(_CallbackFunction _Func) const - { - if (!_HasCapturedContext()) - { - _Func(); - } - else - { - ComCallData callData; - ZeroMemory(&callData, sizeof(callData)); - callData.pUserDefined = reinterpret_cast(&_Func); - - HRESULT _Hr = _M_context._M_pContextCallback->ContextCallback(&_Bridge, &callData, IID_ICallbackWithNoReentrancyToApplicationSTA, 5, nullptr); - if (FAILED(_Hr)) - { - return _Hr; - } - } - return S_OK; - } - - bool operator==(const _ContextCallback& _Rhs) const - { - return (_M_context._M_pContextCallback == _Rhs._M_context._M_pContextCallback); - } - - bool operator!=(const _ContextCallback& _Rhs) const - { - return !(operator==(_Rhs)); - } - - 
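_ContextCallback wraps CoGetObjectContext / IContextCallback so that a callback captured on one apartment can later be replayed in that apartment from any thread. A minimal usage sketch, assuming the surrounding details namespace and the Windows headers pulled in by this file are in scope; UpdateStateOnOriginalApartment is a hypothetical function.

    #include <thread>

    void ExampleCrossApartmentCall()
    {
        // Capture the calling thread's apartment (for example a UI STA thread).
        _ContextCallback ctx = _ContextCallback::_CaptureCurrent();

        std::thread([ctx]()
        {
            // Runs on a worker (MTA) thread; marshal back to the captured
            // apartment before touching apartment-affine state.
            ctx._CallInContext([]() -> HRESULT
            {
                UpdateStateOnOriginalApartment();   // hypothetical, apartment-bound work
                return S_OK;
            });
        }).detach();
    }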
private: - - void _Reset() - { - if (_M_context._M_captureMethod != _S_captureDeferred && _M_context._M_pContextCallback != nullptr) - { - _M_context._M_pContextCallback->Release(); - } - } - - void _Assign(IContextCallback *_PContextCallback) - { - _M_context._M_pContextCallback = _PContextCallback; - if (_M_context._M_captureMethod != _S_captureDeferred && _M_context._M_pContextCallback != nullptr) - { - _M_context._M_pContextCallback->AddRef(); - } - } - - static HRESULT __stdcall _Bridge(ComCallData *_PParam) - { - _CallbackFunction *pFunc = reinterpret_cast<_CallbackFunction *>(_PParam->pUserDefined); - return (*pFunc)(); - } - - // Returns the origin information for the caller (runtime / Windows Runtime apartment as far as task continuations need know) - static bool _IsCurrentOriginSTA() - { - APTTYPE _AptType; - APTTYPEQUALIFIER _AptTypeQualifier; - - HRESULT hr = CoGetApartmentType(&_AptType, &_AptTypeQualifier); - if (SUCCEEDED(hr)) - { - // We determine the origin of a task continuation by looking at where .then is called, so we can tell whether - // to need to marshal the continuation back to the originating apartment. If an STA thread is in executing in - // a neutral aparment when it schedules a continuation, we will not marshal continuations back to the STA, - // since variables used within a neutral apartment are expected to be apartment neutral. - switch (_AptType) - { - case APTTYPE_MAINSTA: - case APTTYPE_STA: - return true; - default: - break; - } - } - return false; - } - - union - { - IContextCallback *_M_pContextCallback; - size_t _M_captureMethod; - } _M_context; - - static const size_t _S_captureDeferred = 1; - }; - -#if _MSC_VER >= 1800 - template - struct _ResultHolder - { - void Set(const _Type& _type) - { - _Result = _type; - } - - _Type Get() - { - return _Result; - } - - _Type _Result; - }; - - template - struct _ResultHolder<_Type*> - { - void Set(_Type* const & _type) - { - _M_Result = _type; - } - - _Type* Get() - { - return _M_Result.Get(); - } - private: - // ::Platform::Agile handle specialization of all hats - // including ::Platform::String and ::Platform::Array - Agile<_Type*> _M_Result; - }; - - // - // The below are for composability with tasks auto-created from when_any / when_all / && / || constructs. 
- // - template - struct _ResultHolder> - { - void Set(const std::vector<_Type*>& _type) - { - _Result.reserve(_type.size()); - - for (auto _PTask = _type.begin(); _PTask != _type.end(); ++_PTask) - { - _Result.emplace_back(*_PTask); - } - } - - std::vector<_Type*> Get() - { - // Return vectory with the objects that are marshaled in the proper appartment - std::vector<_Type*> _Return; - _Return.reserve(_Result.size()); - - for (auto _PTask = _Result.begin(); _PTask != _Result.end(); ++_PTask) - { - _Return.push_back(_PTask->Get()); // Agile will marshal the object to appropriate appartment if neccessary - } - - return _Return; - } - - std::vector< Agile<_Type*> > _Result; - }; - - template - struct _ResultHolder > - { - void Set(const std::pair<_Type*, size_t>& _type) - { - _M_Result = _type; - } - - std::pair<_Type*, size_t> Get() - { - return std::make_pair(_M_Result.first, _M_Result.second); - } - private: - std::pair, size_t> _M_Result; - }; -#else - template - struct _ResultContext - { - static _ContextCallback _GetContext(bool /* _RuntimeAggregate */) - { - return _ContextCallback(); - } - - static _Type _GetValue(_Type _ObjInCtx, const _ContextCallback & /* _Ctx */, bool /* _RuntimeAggregate */) - { - return _ObjInCtx; - } - }; - - template::value> - struct _MarshalHelper - { - }; - template - struct _MarshalHelper<_Type, N, true> - { - static _Type* _Perform(_Type(&_ObjInCtx)[N], const _ContextCallback& _Ctx) - { - static_assert(__is_valid_winrt_type(_Type*), "must be a WinRT array compatible type"); - if (_ObjInCtx == nullptr) - { - return nullptr; - } - - HRESULT _Hr; - IStream * _PStream; - _Ctx._CallInContext([&]() -> HRESULT { - // It isn't safe to simply reinterpret_cast a hat type to IUnknown* because some types do not have a real vtable ptr. - // Instead, we could to create a property value to make it "grow" the vtable ptr but instead primitives are not marshalled. - - IUnknown * _PUnk = winrt_array_type::create(_ObjInCtx, N); - _Hr = CoMarshalInterThreadInterfaceInStream(winrt_type<_Type>::getuuid(), _PUnk, &_PStream); - return S_OK; - }); - - // With an APPX manifest, this call should never fail. - _CONCRT_ASSERT(SUCCEEDED(_Hr)); - - _Type* _Proxy; - // - // Cannot use IID_PPV_ARGS with ^ types. - // - _Hr = CoGetInterfaceAndReleaseStream(_PStream, winrt_type<_Type>::getuuid(), reinterpret_cast(&_Proxy)); - if (FAILED(_Hr)) - { - throw std::make_exception_ptr(_Hr); - } - return _Proxy; - } - }; - template - struct _MarshalHelper<_Type, 0, false> - { - static _Type* _Perform(_Type* _ObjInCtx, const _ContextCallback& _Ctx) - { - static_assert(std::is_base_of::value || __is_valid_winrt_type(_Type), "must be a COM or WinRT type"); - if (_ObjInCtx == nullptr) - { - return nullptr; - } - - HRESULT _Hr; - IStream * _PStream; - _Ctx._CallInContext([&]() -> HRESULT { - // It isn't safe to simply reinterpret_cast a hat type to IUnknown* because some types do not have a real vtable ptr. - // Instead, we could to create a property value to make it "grow" the vtable ptr but instead primitives are not marshalled. - - IUnknown * _PUnk = winrt_type<_Type>::create(_ObjInCtx); - _Hr = CoMarshalInterThreadInterfaceInStream(winrt_type<_Type>::getuuid(), _PUnk, &_PStream); - return S_OK; - }); - - // With an APPX manifest, this call should never fail. - _CONCRT_ASSERT(SUCCEEDED(_Hr)); - - _Type* _Proxy; - // - // Cannot use IID_PPV_ARGS with ^ types. 
- // - _Hr = CoGetInterfaceAndReleaseStream(_PStream, winrt_type<_Type>::getuuid(), reinterpret_cast(&_Proxy)); - if (FAILED(_Hr)) - { - throw std::make_exception_ptr(_Hr); - } - return _Proxy; - } - }; - - // Arrays must be converted to IPropertyValue objects. - - template<> - struct _MarshalHelper - { - static HSTRING _Perform(HSTRING _ObjInCtx, const _ContextCallback& _Ctx) - { - return _ObjInCtx; - } - }; - - template - _Type* _Marshal(_Type* _ObjInCtx, const _ContextCallback& _Ctx) - { - return _MarshalHelper<_Type>::_Perform(_ObjInCtx, _Ctx); - } - - template - struct _InContext - { - static _Type _Get(_Type _ObjInCtx, const _ContextCallback& _Ctx) - { - return _ObjInCtx; - } - }; - - template - struct _InContext<_Type*> - { - static _Type* _Get(_Type* _ObjInCtx, const _ContextCallback& _Ctx) - { - _ContextCallback _CurrentContext = _ContextCallback::_CaptureCurrent(); - if (!_Ctx._HasCapturedContext() || _Ctx == _CurrentContext) - { - return _ObjInCtx; - } - - // - // The object is from another apartment. If it's marshalable, do so. - // - return _Marshal<_Type>(_ObjInCtx, _Ctx); - } - }; - - template - struct _ResultContext<_Type*> - { - static _Type* _GetValue(_Type* _ObjInCtx, const _ContextCallback& _Ctx, bool /* _RuntimeAggregate */) - { - return _InContext<_Type*>::_Get(_ObjInCtx, _Ctx); - } - - static _ContextCallback _GetContext(bool /* _RuntimeAggregate */) - { - return _ContextCallback::_CaptureCurrent(); - } - }; - - // - // The below are for composability with tasks auto-created from when_any / when_all / && / || constructs. - // - template - struct _ResultContext> - { - static std::vector<_Type*> _GetValue(std::vector<_Type*> _ObjInCtx, const _ContextCallback& _Ctx, bool _RuntimeAggregate) - { - if (!_RuntimeAggregate) - { - return _ObjInCtx; - } - - _ContextCallback _CurrentContext = _ContextCallback::_CaptureCurrent(); - if (!_Ctx._HasCapturedContext() || _Ctx == _CurrentContext) - { - return _ObjInCtx; - } - - for (auto _It = _ObjInCtx.begin(); _It != _ObjInCtx.end(); ++_It) - { - *_It = _Marshal<_Type>(*_It, _Ctx); - } - - return _ObjInCtx; - } - - static _ContextCallback _GetContext(bool _RuntimeAggregate) - { - if (!_RuntimeAggregate) - { - return _ContextCallback(); - } - else - { - return _ContextCallback::_CaptureCurrent(); - } - } - }; - - template - struct _ResultContext> - { - static std::pair<_Type*, size_t> _GetValue(std::pair<_Type*, size_t> _ObjInCtx, const _ContextCallback& _Ctx, bool _RuntimeAggregate) - { - if (!_RuntimeAggregate) - { - return _ObjInCtx; - } - - _ContextCallback _CurrentContext = _ContextCallback::_CaptureCurrent(); - if (!_Ctx._HasCapturedContext() || _Ctx == _CurrentContext) - { - return _ObjInCtx; - } - - return std::pair<_Type*, size_t>(_Marshal<_Type>(_ObjInCtx.first, _Ctx), _ObjInCtx.second); - } - - static _ContextCallback _GetContext(bool _RuntimeAggregate) - { - if (!_RuntimeAggregate) - { - return _ContextCallback(); - } - else - { - return _ContextCallback::_CaptureCurrent(); - } - } - }; -#endif - // An exception thrown by the task body is captured in an exception holder and it is shared with all value based continuations rooted at the task. - // The exception is 'observed' if the user invokes get()/wait() on any of the tasks that are sharing this exception holder. If the exception - // is not observed by the time the internal object owned by the shared pointer destructs, the process will fail fast. 
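In practice the holder is "observed" by whatever rethrows the stored exception: a get() or wait() call, or a task-based continuation. A short sketch in the standard concurrency::task spelling (the HRESULT-based tasks defined in this header follow the same rule): handling the antecedent's exception inside a task-based continuation marks it observed and prevents the fail-fast in the holder's destructor.

    #include <ppltasks.h>
    #include <stdexcept>

    void ObserveExceptionExample()
    {
        auto t = concurrency::create_task([]()
        {
            throw std::runtime_error("boom");          // captured in an exception holder
        });

        // Task-based continuation: it receives the antecedent task itself.
        t.then([](concurrency::task<void> antecedent)
        {
            try
            {
                antecedent.get();                      // rethrows and marks the exception observed
            }
            catch (const std::exception&)
            {
                // handled here, so no fail-fast when the holder is destroyed
            }
        });
    }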
- struct _ExceptionHolder - { -#if _MSC_VER >= 1800 - private: - void ReportUnhandledError() - { - if (_M_winRTException != nullptr) - { - throw _M_winRTException.Get(); - } - } - public: - explicit _ExceptionHolder(const std::exception_ptr& _E, const _TaskCreationCallstack &_stackTrace) : - _M_exceptionObserved(0), _M_stdException(_E), _M_stackTrace(_stackTrace) - { - } - - explicit _ExceptionHolder(IRestrictedErrorInfo*& _E, const _TaskCreationCallstack &_stackTrace) : - _M_exceptionObserved(0), _M_winRTException(_E), _M_stackTrace(_stackTrace) - { - } -#else - explicit _ExceptionHolder(const std::exception_ptr& _E, void* _SourceAddressHint) : - _M_exceptionObserved(0), _M_stdException(_E), _M_disassembleMe(_SourceAddressHint) - { - } - - explicit _ExceptionHolder(IRestrictedErrorInfo*& _E, void* _SourceAddressHint) : - _M_exceptionObserved(0), _M_disassembleMe(_SourceAddressHint), _M_winRTException(_E) - { - } -#endif - __declspec(noinline) - ~_ExceptionHolder() - { - if (_M_exceptionObserved == 0) - { -#if _MSC_VER >= 1800 - // If you are trapped here, it means an exception thrown in task chain didn't get handled. - // Please add task-based continuation to handle all exceptions coming from tasks. - // this->_M_stackTrace keeps the creation callstack of the task generates this exception. - _REPORT_PPLTASK_UNOBSERVED_EXCEPTION(); -#else - // Disassemble at this->_M_disassembleMe to get to the source location right after either the creation of the task (constructor - // or then method) that encountered this exception, or the set_exception call for a task_completion_event. - Concurrency::details::_ReportUnobservedException(); -#endif - } - } - - void _RethrowUserException() - { - if (_M_exceptionObserved == 0) - { -#if _MSC_VER >= 1800 - Concurrency::details::atomic_exchange(_M_exceptionObserved, 1l); -#else - _InterlockedExchange(&_M_exceptionObserved, 1); -#endif - } - - if (_M_winRTException != nullptr) - { - throw _M_winRTException.Get(); - } - std::rethrow_exception(_M_stdException); - } - - // A variable that remembers if this exception was every rethrown into user code (and hence handled by the user). Exceptions that - // are unobserved when the exception holder is destructed will terminate the process. -#if _MSC_VER >= 1800 - Concurrency::details::atomic_long _M_exceptionObserved; -#else - long volatile _M_exceptionObserved; -#endif - - // Either _M_stdException or _M_winRTException is populated based on the type of exception encountered. - std::exception_ptr _M_stdException; - Microsoft::WRL::ComPtr _M_winRTException; - - // Disassembling this value will point to a source instruction right after a call instruction. If the call is to create_task, - // a task constructor or the then method, the task created by that method is the one that encountered this exception. If the call - // is to task_completion_event::set_exception, the set_exception method was the source of the exception. - // DO NOT REMOVE THIS VARIABLE. It is extremely helpful for debugging. 
-#if _MSC_VER >= 1800 - _TaskCreationCallstack _M_stackTrace; -#else - void* _M_disassembleMe; -#endif - }; - -#ifndef RUNTIMECLASS_Concurrency_winrt_details__AsyncInfoImpl_DEFINED -#define RUNTIMECLASS_Concurrency_winrt_details__AsyncInfoImpl_DEFINED - extern const __declspec(selectany) WCHAR RuntimeClass_Concurrency_winrt_details__AsyncInfoImpl[] = L"Concurrency_winrt.details._AsyncInfoImpl"; -#endif - - /// - /// Base converter class for converting asynchronous interfaces to IAsyncOperation - /// - template - struct _AsyncInfoImpl abstract : public Microsoft::WRL::RuntimeClass< - Microsoft::WRL::RuntimeClassFlags< Microsoft::WRL::RuntimeClassType::WinRt>, - Microsoft::WRL::Implements>> - { - InspectableClass(RuntimeClass_Concurrency_winrt_details__AsyncInfoImpl, BaseTrust) - public: - // The async action, action with progress or operation with progress that this stub forwards to. -#if _MSC_VER >= 1800 - Agile<_AsyncOperationType> _M_asyncInfo; -#else - Microsoft::WRL::ComPtr<_AsyncOperationType> _M_asyncInfo; - // The context in which this async info is valid - may be different from the context where the completion handler runs, - // and may require marshalling before it is used. - _ContextCallback _M_asyncInfoContext; -#endif - - Microsoft::WRL::ComPtr<_CompletionHandlerType> _M_CompletedHandler; - - _AsyncInfoImpl(_AsyncOperationType* _AsyncInfo) : _M_asyncInfo(_AsyncInfo) -#if _MSC_VER < 1800 - , _M_asyncInfoContext(_ContextCallback::_CaptureCurrent()) -#endif - {} - - public: - virtual HRESULT OnStart() { return S_OK; } - virtual void OnCancel() { - Microsoft::WRL::ComPtr pAsyncInfo; - HRESULT hr; -#if _MSC_VER >= 1800 - if (SUCCEEDED(hr = _M_asyncInfo.Get()->QueryInterface(pAsyncInfo.GetAddressOf()))) -#else - if (SUCCEEDED(hr = _M_asyncInfo.As(&pAsyncInfo))) -#endif - pAsyncInfo->Cancel(); - else - throw std::make_exception_ptr(hr); - } - virtual void OnClose() { - Microsoft::WRL::ComPtr pAsyncInfo; - HRESULT hr; -#if _MSC_VER >= 1800 - if (SUCCEEDED(hr = _M_asyncInfo.Get()->QueryInterface(pAsyncInfo.GetAddressOf()))) -#else - if (SUCCEEDED(hr = _M_asyncInfo.As(&pAsyncInfo))) -#endif - pAsyncInfo->Close(); - else - throw std::make_exception_ptr(hr); - } - - virtual STDMETHODIMP get_ErrorCode(HRESULT* errorCode) - { - Microsoft::WRL::ComPtr pAsyncInfo; - HRESULT hr; -#if _MSC_VER >= 1800 - if (SUCCEEDED(hr = _M_asyncInfo.Get()->QueryInterface(pAsyncInfo.GetAddressOf()))) -#else - if (SUCCEEDED(hr = _M_asyncInfo.As(&pAsyncInfo))) -#endif - return pAsyncInfo->get_ErrorCode(errorCode); - return hr; - } - - virtual STDMETHODIMP get_Id(UINT* id) - { - Microsoft::WRL::ComPtr pAsyncInfo; - HRESULT hr; -#if _MSC_VER >= 1800 - if (SUCCEEDED(hr = _M_asyncInfo.Get()->QueryInterface(pAsyncInfo.GetAddressOf()))) -#else - if (SUCCEEDED(hr = _M_asyncInfo.As(&pAsyncInfo))) -#endif - return pAsyncInfo->get_Id(id); - return hr; - } - - virtual STDMETHODIMP get_Status(ABI::Windows::Foundation::AsyncStatus *status) - { - Microsoft::WRL::ComPtr pAsyncInfo; - HRESULT hr; -#if _MSC_VER >= 1800 - if (SUCCEEDED(hr = _M_asyncInfo.Get()->QueryInterface(pAsyncInfo.GetAddressOf()))) -#else - if (SUCCEEDED(hr = _M_asyncInfo.As(&pAsyncInfo))) -#endif - return pAsyncInfo->get_Status(status); - return hr; - } - - virtual STDMETHODIMP GetResults(_Result_abi*) { throw std::runtime_error("derived class must implement"); } - - virtual STDMETHODIMP get_Completed(_CompletionHandlerType** handler) - { - if (!handler) return E_POINTER; - _M_CompletedHandler.CopyTo(handler); - return S_OK; - } - - virtual STDMETHODIMP 
put_Completed(_CompletionHandlerType* value) - { - _M_CompletedHandler = value; - Microsoft::WRL::ComPtr<_CompletionHandlerType> handler = Microsoft::WRL::Callback<_CompletionHandlerType>([&](_AsyncOperationType*, ABI::Windows::Foundation::AsyncStatus status) -> HRESULT { -#if _MSC_VER < 1800 - // Update the saved _M_asyncInfo with a proxy valid in the current context if required. Some Windows APIs return an IAsyncInfo - // that is only valid for the thread that called the API to retrieve. Since this completion handler can run on any thread, we - // need to ensure that the async info is valid in the current apartment. _M_asyncInfo will be accessed via calls to 'this' inside - // _AsyncInit. - _M_asyncInfo = _ResultContext<_AsyncOperationType*>::_GetValue(_M_asyncInfo.Get(), _M_asyncInfoContext, false); -#endif - return _M_CompletedHandler->Invoke(_M_asyncInfo.Get(), status); - }); -#if _MSC_VER >= 1800 - return _M_asyncInfo.Get()->put_Completed(handler.Get()); -#else - return _M_asyncInfo->put_Completed(handler.Get()); -#endif - } - }; - - extern const __declspec(selectany) WCHAR RuntimeClass_IAsyncOperationToAsyncOperationConverter[] = L"_IAsyncOperationToAsyncOperationConverter"; - - /// - /// Class _IAsyncOperationToAsyncOperationConverter is used to convert an instance of IAsyncOperationWithProgress into IAsyncOperation - /// - template - struct _IAsyncOperationToAsyncOperationConverter : - _AsyncInfoImpl, - ABI::Windows::Foundation::IAsyncOperationCompletedHandler<_Result>, - typename ABI::Windows::Foundation::Internal::GetAbiType*>()))>::type> - { - typedef typename ABI::Windows::Foundation::Internal::GetAbiType*>()))>::type _Result_abi; - - InspectableClass(RuntimeClass_IAsyncOperationToAsyncOperationConverter, BaseTrust) - public: - _IAsyncOperationToAsyncOperationConverter(ABI::Windows::Foundation::IAsyncOperation<_Result>* _Operation) : - _AsyncInfoImpl, - ABI::Windows::Foundation::IAsyncOperationCompletedHandler<_Result>, - _Result_abi>(_Operation) {} - public: - virtual STDMETHODIMP GetResults(_Result_abi* results) override { - if (!results) return E_POINTER; -#if _MSC_VER >= 1800 - return _M_asyncInfo.Get()->GetResults(results); -#else - return _M_asyncInfo->GetResults(results); -#endif - } - }; - - extern const __declspec(selectany) WCHAR RuntimeClass_IAsyncOperationWithProgressToAsyncOperationConverter[] = L"_IAsyncOperationWithProgressToAsyncOperationConverter"; - - /// - /// Class _IAsyncOperationWithProgressToAsyncOperationConverter is used to convert an instance of IAsyncOperationWithProgress into IAsyncOperation - /// - template - struct _IAsyncOperationWithProgressToAsyncOperationConverter : - _AsyncInfoImpl, - ABI::Windows::Foundation::IAsyncOperationWithProgressCompletedHandler<_Result, _Progress>, - typename ABI::Windows::Foundation::Internal::GetAbiType*>()))>::type> - { - typedef typename ABI::Windows::Foundation::Internal::GetAbiType*>()))>::type _Result_abi; - - InspectableClass(RuntimeClass_IAsyncOperationWithProgressToAsyncOperationConverter, BaseTrust) - public: - _IAsyncOperationWithProgressToAsyncOperationConverter(ABI::Windows::Foundation::IAsyncOperationWithProgress<_Result, _Progress>* _Operation) : - _AsyncInfoImpl, - ABI::Windows::Foundation::IAsyncOperationWithProgressCompletedHandler<_Result, _Progress>, - _Result_abi>(_Operation) {} - public: - virtual STDMETHODIMP GetResults(_Result_abi* results) override { - if (!results) return E_POINTER; -#if _MSC_VER >= 1800 - return _M_asyncInfo.Get()->GetResults(results); -#else - return 
_M_asyncInfo->GetResults(results); -#endif - } - }; - - extern const __declspec(selectany) WCHAR RuntimeClass_IAsyncActionToAsyncOperationConverter[] = L"_IAsyncActionToAsyncOperationConverter"; - - /// - /// Class _IAsyncActionToAsyncOperationConverter is used to convert an instance of IAsyncAction into IAsyncOperation<_Unit_type> - /// - struct _IAsyncActionToAsyncOperationConverter : - _AsyncInfoImpl - { - InspectableClass(RuntimeClass_IAsyncActionToAsyncOperationConverter, BaseTrust) - public: - _IAsyncActionToAsyncOperationConverter(ABI::Windows::Foundation::IAsyncAction* _Operation) : - _AsyncInfoImpl(_Operation) {} - - public: - virtual STDMETHODIMP GetResults(details::_Unit_type* results) - { - if (!results) return E_POINTER; - // Invoke GetResults on the IAsyncAction to allow exceptions to be thrown to higher layers before returning a dummy value. -#if _MSC_VER >= 1800 - HRESULT hr = _M_asyncInfo.Get()->GetResults(); -#else - HRESULT hr = _M_asyncInfo->GetResults(); -#endif - if (SUCCEEDED(hr)) *results = _Unit_type(); - return hr; - } - }; - - extern const __declspec(selectany) WCHAR RuntimeClass_IAsyncActionWithProgressToAsyncOperationConverter[] = L"_IAsyncActionWithProgressToAsyncOperationConverter"; - - /// - /// Class _IAsyncActionWithProgressToAsyncOperationConverter is used to convert an instance of IAsyncActionWithProgress into IAsyncOperation<_Unit_type> - /// - template - struct _IAsyncActionWithProgressToAsyncOperationConverter : - _AsyncInfoImpl, - ABI::Windows::Foundation::IAsyncActionWithProgressCompletedHandler<_Progress>, - _Unit_type> - { - InspectableClass(RuntimeClass_IAsyncActionWithProgressToAsyncOperationConverter, BaseTrust) - public: - _IAsyncActionWithProgressToAsyncOperationConverter(ABI::Windows::Foundation::IAsyncActionWithProgress<_Progress>* _Action) : - _AsyncInfoImpl, - ABI::Windows::Foundation::IAsyncActionWithProgressCompletedHandler<_Progress>, - _Unit_type>(_Action) {} - public: - virtual STDMETHODIMP GetResults(_Unit_type* results) override - { - if (!results) return E_POINTER; - // Invoke GetResults on the IAsyncActionWithProgress to allow exceptions to be thrown before returning a dummy value. -#if _MSC_VER >= 1800 - HRESULT hr = _M_asyncInfo.Get()->GetResults(); -#else - HRESULT hr = _M_asyncInfo->GetResults(); -#endif - if (SUCCEEDED(hr)) *results = _Unit_type(); - return hr; - } - }; -} - -/// -/// The task_continuation_context class allows you to specify where you would like a continuation to be executed. -/// It is only useful to use this class from a Windows Store app. For non-Windows Store apps, the task continuation's -/// execution context is determined by the runtime, and not configurable. -/// -/// -/**/ -class task_continuation_context : public details::_ContextCallback -{ -public: - - /// - /// Creates the default task continuation context. - /// - /// - /// The default continuation context. - /// - /// - /// The default context is used if you don't specifiy a continuation context when you call the then method. In Windows - /// applications for Windows 7 and below, as well as desktop applications on Windows 8 and higher, the runtime determines where - /// task continuations will execute. However, in a Windows Store app, the default continuation context for a continuation on an - /// apartment aware task is the apartment where then is invoked. - /// An apartment aware task is a task that unwraps a Windows Runtime IAsyncInfo interface, or a task that is descended from such - /// a task. 
Therefore, if you schedule a continuation on an apartment aware task in a Windows Runtime STA, the continuation will execute in
-    /// that STA.
-    /// A continuation on a non-apartment aware task will execute in a context the Runtime chooses.
-    ///
-    /**/
-    static task_continuation_context use_default()
-    {
-        // The callback context is created with the context set to CaptureDeferred and resolved when it is used in .then()
-        return task_continuation_context(true); // sets it to deferred, is resolved in the constructor of _ContinuationTaskHandle
-    }
-
-    ///
-    /// Creates a task continuation context which allows the Runtime to choose the execution context for a continuation.
-    ///
-    ///
-    /// A task continuation context that represents an arbitrary location.
-    ///
-    ///
-    /// When this continuation context is used the continuation will execute in a context the runtime chooses even if the antecedent task
-    /// is apartment aware.
-    /// use_arbitrary can be used to turn off the default behavior for a continuation on an apartment
-    /// aware task created in an STA.
-    /// This method is only available to Windows Store apps.
-    ///
-    /**/
-    static task_continuation_context use_arbitrary()
-    {
-        task_continuation_context _Arbitrary(true);
-        _Arbitrary._Resolve(false);
-        return _Arbitrary;
-    }
-
-    ///
-    /// Returns a task continuation context object that represents the current execution context.
-    ///
-    ///
-    /// The current execution context.
-    ///
-    ///
-    /// This method captures the caller's Windows Runtime context so that continuations can be executed in the right apartment.
-    /// The value returned by use_current can be used to indicate to the Runtime that the continuation should execute in
-    /// the captured context (STA vs MTA) regardless of whether or not the antecedent task is apartment aware. An apartment aware task is
-    /// a task that unwraps a Windows Runtime IAsyncInfo interface, or a task that is descended from such a task.
-    /// This method is only available to Windows Store apps.
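The three factory methods documented here (use_default, use_arbitrary, and use_current, whose declaration follows) differ only in where the continuation body is allowed to run. A rough illustration, using the standard concurrency::task spelling rather than the HRESULT-returning variant defined in this header; StartDownloadAsync, UpdateUi and LogInBackground are hypothetical names.

    auto t = StartDownloadAsync();     // apartment-aware task created on the UI (STA) thread

    // This continuation touches UI state: force it back to the calling apartment.
    t.then([](int bytes) { UpdateUi(bytes); },
           concurrency::task_continuation_context::use_current());

    // This continuation is apartment-neutral: let the runtime pick any thread.
    t.then([](int bytes) { LogInBackground(bytes); },
           concurrency::task_continuation_context::use_arbitrary());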
- /// - /**/ - static task_continuation_context use_current() - { - task_continuation_context _Current(true); - _Current._Resolve(true); - return _Current; - } - -private: - - task_continuation_context(bool _DeferCapture = false) : details::_ContextCallback(_DeferCapture) - { - } -}; - -#if _MSC_VER >= 1800 -class task_options; -namespace details -{ - struct _Internal_task_options - { - bool _M_hasPresetCreationCallstack; - _TaskCreationCallstack _M_presetCreationCallstack; - - void _set_creation_callstack(const _TaskCreationCallstack &_callstack) - { - _M_hasPresetCreationCallstack = true; - _M_presetCreationCallstack = _callstack; - } - _Internal_task_options() - { - _M_hasPresetCreationCallstack = false; - } - }; - - inline _Internal_task_options &_get_internal_task_options(task_options &options); - inline const _Internal_task_options &_get_internal_task_options(const task_options &options); -} -/// -/// Represents the allowed options for creating a task -/// -class task_options -{ -public: - - - /// - /// Default list of task creation options - /// - task_options() - : _M_Scheduler(Concurrency::get_ambient_scheduler()), - _M_CancellationToken(Concurrency::cancellation_token::none()), - _M_ContinuationContext(task_continuation_context::use_default()), - _M_HasCancellationToken(false), - _M_HasScheduler(false) - { - } - - /// - /// Task option that specify a cancellation token - /// - task_options(Concurrency::cancellation_token _Token) - : _M_Scheduler(Concurrency::get_ambient_scheduler()), - _M_CancellationToken(_Token), - _M_ContinuationContext(task_continuation_context::use_default()), - _M_HasCancellationToken(true), - _M_HasScheduler(false) - { - } - - /// - /// Task option that specify a continuation context. This is valid only for continuations (then) - /// - task_options(task_continuation_context _ContinuationContext) - : _M_Scheduler(Concurrency::get_ambient_scheduler()), - _M_CancellationToken(Concurrency::cancellation_token::none()), - _M_ContinuationContext(_ContinuationContext), - _M_HasCancellationToken(false), - _M_HasScheduler(false) - { - } - - /// - /// Task option that specify a cancellation token and a continuation context. 
This is valid only for continuations (then) - /// - task_options(Concurrency::cancellation_token _Token, task_continuation_context _ContinuationContext) - : _M_Scheduler(Concurrency::get_ambient_scheduler()), - _M_CancellationToken(_Token), - _M_ContinuationContext(_ContinuationContext), - _M_HasCancellationToken(false), - _M_HasScheduler(false) - { - } - - /// - /// Task option that specify a scheduler with shared lifetime - /// - template - task_options(std::shared_ptr<_SchedType> _Scheduler) - : _M_Scheduler(std::move(_Scheduler)), - _M_CancellationToken(cancellation_token::none()), - _M_ContinuationContext(task_continuation_context::use_default()), - _M_HasCancellationToken(false), - _M_HasScheduler(true) - { - } - - /// - /// Task option that specify a scheduler reference - /// - task_options(Concurrency::scheduler_interface& _Scheduler) - : _M_Scheduler(&_Scheduler), - _M_CancellationToken(Concurrency::cancellation_token::none()), - _M_ContinuationContext(task_continuation_context::use_default()), - _M_HasCancellationToken(false), - _M_HasScheduler(true) - { - } - - /// - /// Task option that specify a scheduler - /// - task_options(Concurrency::scheduler_ptr _Scheduler) - : _M_Scheduler(std::move(_Scheduler)), - _M_CancellationToken(Concurrency::cancellation_token::none()), - _M_ContinuationContext(task_continuation_context::use_default()), - _M_HasCancellationToken(false), - _M_HasScheduler(true) - { - } - - /// - /// Task option copy constructor - /// - task_options(const task_options& _TaskOptions) - : _M_Scheduler(_TaskOptions.get_scheduler()), - _M_CancellationToken(_TaskOptions.get_cancellation_token()), - _M_ContinuationContext(_TaskOptions.get_continuation_context()), - _M_HasCancellationToken(_TaskOptions.has_cancellation_token()), - _M_HasScheduler(_TaskOptions.has_scheduler()) - { - } - - /// - /// Sets the given token in the options - /// - void set_cancellation_token(Concurrency::cancellation_token _Token) - { - _M_CancellationToken = _Token; - _M_HasCancellationToken = true; - } - - /// - /// Sets the given continuation context in the options - /// - void set_continuation_context(task_continuation_context _ContinuationContext) - { - _M_ContinuationContext = _ContinuationContext; - } - - /// - /// Indicates whether a cancellation token was specified by the user - /// - bool has_cancellation_token() const - { - return _M_HasCancellationToken; - } - - /// - /// Returns the cancellation token - /// - Concurrency::cancellation_token get_cancellation_token() const - { - return _M_CancellationToken; - } - - /// - /// Returns the continuation context - /// - task_continuation_context get_continuation_context() const - { - return _M_ContinuationContext; - } - - /// - /// Indicates whether a scheduler n was specified by the user - /// - bool has_scheduler() const - { - return _M_HasScheduler; - } - - /// - /// Returns the scheduler - /// - Concurrency::scheduler_ptr get_scheduler() const - { - return _M_Scheduler; - } - -private: - - task_options const& operator=(task_options const& _Right); - friend details::_Internal_task_options &details::_get_internal_task_options(task_options &); - friend const details::_Internal_task_options &details::_get_internal_task_options(const task_options &); - - Concurrency::scheduler_ptr _M_Scheduler; - Concurrency::cancellation_token _M_CancellationToken; - task_continuation_context _M_ContinuationContext; - details::_Internal_task_options _M_InternalTaskOptions; - bool _M_HasCancellationToken; - bool _M_HasScheduler; -}; -#endif - -namespace 
details -{ -#if _MSC_VER >= 1800 - inline _Internal_task_options & _get_internal_task_options(task_options &options) - { - return options._M_InternalTaskOptions; - } - inline const _Internal_task_options & _get_internal_task_options(const task_options &options) - { - return options._M_InternalTaskOptions; - } -#endif - struct _Task_impl_base; - template struct _Task_impl; - - template - struct _Task_ptr - { - typedef std::shared_ptr<_Task_impl<_ReturnType>> _Type; -#if _MSC_VER >= 1800 - static _Type _Make(Concurrency::details::_CancellationTokenState * _Ct, Concurrency::scheduler_ptr _Scheduler_arg) { return std::make_shared<_Task_impl<_ReturnType>>(_Ct, _Scheduler_arg); } -#else - static _Type _Make(Concurrency::details::_CancellationTokenState * _Ct) { return std::make_shared<_Task_impl<_ReturnType>>(_Ct); } -#endif - }; -#if _MSC_VER >= 1800 - typedef Concurrency::details::_TaskCollection_t::_TaskProcHandle_t _UnrealizedChore_t; - typedef _UnrealizedChore_t _UnrealizedChore; - typedef Concurrency::extensibility::scoped_critical_section_t scoped_lock; - typedef Concurrency::extensibility::critical_section_t critical_section; - typedef Concurrency::details::atomic_size_t atomic_size_t; -#else - typedef Concurrency::details::_UnrealizedChore _UnrealizedChore; - typedef Concurrency::critical_section::scoped_lock scoped_lock; - typedef Concurrency::critical_section critical_section; - typedef volatile size_t atomic_size_t; -#endif - typedef std::shared_ptr<_Task_impl_base> _Task_ptr_base; - // The weak-typed base task handler for continuation tasks. - struct _ContinuationTaskHandleBase : _UnrealizedChore - { - _ContinuationTaskHandleBase * _M_next; - task_continuation_context _M_continuationContext; - bool _M_isTaskBasedContinuation; - - // This field gives inlining scheduling policy for current chore. - _TaskInliningMode _M_inliningMode; - - virtual _Task_ptr_base _GetTaskImplBase() const = 0; - - _ContinuationTaskHandleBase() : - _M_next(nullptr), _M_isTaskBasedContinuation(false), _M_continuationContext(task_continuation_context::use_default()), _M_inliningMode(Concurrency::details::_NoInline) - { - } - virtual ~_ContinuationTaskHandleBase() {} - }; -#if _MSC_VER >= 1800 -#if _PPLTASK_ASYNC_LOGGING - // GUID used for identifying causality logs from PPLTask - const ::Platform::Guid _PPLTaskCausalityPlatformID(0x7A76B220, 0xA758, 0x4E6E, 0xB0, 0xE0, 0xD7, 0xC6, 0xD7, 0x4A, 0x88, 0xFE); - - __declspec(selectany) volatile long _isCausalitySupported = 0; - - inline bool _IsCausalitySupported() - { -#if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP) - if (_isCausalitySupported == 0) - { - long _causality = 1; - OSVERSIONINFOEX _osvi = {}; - _osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX); - - // The Causality is supported on Windows version higher than Windows 8 - _osvi.dwMajorVersion = 6; - _osvi.dwMinorVersion = 3; - - DWORDLONG _conditionMask = 0; - VER_SET_CONDITION(_conditionMask, VER_MAJORVERSION, VER_GREATER_EQUAL); - VER_SET_CONDITION(_conditionMask, VER_MINORVERSION, VER_GREATER_EQUAL); - - if (::VerifyVersionInfo(&_osvi, VER_MAJORVERSION | VER_MINORVERSION, _conditionMask)) - { - _causality = 2; - } - - _isCausalitySupported = _causality; - return _causality == 2; - } - - return _isCausalitySupported == 2 ? true : false; -#else - return true; -#endif - } - - // Stateful logger rests inside task_impl_base. 
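The _IsCausalitySupported check above gates causality tracing on Windows 8.1 (NT 6.3) or newer for desktop builds, and unconditionally enables it otherwise. Because the diff text is hard to read, here is a cleaner standalone sketch of the same VerifyVersionInfo pattern; it is editorial, desktop-only, and not verbatim from the header.

    #include <windows.h>

    static bool IsWindows81OrLater()
    {
        OSVERSIONINFOEX osvi = {};
        osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
        osvi.dwMajorVersion = 6;    // Windows 8.1 is NT 6.3
        osvi.dwMinorVersion = 3;

        DWORDLONG conditionMask = 0;
        VER_SET_CONDITION(conditionMask, VER_MAJORVERSION, VER_GREATER_EQUAL);
        VER_SET_CONDITION(conditionMask, VER_MINORVERSION, VER_GREATER_EQUAL);

        return ::VerifyVersionInfo(&osvi, VER_MAJORVERSION | VER_MINORVERSION, conditionMask) != FALSE;
    }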
- struct _TaskEventLogger - { - _Task_impl_base *_M_task; - bool _M_scheduled; - bool _M_taskPostEventStarted; - - // Log before scheduling task - void _LogScheduleTask(bool _isContinuation) - { - if (details::_IsCausalitySupported()) - { - ::Windows::Foundation::Diagnostics::AsyncCausalityTracer::TraceOperationCreation(::Windows::Foundation::Diagnostics::CausalityTraceLevel::Required, ::Windows::Foundation::Diagnostics::CausalitySource::Library, - _PPLTaskCausalityPlatformID, reinterpret_cast(_M_task), - _isContinuation ? "Concurrency::PPLTask::ScheduleContinuationTask" : "Concurrency::PPLTask::ScheduleTask", 0); - _M_scheduled = true; - } - } - - // It will log the cancel event but not canceled state. _LogTaskCompleted will log the terminal state, which includes cancel state. - void _LogCancelTask() - { - if (details::_IsCausalitySupported()) - { - ::Windows::Foundation::Diagnostics::AsyncCausalityTracer::TraceOperationRelation(::Windows::Foundation::Diagnostics::CausalityTraceLevel::Important, ::Windows::Foundation::Diagnostics::CausalitySource::Library, - _PPLTaskCausalityPlatformID, reinterpret_cast(_M_task), ::Windows::Foundation::Diagnostics::CausalityRelation::Cancel); - - } - } - - // Log when task reaches terminal state. Note: the task can reach a terminal state (by cancellation or exception) without having run - void _LogTaskCompleted(); - - // Log when task body (which includes user lambda and other scheduling code) begin to run - void _LogTaskExecutionStarted() { } - - // Log when task body finish executing - void _LogTaskExecutionCompleted() - { - if (_M_taskPostEventStarted && details::_IsCausalitySupported()) - { - ::Windows::Foundation::Diagnostics::AsyncCausalityTracer::TraceSynchronousWorkCompletion(::Windows::Foundation::Diagnostics::CausalityTraceLevel::Required, ::Windows::Foundation::Diagnostics::CausalitySource::Library, - ::Windows::Foundation::Diagnostics::CausalitySynchronousWork::CompletionNotification); - } - } - - // Log right before user lambda being invoked - void _LogWorkItemStarted() - { - if (details::_IsCausalitySupported()) - { - ::Windows::Foundation::Diagnostics::AsyncCausalityTracer::TraceSynchronousWorkStart(::Windows::Foundation::Diagnostics::CausalityTraceLevel::Required, ::Windows::Foundation::Diagnostics::CausalitySource::Library, - _PPLTaskCausalityPlatformID, reinterpret_cast(_M_task), ::Windows::Foundation::Diagnostics::CausalitySynchronousWork::Execution); - } - } - - // Log right after user lambda being invoked - void _LogWorkItemCompleted() - { - if (details::_IsCausalitySupported()) - { - ::Windows::Foundation::Diagnostics::AsyncCausalityTracer::TraceSynchronousWorkCompletion(::Windows::Foundation::Diagnostics::CausalityTraceLevel::Required, ::Windows::Foundation::Diagnostics::CausalitySource::Library, - ::Windows::Foundation::Diagnostics::CausalitySynchronousWork::Execution); - - ::Windows::Foundation::Diagnostics::AsyncCausalityTracer::TraceSynchronousWorkStart(::Windows::Foundation::Diagnostics::CausalityTraceLevel::Required, ::Windows::Foundation::Diagnostics::CausalitySource::Library, - _PPLTaskCausalityPlatformID, reinterpret_cast(_M_task), ::Windows::Foundation::Diagnostics::CausalitySynchronousWork::CompletionNotification); - _M_taskPostEventStarted = true; - } - } - - _TaskEventLogger(_Task_impl_base *_task) : _M_task(_task) - { - _M_scheduled = false; - _M_taskPostEventStarted = false; - } - }; - - // Exception safe logger for user lambda - struct _TaskWorkItemRAIILogger - { - _TaskEventLogger &_M_logger; - 
_TaskWorkItemRAIILogger(_TaskEventLogger &_taskHandleLogger) : _M_logger(_taskHandleLogger) - { - _M_logger._LogWorkItemStarted(); - } - - ~_TaskWorkItemRAIILogger() - { - _M_logger._LogWorkItemCompleted(); - } - _TaskWorkItemRAIILogger &operator =(const _TaskWorkItemRAIILogger &); // cannot be assigned - }; - -#else - inline void _LogCancelTask(_Task_impl_base *) {} - struct _TaskEventLogger - { - void _LogScheduleTask(bool) {} - void _LogCancelTask() {} - void _LogWorkItemStarted() {} - void _LogWorkItemCompleted() {} - void _LogTaskExecutionStarted() {} - void _LogTaskExecutionCompleted() {} - void _LogTaskCompleted() {} - _TaskEventLogger(_Task_impl_base *) {} - }; - struct _TaskWorkItemRAIILogger - { - _TaskWorkItemRAIILogger(_TaskEventLogger &) {} - }; -#endif -#endif - /// - /// The _PPLTaskHandle is the strong-typed task handle base. All user task functions need to be wrapped in this task handler - /// to be executable by PPL. By deriving from a different _BaseTaskHandle, it can be used for both initial tasks and continuation tasks. - /// For initial tasks, _PPLTaskHandle will be derived from _UnrealizedChore, and for continuation tasks, it will be derived from - /// _ContinuationTaskHandleBase. The life time of the _PPLTaskHandle object is be managed by runtime if task handle is scheduled. - /// - /// - /// The result type of the _Task_impl. - /// - /// - /// The derived task handle class. The operator () needs to be implemented. - /// - /// - /// The base class from which _PPLTaskHandle should be derived. This is either _UnrealizedChore or _ContinuationTaskHandleBase. - /// - template - struct _PPLTaskHandle : _BaseTaskHandle - { - _PPLTaskHandle(const typename _Task_ptr<_ReturnType>::_Type & _PTask) : _M_pTask(_PTask) - { -#if _MSC_VER < 1800 - m_pFunction = reinterpret_cast (&_UnrealizedChore::_InvokeBridge<_PPLTaskHandle>); - _SetRuntimeOwnsLifetime(true); -#endif - } - virtual ~_PPLTaskHandle() { -#if _MSC_VER >= 1800 - // Here is the sink of all task completion code paths - _M_pTask->_M_taskEventLogger._LogTaskCompleted(); -#endif - } -#if _MSC_VER >= 1800 - virtual void invoke() const -#else - void operator()() const -#endif - { - // All exceptions should be rethrown to finish cleanup of the task collection. They will be caught and handled - // by the runtime. - _CONCRT_ASSERT(_M_pTask != nullptr); - if (!_M_pTask->_TransitionedToStarted()) { -#if _MSC_VER >= 1800 - static_cast(this)->_SyncCancelAndPropagateException(); -#endif - return; - } -#if _MSC_VER >= 1800 - _M_pTask->_M_taskEventLogger._LogTaskExecutionStarted(); -#endif - try - { - // All derived task handle must implement this contract function. - static_cast(this)->_Perform(); - } - catch (const Concurrency::task_canceled &) - { - _M_pTask->_Cancel(true); -#if _MSC_VER < 1800 - throw; -#endif - } - catch (const Concurrency::details::_Interruption_exception &) - { - _M_pTask->_Cancel(true); -#if _MSC_VER < 1800 - throw; -#endif - } - catch (IRestrictedErrorInfo*& _E) - { - _M_pTask->_CancelWithException(_E); -#if _MSC_VER < 1800 - throw; -#endif - } - catch (...) - { - _M_pTask->_CancelWithException(std::current_exception()); -#if _MSC_VER < 1800 - throw; -#endif - } -#if _MSC_VER >= 1800 - _M_pTask->_M_taskEventLogger._LogTaskExecutionCompleted(); -#endif - } - - // Cast _M_pTask pointer to "type-less" _Task_impl_base pointer, which can be used in _ContinuationTaskHandleBase. - // The return value should be automatically optimized by R-value ref. 
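The catch cascade in the task handle's invoke()/operator() above implements a simple contract: cooperative-cancellation exceptions cancel the task without storing an exception, while anything else is captured for later observation. The same contract, condensed into a hedged sketch where DoUserWork and taskImpl are hypothetical stand-ins for the wrapped lambda and the _Task_impl pointer:

    try
    {
        DoUserWork();                                              // the wrapped user lambda
    }
    catch (const Concurrency::task_canceled&)
    {
        taskImpl->_Cancel(true);                                   // cooperative cancellation, nothing stored
    }
    catch (...)
    {
        taskImpl->_CancelWithException(std::current_exception());  // stored in the exception holder
    }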
- _Task_ptr_base _GetTaskImplBase() const - { - return _M_pTask; - } - - typename _Task_ptr<_ReturnType>::_Type _M_pTask; - - private: - _PPLTaskHandle const & operator=(_PPLTaskHandle const&); // no assignment operator - }; - - /// - /// The base implementation of a first-class task. This class contains all the non-type specific - /// implementation details of the task. - /// - /**/ - struct _Task_impl_base - { - enum _TaskInternalState - { - // Tracks the state of the task, rather than the task collection on which the task is scheduled - _Created, - _Started, - _PendingCancel, - _Completed, - _Canceled - }; -#if _MSC_VER >= 1800 - _Task_impl_base(Concurrency::details::_CancellationTokenState * _PTokenState, Concurrency::scheduler_ptr _Scheduler_arg) - : _M_TaskState(_Created), - _M_fFromAsync(false), _M_fUnwrappedTask(false), - _M_pRegistration(nullptr), _M_Continuations(nullptr), _M_TaskCollection(_Scheduler_arg), - _M_taskEventLogger(this) -#else - _Task_impl_base(Concurrency::details::_CancellationTokenState * _PTokenState) : _M_TaskState(_Created), - _M_fFromAsync(false), _M_fRuntimeAggregate(false), _M_fUnwrappedTask(false), - _M_pRegistration(nullptr), _M_Continuations(nullptr), _M_pTaskCollection(nullptr), - _M_pTaskCreationAddressHint(nullptr) -#endif - { - // Set cancelation token - _M_pTokenState = _PTokenState; - _CONCRT_ASSERT(_M_pTokenState != nullptr); - if (_M_pTokenState != Concurrency::details::_CancellationTokenState::_None()) - _M_pTokenState->_Reference(); - - } - - virtual ~_Task_impl_base() - { - _CONCRT_ASSERT(_M_pTokenState != nullptr); - if (_M_pTokenState != Concurrency::details::_CancellationTokenState::_None()) - { - _M_pTokenState->_Release(); - } -#if _MSC_VER < 1800 - if (_M_pTaskCollection != nullptr) - { - _M_pTaskCollection->_Release(); - _M_pTaskCollection = nullptr; - } -#endif - } - - task_status _Wait() - { - bool _DoWait = true; - - if (_IsNonBlockingThread()) - { - // In order to prevent Windows Runtime STA threads from blocking the UI, calling task.wait() task.get() is illegal - // if task has not been completed. - if (!_IsCompleted() && !_IsCanceled()) - { - throw Concurrency::invalid_operation("Illegal to wait on a task in a Windows Runtime STA"); - } - else - { - // Task Continuations are 'scheduled' *inside* the chore that is executing on the ancestors's task group. If a continuation - // needs to be marshalled to a different apartment, instead of scheduling, we make a synchronous cross apartment COM - // call to execute the continuation. If it then happens to do something which waits on the ancestor (say it calls .get(), which - // task based continuations are wont to do), waiting on the task group results in on the chore that is making this - // synchronous callback, which causes a deadlock. To avoid this, we test the state ancestor's event , and we will NOT wait on - // if it has finished execution (which means now we are on the inline synchronous callback). - _DoWait = false; - } - } - if (_DoWait) - { -#if _MSC_VER < 1800 - // Wait for the task to be actually scheduled, otherwise the underlying task collection - // might not be created yet. If we don't wait, we will miss the chance to inline this task. - _M_Scheduled.wait(); - - - // A PPL task created by a task_completion_event does not have an underlying TaskCollection. For - // These tasks, a call to wait should wait for the event to be set. The TaskCollection must either - // be nullptr or allocated (the setting of _M_Scheduled) ensures that. 
-#endif - // If this task was created from a Windows Runtime async operation, do not attempt to inline it. The - // async operation will take place on a thread in the appropriate apartment Simply wait for the completed - // event to be set. -#if _MSC_VER >= 1800 - if (_M_fFromAsync) -#else - if ((_M_pTaskCollection == nullptr) || _M_fFromAsync) -#endif - { -#if _MSC_VER >= 1800 - _M_TaskCollection._Wait(); -#else - _M_Completed.wait(); -#endif - } - else - { - // Wait on the task collection to complete. The task collection is guaranteed to still be - // valid since the task must be still within scope so that the _Task_impl_base destructor - // has not yet been called. This call to _Wait potentially inlines execution of work. - try - { - // Invoking wait on a task collection resets the state of the task collection. This means that - // if the task collection itself were canceled, or had encountered an exception, only the first - // call to wait will receive this status. However, both cancellation and exceptions flowing through - // tasks set state in the task impl itself. - - // When it returns cancelled, either work chore or the cancel thread should already have set task's state - // properly -- cancelled state or completed state (because there was no interruption point). - // For tasks with unwrapped tasks, we should not change the state of current task, since the unwrapped task are still running. -#if _MSC_VER >= 1800 - _M_TaskCollection._RunAndWait(); -#else - _M_pTaskCollection->_RunAndWait(); -#endif - } - catch (Concurrency::details::_Interruption_exception&) - { - // The _TaskCollection will never be an interruption point since it has a none token. - _CONCRT_ASSERT(false); - } - catch (Concurrency::task_canceled&) - { - // task_canceled is a special exception thrown by cancel_current_task. The spec states that cancel_current_task - // must be called from code that is executed within the task (throwing it from parallel work created by and waited - // upon by the task is acceptable). We can safely assume that the task wrapper _PPLTaskHandle::operator() has seen - // the exception and canceled the task. Swallow the exception here. - _CONCRT_ASSERT(_IsCanceled()); - } - catch (IRestrictedErrorInfo*& _E) - { - // Its possible the task body hasn't seen the exception, if so we need to cancel with exception here. - if(!_HasUserException()) - { - _CancelWithException(_E); - } - // Rethrow will mark the exception as observed. - _M_exceptionHolder->_RethrowUserException(); - } - catch (...) - { - // Its possible the task body hasn't seen the exception, if so we need to cancel with exception here. - if (!_HasUserException()) - { - _CancelWithException(std::current_exception()); - } - // Rethrow will mark the exception as observed. - _M_exceptionHolder->_RethrowUserException(); - } - - // If the lambda body for this task (executed or waited upon in _RunAndWait above) happened to return a task - // which is to be unwrapped and plumbed to the output of this task, we must not only wait on the lambda body, we must - // wait on the **INNER** body. It is in theory possible that we could inline such if we plumb a series of things through; - // however, this takes the tact of simply waiting upon the completion signal. 
- if (_M_fUnwrappedTask) - { -#if _MSC_VER >= 1800 - _M_TaskCollection._Wait(); -#else - _M_Completed.wait(); -#endif - } - } - } - - if (_HasUserException()) - { - _M_exceptionHolder->_RethrowUserException(); - } - else if (_IsCanceled()) - { - return Concurrency::canceled; - } - _CONCRT_ASSERT(_IsCompleted()); - return Concurrency::completed; - } - /// - /// Requests cancellation on the task and schedules continuations if the task can be transitioned to a terminal state. - /// - /// - /// Set to true if the cancel takes place as a result of the task body encountering an exception, or because an ancestor or task_completion_event the task - /// was registered with were canceled with an exception. A synchronous cancel is one that assures the task could not be running on a different thread at - /// the time the cancellation is in progress. An asynchronous cancel is one where the thread performing the cancel has no control over the thread that could - /// be executing the task, that is the task could execute concurrently while the cancellation is in progress. - /// - /// - /// Whether an exception other than the internal runtime cancellation exceptions caused this cancellation. - /// - /// - /// Whether this exception came from an ancestor task or a task_completion_event as opposed to an exception that was encountered by the task itself. Only valid when - /// _UserException is set to true. - /// - /// - /// The exception holder that represents the exception. Only valid when _UserException is set to true. - /// - virtual bool _CancelAndRunContinuations(bool _SynchronousCancel, bool _UserException, bool _PropagatedFromAncestor, const std::shared_ptr<_ExceptionHolder>& _ExHolder) = 0; - - bool _Cancel(bool _SynchronousCancel) - { - // Send in a dummy value for exception. It is not used when the first parameter is false. - return _CancelAndRunContinuations(_SynchronousCancel, false, false, _M_exceptionHolder); - } - - bool _CancelWithExceptionHolder(const std::shared_ptr<_ExceptionHolder>& _ExHolder, bool _PropagatedFromAncestor) - { - // This task was canceled because an ancestor task encountered an exception. - return _CancelAndRunContinuations(true, true, _PropagatedFromAncestor, _ExHolder); - } - - bool _CancelWithException(IRestrictedErrorInfo*& _Exception) - { - // This task was canceled because the task body encountered an exception. - _CONCRT_ASSERT(!_HasUserException()); -#if _MSC_VER >= 1800 - return _CancelAndRunContinuations(true, true, false, std::make_shared<_ExceptionHolder>(_Exception, _GetTaskCreationCallstack())); -#else - return _CancelAndRunContinuations(true, true, false, std::make_shared<_ExceptionHolder>(_Exception, _GetTaskCreationAddressHint())); -#endif - } - bool _CancelWithException(const std::exception_ptr& _Exception) - { - // This task was canceled because the task body encountered an exception. 
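The tail of _Wait above maps the terminal task state onto the task_status handed back to the caller. A short sketch of how that status is consumed (illustrative helper name, standard <ppltasks.h> API assumed):

    // Illustrative sketch; not part of this header.
    #include <ppltasks.h>

    void wait_status_sketch()
    {
        concurrency::cancellation_token_source cts;
        auto t = concurrency::create_task([] { /* work */ }, cts.get_token());

        cts.cancel();                              // request cancellation

        if (t.wait() == concurrency::canceled)     // the _Wait() path above returned Concurrency::canceled
        {
            // the task ended in the _Canceled state; wait() does not throw for plain cancellation
        }
        // otherwise concurrency::completed: the body finished before the cancel was observed
    }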
- _CONCRT_ASSERT(!_HasUserException()); -#if _MSC_VER >= 1800 - return _CancelAndRunContinuations(true, true, false, std::make_shared<_ExceptionHolder>(_Exception, _GetTaskCreationCallstack())); -#else - return _CancelAndRunContinuations(true, true, false, std::make_shared<_ExceptionHolder>(_Exception, _GetTaskCreationAddressHint())); -#endif - } - -#if _MSC_VER >= 1800 - void _RegisterCancellation(std::weak_ptr<_Task_impl_base> _WeakPtr) -#else - void _RegisterCancellation() -#endif - { - _CONCRT_ASSERT(Concurrency::details::_CancellationTokenState::_IsValid(_M_pTokenState)); -#if _MSC_VER >= 1800 - auto _CancellationCallback = [_WeakPtr](){ - // Taking ownership of the task prevents dead lock during destruction - // if the destructor waits for the cancellations to be finished - auto _task = _WeakPtr.lock(); - if (_task != nullptr) - _task->_Cancel(false); - }; - - _M_pRegistration = new Concurrency::details::_CancellationTokenCallback(_CancellationCallback); - _M_pTokenState->_RegisterCallback(_M_pRegistration); -#else - _M_pRegistration = _M_pTokenState->_RegisterCallback(reinterpret_cast(&_CancelViaToken), (_Task_impl_base *)this); -#endif - } - - void _DeregisterCancellation() - { - if (_M_pRegistration != nullptr) - { - _M_pTokenState->_DeregisterCallback(_M_pRegistration); - _M_pRegistration->_Release(); - _M_pRegistration = nullptr; - } - } -#if _MSC_VER < 1800 - static void _CancelViaToken(_Task_impl_base *_PImpl) - { - _PImpl->_Cancel(false); - } -#endif - bool _IsCreated() - { - return (_M_TaskState == _Created); - } - - bool _IsStarted() - { - return (_M_TaskState == _Started); - } - - bool _IsPendingCancel() - { - return (_M_TaskState == _PendingCancel); - } - - bool _IsCompleted() - { - return (_M_TaskState == _Completed); - } - - bool _IsCanceled() - { - return (_M_TaskState == _Canceled); - } - - bool _HasUserException() - { - return static_cast(_M_exceptionHolder); - } -#if _MSC_VER < 1800 - void _SetScheduledEvent() - { - _M_Scheduled.set(); - } -#endif - const std::shared_ptr<_ExceptionHolder>& _GetExceptionHolder() - { - _CONCRT_ASSERT(_HasUserException()); - return _M_exceptionHolder; - } - - bool _IsApartmentAware() - { - return _M_fFromAsync; - } - - void _SetAsync(bool _Async = true) - { - _M_fFromAsync = _Async; - } -#if _MSC_VER >= 1800 - _TaskCreationCallstack _GetTaskCreationCallstack() - { - return _M_pTaskCreationCallstack; - } - - void _SetTaskCreationCallstack(const _TaskCreationCallstack &_Callstack) - { - _M_pTaskCreationCallstack = _Callstack; - } -#else - void* _GetTaskCreationAddressHint() - { - return _M_pTaskCreationAddressHint; - } - - void _SetTaskCreationAddressHint(void* _AddressHint) - { - _M_pTaskCreationAddressHint = _AddressHint; - } -#endif - /// - /// Helper function to schedule the task on the Task Collection. - /// - /// - /// The task chore handle that need to be executed. - /// - /// - /// The inlining scheduling policy for current _PTaskHandle. - /// - void _ScheduleTask(_UnrealizedChore * _PTaskHandle, _TaskInliningMode _InliningMode) - { -#if _MSC_VER < 1800 - // Construct the task collection; We use none token to provent it becoming interruption point. - _M_pTaskCollection = Concurrency::details::_AsyncTaskCollection::_NewCollection(Concurrency::details::_CancellationTokenState::_None()); - // _M_pTaskCollection->_ScheduleWithAutoInline will schedule the chore onto AsyncTaskCollection with automatic inlining, in a way that honors cancellation etc. 
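For reference, the public flow that ends up in _RegisterCancellation/_Cancel above looks roughly like this (illustrative helper name, standard <ppltasks.h> API assumed; the callback registered on the token is what eventually invokes _Cancel(false)):

    // Illustrative sketch; not part of this header.
    #include <ppltasks.h>

    void cancellation_sketch()
    {
        concurrency::cancellation_token_source cts;

        auto t = concurrency::create_task([]
        {
            for (;;)
            {
                if (concurrency::is_task_cancellation_requested())
                    concurrency::cancel_current_task();   // throws task_canceled, acknowledged by the task handle
            }
        }, cts.get_token());

        cts.cancel();   // fires the callback registered by _RegisterCancellation
        t.wait();       // returns concurrency::canceled once the body acknowledges
    }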
-#endif - try - { -#if _MSC_VER >= 1800 - _M_TaskCollection._ScheduleTask(_PTaskHandle, _InliningMode); -#else - // Do not need to check its returning state, more details please refer to _Wait method. - _M_pTaskCollection->_ScheduleWithAutoInline(_PTaskHandle, _InliningMode); -#endif - } - catch (const Concurrency::task_canceled &) - { - // task_canceled is a special exception thrown by cancel_current_task. The spec states that cancel_current_task - // must be called from code that is executed within the task (throwing it from parallel work created by and waited - // upon by the task is acceptable). We can safely assume that the task wrapper _PPLTaskHandle::operator() has seen - // the exception and canceled the task. Swallow the exception here. - _CONCRT_ASSERT(_IsCanceled()); - } - catch (const Concurrency::details::_Interruption_exception &) - { - // The _TaskCollection will never be an interruption point since it has a none token. - _CONCRT_ASSERT(false); - } - catch (...) - { - // This exception could only have come from within the chore body. It should've been caught - // and the task should be canceled with exception. Swallow the exception here. - _CONCRT_ASSERT(_HasUserException()); - } -#if _MSC_VER < 1800 - // Set the event in case anyone is waiting to notify that this task has been scheduled. In the case where we - // execute the chore inline, the event should be set after the chore has executed, to prevent a different thread - // performing a wait on the task from waiting on the task collection before the chore is actually added to it, - // and thereby returning from the wait() before the chore has executed. - _SetScheduledEvent(); -#endif - } - - /// - /// Function executes a continuation. This function is recorded by a parent task implementation - /// when a continuation is created in order to execute later. - /// - /// - /// The continuation task chore handle that need to be executed. - /// - /**/ - void _RunContinuation(_ContinuationTaskHandleBase * _PTaskHandle) - { - _Task_ptr_base _ImplBase = _PTaskHandle->_GetTaskImplBase(); - if (_IsCanceled() && !_PTaskHandle->_M_isTaskBasedContinuation) - { - if (_HasUserException()) - { - // If the ancestor encountered an exception, transfer the exception to the continuation - // This traverses down the tree to propagate the exception. - _ImplBase->_CancelWithExceptionHolder(_GetExceptionHolder(), true); - } - else - { - // If the ancestor was canceled, then your own execution should be canceled. - // This traverses down the tree to cancel it. - _ImplBase->_Cancel(true); - } - } - else - { - // This can only run when the ancestor has completed or it's a task based continuation that fires when a task is canceled - // (with or without a user exception). - _CONCRT_ASSERT(_IsCompleted() || _PTaskHandle->_M_isTaskBasedContinuation); - -#if _MSC_VER >= 1800 - _CONCRT_ASSERT(!_ImplBase->_IsCanceled()); - return _ImplBase->_ScheduleContinuationTask(_PTaskHandle); -#else - // If it has been canceled here (before starting), do nothing. The guy firing cancel will do the clean up. - if (!_ImplBase->_IsCanceled()) - { - return _ImplBase->_ScheduleContinuationTask(_PTaskHandle); - } -#endif - } - - // If the handle is not scheduled, we need to manually delete it. 
- delete _PTaskHandle; - } - - // Schedule a continuation to run - void _ScheduleContinuationTask(_ContinuationTaskHandleBase * _PTaskHandle) - { -#if _MSC_VER >= 1800 - _M_taskEventLogger._LogScheduleTask(true); -#endif - // Ensure that the continuation runs in proper context (this might be on a Concurrency Runtime thread or in a different Windows Runtime apartment) - if (_PTaskHandle->_M_continuationContext._HasCapturedContext()) - { - // For those continuations need to be scheduled inside captured context, we will try to apply automatic inlining to their inline modes, - // if they haven't been specified as _ForceInline yet. This change will encourage those continuations to be executed inline so that reduce - // the cost of marshaling. - // For normal continuations we won't do any change here, and their inline policies are completely decided by ._ThenImpl method. - if (_PTaskHandle->_M_inliningMode != Concurrency::details::_ForceInline) - { - _PTaskHandle->_M_inliningMode = Concurrency::details::_DefaultAutoInline; - } - details::_ScheduleFuncWithAutoInline([_PTaskHandle]() -> HRESULT { - // Note that we cannot directly capture "this" pointer, instead, we should use _TaskImplPtr, a shared_ptr to the _Task_impl_base. - // Because "this" pointer will be invalid as soon as _PTaskHandle get deleted. _PTaskHandle will be deleted after being scheduled. - auto _TaskImplPtr = _PTaskHandle->_GetTaskImplBase(); - if (details::_ContextCallback::_CaptureCurrent() == _PTaskHandle->_M_continuationContext) - { - _TaskImplPtr->_ScheduleTask(_PTaskHandle, Concurrency::details::_ForceInline); - } - else - { - // - // It's entirely possible that the attempt to marshal the call into a differing context will fail. In this case, we need to handle - // the exception and mark the continuation as canceled with the appropriate exception. There is one slight hitch to this: - // - // NOTE: COM's legacy behavior is to swallow SEH exceptions and marshal them back as HRESULTS. This will in effect turn an SEH into - // a C++ exception that gets tagged on the task. One unfortunate result of this is that various pieces of the task infrastructure will - // not be in a valid state after this in /EHsc (due to the lack of destructors running, etc...). - // - try - { - // Dev10 compiler needs this! - auto _PTaskHandle1 = _PTaskHandle; - _PTaskHandle->_M_continuationContext._CallInContext([_PTaskHandle1, _TaskImplPtr]() -> HRESULT { - _TaskImplPtr->_ScheduleTask(_PTaskHandle1, Concurrency::details::_ForceInline); - return S_OK; - }); - } - catch (IRestrictedErrorInfo*& _E) - { - _TaskImplPtr->_CancelWithException(_E); - } - catch (...) - { - _TaskImplPtr->_CancelWithException(std::current_exception()); - } - } - return S_OK; - }, _PTaskHandle->_M_inliningMode); - } - else - { - _ScheduleTask(_PTaskHandle, _PTaskHandle->_M_inliningMode); - } - } - - /// - /// Schedule the actual continuation. This will either schedule the function on the continuation task's implementation - /// if the task has completed or append it to a list of functions to execute when the task actually does complete. - /// - /// - /// The input type of the task. - /// - /// - /// The output type of the task. - /// - /**/ - void _ScheduleContinuation(_ContinuationTaskHandleBase * _PTaskHandle) - { - enum { _Nothing, _Schedule, _Cancel, _CancelWithException } _Do = _Nothing; - - // If the task has canceled, cancel the continuation. If the task has completed, execute the continuation right away. 
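_ScheduleContinuationTask above inspects the captured continuation context to decide whether the handle can be scheduled in place or must be marshalled with _CallInContext. From the public API this is controlled per continuation (illustrative helper name, standard <ppltasks.h> API assumed; use_current() is only meaningful inside a Windows Runtime apartment):

    // Illustrative sketch; not part of this header.
    #include <ppltasks.h>

    void continuation_context_sketch()
    {
        auto t = concurrency::create_task([] { return 42; });

        // No captured context: the continuation may run on any Concurrency Runtime thread.
        t.then([](int value)
        {
            return value + 1;
        }, concurrency::task_continuation_context::use_arbitrary());

        // use_current() would capture the calling apartment instead, forcing the
        // marshalling path seen above when the task completes on another thread.
    }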
- // Otherwise, add it to the list of pending continuations - { - scoped_lock _LockHolder(_M_ContinuationsCritSec); - if (_IsCompleted() || (_IsCanceled() && _PTaskHandle->_M_isTaskBasedContinuation)) - { - _Do = _Schedule; - } - else if (_IsCanceled()) - { - if (_HasUserException()) - { - _Do = _CancelWithException; - } - else - { - _Do = _Cancel; - } - } - else - { - // chain itself on the continuation chain. - _PTaskHandle->_M_next = _M_Continuations; - _M_Continuations = _PTaskHandle; - } - } - - // Cancellation and execution of continuations should be performed after releasing the lock. Continuations off of - // async tasks may execute inline. - switch (_Do) - { - case _Schedule: - { - _PTaskHandle->_GetTaskImplBase()->_ScheduleContinuationTask(_PTaskHandle); - break; - } - case _Cancel: - { - // If the ancestor was canceled, then your own execution should be canceled. - // This traverses down the tree to cancel it. - _PTaskHandle->_GetTaskImplBase()->_Cancel(true); - - delete _PTaskHandle; - break; - } - case _CancelWithException: - { - // If the ancestor encountered an exception, transfer the exception to the continuation - // This traverses down the tree to propagate the exception. - _PTaskHandle->_GetTaskImplBase()->_CancelWithExceptionHolder(_GetExceptionHolder(), true); - - delete _PTaskHandle; - break; - } - case _Nothing: - default: - // In this case, we have inserted continuation to continuation chain, - // nothing more need to be done, just leave. - break; - } - } - - void _RunTaskContinuations() - { - // The link list can no longer be modified at this point, - // since all following up continuations will be scheduled by themselves. - _ContinuationList _Cur = _M_Continuations, _Next; - _M_Continuations = nullptr; - while (_Cur) - { - // Current node might be deleted after running, - // so we must fetch the next first. - _Next = _Cur->_M_next; - _RunContinuation(_Cur); - _Cur = _Next; - } - } - static bool _IsNonBlockingThread() - { - APTTYPE _AptType; - APTTYPEQUALIFIER _AptTypeQualifier; - - HRESULT hr = CoGetApartmentType(&_AptType, &_AptTypeQualifier); - // - // If it failed, it's not a Windows Runtime/COM initialized thread. This is not a failure. - // - if (SUCCEEDED(hr)) - { - switch (_AptType) - { - case APTTYPE_STA: - case APTTYPE_MAINSTA: - return true; - break; - case APTTYPE_NA: - switch (_AptTypeQualifier) - { - // A thread executing in a neutral apartment is either STA or MTA. To find out if this thread is allowed - // to wait, we check the app qualifier. If it is an STA thread executing in a neutral apartment, waiting - // is illegal, because the thread is responsible for pumping messages and waiting on a task could take the - // thread out of circulation for a while. - case APTTYPEQUALIFIER_NA_ON_STA: - case APTTYPEQUALIFIER_NA_ON_MAINSTA: - return true; - break; - } - break; - } - } -#if _UITHREADCTXT_SUPPORT - // This method is used to throw an exepection in _Wait() if called within STA. We - // want the same behavior if _Wait is called on the UI thread. 
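_IsNonBlockingThread above boils down to the Win32 CoGetApartmentType check; a standalone sketch of the same test (illustrative helper name; CoGetApartmentType is declared in <objbase.h> and available on Windows 7 and later):

    // Illustrative sketch; not part of this header.
    #include <objbase.h>

    // Returns true for threads on which a blocking task wait would be refused.
    bool IsStaLikeThread()
    {
        APTTYPE type;
        APTTYPEQUALIFIER qualifier;

        if (FAILED(CoGetApartmentType(&type, &qualifier)))
            return false;   // not a COM/Windows Runtime initialized thread

        if (type == APTTYPE_STA || type == APTTYPE_MAINSTA)
            return true;

        // A neutral apartment entered from an STA still counts as an STA thread.
        return type == APTTYPE_NA &&
               (qualifier == APTTYPEQUALIFIER_NA_ON_STA ||
                qualifier == APTTYPEQUALIFIER_NA_ON_MAINSTA);
    }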
- if (SUCCEEDED(CaptureUiThreadContext(nullptr))) - { - return true; - } -#endif // _UITHREADCTXT_SUPPORT - - return false; - } - - template - static void _AsyncInit(const typename _Task_ptr<_ReturnType>::_Type & _OuterTask, - _AsyncInfoImpl<_OpType, _CompHandlerType, _ResultType>* _AsyncOp) - { - typedef typename ABI::Windows::Foundation::Internal::GetAbiType()))>::type _Result_abi; - // This method is invoked either when a task is created from an existing async operation or - // when a lambda that creates an async operation executes. - - // If the outer task is pending cancel, cancel the async operation before setting the completed handler. The COM reference on - // the IAsyncInfo object will be released when all *references to the operation go out of scope. - - // This assertion uses the existence of taskcollection to determine if the task was created from an event. - // That is no longer valid as even tasks created from a user lambda could have no underlying taskcollection - // when a custom scheduler is used. -#if _MSC_VER < 1800 - _CONCRT_ASSERT(((_OuterTask->_M_pTaskCollection == nullptr) || _OuterTask->_M_fUnwrappedTask) && !_OuterTask->_IsCanceled()); -#endif - - // Pass the shared_ptr by value into the lambda instead of using 'this'. - - _AsyncOp->put_Completed(Microsoft::WRL::Callback<_CompHandlerType>( - [_OuterTask, _AsyncOp](_OpType* _Operation, ABI::Windows::Foundation::AsyncStatus _Status) mutable -> HRESULT - { - HRESULT hr = S_OK; - if (_Status == ABI::Windows::Foundation::AsyncStatus::Canceled) - { - _OuterTask->_Cancel(true); - } - else if (_Status == ABI::Windows::Foundation::AsyncStatus::Error) - { - HRESULT _hr; - Microsoft::WRL::ComPtr pAsyncInfo; - if (SUCCEEDED(hr = _Operation->QueryInterface(pAsyncInfo.GetAddressOf())) && SUCCEEDED(hr = pAsyncInfo->get_ErrorCode(&_hr))) - _OuterTask->_CancelWithException(std::make_exception_ptr(_hr)); - } - else - { - _CONCRT_ASSERT(_Status == ABI::Windows::Foundation::AsyncStatus::Completed); - _NormalizeVoidToUnitType<_Result_abi>::_Type results; - if (SUCCEEDED(hr = _AsyncOp->GetResults(&results))) - _OuterTask->_FinalizeAndRunContinuations(results); - } - // Take away this shared pointers reference on the task instead of waiting for the delegate to be released. It could - // be released on a different thread after a delay, and not releasing the reference here could cause the tasks to hold - // on to resources longer than they should. As an example, without this reset, writing to a file followed by reading from - // it using the Windows Runtime Async APIs causes a sharing violation. - // Using const_cast is the workaround for failed mutable keywords - const_cast<_Task_ptr<_ReturnType>::_Type &>(_OuterTask).reset(); - return hr; - }).Get()); - _OuterTask->_SetUnwrappedAsyncOp(_AsyncOp); - } - template - static void _AsyncInit(const typename _Task_ptr<_ReturnType>::_Type& _OuterTask, const task<_InternalReturnType> & _UnwrappedTask) - { - _CONCRT_ASSERT(_OuterTask->_M_fUnwrappedTask && !_OuterTask->_IsCanceled()); - // - // We must ensure that continuations off _OuterTask (especially exception handling ones) continue to function in the - // presence of an exception flowing out of the inner task _UnwrappedTask. This requires an exception handling continuation - // off the inner task which does the appropriate funnelling to the outer one. We use _Then instead of then to prevent - // the exception from being marked as observed by our internal continuation. 
This continuation must be scheduled regardless - // of whether or not the _OuterTask task is canceled. - // - _UnwrappedTask._Then([_OuterTask](task<_InternalReturnType> _AncestorTask) -> HRESULT { - - if (_AncestorTask._GetImpl()->_IsCompleted()) - { - _OuterTask->_FinalizeAndRunContinuations(_AncestorTask._GetImpl()->_GetResult()); - } - else - { - _CONCRT_ASSERT(_AncestorTask._GetImpl()->_IsCanceled()); - if (_AncestorTask._GetImpl()->_HasUserException()) - { - // Set _PropagatedFromAncestor to false, since _AncestorTask is not an ancestor of _UnwrappedTask. - // Instead, it is the enclosing task. - _OuterTask->_CancelWithExceptionHolder(_AncestorTask._GetImpl()->_GetExceptionHolder(), false); - } - else - { - _OuterTask->_Cancel(true); - } - } - return S_OK; -#if _MSC_VER >= 1800 - }, nullptr, Concurrency::details::_DefaultAutoInline); -#else - }, nullptr, false, Concurrency::details::_DefaultAutoInline); -#endif - } - -#if _MSC_VER >= 1800 - Concurrency::scheduler_ptr _GetScheduler() const - { - return _M_TaskCollection._GetScheduler(); - } -#else - Concurrency::event _M_Completed; - Concurrency::event _M_Scheduled; -#endif - - // Tracks the internal state of the task - volatile _TaskInternalState _M_TaskState; - // Set to true either if the ancestor task had the flag set to true, or if the lambda that does the work of this task returns an - // async operation or async action that is unwrapped by the runtime. - bool _M_fFromAsync; -#if _MSC_VER < 1800 - // Set to true if we need to marshal the inner parts of an aggregate type like std::vector or std::pair. We only marshal - // the contained T^s if we create the vector or pair, such as on a when_any or a when_all operation. - bool _M_fRuntimeAggregate; -#endif - // Set to true when a continuation unwraps a task or async operation. - bool _M_fUnwrappedTask; - - // An exception thrown by the task body is captured in an exception holder and it is shared with all value based continuations rooted at the task. - // The exception is 'observed' if the user invokes get()/wait() on any of the tasks that are sharing this exception holder. If the exception - // is not observed by the time the internal object owned by the shared pointer destructs, the process will fail fast. - std::shared_ptr<_ExceptionHolder> _M_exceptionHolder; - - typedef _ContinuationTaskHandleBase * _ContinuationList; - - critical_section _M_ContinuationsCritSec; - _ContinuationList _M_Continuations; - - // The cancellation token state. - Concurrency::details::_CancellationTokenState * _M_pTokenState; - - // The registration on the token. - Concurrency::details::_CancellationTokenRegistration * _M_pRegistration; - - // The async task collection wrapper -#if _MSC_VER >= 1800 - Concurrency::details::_TaskCollection_t _M_TaskCollection; - - // Callstack for function call (constructor or .then) that created this task impl. - _TaskCreationCallstack _M_pTaskCreationCallstack; - - _TaskEventLogger _M_taskEventLogger; -#else - Concurrency::details::_AsyncTaskCollection * _M_pTaskCollection; - - // Points to the source code instruction right after the function call (constructor or .then) that created this task impl. 
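The _M_exceptionHolder comment above is the source of the fail-fast rule: an exception stored in a task must be observed by some get()/wait() before the holder is destroyed. A task-based continuation is the usual way to guarantee that (illustrative helper name, standard <ppltasks.h> API assumed):

    // Illustrative sketch; not part of this header.
    #include <ppltasks.h>
    #include <stdexcept>

    void observe_exception_sketch()
    {
        auto t = concurrency::create_task([]
        {
            throw std::runtime_error("boom");
        });

        // The continuation receives the antecedent task itself, so it runs even on
        // failure and can observe the stored exception, satisfying the holder.
        t.then([](concurrency::task<void> antecedent)
        {
            try
            {
                antecedent.get();   // marks the shared exception holder as observed
            }
            catch (const std::exception&)
            {
                // handle or log
            }
        }).wait();
    }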
- void* _M_pTaskCreationAddressHint; -#endif - - private: - // Must not be copied by value: - _Task_impl_base(const _Task_impl_base&); - _Task_impl_base const & operator=(_Task_impl_base const&); - }; - -#if _MSC_VER >= 1800 -#if _PPLTASK_ASYNC_LOGGING - inline void _TaskEventLogger::_LogTaskCompleted() - { - if (_M_scheduled) - { - ::Windows::Foundation::AsyncStatus _State; - if (_M_task->_IsCompleted()) - _State = ::Windows::Foundation::AsyncStatus::Completed; - else if (_M_task->_HasUserException()) - _State = ::Windows::Foundation::AsyncStatus::Error; - else - _State = ::Windows::Foundation::AsyncStatus::Canceled; - - if (details::_IsCausalitySupported()) - { - ::Windows::Foundation::Diagnostics::AsyncCausalityTracer::TraceOperationCompletion(::Windows::Foundation::Diagnostics::CausalityTraceLevel::Required, ::Windows::Foundation::Diagnostics::CausalitySource::Library, - _PPLTaskCausalityPlatformID, reinterpret_cast(_M_task), _State); - } - } - } -#endif -#endif - - template - struct _Task_impl : public _Task_impl_base - { - typedef ABI::Windows::Foundation::IAsyncInfo _AsyncOperationType; -#if _MSC_VER >= 1800 - _Task_impl(Concurrency::details::_CancellationTokenState * _Ct, Concurrency::scheduler_ptr _Scheduler_arg) - : _Task_impl_base(_Ct, _Scheduler_arg) -#else - _Task_impl(Concurrency::details::_CancellationTokenState * _Ct) : _Task_impl_base(_Ct) -#endif - { - _M_unwrapped_async_op = nullptr; - } - virtual ~_Task_impl() - { - // We must invoke _DeregisterCancellation in the derived class destructor. Calling it in the base class destructor could cause - // a partially initialized _Task_impl to be in the list of registrations for a cancellation token. - _DeregisterCancellation(); - } - virtual bool _CancelAndRunContinuations(bool _SynchronousCancel, bool _UserException, bool _PropagatedFromAncestor, const std::shared_ptr<_ExceptionHolder> & _ExceptionHolder) - { - enum { _Nothing, _RunContinuations, _Cancel } _Do = _Nothing; - { - scoped_lock _LockHolder(_M_ContinuationsCritSec); - if (_UserException) - { - _CONCRT_ASSERT(_SynchronousCancel && !_IsCompleted()); - // If the state is _Canceled, the exception has to be coming from an ancestor. - _CONCRT_ASSERT(!_IsCanceled() || _PropagatedFromAncestor); -#if _MSC_VER < 1800 - // If the state is _Started or _PendingCancel, the exception cannot be coming from an ancestor. - _CONCRT_ASSERT((!_IsStarted() && !_IsPendingCancel()) || !_PropagatedFromAncestor); -#endif - // We should not be canceled with an exception more than once. - _CONCRT_ASSERT(!_HasUserException()); - - if (_M_TaskState == _Canceled) - { - // If the task has finished cancelling there should not be any continuation records in the array. - return false; - } - else - { - _CONCRT_ASSERT(_M_TaskState != _Completed); - _M_exceptionHolder = _ExceptionHolder; - } - } - else - { - // Completed is a non-cancellable state, and if this is an asynchronous cancel, we're unable to do better than the last async cancel - // which is to say, cancellation is already initiated, so return early. 
- if (_IsCompleted() || _IsCanceled() || (_IsPendingCancel() && !_SynchronousCancel)) - { - _CONCRT_ASSERT(!_IsCompleted() || !_HasUserException()); - return false; - } - _CONCRT_ASSERT(!_SynchronousCancel || !_HasUserException()); - } - -#if _MSC_VER >= 1800 - if (_SynchronousCancel) -#else - if (_SynchronousCancel || _IsCreated()) -#endif - { - // Be aware that this set must be done BEFORE _M_Scheduled being set, or race will happen between this and wait() - _M_TaskState = _Canceled; -#if _MSC_VER < 1800 - _M_Scheduled.set(); -#endif - - // Cancellation completes the task, so all dependent tasks must be run to cancel them - // They are canceled when they begin running (see _RunContinuation) and see that their - // ancestor has been canceled. - _Do = _RunContinuations; - } - else - { -#if _MSC_VER >= 1800 - _CONCRT_ASSERT(!_UserException); - - if (_IsStarted()) - { - // should not initiate cancellation under a lock - _Do = _Cancel; - } - - // The _M_TaskState variable transitions to _Canceled when cancellation is completed (the task is not executing user code anymore). - // In the case of a synchronous cancel, this can happen immediately, whereas with an asynchronous cancel, the task has to move from - // _Started to _PendingCancel before it can move to _Canceled when it is finished executing. - _M_TaskState = _PendingCancel; - - _M_taskEventLogger._LogCancelTask(); - } - } - - switch (_Do) - { - case _Cancel: - { -#else - _CONCRT_ASSERT(_IsStarted() && !_UserException); -#endif - // The _M_TaskState variable transitions to _Canceled when cancellation is completed (the task is not executing user code anymore). - // In the case of a synchronous cancel, this can happen immediately, whereas with an asynchronous cancel, the task has to move from - // _Started to _PendingCancel before it can move to _Canceled when it is finished executing. - _M_TaskState = _PendingCancel; - if (_M_unwrapped_async_op != nullptr) - { - // We will only try to cancel async operation but not unwrapped tasks, since unwrapped tasks cannot be canceled without its token. - if (_M_unwrapped_async_op) _M_unwrapped_async_op->Cancel(); - } -#if _MSC_VER >= 1800 - _M_TaskCollection._Cancel(); - break; -#else - // Optimistic trying for cancelation - if (_M_pTaskCollection != nullptr) - { - _M_pTaskCollection->_Cancel(); - } -#endif - } -#if _MSC_VER < 1800 - } -#endif - - // Only execute continuations and mark the task as completed if we were able to move the task to the _Canceled state. -#if _MSC_VER >= 1800 - case _RunContinuations: - { - _M_TaskCollection._Complete(); -#else - if (_RunContinuations) - { - _M_Completed.set(); -#endif - - if (_M_Continuations) - { - // Scheduling cancellation with automatic inlining. - details::_ScheduleFuncWithAutoInline([=]() -> HRESULT { _RunTaskContinuations(); return S_OK; }, Concurrency::details::_DefaultAutoInline); - } -#if _MSC_VER >= 1800 - break; - } -#endif - } - return true; - } - void _FinalizeAndRunContinuations(_ReturnType _Result) - { - -#if _MSC_VER >= 1800 - _M_Result.Set(_Result); -#else - _M_Result = _Result; - _M_ResultContext = _ResultContext<_ReturnType>::_GetContext(_M_fRuntimeAggregate); -#endif - { - // - // Hold this lock to ensure continuations being concurrently either get added - // to the _M_Continuations vector or wait for the result - // - scoped_lock _LockHolder(_M_ContinuationsCritSec); - - // A task could still be in the _Created state if it was created with a task_completion_event. - // It could also be in the _Canceled state for the same reason. 
- _CONCRT_ASSERT(!_HasUserException() && !_IsCompleted()); - if (_IsCanceled()) - { - return; - } - - // Always transition to "completed" state, even in the face of unacknowledged pending cancellation - _M_TaskState = _Completed; - } -#if _MSC_VER >= 1800 - _M_TaskCollection._Complete(); -#else - _M_Completed.set(); -#endif - _RunTaskContinuations(); - } - // - // This method is invoked when the starts executing. The task returns early if this method returns true. - // - bool _TransitionedToStarted() - { - scoped_lock _LockHolder(_M_ContinuationsCritSec); -#if _MSC_VER >= 1800 - // Canceled state could only result from antecedent task's canceled state, but that code path will not reach here. - _ASSERT(!_IsCanceled()); - if (_IsPendingCancel()) -#else - if (_IsCanceled()) -#endif - { - return false; - } - _CONCRT_ASSERT(_IsCreated()); - _M_TaskState = _Started; - return true; - } - void _SetUnwrappedAsyncOp(_AsyncOperationType* _AsyncOp) - { - scoped_lock _LockHolder(_M_ContinuationsCritSec); - // Cancel the async operation if the task itself is canceled, since the thread that canceled the task missed it. - if (_IsPendingCancel()) - { - _CONCRT_ASSERT(!_IsCanceled()); - if (_AsyncOp) _AsyncOp->Cancel(); - } - else - { - _M_unwrapped_async_op = _AsyncOp; - } - } -#if _MSC_VER >= 1800 - // Return true if the task has reached a terminal state - bool _IsDone() - { - return _IsCompleted() || _IsCanceled(); - } -#endif - _ReturnType _GetResult() - { -#if _MSC_VER >= 1800 - return _M_Result.Get(); -#else - return _ResultContext<_ReturnType>::_GetValue(_M_Result, _M_ResultContext, _M_fRuntimeAggregate); -#endif - } -#if _MSC_VER >= 1800 - _ResultHolder<_ReturnType> _M_Result; // this means that the result type must have a public default ctor. -#else - _ReturnType _M_Result; // this means that the result type must have a public default ctor. -#endif - Microsoft::WRL::ComPtr<_AsyncOperationType> _M_unwrapped_async_op; -#if _MSC_VER < 1800 - _ContextCallback _M_ResultContext; -#endif - }; - - template - struct _Task_completion_event_impl - { -#if _MSC_VER >= 1800 - private: - _Task_completion_event_impl(const _Task_completion_event_impl&); - _Task_completion_event_impl& operator=(const _Task_completion_event_impl&); - - public: -#endif - typedef std::vector::_Type> _TaskList; - - _Task_completion_event_impl() : _M_fHasValue(false), _M_fIsCanceled(false) - { - } - - bool _HasUserException() - { - return _M_exceptionHolder != nullptr; - } - - ~_Task_completion_event_impl() - { - for (auto _TaskIt = _M_tasks.begin(); _TaskIt != _M_tasks.end(); ++_TaskIt) - { - _CONCRT_ASSERT(!_M_fHasValue && !_M_fIsCanceled); - // Cancel the tasks since the event was never signaled or canceled. 
- (*_TaskIt)->_Cancel(true); - } - } - - // We need to protect the loop over the array, so concurrent_vector would not have helped - _TaskList _M_tasks; - critical_section _M_taskListCritSec; -#if _MSC_VER >= 1800 - _ResultHolder<_ResultType> _M_value; -#else - _ResultType _M_value; -#endif - std::shared_ptr<_ExceptionHolder> _M_exceptionHolder; - bool _M_fHasValue; - bool _M_fIsCanceled; - }; - - // Utility method for dealing with void functions - inline std::function _MakeVoidToUnitFunc(const std::function& _Func) - { - return [=](_Unit_type* retVal) -> HRESULT { HRESULT hr = _Func(); *retVal = _Unit_type(); return hr; }; - } - - template - std::function _MakeUnitToTFunc(const std::function& _Func) - { - return [=](_Unit_type, _Type* retVal) -> HRESULT { HRESULT hr = _Func(retVal); return hr; }; - } - - template - std::function _MakeTToUnitFunc(const std::function& _Func) - { - return[=](_Type t, _Unit_type* retVal) -> HRESULT { HRESULT hr = _Func(t); *retVal = _Unit_type(); return hr; }; - } - - inline std::function _MakeUnitToUnitFunc(const std::function& _Func) - { - return [=](_Unit_type, _Unit_type* retVal) -> HRESULT { HRESULT hr = _Func(); *retVal = _Unit_type(); return hr; }; - } -} - - -/// -/// The task_completion_event class allows you to delay the execution of a task until a condition is satisfied, -/// or start a task in response to an external event. -/// -/// -/// The result type of this task_completion_event class. -/// -/// -/// Use a task created from a task completion event when your scenario requires you to create a task that will complete, and -/// thereby have its continuations scheduled for execution, at some point in the future. The task_completion_event must -/// have the same type as the task you create, and calling the set method on the task completion event with a value of that type -/// will cause the associated task to complete, and provide that value as a result to its continuations. -/// If the task completion event is never signaled, any tasks created from it will be canceled when it is destructed. -/// task_completion_event behaves like a smart pointer, and should be passed by value. -/// -/// -/**/ -template -class task_completion_event -{ -public: - /// - /// Constructs a task_completion_event object. - /// - /**/ - task_completion_event() : _M_Impl(std::make_shared>()) - { - } - - /// - /// Sets the task completion event. - /// - /// - /// The result to set this event with. - /// - /// - /// The method returns true if it was successful in setting the event. It returns false if the event is already set. - /// - /// - /// In the presence of multiple or concurrent calls to set, only the first call will succeed and its result (if any) will be stored in the - /// task completion event. The remaining sets are ignored and the method will return false. When you set a task completion event, all the - /// tasks created from that event will immediately complete, and its continuations, if any, will be scheduled. Task completion objects that have - /// a other than void will pass the value to their continuations. - /// - /**/ - bool set(_ResultType _Result) const // 'const' (even though it's not deep) allows to safely pass events by value into lambdas - { - // Subsequent sets are ignored. This makes races to set benign: the first setter wins and all others are ignored. 
- if (_IsTriggered()) - { - return false; - } - - _TaskList _Tasks; - bool _RunContinuations = false; - { - details::scoped_lock _LockHolder(_M_Impl->_M_taskListCritSec); - - if (!_IsTriggered()) - { -#if _MSC_VER >= 1800 - _M_Impl->_M_value.Set(_Result); -#else - _M_Impl->_M_value = _Result; -#endif - _M_Impl->_M_fHasValue = true; - - _Tasks.swap(_M_Impl->_M_tasks); - _RunContinuations = true; - } - } - - if (_RunContinuations) - { - for (auto _TaskIt = _Tasks.begin(); _TaskIt != _Tasks.end(); ++_TaskIt) - { -#if _MSC_VER >= 1800 - // If current task was cancelled by a cancellation_token, it would be in cancel pending state. - if ((*_TaskIt)->_IsPendingCancel()) - (*_TaskIt)->_Cancel(true); - else - { - // Tasks created with task_completion_events can be marked as async, (we do this in when_any and when_all - // if one of the tasks involved is an async task). Since continuations of async tasks can execute inline, we - // need to run continuations after the lock is released. - (*_TaskIt)->_FinalizeAndRunContinuations(_M_Impl->_M_value.Get()); - } -#else - // Tasks created with task_completion_events can be marked as async, (we do this in when_any and when_all - // if one of the tasks involved is an async task). Since continuations of async tasks can execute inline, we - // need to run continuations after the lock is released. - (*_TaskIt)->_FinalizeAndRunContinuations(_M_Impl->_M_value); -#endif - } - if (_M_Impl->_HasUserException()) - { - _M_Impl->_M_exceptionHolder.reset(); - } - return true; - } - - return false; - } -#if _MSC_VER >= 1800 - - template - __declspec(noinline) // Ask for no inlining so that the _ReturnAddress intrinsic gives us the expected result - bool set_exception(_E _Except) const // 'const' (even though it's not deep) allows to safely pass events by value into lambdas - { - // It is important that _CAPTURE_CALLSTACK() evaluate to the instruction after the call instruction for set_exception. - return _Cancel(std::make_exception_ptr(_Except), _CAPTURE_CALLSTACK()); - } -#endif - - /// - /// Propagates an exception to all tasks associated with this event. - /// - /// - /// The exception_ptr that indicates the exception to set this event with. - /// - /**/ - __declspec(noinline) // Ask for no inlining so that the _ReturnAddress intrinsic gives us the expected result - bool set_exception(std::exception_ptr _ExceptionPtr) const // 'const' (even though it's not deep) allows to safely pass events by value into lambdas - { - // It is important that _ReturnAddress() evaluate to the instruction after the call instruction for set_exception. -#if _MSC_VER >= 1800 - return _Cancel(_ExceptionPtr, _CAPTURE_CALLSTACK()); -#else - return _Cancel(_ExceptionPtr, _ReturnAddress()); -#endif - } - - /// - /// Internal method to cancel the task_completion_event. Any task created using this event will be marked as canceled if it has - /// not already been set. - /// - bool _Cancel() const - { - // Cancel with the stored exception if one exists. - return _CancelInternal(); - } - - /// - /// Internal method to cancel the task_completion_event with the exception provided. Any task created using this event will be canceled - /// with the same exception. 
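set_exception above is essentially _Cancel with a stored exception holder, so every task attached to the event is cancelled with that exception and rethrows it when its result is observed. Sketch (illustrative helper name, standard <ppltasks.h> API assumed):

    // Illustrative sketch; not part of this header.
    #include <ppltasks.h>
    #include <exception>
    #include <stdexcept>

    void tce_exception_sketch()
    {
        concurrency::task_completion_event<int> tce;
        concurrency::task<int> t(tce);

        tce.set_exception(std::make_exception_ptr(std::runtime_error("producer failed")));

        try
        {
            t.get();   // rethrows the exception propagated through the event
        }
        catch (const std::runtime_error&)
        {
            // expected
        }
    }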
- /// - template -#if _MSC_VER >= 1800 - bool _Cancel(_ExHolderType _ExHolder, const details::_TaskCreationCallstack &_SetExceptionAddressHint = details::_TaskCreationCallstack()) const -#else - bool _Cancel(_ExHolderType _ExHolder, void* _SetExceptionAddressHint = nullptr) const -#endif - { - (void)_SetExceptionAddressHint; - bool _Canceled; -#if _MSC_VER >= 1800 - if(_StoreException(_ExHolder, _SetExceptionAddressHint)) -#else - if (_StoreException(_ExHolder)) -#endif - { - _Canceled = _CancelInternal(); - _CONCRT_ASSERT(_Canceled); - } - else - { - _Canceled = false; - } - return _Canceled; - } - - /// - /// Internal method that stores an exception in the task completion event. This is used internally by when_any. - /// Note, this does not cancel the task completion event. A task completion event with a stored exception - /// can bet set() successfully. If it is canceled, it will cancel with the stored exception, if one is present. - /// - template -#if _MSC_VER >= 1800 - bool _StoreException(_ExHolderType _ExHolder, const details::_TaskCreationCallstack &_SetExceptionAddressHint = details::_TaskCreationCallstack()) const -#else - bool _StoreException(_ExHolderType _ExHolder, void* _SetExceptionAddressHint = nullptr) const -#endif - { - details::scoped_lock _LockHolder(_M_Impl->_M_taskListCritSec); - if (!_IsTriggered() && !_M_Impl->_HasUserException()) - { - // Create the exception holder only if we have ensured there we will be successful in setting it onto the - // task completion event. Failing to do so will result in an unobserved task exception. - _M_Impl->_M_exceptionHolder = _ToExceptionHolder(_ExHolder, _SetExceptionAddressHint); - return true; - } - return false; - } - - /// - /// Tests whether current event has been either Set, or Canceled. - /// - bool _IsTriggered() const - { - return _M_Impl->_M_fHasValue || _M_Impl->_M_fIsCanceled; - } - -private: - -#if _MSC_VER >= 1800 - static std::shared_ptr _ToExceptionHolder(const std::shared_ptr& _ExHolder, const details::_TaskCreationCallstack&) -#else - static std::shared_ptr _ToExceptionHolder(const std::shared_ptr& _ExHolder, void*) -#endif - { - return _ExHolder; - } - -#if _MSC_VER >= 1800 - static std::shared_ptr _ToExceptionHolder(std::exception_ptr _ExceptionPtr, const details::_TaskCreationCallstack &_SetExceptionAddressHint) -#else - static std::shared_ptr _ToExceptionHolder(std::exception_ptr _ExceptionPtr, void* _SetExceptionAddressHint) -#endif - { - return std::make_shared(_ExceptionPtr, _SetExceptionAddressHint); - } - - template friend class task; // task can register itself with the event by calling the private _RegisterTask - template friend class task_completion_event; - - typedef typename details::_Task_completion_event_impl<_ResultType>::_TaskList _TaskList; - - /// - /// Cancels the task_completion_event. - /// - bool _CancelInternal() const - { - // Cancellation of task completion events is an internal only utility. Our usage is such that _CancelInternal - // will never be invoked if the task completion event has been set. 
- _CONCRT_ASSERT(!_M_Impl->_M_fHasValue); - if (_M_Impl->_M_fIsCanceled) - { - return false; - } - - _TaskList _Tasks; - bool _Cancel = false; - { - details::scoped_lock _LockHolder(_M_Impl->_M_taskListCritSec); - _CONCRT_ASSERT(!_M_Impl->_M_fHasValue); - if (!_M_Impl->_M_fIsCanceled) - { - _M_Impl->_M_fIsCanceled = true; - _Tasks.swap(_M_Impl->_M_tasks); - _Cancel = true; - } - } - - bool _UserException = _M_Impl->_HasUserException(); - - if (_Cancel) - { - for (auto _TaskIt = _Tasks.begin(); _TaskIt != _Tasks.end(); ++_TaskIt) - { - // Need to call this after the lock is released. See comments in set(). - if (_UserException) - { - (*_TaskIt)->_CancelWithExceptionHolder(_M_Impl->_M_exceptionHolder, true); - } - else - { - (*_TaskIt)->_Cancel(true); - } - } - } - return _Cancel; - } - - /// - /// Register a task with this event. This function is called when a task is constructed using - /// a task_completion_event. - /// - void _RegisterTask(const typename details::_Task_ptr<_ResultType>::_Type & _TaskParam) - { - details::scoped_lock _LockHolder(_M_Impl->_M_taskListCritSec); -#if _MSC_VER < 1800 - _TaskParam->_SetScheduledEvent(); -#endif - //If an exception was already set on this event, then cancel the task with the stored exception. - if (_M_Impl->_HasUserException()) - { - _TaskParam->_CancelWithExceptionHolder(_M_Impl->_M_exceptionHolder, true); - } - else if (_M_Impl->_M_fHasValue) - { -#if _MSC_VER >= 1800 - _TaskParam->_FinalizeAndRunContinuations(_M_Impl->_M_value.Get()); -#else - _TaskParam->_FinalizeAndRunContinuations(_M_Impl->_M_value); -#endif - } - else - { - _M_Impl->_M_tasks.push_back(_TaskParam); - } - } - - std::shared_ptr> _M_Impl; -}; - -/// -/// The task_completion_event class allows you to delay the execution of a task until a condition is satisfied, -/// or start a task in response to an external event. -/// -/// -/// Use a task created from a task completion event when your scenario requires you to create a task that will complete, and -/// thereby have its continuations scheduled for execution, at some point in the future. The task_completion_event must -/// have the same type as the task you create, and calling the set method on the task completion event with a value of that type -/// will cause the associated task to complete, and provide that value as a result to its continuations. -/// If the task completion event is never signaled, any tasks created from it will be canceled when it is destructed. -/// task_completion_event behaves like a smart pointer, and should be passed by value. -/// -/// -/**/ -template<> -class task_completion_event -{ -public: - /// - /// Sets the task completion event. - /// - /// - /// The method returns true if it was successful in setting the event. It returns false if the event is already set. - /// - /// - /// In the presence of multiple or concurrent calls to set, only the first call will succeed and its result (if any) will be stored in the - /// task completion event. The remaining sets are ignored and the method will return false. When you set a task completion event, all the - /// tasks created from that event will immediately complete, and its continuations, if any, will be scheduled. Task completion objects that have - /// a other than void will pass the value to their continuations. 
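The documentation above describes the normal lifecycle of a task_completion_event; in code it reads roughly like this (illustrative helper name, standard <ppltasks.h> API assumed):

    // Illustrative sketch; not part of this header.
    #include <ppltasks.h>
    #include <iostream>

    void tce_sketch()
    {
        concurrency::task_completion_event<int> tce;
        concurrency::task<int> t(tce);            // task backed by the event (see _RegisterTask)

        t.then([](int value)
        {
            std::cout << "completed with " << value << std::endl;
        });

        bool first  = tce.set(42);                // true: the first set wins and schedules continuations
        bool second = tce.set(99);                // false: subsequent sets are ignored

        (void)first;
        (void)second;
        t.wait();
    }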
- /// - /**/ - bool set() const // 'const' (even though it's not deep) allows to safely pass events by value into lambdas - { - return _M_unitEvent.set(details::_Unit_type()); - } -#if _MSC_VER >= 1800 - - template - __declspec(noinline) // Ask for no inlining so that the _ReturnAddress intrinsic gives us the expected result - bool set_exception(_E _Except) const // 'const' (even though it's not deep) allows to safely pass events by value into lambdas - { - return _M_unitEvent._Cancel(std::make_exception_ptr(_Except), _CAPTURE_CALLSTACK()); - } -#endif - - /// - /// Propagates an exception to all tasks associated with this event. - /// - /// - /// The exception_ptr that indicates the exception to set this event with. - /// - /**/ - __declspec(noinline) // Ask for no inlining so that the _ReturnAddress intrinsic gives us the expected result - bool set_exception(std::exception_ptr _ExceptionPtr) const // 'const' (even though it's not deep) allows to safely pass events by value into lambdas - { - // It is important that _ReturnAddress() evaluate to the instruction after the call instruction for set_exception. -#if _MSC_VER >= 1800 - return _M_unitEvent._Cancel(_ExceptionPtr, _CAPTURE_CALLSTACK()); -#else - return _M_unitEvent._Cancel(_ExceptionPtr, _ReturnAddress()); -#endif - } - - /// - /// Cancel the task_completion_event. Any task created using this event will be marked as canceled if it has - /// not already been set. - /// - void _Cancel() const // 'const' (even though it's not deep) allows to safely pass events by value into lambdas - { - _M_unitEvent._Cancel(); - } - - /// - /// Cancel the task_completion_event with the exception holder provided. Any task created using this event will be canceled - /// with the same exception. - /// - void _Cancel(const std::shared_ptr& _ExHolder) const - { - _M_unitEvent._Cancel(_ExHolder); - } - - /// - /// Method that stores an exception in the task completion event. This is used internally by when_any. - /// Note, this does not cancel the task completion event. A task completion event with a stored exception - /// can bet set() successfully. If it is canceled, it will cancel with the stored exception, if one is present. - /// - bool _StoreException(const std::shared_ptr& _ExHolder) const - { - return _M_unitEvent._StoreException(_ExHolder); - } - - /// - /// Test whether current event has been either Set, or Canceled. - /// - bool _IsTriggered() const - { - return _M_unitEvent._IsTriggered(); - } - -private: - template friend class task; // task can register itself with the event by calling the private _RegisterTask - - /// - /// Register a task with this event. This function is called when a task is constructed using - /// a task_completion_event. - /// - void _RegisterTask(details::_Task_ptr::_Type _TaskParam) - { - _M_unitEvent._RegisterTask(_TaskParam); - } - - // The void event contains an event a dummy type so common code can be used for events with void and non-void results. - task_completion_event _M_unitEvent; -}; -namespace details -{ - // - // Compile-time validation helpers - // - - // Task constructor validation: issue helpful diagnostics for common user errors. Do not attempt full validation here. - // - // Anything callable is fine - template - auto _IsValidTaskCtor(_Ty _Param, int, int, int, int, int, int, int) -> typename decltype(_Param(), std::true_type()); - - // Anything callable with a task return value is fine - template - auto _IsValidTaskCtor(_Ty _Param, int, int, int, int, int, int, ...) 
-> typename decltype(_Param(stdx::declval*>()), std::true_type()); - - // Anything callable with a return value is fine - template - auto _IsValidTaskCtor(_Ty _Param, int, int, int, int, int, ...) -> typename decltype(_Param(stdx::declval<_ReturnType*>()), std::true_type()); - - // Anything that has GetResults is fine: this covers AsyncAction* - template - auto _IsValidTaskCtor(_Ty _Param, int, int, int, int, ...) -> typename decltype(_Param->GetResults(), std::true_type()); - - // Anything that has GetResults(TResult_abi*) is fine: this covers AsyncOperation* - template - auto _IsValidTaskCtor(_Ty _Param, int, int, int, ...) -> typename decltype(_Param->GetResults(stdx::declval()))*>()), std::true_type()); - - // Allow parameters with set: this covers task_completion_event - template - auto _IsValidTaskCtor(_Ty _Param, int, int, ...) -> typename decltype(_Param.set(stdx::declval<_ReturnType>()), std::true_type()); - - template - auto _IsValidTaskCtor(_Ty _Param, int, ...) -> typename decltype(_Param.set(), std::true_type()); - - // All else is invalid - template - std::false_type _IsValidTaskCtor(_Ty _Param, ...); - - template - void _ValidateTaskConstructorArgs(_Ty _Param) - { - (void)_Param; - static_assert(std::is_same(_Param, 0, 0, 0, 0, 0, 0, 0)), std::true_type>::value, - "incorrect argument for task constructor; can be a callable object, an asynchronous operation, or a task_completion_event" - ); - static_assert(!(std::is_same<_Ty, _ReturnType>::value && details::_IsIAsyncInfo<_Ty>::_Value), - "incorrect template argument for task; consider using the return type of the async operation"); - } - // Helpers for create_async validation - // - // A parameter lambda taking no arguments is valid - template - static auto _IsValidCreateAsync(_Ty _Param, int, int, int, int, int, int, int, int) -> typename decltype(_Param(), std::true_type()); - - // A parameter lambda taking a result argument is valid - template - static auto _IsValidCreateAsync(_Ty _Param, int, int, int, int, int, int, int, ...) -> typename decltype(_Param(stdx::declval<_ReturnType*>()), std::true_type()); - - // A parameter lambda taking an cancellation_token argument is valid - template - static auto _IsValidCreateAsync(_Ty _Param, int, int, int, int, int, int, ...) -> typename decltype(_Param(Concurrency::cancellation_token::none()), std::true_type()); - - // A parameter lambda taking an cancellation_token argument and a result argument is valid - template - static auto _IsValidCreateAsync(_Ty _Param, int, int, int, int, int, ...) -> typename decltype(_Param(Concurrency::cancellation_token::none(), stdx::declval<_ReturnType*>()), std::true_type()); - - // A parameter lambda taking a progress report argument is valid - template - static auto _IsValidCreateAsync(_Ty _Param, int, int, int, int, ...) -> typename decltype(_Param(details::_ProgressReporterCtorArgType()), std::true_type()); - - // A parameter lambda taking a progress report argument and a result argument is valid - template - static auto _IsValidCreateAsync(_Ty _Param, int, int, int, ...) -> typename decltype(_Param(details::_ProgressReporterCtorArgType(), stdx::declval<_ReturnType*>()), std::true_type()); - - // A parameter lambda taking a progress report and a cancellation_token argument is valid - template - static auto _IsValidCreateAsync(_Ty _Param, int, int, ...) 
-> typename decltype(_Param(details::_ProgressReporterCtorArgType(), Concurrency::cancellation_token::none()), std::true_type()); - - // A parameter lambda taking a progress report and a cancellation_token argument and a result argument is valid - template - static auto _IsValidCreateAsync(_Ty _Param, int, ...) -> typename decltype(_Param(details::_ProgressReporterCtorArgType(), Concurrency::cancellation_token::none(), stdx::declval<_ReturnType*>()), std::true_type()); - - // All else is invalid - template - static std::false_type _IsValidCreateAsync(_Ty _Param, ...); -} - -/// -/// The Parallel Patterns Library (PPL) task class. A task object represents work that can be executed asynchronously, -/// and concurrently with other tasks and parallel work produced by parallel algorithms in the Concurrency Runtime. It produces -/// a result of type on successful completion. Tasks of type task<void> produce no result. -/// A task can be waited upon and canceled independently of other tasks. It can also be composed with other tasks using -/// continuations(then), and join(when_all) and choice(when_any) patterns. -/// -/// -/// The result type of this task. -/// -/// -/// For more information, see . -/// -/**/ -template -class task -{ -public: - /// - /// The type of the result an object of this class produces. - /// - /**/ - typedef _ReturnType result_type; - - /// - /// Constructs a task object. - /// - /// - /// The default constructor for a task is only present in order to allow tasks to be used within containers. - /// A default constructed task cannot be used until you assign a valid task to it. Methods such as get, wait or then - /// will throw an invalid_argument exception when called on a default constructed task. - /// A task that is created from a task_completion_event will complete (and have its continuations scheduled) when the task - /// completion event is set. - /// The version of the constructor that takes a cancellation token creates a task that can be canceled using the - /// cancellation_token_source the token was obtained from. Tasks created without a cancellation token are not cancelable. - /// Tasks created from a Windows::Foundation::IAsyncInfo interface or a lambda that returns an IAsyncInfo interface - /// reach their terminal state when the enclosed Windows Runtime asynchronous operation or action completes. Similarly, tasks created - /// from a lamda that returns a task<result_type> reach their terminal state when the inner task reaches its terminal state, - /// and not when the lamda returns. - /// task behaves like a smart pointer and is safe to pass around by value. It can be accessed by multiple threads - /// without the need for locks. - /// The constructor overloads that take a Windows::Foundation::IAsyncInfo interface or a lambda returning such an interface, are only available - /// to Windows Store apps. - /// For more information, see . - /// - /**/ - task() : _M_Impl(nullptr) - { - // The default constructor should create a task with a nullptr impl. This is a signal that the - // task is not usable and should throw if any wait(), get() or then() APIs are used. - } - - /// - /// Constructs a task object. - /// - /// - /// The type of the parameter from which the task is to be constructed. - /// - /// - /// The parameter from which the task is to be constructed. This could be a lambda, a function object, a task_completion_event<result_type> - /// object, or a Windows::Foundation::IAsyncInfo if you are using tasks in your Windows Store app. 
The lambda or function - /// object should be a type equivalent to std::function<X(void)>, where X can be a variable of type result_type, - /// task<result_type>, or a Windows::Foundation::IAsyncInfo in Windows Store apps. - /// - /// - /// The cancellation token to associate with this task. A task created without a cancellation token cannot be canceled. It implicitly receives - /// the token cancellation_token::none(). - /// - /// - /// The default constructor for a task is only present in order to allow tasks to be used within containers. - /// A default constructed task cannot be used until you assign a valid task to it. Methods such as get, wait or then - /// will throw an invalid_argument exception when called on a default constructed task. - /// A task that is created from a task_completion_event will complete (and have its continuations scheduled) when the task - /// completion event is set. - /// The version of the constructor that takes a cancellation token creates a task that can be canceled using the - /// cancellation_token_source the token was obtained from. Tasks created without a cancellation token are not cancelable. - /// Tasks created from a Windows::Foundation::IAsyncInfo interface or a lambda that returns an IAsyncInfo interface - /// reach their terminal state when the enclosed Windows Runtime asynchronous operation or action completes. Similarly, tasks created - /// from a lamda that returns a task<result_type> reach their terminal state when the inner task reaches its terminal state, - /// and not when the lamda returns. - /// task behaves like a smart pointer and is safe to pass around by value. It can be accessed by multiple threads - /// without the need for locks. - /// The constructor overloads that take a Windows::Foundation::IAsyncInfo interface or a lambda returning such an interface, are only available - /// to Windows Store apps. - /// For more information, see . - /// - /**/ - template - __declspec(noinline) // Ask for no inlining so that the _ReturnAddress intrinsic gives us the expected result - explicit task(_Ty _Param) - { -#if _MSC_VER >= 1800 - task_options _TaskOptions; -#endif - details::_ValidateTaskConstructorArgs<_ReturnType, _Ty>(_Param); - -#if _MSC_VER >= 1800 - _CreateImpl(_TaskOptions.get_cancellation_token()._GetImplValue(), _TaskOptions.get_scheduler()); -#else - _CreateImpl(Concurrency::cancellation_token::none()._GetImplValue()); -#endif - // Do not move the next line out of this function. It is important that _ReturnAddress() evaluate to the the call site of the task constructor. -#if _MSC_VER >= 1800 - _SetTaskCreationCallstack(_CAPTURE_CALLSTACK()); -#else - _SetTaskCreationAddressHint(_ReturnAddress()); -#endif - _TaskInitMaybeFunctor(_Param, details::_IsCallable<_ReturnType>(_Param, 0, 0, 0)); - } - - /// - /// Constructs a task object. - /// - /// - /// The type of the parameter from which the task is to be constructed. - /// - /// - /// The parameter from which the task is to be constructed. This could be a lambda, a function object, a task_completion_event<result_type> - /// object, or a Windows::Foundation::IAsyncInfo if you are using tasks in your Windows Store app. The lambda or function - /// object should be a type equivalent to std::function<X(void)>, where X can be a variable of type result_type, - /// task<result_type>, or a Windows::Foundation::IAsyncInfo in Windows Store apps. - /// - /// - /// The cancellation token to associate with this task. A task created without a cancellation token cannot be canceled. 
It implicitly receives - /// the token cancellation_token::none(). - /// - /// - /// The default constructor for a task is only present in order to allow tasks to be used within containers. - /// A default constructed task cannot be used until you assign a valid task to it. Methods such as get, wait or then - /// will throw an invalid_argument exception when called on a default constructed task. - /// A task that is created from a task_completion_event will complete (and have its continuations scheduled) when the task - /// completion event is set. - /// The version of the constructor that takes a cancellation token creates a task that can be canceled using the - /// cancellation_token_source the token was obtained from. Tasks created without a cancellation token are not cancelable. - /// Tasks created from a Windows::Foundation::IAsyncInfo interface or a lambda that returns an IAsyncInfo interface - /// reach their terminal state when the enclosed Windows Runtime asynchronous operation or action completes. Similarly, tasks created - /// from a lamda that returns a task<result_type> reach their terminal state when the inner task reaches its terminal state, - /// and not when the lamda returns. - /// task behaves like a smart pointer and is safe to pass around by value. It can be accessed by multiple threads - /// without the need for locks. - /// The constructor overloads that take a Windows::Foundation::IAsyncInfo interface or a lambda returning such an interface, are only available - /// to Windows Store apps. - /// For more information, see . - /// - /**/ - template - __declspec(noinline) // Ask for no inlining so that the _ReturnAddress intrinsic gives us the expected result -#if _MSC_VER >= 1800 - explicit task(_Ty _Param, const task_options &_TaskOptions) -#else - explicit task(_Ty _Param, Concurrency::cancellation_token _Token) -#endif - { - details::_ValidateTaskConstructorArgs<_ReturnType, _Ty>(_Param); - -#if _MSC_VER >= 1800 - _CreateImpl(_TaskOptions.get_cancellation_token()._GetImplValue(), _TaskOptions.get_scheduler()); -#else - _CreateImpl(_Token._GetImplValue()); -#endif - // Do not move the next line out of this function. It is important that _ReturnAddress() evaluate to the the call site of the task constructor. -#if _MSC_VER >= 1800 - _SetTaskCreationCallstack(details::_get_internal_task_options(_TaskOptions)._M_hasPresetCreationCallstack ? details::_get_internal_task_options(_TaskOptions)._M_presetCreationCallstack : _CAPTURE_CALLSTACK()); -#else - _SetTaskCreationAddressHint(_ReturnAddress()); -#endif - _TaskInitMaybeFunctor(_Param, details::_IsCallable<_ReturnType>(_Param, 0, 0, 0)); - } - - /// - /// Constructs a task object. - /// - /// - /// The source task object. - /// - /// - /// The default constructor for a task is only present in order to allow tasks to be used within containers. - /// A default constructed task cannot be used until you assign a valid task to it. Methods such as get, wait or then - /// will throw an invalid_argument exception when called on a default constructed task. - /// A task that is created from a task_completion_event will complete (and have its continuations scheduled) when the task - /// completion event is set. - /// The version of the constructor that takes a cancellation token creates a task that can be canceled using the - /// cancellation_token_source the token was obtained from. Tasks created without a cancellation token are not cancelable. 
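As a concrete illustration of the constructors documented above, the sketch below uses the standard MSVC <ppltasks.h> surface (concurrency::task), where the functor returns its result directly; in this WRL-flavored header the functor instead returns an HRESULT and writes its result through an out-pointer, but the construction options are analogous.

#include <ppltasks.h>
using namespace concurrency;

void construction_sketch()
{
    // A task built from a callable; the callable's result becomes result_type.
    task<int> answer([] { return 42; });

    // A task associated with a cancellation token obtained from a source;
    // newer compilers accept a full task_options bundle (token, scheduler, context).
    cancellation_token_source cts;
    task<void> cancellable([] { /* long-running work */ }, cts.get_token());

    cts.cancel();     // request cancellation through the source
    answer.wait();    // blocks until the work finishes; returns a task_status
}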
- /// Tasks created from a Windows::Foundation::IAsyncInfo interface or a lambda that returns an IAsyncInfo interface - /// reach their terminal state when the enclosed Windows Runtime asynchronous operation or action completes. Similarly, tasks created - /// from a lamda that returns a task<result_type> reach their terminal state when the inner task reaches its terminal state, - /// and not when the lamda returns. - /// task behaves like a smart pointer and is safe to pass around by value. It can be accessed by multiple threads - /// without the need for locks. - /// The constructor overloads that take a Windows::Foundation::IAsyncInfo interface or a lambda returning such an interface, are only available - /// to Windows Store apps. - /// For more information, see . - /// - /**/ - task(const task& _Other) : _M_Impl(_Other._M_Impl) {} - - /// - /// Constructs a task object. - /// - /// - /// The source task object. - /// - /// - /// The default constructor for a task is only present in order to allow tasks to be used within containers. - /// A default constructed task cannot be used until you assign a valid task to it. Methods such as get, wait or then - /// will throw an invalid_argument exception when called on a default constructed task. - /// A task that is created from a task_completion_event will complete (and have its continuations scheduled) when the task - /// completion event is set. - /// The version of the constructor that takes a cancellation token creates a task that can be canceled using the - /// cancellation_token_source the token was obtained from. Tasks created without a cancellation token are not cancelable. - /// Tasks created from a Windows::Foundation::IAsyncInfo interface or a lambda that returns an IAsyncInfo interface - /// reach their terminal state when the enclosed Windows Runtime asynchronous operation or action completes. Similarly, tasks created - /// from a lamda that returns a task<result_type> reach their terminal state when the inner task reaches its terminal state, - /// and not when the lamda returns. - /// task behaves like a smart pointer and is safe to pass around by value. It can be accessed by multiple threads - /// without the need for locks. - /// The constructor overloads that take a Windows::Foundation::IAsyncInfo interface or a lambda returning such an interface, are only available - /// to Windows Store apps. - /// For more information, see . - /// - /**/ - task(task&& _Other) : _M_Impl(std::move(_Other._M_Impl)) {} - - /// - /// Replaces the contents of one task object with another. - /// - /// - /// The source task object. - /// - /// - /// As task behaves like a smart pointer, after a copy assignment, this task objects represents the same - /// actual task as does. - /// - /**/ - task& operator=(const task& _Other) - { - if (this != &_Other) - { - _M_Impl = _Other._M_Impl; - } - return *this; - } - - /// - /// Replaces the contents of one task object with another. - /// - /// - /// The source task object. - /// - /// - /// As task behaves like a smart pointer, after a copy assignment, this task objects represents the same - /// actual task as does. - /// - /**/ - task& operator=(task&& _Other) - { - if (this != &_Other) - { - _M_Impl = std::move(_Other._M_Impl); - } - return *this; - } - - /// - /// Adds a continuation task to this task. - /// - /// - /// The type of the function object that will be invoked by this task. - /// - /// - /// The continuation function to execute when this task completes. 
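The copy and move members above only transfer a handle; as the remarks say, a task behaves like a smart pointer. A small sketch against the standard <ppltasks.h> API:

#include <ppltasks.h>
#include <utility>
using namespace concurrency;

void handle_semantics_sketch()
{
    task<int> original([] { return 1; });

    // Copying a task copies the handle, not the work: both objects refer to the
    // same underlying task, which is also what operator== compares.
    task<int> alias = original;
    bool same_task = (alias == original);   // true

    // Moving leaves `original` without an implementation; in this implementation it
    // then behaves like a default-constructed task if wait/get/then are called on it.
    task<int> adopted = std::move(original);
    adopted.wait();
    (void)same_task;
}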
This continuation function must take as input - /// a variable of either result_type or task<result_type>, where result_type is the type - /// of the result this task produces. - /// - /// - /// The newly created continuation task. The result type of the returned task is determined by what returns. - /// - /// - /// The overloads of then that take a lambda or functor that returns a Windows::Foundation::IAsyncInfo interface, are only available - /// to Windows Store apps. - /// For more information on how to use task continuations to compose asynchronous work, see . - /// - /**/ - template - __declspec(noinline) // Ask for no inlining so that the _ReturnAddress intrinsic gives us the expected result - auto then(const _Function& _Func) const -> typename details::_ContinuationTypeTraits<_Function, _ReturnType>::_TaskOfType - { -#if _MSC_VER >= 1800 - task_options _TaskOptions; - details::_get_internal_task_options(_TaskOptions)._set_creation_callstack(_CAPTURE_CALLSTACK()); - return _ThenImpl<_ReturnType, _Function>(_Func, _TaskOptions); -#else - auto _ContinuationTask = _ThenImpl<_ReturnType, _Function>(_Func, nullptr, task_continuation_context::use_default()); - // Do not move the next line out of this function. It is important that _ReturnAddress() evaluate to the the call site of then. - _ContinuationTask._SetTaskCreationAddressHint(_ReturnAddress()); - return _ContinuationTask; -#endif - } - - /// - /// Adds a continuation task to this task. - /// - /// - /// The type of the function object that will be invoked by this task. - /// - /// - /// The continuation function to execute when this task completes. This continuation function must take as input - /// a variable of either result_type or task<result_type>, where result_type is the type - /// of the result this task produces. - /// - /// - /// The cancellation token to associate with the continuation task. A continuation task that is created without a cancellation token will inherit - /// the token of its antecedent task. - /// - /// - /// The newly created continuation task. The result type of the returned task is determined by what returns. - /// - /// - /// The overloads of then that take a lambda or functor that returns a Windows::Foundation::IAsyncInfo interface, are only available - /// to Windows Store apps. - /// For more information on how to use task continuations to compose asynchronous work, see . - /// - /**/ - template - __declspec(noinline) // Ask for no inlining so that the _ReturnAddress intrinsic gives us the expected result -#if _MSC_VER >= 1800 - auto then(const _Function& _Func, task_options _TaskOptions) const -> typename details::_ContinuationTypeTraits<_Function, _ReturnType>::_TaskOfType -#else - auto then(const _Function& _Func, Concurrency::cancellation_token _CancellationToken) const -> typename details::_ContinuationTypeTraits<_Function, _ReturnType>::_TaskOfType -#endif - { -#if _MSC_VER >= 1800 - details::_get_internal_task_options(_TaskOptions)._set_creation_callstack(_CAPTURE_CALLSTACK()); - return _ThenImpl<_ReturnType, _Function>(_Func, _TaskOptions); -#else - auto _ContinuationTask = _ThenImpl<_ReturnType, _Function>(_Func, _CancellationToken._GetImplValue(), task_continuation_context::use_default()); - // Do not move the next line out of this function. It is important that _ReturnAddress() evaluate to the the call site of then. 
- _ContinuationTask._SetTaskCreationAddressHint(_ReturnAddress()); - return _ContinuationTask; -#endif - } -#if _MSC_VER < 1800 - /// - /// Adds a continuation task to this task. - /// - /// - /// The type of the function object that will be invoked by this task. - /// - /// - /// The continuation function to execute when this task completes. This continuation function must take as input - /// a variable of either result_type or task<result_type>, where result_type is the type - /// of the result this task produces. - /// - /// - /// A variable that specifies where the continuation should execute. This variable is only useful when used in a - /// Windows Store app. For more information, see task_continuation_context - /// - /// - /// The newly created continuation task. The result type of the returned task is determined by what returns. - /// - /// - /// The overloads of then that take a lambda or functor that returns a Windows::Foundation::IAsyncInfo interface, are only available - /// to Windows Store apps. - /// For more information on how to use task continuations to compose asynchronous work, see . - /// - /**/ - template - __declspec(noinline) // Ask for no inlining so that the _ReturnAddress intrinsic gives us the expected result - auto then(const _Function& _Func, task_continuation_context _ContinuationContext) const -> typename details::_ContinuationTypeTraits<_Function, _ReturnType>::_TaskOfType - { - auto _ContinuationTask = _ThenImpl<_ReturnType, _Function>(_Func, nullptr, _ContinuationContext); - // Do not move the next line out of this function. It is important that _ReturnAddress() evaluate to the the call site of then. - _ContinuationTask._SetTaskCreationAddressHint(_ReturnAddress()); - return _ContinuationTask; - } -#endif - /// - /// Adds a continuation task to this task. - /// - /// - /// The type of the function object that will be invoked by this task. - /// - /// - /// The continuation function to execute when this task completes. This continuation function must take as input - /// a variable of either result_type or task<result_type>, where result_type is the type - /// of the result this task produces. - /// - /// - /// The cancellation token to associate with the continuation task. A continuation task that is created without a cancellation token will inherit - /// the token of its antecedent task. - /// - /// - /// A variable that specifies where the continuation should execute. This variable is only useful when used in a - /// Windows Store app. For more information, see task_continuation_context - /// - /// - /// The newly created continuation task. The result type of the returned task is determined by what returns. - /// - /// - /// The overloads of then that take a lambda or functor that returns a Windows::Foundation::IAsyncInfo interface, are only available - /// to Windows Store apps. - /// For more information on how to use task continuations to compose asynchronous work, see . 
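A usage sketch for the then() overloads documented above, again written against the standard concurrency::task API, where continuations return values directly rather than HRESULTs:

#include <ppltasks.h>
#include <string>
using namespace concurrency;

void continuation_sketch()
{
    cancellation_token_source cts;

    auto chain = create_task([] { return 10; })
        // Value-based continuation: receives the antecedent's result.
        .then([](int v) { return std::to_string(v * 2); })
        // A token (or a full task_options) may be attached per continuation;
        // without one, the continuation inherits the antecedent's token.
        .then([](const std::string& s) { return s.size(); }, cts.get_token());

    auto characters = chain.get();   // 2, since "20" has two characters
    (void)characters;
}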
- /// - /**/ - template - __declspec(noinline) // Ask for no inlining so that the _ReturnAddress intrinsic gives us the expected result - auto then(const _Function& _Func, Concurrency::cancellation_token _CancellationToken, task_continuation_context _ContinuationContext) const -> typename details::_ContinuationTypeTraits<_Function, _ReturnType>::_TaskOfType - { -#if _MSC_VER >= 1800 - task_options _TaskOptions(_CancellationToken, _ContinuationContext); - details::_get_internal_task_options(_TaskOptions)._set_creation_callstack(_CAPTURE_CALLSTACK()); - return _ThenImpl<_ReturnType, _Function>(_Func, _TaskOptions); -#else - auto _ContinuationTask = _ThenImpl<_ReturnType, _Function>(_Func, _CancellationToken._GetImplValue(), _ContinuationContext); - // Do not move the next line out of this function. It is important that _ReturnAddress() evaluate to the the call site of then. - _ContinuationTask._SetTaskCreationAddressHint(_ReturnAddress()); - return _ContinuationTask; -#endif - } - - /// - /// Waits for this task to reach a terminal state. It is possible for wait to execute the task inline, if all of the tasks - /// dependencies are satisfied, and it has not already been picked up for execution by a background worker. - /// - /// - /// A task_status value which could be either completed or canceled. If the task encountered an exception - /// during execution, or an exception was propagated to it from an antecedent task, wait will throw that exception. - /// - /**/ - task_status wait() const - { - if (_M_Impl == nullptr) - { - throw Concurrency::invalid_operation("wait() cannot be called on a default constructed task."); - } - - return _M_Impl->_Wait(); - } - - /// - /// Returns the result this task produced. If the task is not in a terminal state, a call to get will wait for the task to - /// finish. This method does not return a value when called on a task with a result_type of void. - /// - /// - /// The result of the task. - /// - /// - /// If the task is canceled, a call to get will throw a task_canceled exception. If the task - /// encountered an different exception or an exception was propagated to it from an antecedent task, a call to get will throw that exception. - /// - /**/ - _ReturnType get() const - { - if (_M_Impl == nullptr) - { - throw Concurrency::invalid_operation("get() cannot be called on a default constructed task."); - } - - if (_M_Impl->_Wait() == Concurrency::canceled) - { - throw Concurrency::task_canceled(); - } - - return _M_Impl->_GetResult(); - } -#if _MSC_VER >= 1800 - /// - /// Determines if the task is completed. - /// - /// - /// True if the task has completed, false otherwise. - /// - /// - /// The function returns true if the task is completed or canceled (with or without user exception). - /// - bool is_done() const - { - if (!_M_Impl) - { - throw Concurrency::invalid_operation("is_done() cannot be called on a default constructed task."); - } - - return _M_Impl->_IsDone(); - } - - /// - /// Returns the scheduler for this task - /// - /// - /// A pointer to the scheduler - /// - Concurrency::scheduler_ptr scheduler() const - { - if (!_M_Impl) - { - throw Concurrency::invalid_operation("scheduler() cannot be called on a default constructed task."); - } - - return _M_Impl->_GetScheduler(); - } -#endif - /// - /// Determines whether the task unwraps a Windows Runtime IAsyncInfo interface or is descended from such a task. - /// - /// - /// true if the task unwraps an IAsyncInfo interface or is descended from such a task, false otherwise. 
- /// - /**/ - bool is_apartment_aware() const - { - if (_M_Impl == nullptr) - { - throw Concurrency::invalid_operation("is_apartment_aware() cannot be called on a default constructed task."); - } - return _M_Impl->_IsApartmentAware(); - } - - /// - /// Determines whether two task objects represent the same internal task. - /// - /// - /// true if the objects refer to the same underlying task, and false otherwise. - /// - /**/ - bool operator==(const task<_ReturnType>& _Rhs) const - { - return (_M_Impl == _Rhs._M_Impl); - } - - /// - /// Determines whether two task objects represent different internal tasks. - /// - /// - /// true if the objects refer to different underlying tasks, and false otherwise. - /// - /**/ - bool operator!=(const task<_ReturnType>& _Rhs) const - { - return !operator==(_Rhs); - } - - /// - /// Create an underlying task implementation. - /// -#if _MSC_VER >= 1800 - void _CreateImpl(Concurrency::details::_CancellationTokenState * _Ct, Concurrency::scheduler_ptr _Scheduler) -#else - void _CreateImpl(Concurrency::details::_CancellationTokenState * _Ct) -#endif - { - _CONCRT_ASSERT(_Ct != nullptr); -#if _MSC_VER >= 1800 - _M_Impl = details::_Task_ptr<_ReturnType>::_Make(_Ct, _Scheduler); -#else - _M_Impl = details::_Task_ptr<_ReturnType>::_Make(_Ct); -#endif - if (_Ct != Concurrency::details::_CancellationTokenState::_None()) - { -#if _MSC_VER >= 1800 - _M_Impl->_RegisterCancellation(_M_Impl); -#else - _M_Impl->_RegisterCancellation(); -#endif - } - } - - /// - /// Return the underlying implementation for this task. - /// - const typename details::_Task_ptr<_ReturnType>::_Type & _GetImpl() const - { - return _M_Impl; - } - - /// - /// Set the implementation of the task to be the supplied implementaion. - /// - void _SetImpl(const typename details::_Task_ptr<_ReturnType>::_Type & _Impl) - { - _CONCRT_ASSERT(_M_Impl == nullptr); - _M_Impl = _Impl; - } - - /// - /// Set the implementation of the task to be the supplied implementaion using a move instead of a copy. - /// - void _SetImpl(typename details::_Task_ptr<_ReturnType>::_Type && _Impl) - { - _CONCRT_ASSERT(_M_Impl == nullptr); - _M_Impl = std::move(_Impl); - } - - /// - /// Sets a property determining whether the task is apartment aware. - /// - void _SetAsync(bool _Async = true) - { - _GetImpl()->_SetAsync(_Async); - } - - /// - /// Sets a field in the task impl to the return address for calls to the task constructors and the then method. - /// -#if _MSC_VER >= 1800 - void _SetTaskCreationCallstack(const details::_TaskCreationCallstack &_callstack) - { - _GetImpl()->_SetTaskCreationCallstack(_callstack); - } -#else - void _SetTaskCreationAddressHint(void* _Address) - { - _GetImpl()->_SetTaskCreationAddressHint(_Address); - } -#endif - /// - /// An internal version of then that takes additional flags and always execute the continuation inline by default. - /// When _ForceInline is set to false, continuations inlining will be limited to default _DefaultAutoInline. - /// This function is Used for runtime internal continuations only. 
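The blocking accessors described above behave roughly as sketched below with the standard header: get() rethrows a stored exception and throws task_canceled for a canceled task, while wait() reports cancellation through its task_status return value (is_done() is only available on newer compilers, per the _MSC_VER >= 1800 guard above).

#include <ppltasks.h>
#include <iostream>
#include <stdexcept>
using namespace concurrency;

void accessor_sketch()
{
    task<int> seven([] { return 7; });

    if (seven.wait() == completed)         // completed or canceled
        std::cout << seven.get() << '\n';  // 7; is_done() would now report true

    try
    {
        task<int> failing([]() -> int { throw std::runtime_error("boom"); });
        failing.get();                     // rethrows the stored exception
    }
    catch (const std::exception& e)
    {
        std::cout << "observed: " << e.what() << '\n';
    }
}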
- /// - template -#if _MSC_VER >= 1800 - auto _Then(const _Function& _Func, Concurrency::details::_CancellationTokenState *_PTokenState, - details::_TaskInliningMode _InliningMode = Concurrency::details::_ForceInline) const -> typename details::_ContinuationTypeTraits<_Function, _ReturnType>::_TaskOfType - { - // inherit from antecedent - auto _Scheduler = _GetImpl()->_GetScheduler(); - - return _ThenImpl<_ReturnType, _Function>(_Func, _PTokenState, task_continuation_context::use_default(), _Scheduler, _CAPTURE_CALLSTACK(), _InliningMode); - } -#else - auto _Then(const _Function& _Func, Concurrency::details::_CancellationTokenState *_PTokenState, bool _Aggregating, - details::_TaskInliningMode _InliningMode = Concurrency::details::_ForceInline) const -> typename details::_ContinuationTypeTraits<_Function, _ReturnType>::_TaskOfType - { - return _ThenImpl<_ReturnType, _Function>(_Func, _PTokenState, task_continuation_context::use_default(), _Aggregating, _InliningMode); - } -#endif - -private: - template friend class task; - - // A helper class template that transforms an intial task lambda returns void into a lambda that returns a non-void type (details::_Unit_type is used - // to substitute for void). This is to minimize the special handling required for 'void'. - template - class _Init_func_transformer - { - public: - static auto _Perform(std::function _Func) -> decltype(_Func) - { - return _Func; - } - }; - - template<> - class _Init_func_transformer - { - public: - static auto _Perform(std::function _Func) -> decltype(details::_MakeVoidToUnitFunc(_Func)) - { - return details::_MakeVoidToUnitFunc(_Func); - } - }; - - // The task handle type used to construct an 'initial task' - a task with no dependents. - template - struct _InitialTaskHandle : - details::_PPLTaskHandle<_ReturnType, _InitialTaskHandle<_InternalReturnType, _Function, _TypeSelection>, details::_UnrealizedChore> - { - _Function _M_function; - _InitialTaskHandle(const typename details::_Task_ptr<_ReturnType>::_Type & _TaskImpl, const _Function & _Function) : _M_function(_Function), _PPLTaskHandle(_TaskImpl) - { - } - virtual ~_InitialTaskHandle() {} - -#if _MSC_VER >= 1800 - template - auto _LogWorkItemAndInvokeUserLambda(_Func && _func, _RetArg && _retArg) const -> decltype(_func(std::forward<_RetArg>(_retArg))) - { - details::_TaskWorkItemRAIILogger _LogWorkItem(this->_M_pTask->_M_taskEventLogger); - return _func(std::forward<_RetArg>(_retArg)); - } -#endif - - void _Perform() const - { - _Init(_TypeSelection()); - } -#if _MSC_VER >= 1800 - - void _SyncCancelAndPropagateException() const - { - this->_M_pTask->_Cancel(true); - } -#endif - // - // Overload 0: returns _InternalReturnType - // - // This is the most basic task with no unwrapping - // - void _Init(details::_TypeSelectorNoAsync) const - { - _ReturnType retVal; -#if _MSC_VER >= 1800 - HRESULT hr = _LogWorkItemAndInvokeUserLambda(_Init_func_transformer<_InternalReturnType>::_Perform(_M_function), &retVal); -#else - HRESULT hr = _Init_func_transformer<_InternalReturnType>::_Perform(_M_function)(&retVal); -#endif - if (FAILED(hr)) throw std::make_exception_ptr(hr); - _M_pTask->_FinalizeAndRunContinuations(retVal); - } - - // - // Overload 1: returns IAsyncOperation<_InternalReturnType>* - // or - // returns task<_InternalReturnType> - // - // This is task whose functor returns an async operation or a task which will be unwrapped for continuation - // Depending on the output type, the right _AsyncInit gets invoked - // - void _Init(details::_TypeSelectorAsyncTask) 
const - { - task<_InternalReturnType> retVal; -#if _MSC_VER >= 1800 - HRESULT hr = _LogWorkItemAndInvokeUserLambda(_M_function, &retVal); -#else - HRESULT hr = _M_function(&retVal); -#endif - if (FAILED(hr)) throw std::make_exception_ptr(hr); - details::_Task_impl_base::_AsyncInit<_ReturnType, _InternalReturnType>(_M_pTask, retVal); - } - void _Init(details::_TypeSelectorAsyncOperation) const - { - _ReturnType retVal; -#if _MSC_VER >= 1800 - HRESULT hr = _LogWorkItemAndInvokeUserLambda(_M_function, &retVal); -#else - HRESULT hr = _M_function(&retVal); -#endif - if (FAILED(hr)) throw std::make_exception_ptr(hr); - details::_Task_impl_base::_AsyncInit<_ReturnType, _InternalReturnType>(_M_pTask, - Microsoft::WRL::Make>(retVal).Get()); - } - - // - // Overload 2: returns IAsyncAction* - // - // This is task whose functor returns an async action which will be unwrapped for continuation - // - void _Init(details::_TypeSelectorAsyncAction) const - { - _ReturnType retVal; -#if _MSC_VER >= 1800 - HRESULT hr = _LogWorkItemAndInvokeUserLambda(_M_function, &retVal); -#else - HRESULT hr = _M_function(&retVal); -#endif - if (FAILED(hr)) throw std::make_exception_ptr(hr); - details::_Task_impl_base::_AsyncInit<_ReturnType, _InternalReturnType>(_M_pTask, Microsoft::WRL::Make(retVal).Get()); - } - - // - // Overload 3: returns IAsyncOperationWithProgress<_InternalReturnType, _ProgressType>* - // - // This is task whose functor returns an async operation with progress which will be unwrapped for continuation - // - void _Init(details::_TypeSelectorAsyncOperationWithProgress) const - { - typedef details::_GetProgressType::_Value _ProgressType; - _ReturnType retVal; -#if _MSC_VER >= 1800 - HRESULT hr = _LogWorkItemAndInvokeUserLambda(_M_function, &retVal); -#else - HRESULT hr = _M_function(&retVal); -#endif - if (FAILED(hr)) throw std::make_exception_ptr(hr); - details::_Task_impl_base::_AsyncInit<_ReturnType, _InternalReturnType>(_M_pTask, - Microsoft::WRL::Make>(retVal).Get()); - } - - // - // Overload 4: returns IAsyncActionWithProgress<_ProgressType>* - // - // This is task whose functor returns an async action with progress which will be unwrapped for continuation - // - void _Init(details::_TypeSelectorAsyncActionWithProgress) const - { - typedef details::_GetProgressType::_Value _ProgressType; - _ReturnType retVal; -#if _MSC_VER >= 1800 - HRESULT hr = _LogWorkItemAndInvokeUserLambda(_M_function, &retVal); -#else - HRESULT hr = _M_function(&retVal); -#endif - if (FAILED(hr)) throw std::make_exception_ptr(hr); - details::_Task_impl_base::_AsyncInit<_ReturnType, _InternalReturnType>(_M_pTask, - Microsoft::WRL::Make>(retVal).Get()); - } - }; - - /// - /// A helper class template that transforms a continuation lambda that either takes or returns void, or both, into a lambda that takes and returns a - /// non-void type (details::_Unit_type is used to substitute for void). This is to minimize the special handling required for 'void'. 
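The _Init overloads above are what give tasks their unwrapping behaviour: when the functor returns a task (or, in Windows Store code, an IAsync* interface), the outer task adopts the inner operation instead of nesting it. A sketch with the standard API:

#include <ppltasks.h>
using namespace concurrency;

// The lambda returns task<int>, yet the resulting type is task<int>, not
// task<task<int>>; the outer task completes only when the inner task does.
task<int> unwrapping_sketch()
{
    return create_task([]() -> task<int>
    {
        return create_task([] { return 5; });
    });
}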
- /// - template - class _Continuation_func_transformer - { - public: - static auto _Perform(std::function _Func) -> decltype(_Func) - { - return _Func; - } - }; - - template - class _Continuation_func_transformer - { - public: - static auto _Perform(std::function _Func) -> decltype(details::_MakeUnitToTFunc<_OutType>(_Func)) - { - return details::_MakeUnitToTFunc<_OutType>(_Func); - } - }; - - template - class _Continuation_func_transformer<_InType, void> - { - public: - static auto _Perform(std::function _Func) -> decltype(details::_MakeTToUnitFunc<_InType>(_Func)) - { - return details::_MakeTToUnitFunc<_InType>(_Func); - } - }; - - template<> - class _Continuation_func_transformer - { - public: - static auto _Perform(std::function _Func) -> decltype(details::_MakeUnitToUnitFunc(_Func)) - { - return details::_MakeUnitToUnitFunc(_Func); - } - }; - /// - /// The task handle type used to create a 'continuation task'. - /// - template - struct _ContinuationTaskHandle : - details::_PPLTaskHandle::_Type, - _ContinuationTaskHandle<_InternalReturnType, _ContinuationReturnType, _Function, _IsTaskBased, _TypeSelection>, details::_ContinuationTaskHandleBase> - { - typedef typename details::_NormalizeVoidToUnitType<_ContinuationReturnType>::_Type _NormalizedContinuationReturnType; - - typename details::_Task_ptr<_ReturnType>::_Type _M_ancestorTaskImpl; - _Function _M_function; - - _ContinuationTaskHandle(const typename details::_Task_ptr<_ReturnType>::_Type & _AncestorImpl, - const typename details::_Task_ptr<_NormalizedContinuationReturnType>::_Type & _ContinuationImpl, - const _Function & _Func, const task_continuation_context & _Context, details::_TaskInliningMode _InliningMode) : -#if _MSC_VER >= 1800 - details::_PPLTaskHandle::_Type, - _ContinuationTaskHandle<_InternalReturnType, _ContinuationReturnType, _Function, _IsTaskBased, _TypeSelection>, details::_ContinuationTaskHandleBase> - ::_PPLTaskHandle(_ContinuationImpl) - , _M_ancestorTaskImpl(_AncestorImpl) - , _M_function(_Func) -#else - _M_ancestorTaskImpl(_AncestorImpl), _PPLTaskHandle(_ContinuationImpl), _M_function(_Func) -#endif - { - _M_isTaskBasedContinuation = _IsTaskBased::value; - _M_continuationContext = _Context; - _M_continuationContext._Resolve(_AncestorImpl->_IsApartmentAware()); - _M_inliningMode = _InliningMode; - } - - virtual ~_ContinuationTaskHandle() {} - -#if _MSC_VER >= 1800 - template - auto _LogWorkItemAndInvokeUserLambda(_Func && _func, _Arg && _value, _RetArg && _retArg) const -> decltype(_func(std::forward<_Arg>(_value), std::forward<_RetArg>(_retArg))) - { - details::_TaskWorkItemRAIILogger _LogWorkItem(this->_M_pTask->_M_taskEventLogger); - return _func(std::forward<_Arg>(_value), std::forward<_RetArg>(_retArg)); - } -#endif - - void _Perform() const - { - _Continue(_IsTaskBased(), _TypeSelection()); - } - -#if _MSC_VER >= 1800 - void _SyncCancelAndPropagateException() const - { - if (_M_ancestorTaskImpl->_HasUserException()) - { - // If the ancestor encountered an exception, transfer the exception to the continuation - // This traverses down the tree to propagate the exception. - this->_M_pTask->_CancelWithExceptionHolder(_M_ancestorTaskImpl->_GetExceptionHolder(), true); - } - else - { - // If the ancestor was canceled, then your own execution should be canceled. - // This traverses down the tree to cancel it. 
- this->_M_pTask->_Cancel(true); - } - } -#endif - - // - // Overload 0-0: _InternalReturnType -> _TaskType - // - // This is a straight task continuation which simply invokes its target with the ancestor's completion argument - // - void _Continue(std::false_type, details::_TypeSelectorNoAsync) const - { - _NormalizedContinuationReturnType retVal; -#if _MSC_VER >= 1800 - HRESULT hr = _LogWorkItemAndInvokeUserLambda(_Continuation_func_transformer<_InternalReturnType, _ContinuationReturnType>::_Perform(_M_function), _M_ancestorTaskImpl->_GetResult(), &retVal); -#else - HRESULT hr =_Continuation_func_transformer<_InternalReturnType, _ContinuationReturnType>::_Perform(_M_function)(_M_ancestorTaskImpl->_GetResult(), &retVal); -#endif - if (FAILED(hr)) throw std::make_exception_ptr(hr); - _M_pTask->_FinalizeAndRunContinuations(retVal); - } - - // - // Overload 0-1: _InternalReturnType -> IAsyncOperation<_TaskType>* - // or - // _InternalReturnType -> task<_TaskType> - // - // This is a straight task continuation which returns an async operation or a task which will be unwrapped for continuation - // Depending on the output type, the right _AsyncInit gets invoked - // - void _Continue(std::false_type, details::_TypeSelectorAsyncTask) const - { - typedef typename details::_FunctionTypeTraits<_Function, _InternalReturnType>::_FuncRetType _FuncOutputType; - _FuncOutputType retVal; -#if _MSC_VER >= 1800 - HRESULT hr = _LogWorkItemAndInvokeUserLambda(_Continuation_func_transformer<_InternalReturnType, _FuncOutputType>::_Perform(_M_function), _M_ancestorTaskImpl->_GetResult(), &retVal); -#else - HRESULT hr = _Continuation_func_transformer<_InternalReturnType, _FuncOutputType>::_Perform(_M_function)(_M_ancestorTaskImpl->_GetResult(), &retVal); -#endif - if (FAILED(hr)) throw std::make_exception_ptr(hr); - details::_Task_impl_base::_AsyncInit<_NormalizedContinuationReturnType, _ContinuationReturnType>( - _M_pTask, - retVal - ); - } - void _Continue(std::false_type, details::_TypeSelectorAsyncOperation) const - { - typedef typename details::_FunctionTypeTraits<_Function, _InternalReturnType>::_FuncRetType _FuncOutputType; - _FuncOutputType retVal; -#if _MSC_VER >= 1800 - HRESULT hr = _LogWorkItemAndInvokeUserLambda(_Continuation_func_transformer<_InternalReturnType, _FuncOutputType>::_Perform(_M_function), _M_ancestorTaskImpl->_GetResult(), &retVal); -#else - HRESULT hr = _Continuation_func_transformer<_InternalReturnType, _FuncOutputType>::_Perform(_M_function)(_M_ancestorTaskImpl->_GetResult(), &retVal); -#endif - if (FAILED(hr)) throw std::make_exception_ptr(hr); - details::_Task_impl_base::_AsyncInit<_NormalizedContinuationReturnType, _ContinuationReturnType>( - _M_pTask, - Microsoft::WRL::Make>(retVal).Get()); - } - - // - // Overload 0-2: _InternalReturnType -> IAsyncAction* - // - // This is a straight task continuation which returns an async action which will be unwrapped for continuation - // - void _Continue(std::false_type, details::_TypeSelectorAsyncAction) const - { - typedef details::_FunctionTypeTraits<_Function, _InternalReturnType>::_FuncRetType _FuncOutputType; - _FuncOutputType retVal; -#if _MSC_VER >= 1800 - HRESULT hr = _LogWorkItemAndInvokeUserLambda(_Continuation_func_transformer<_InternalReturnType, _FuncOutputType>::_Perform(_M_function), _M_ancestorTaskImpl->_GetResult(), &retVal); -#else - HRESULT hr = _Continuation_func_transformer<_InternalReturnType, _FuncOutputType>::_Perform(_M_function)(_M_ancestorTaskImpl->_GetResult(), &retVal); -#endif - if (FAILED(hr)) throw 
std::make_exception_ptr(hr); - details::_Task_impl_base::_AsyncInit<_NormalizedContinuationReturnType, _ContinuationReturnType>( - _M_pTask, - Microsoft::WRL::Make( - retVal).Get()); - } - - // - // Overload 0-3: _InternalReturnType -> IAsyncOperationWithProgress<_TaskType, _ProgressType>* - // - // This is a straight task continuation which returns an async operation with progress which will be unwrapped for continuation - // - void _Continue(std::false_type, details::_TypeSelectorAsyncOperationWithProgress) const - { - typedef details::_FunctionTypeTraits<_Function, _InternalReturnType>::_FuncRetType _FuncOutputType; - - _FuncOutputType _OpWithProgress; -#if _MSC_VER >= 1800 - HRESULT hr = _LogWorkItemAndInvokeUserLambda(_Continuation_func_transformer<_InternalReturnType, _FuncOutputType>::_Perform(_M_function), _M_ancestorTaskImpl->_GetResult(), &_OpWithProgress); -#else - HRESULT hr = _Continuation_func_transformer<_InternalReturnType, _FuncOutputType>::_Perform(_M_function)(_M_ancestorTaskImpl->_GetResult(), &_OpWithProgress); -#endif - typedef details::_GetProgressType::_Value _ProgressType; - - if (FAILED(hr)) throw std::make_exception_ptr(hr); - details::_Task_impl_base::_AsyncInit<_NormalizedContinuationReturnType, _ContinuationReturnType>( - _M_pTask, - Microsoft::WRL::Make>(_OpWithProgress).Get()); - } - - // - // Overload 0-4: _InternalReturnType -> IAsyncActionWithProgress<_ProgressType>* - // - // This is a straight task continuation which returns an async action with progress which will be unwrapped for continuation - // - void _Continue(std::false_type, details::_TypeSelectorAsyncActionWithProgress) const - { - typedef details::_FunctionTypeTraits<_Function, _InternalReturnType>::_FuncRetType _FuncOutputType; - - _FuncOutputType _OpWithProgress; -#if _MSC_VER >= 1800 - HRESULT hr = _LogWorkItemAndInvokeUserLambda(_Continuation_func_transformer<_InternalReturnType, _FuncOutputType>::_Perform(_M_function), _M_ancestorTaskImpl->_GetResult(), &_OpWithProgress); -#else - HRESULT hr = _Continuation_func_transformer<_InternalReturnType, _FuncOutputType>::_Perform(_M_function)(_M_ancestorTaskImpl->_GetResult(), &_OpWithProgress); -#endif - typedef details::_GetProgressType::_Value _ProgressType; - - if (FAILED(hr)) throw std::make_exception_ptr(hr); - details::_Task_impl_base::_AsyncInit<_NormalizedContinuationReturnType, _ContinuationReturnType>( - _M_pTask, - Microsoft::WRL::Make>(_OpWithProgress).Get()); - } - - - // - // Overload 1-0: task<_InternalReturnType> -> _TaskType - // - // This is an exception handling type of continuation which takes the task rather than the task's result. 
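The "overload 1-x" continuations above receive the antecedent task itself, which is the error-handling pattern the public API exposes: a task-based continuation always runs and can observe a fault or cancellation by calling get() on its argument. A sketch with the standard API:

#include <ppltasks.h>
#include <iostream>
#include <stdexcept>
using namespace concurrency;

void task_based_continuation_sketch()
{
    create_task([]() -> int { throw std::runtime_error("failed"); })
        .then([](task<int> antecedent) -> int
        {
            try
            {
                return antecedent.get();    // rethrows the stored exception
            }
            catch (const std::exception& e)
            {
                std::cout << "recovered from: " << e.what() << '\n';
                return -1;                  // substitute a fallback result
            }
        })
        .wait();
}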
- // - void _Continue(std::true_type, details::_TypeSelectorNoAsync) const - { - typedef task<_InternalReturnType> _FuncInputType; - task<_InternalReturnType> _ResultTask; - _ResultTask._SetImpl(std::move(_M_ancestorTaskImpl)); - _NormalizedContinuationReturnType retVal; -#if _MSC_VER >= 1800 - HRESULT hr = _LogWorkItemAndInvokeUserLambda(_Continuation_func_transformer<_FuncInputType, _ContinuationReturnType>::_Perform(_M_function), std::move(_ResultTask), &retVal); -#else - HRESULT hr = _Continuation_func_transformer<_FuncInputType, _ContinuationReturnType>::_Perform(_M_function)(std::move(_ResultTask), &retVal); -#endif - if (FAILED(hr)) throw std::make_exception_ptr(hr); - _M_pTask->_FinalizeAndRunContinuations(retVal); - } - - // - // Overload 1-1: task<_InternalReturnType> -> IAsyncOperation<_TaskType>^ - // or - // task<_TaskType> - // - // This is an exception handling type of continuation which takes the task rather than - // the task's result. It also returns an async operation or a task which will be unwrapped - // for continuation - // - void _Continue(std::true_type, details::_TypeSelectorAsyncTask) const - { - // The continuation takes a parameter of type task<_Input>, which is the same as the ancestor task. - task<_InternalReturnType> _ResultTask; - _ResultTask._SetImpl(std::move(_M_ancestorTaskImpl)); - _ContinuationReturnType retVal; -#if _MSC_VER >= 1800 - HRESULT hr = _LogWorkItemAndInvokeUserLambda(_M_function, std::move(_ResultTask), &retVal); -#else - HRESULT hr = _M_function(std::move(_ResultTask), &retVal); -#endif - if (FAILED(hr)) throw std::make_exception_ptr(hr); - details::_Task_impl_base::_AsyncInit<_NormalizedContinuationReturnType, _ContinuationReturnType>(_M_pTask, retVal); - } - void _Continue(std::true_type, details::_TypeSelectorAsyncOperation) const - { - // The continuation takes a parameter of type task<_Input>, which is the same as the ancestor task. - task<_InternalReturnType> _ResultTask; - _ResultTask._SetImpl(std::move(_M_ancestorTaskImpl)); - _ContinuationReturnType retVal; -#if _MSC_VER >= 1800 - HRESULT hr = _LogWorkItemAndInvokeUserLambda(_M_function, std::move(_ResultTask), &retVal); -#else - HRESULT hr = _M_function(std::move(_ResultTask), &retVal); -#endif - if (FAILED(hr)) throw std::make_exception_ptr(hr); - details::_Task_impl_base::_AsyncInit<_NormalizedContinuationReturnType, _ContinuationReturnType>(_M_pTask, - Microsoft::WRL::Make>(retVal)); - } - - // - // Overload 1-2: task<_InternalReturnType> -> IAsyncAction* - // - // This is an exception handling type of continuation which takes the task rather than - // the task's result. It also returns an async action which will be unwrapped for continuation - // - void _Continue(std::true_type, details::_TypeSelectorAsyncAction) const - { - // The continuation takes a parameter of type task<_Input>, which is the same as the ancestor task. 
- task<_InternalReturnType> _ResultTask; - _ResultTask._SetImpl(std::move(_M_ancestorTaskImpl)); - _ContinuationReturnType retVal; -#if _MSC_VER >= 1800 - HRESULT hr = _LogWorkItemAndInvokeUserLambda(_M_function, std::move(_ResultTask), &retVal); -#else - HRESULT hr = _M_function(std::move(_ResultTask), &retVal); -#endif - if (FAILED(hr)) throw std::make_exception_ptr(hr); - details::_Task_impl_base::_AsyncInit<_NormalizedContinuationReturnType, _ContinuationReturnType>(_M_pTask, - Microsoft::WRL::Make(retVal)); - } - - // - // Overload 1-3: task<_InternalReturnType> -> IAsyncOperationWithProgress<_TaskType, _ProgressType>* - // - // This is an exception handling type of continuation which takes the task rather than - // the task's result. It also returns an async operation with progress which will be unwrapped - // for continuation - // - void _Continue(std::true_type, details::_TypeSelectorAsyncOperationWithProgress) const - { - // The continuation takes a parameter of type task<_Input>, which is the same as the ancestor task. - task<_InternalReturnType> _ResultTask; - _ResultTask._SetImpl(std::move(_M_ancestorTaskImpl)); - - typedef details::_GetProgressType::_Value _ProgressType; - _ContinuationReturnType retVal; -#if _MSC_VER >= 1800 - HRESULT hr = _LogWorkItemAndInvokeUserLambda(_M_function, std::move(_ResultTask), &retVal); -#else - HRESULT hr = _M_function(std::move(_ResultTask), &retVal); -#endif - if (FAILED(hr)) throw std::make_exception_ptr(hr); - details::_Task_impl_base::_AsyncInit<_NormalizedContinuationReturnType, _ContinuationReturnType>(_M_pTask, - Microsoft::WRL::Make>(retVal)); - } - - // - // Overload 1-4: task<_InternalReturnType> -> IAsyncActionWithProgress<_ProgressType>* - // - // This is an exception handling type of continuation which takes the task rather than - // the task's result. It also returns an async operation with progress which will be unwrapped - // for continuation - // - void _Continue(std::true_type, details::_TypeSelectorAsyncActionWithProgress) const - { - // The continuation takes a parameter of type task<_Input>, which is the same as the ancestor task. - task<_InternalReturnType> _ResultTask; - _ResultTask._SetImpl(std::move(_M_ancestorTaskImpl)); - - typedef details::_GetProgressType::_Value _ProgressType; - _ContinuationReturnType retVal; -#if _MSC_VER >= 1800 - HRESULT hr = _LogWorkItemAndInvokeUserLambda(_M_function, std::move(_ResultTask), &retVal); -#else - HRESULT hr = _M_function(std::move(_ResultTask), &retVal); -#endif - if (FAILED(hr)) throw std::make_exception_ptr(hr); - details::_Task_impl_base::_AsyncInit<_NormalizedContinuationReturnType, _ContinuationReturnType>(_M_pTask, - Microsoft::WRL::Make>(retVal)); - } - }; - /// - /// Initializes a task using a lambda, function pointer or function object. - /// - template - void _TaskInitWithFunctor(const _Function& _Func) - { - typedef details::_InitFunctorTypeTraits<_InternalReturnType, details::_FunctionTypeTraits<_Function, void>::_FuncRetType> _Async_type_traits; - - _M_Impl->_M_fFromAsync = _Async_type_traits::_IsAsyncTask; - _M_Impl->_M_fUnwrappedTask = _Async_type_traits::_IsUnwrappedTaskOrAsync; -#if _MSC_VER >= 1800 - _M_Impl->_M_taskEventLogger._LogScheduleTask(false); -#endif - _M_Impl->_ScheduleTask(new _InitialTaskHandle<_InternalReturnType, _Function, typename _Async_type_traits::_AsyncKind>(_GetImpl(), _Func), Concurrency::details::_NoInline); - } - - /// - /// Initializes a task using a task completion event. 
- /// - void _TaskInitNoFunctor(task_completion_event<_ReturnType>& _Event) - { - _Event._RegisterTask(_M_Impl); - } - - /// - /// Initializes a task using an asynchronous operation IAsyncOperation* - /// - template - void _TaskInitAsyncOp(details::_AsyncInfoImpl<_OpType, _CompHandlerType, _ResultType>* _AsyncOp) - { - _M_Impl->_M_fFromAsync = true; -#if _MSC_VER < 1800 - _M_Impl->_SetScheduledEvent(); -#endif - // Mark this task as started here since we can set the state in the constructor without acquiring a lock. Once _AsyncInit - // returns a completion could execute concurrently and the task must be fully initialized before that happens. - _M_Impl->_M_TaskState = details::_Task_impl_base::_Started; - // Pass the shared pointer into _AsyncInit for storage in the Async Callback. - details::_Task_impl_base::_AsyncInit<_ReturnType, _Result>(_M_Impl, _AsyncOp); - } - - /// - /// Initializes a task using an asynchronous operation IAsyncOperation* - /// - template - void _TaskInitNoFunctor(ABI::Windows::Foundation::IAsyncOperation<_Result>* _AsyncOp) - { - _TaskInitAsyncOp<_Result>(Microsoft::WRL::Make>(_AsyncOp).Get()); - } - - /// - /// Initializes a task using an asynchronous operation with progress IAsyncOperationWithProgress* - /// - template - void _TaskInitNoFunctor(ABI::Windows::Foundation::IAsyncOperationWithProgress<_Result, _Progress>* _AsyncOp) - { - _TaskInitAsyncOp<_Result>(Microsoft::WRL::Make>(_AsyncOp).Get()); - } - /// - /// Initializes a task using a callable object. - /// - template - void _TaskInitMaybeFunctor(_Function & _Func, std::true_type) - { - _TaskInitWithFunctor<_ReturnType, _Function>(_Func); - } - - /// - /// Initializes a task using a non-callable object. - /// - template - void _TaskInitMaybeFunctor(_Ty & _Param, std::false_type) - { - _TaskInitNoFunctor(_Param); - } -#if _MSC_VER >= 1800 - template - auto _ThenImpl(const _Function& _Func, const task_options& _TaskOptions) const -> typename details::_ContinuationTypeTraits<_Function, _InternalReturnType>::_TaskOfType - { - if (!_M_Impl) - { - throw Concurrency::invalid_operation("then() cannot be called on a default constructed task."); - } - - Concurrency::details::_CancellationTokenState *_PTokenState = _TaskOptions.has_cancellation_token() ? _TaskOptions.get_cancellation_token()._GetImplValue() : nullptr; - auto _Scheduler = _TaskOptions.has_scheduler() ? _TaskOptions.get_scheduler() : _GetImpl()->_GetScheduler(); - auto _CreationStack = details::_get_internal_task_options(_TaskOptions)._M_hasPresetCreationCallstack ? details::_get_internal_task_options(_TaskOptions)._M_presetCreationCallstack : details::_TaskCreationCallstack(); - return _ThenImpl<_InternalReturnType, _Function>(_Func, _PTokenState, _TaskOptions.get_continuation_context(), _Scheduler, _CreationStack); - } -#endif - /// - /// The one and only implementation of then for void and non-void tasks. 
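_TaskInitNoFunctor(task_completion_event<...>&) above is the hook behind event-backed tasks: the task stays pending until the event is set, at which point its continuations are scheduled. A small sketch with the standard API:

#include <ppltasks.h>
#include <iostream>
using namespace concurrency;

void completion_event_sketch()
{
    task_completion_event<int> tce;
    task<int> pending(tce);              // not scheduled; waits on the event

    auto printed = pending.then([](int v)
    {
        std::cout << "delivered " << v << '\n';
    });

    tce.set(99);                         // completes `pending`, schedules `printed`
    printed.wait();
}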
- /// - template -#if _MSC_VER >= 1800 - auto _ThenImpl(const _Function& _Func, Concurrency::details::_CancellationTokenState *_PTokenState, const task_continuation_context& _ContinuationContext, Concurrency::scheduler_ptr _Scheduler, details::_TaskCreationCallstack _CreationStack, - details::_TaskInliningMode _InliningMode = Concurrency::details::_NoInline) const -> typename details::_ContinuationTypeTraits<_Function, _InternalReturnType>::_TaskOfType -#else - auto _ThenImpl(const _Function& _Func, Concurrency::details::_CancellationTokenState *_PTokenState, const task_continuation_context& _ContinuationContext, - bool _Aggregating = false, details::_TaskInliningMode _InliningMode = Concurrency::details::_NoInline) const -> typename details::_ContinuationTypeTraits<_Function, _InternalReturnType>::_TaskOfType -#endif - { - if (_M_Impl == nullptr) - { - throw Concurrency::invalid_operation("then() cannot be called on a default constructed task."); - } - - typedef details::_FunctionTypeTraits<_Function, _InternalReturnType> _Function_type_traits; - typedef details::_TaskTypeTraits _Async_type_traits; - typedef typename _Async_type_traits::_TaskRetType _TaskType; - - // - // A **nullptr** token state indicates that it was not provided by the user. In this case, we inherit the antecedent's token UNLESS this is a - // an exception handling continuation. In that case, we break the chain with a _None. That continuation is never canceled unless the user - // explicitly passes the same token. - // - if (_PTokenState == nullptr) - { -#if _MSC_VER >= 1800 - if (_Function_type_traits::_Takes_task::value) -#else - if (_Function_type_traits::_Takes_task()) -#endif - { - _PTokenState = Concurrency::details::_CancellationTokenState::_None(); - } - else - { - _PTokenState = _GetImpl()->_M_pTokenState; - } - } - - task<_TaskType> _ContinuationTask; -#if _MSC_VER >= 1800 - _ContinuationTask._CreateImpl(_PTokenState, _Scheduler); -#else - _ContinuationTask._CreateImpl(_PTokenState); -#endif - _ContinuationTask._GetImpl()->_M_fFromAsync = (_GetImpl()->_M_fFromAsync || _Async_type_traits::_IsAsyncTask); -#if _MSC_VER < 1800 - _ContinuationTask._GetImpl()->_M_fRuntimeAggregate = _Aggregating; -#endif - _ContinuationTask._GetImpl()->_M_fUnwrappedTask = _Async_type_traits::_IsUnwrappedTaskOrAsync; -#if _MSC_VER >= 1800 - _ContinuationTask._SetTaskCreationCallstack(_CreationStack); -#endif - _GetImpl()->_ScheduleContinuation(new _ContinuationTaskHandle<_InternalReturnType, _TaskType, _Function, typename _Function_type_traits::_Takes_task, typename _Async_type_traits::_AsyncKind>( - _GetImpl(), _ContinuationTask._GetImpl(), _Func, _ContinuationContext, _InliningMode)); - - return _ContinuationTask; - } - - // The underlying implementation for this task - typename details::_Task_ptr<_ReturnType>::_Type _M_Impl; -}; - -/// -/// The Parallel Patterns Library (PPL) task class. A task object represents work that can be executed asynchronously, -/// and concurrently with other tasks and parallel work produced by parallel algorithms in the Concurrency Runtime. It produces -/// a result of type on successful completion. Tasks of type task<void> produce no result. -/// A task can be waited upon and canceled independently of other tasks. It can also be composed with other tasks using -/// continuations(then), and join(when_all) and choice(when_any) patterns. -/// -/// -/// For more information, see . -/// -/**/ -template<> -class task -{ -public: - /// - /// The type of the result an object of this class produces. 
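_ThenImpl above encodes the token-inheritance rule spelled out in its comment: a continuation created without a token inherits its antecedent's token, unless it is a task-based (error-handling) continuation, whose token chain is deliberately broken. A sketch of the observable effect, assuming cooperative cancellation inside the work:

#include <ppltasks.h>
using namespace concurrency;

void token_inheritance_sketch()
{
    cancellation_token_source cts;

    auto root = create_task([]
    {
        // Cooperative cancellation: bail out once cancellation is requested.
        if (is_task_cancellation_requested())
            cancel_current_task();
    }, cts.get_token());

    // No token passed here, so this value-based continuation shares root's token
    // and is canceled along with it.
    auto child = root.then([] { /* further work */ });

    cts.cancel();
    child.wait();   // completed or canceled, depending on when the cancel landed
}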
- /// - /**/ - typedef void result_type; - - /// - /// Constructs a task object. - /// - /// - /// The default constructor for a task is only present in order to allow tasks to be used within containers. - /// A default constructed task cannot be used until you assign a valid task to it. Methods such as get, wait or then - /// will throw an invalid_argument exception when called on a default constructed task. - /// A task that is created from a task_completion_event will complete (and have its continuations scheduled) when the task - /// completion event is set. - /// The version of the constructor that takes a cancellation token creates a task that can be canceled using the - /// cancellation_token_source the token was obtained from. Tasks created without a cancellation token are not cancelable. - /// Tasks created from a Windows::Foundation::IAsyncInfo interface or a lambda that returns an IAsyncInfo interface - /// reach their terminal state when the enclosed Windows Runtime asynchronous operation or action completes. Similarly, tasks created - /// from a lamda that returns a task<result_type> reach their terminal state when the inner task reaches its terminal state, - /// and not when the lamda returns. - /// task behaves like a smart pointer and is safe to pass around by value. It can be accessed by multiple threads - /// without the need for locks. - /// The constructor overloads that take a Windows::Foundation::IAsyncInfo interface or a lambda returning such an interface, are only available - /// to Windows Store apps. - /// For more information, see . - /// - /**/ - task() : _M_unitTask() - { - // The default constructor should create a task with a nullptr impl. This is a signal that the - // task is not usable and should throw if any wait(), get() or then() APIs are used. - } -#if _MSC_VER < 1800 - /// - /// Constructs a task object. - /// - /// - /// The type of the parameter from which the task is to be constructed. - /// - /// - /// The parameter from which the task is to be constructed. This could be a lambda, a function object, a task_completion_event<result_type> - /// object, or a Windows::Foundation::IAsyncInfo if you are using tasks in your Windows Store app. The lambda or function - /// object should be a type equivalent to std::function<X(void)>, where X can be a variable of type result_type, - /// task<result_type>, or a Windows::Foundation::IAsyncInfo in Windows Store apps. - /// - /// - /// The default constructor for a task is only present in order to allow tasks to be used within containers. - /// A default constructed task cannot be used until you assign a valid task to it. Methods such as get, wait or then - /// will throw an invalid_argument exception when called on a default constructed task. - /// A task that is created from a task_completion_event will complete (and have its continuations scheduled) when the task - /// completion event is set. - /// The version of the constructor that takes a cancellation token creates a task that can be canceled using the - /// cancellation_token_source the token was obtained from. Tasks created without a cancellation token are not cancelable. - /// Tasks created from a Windows::Foundation::IAsyncInfo interface or a lambda that returns an IAsyncInfo interface - /// reach their terminal state when the enclosed Windows Runtime asynchronous operation or action completes. 
Similarly, tasks created - /// from a lamda that returns a task<result_type> reach their terminal state when the inner task reaches its terminal state, - /// and not when the lamda returns. - /// task behaves like a smart pointer and is safe to pass around by value. It can be accessed by multiple threads - /// without the need for locks. - /// The constructor overloads that take a Windows::Foundation::IAsyncInfo interface or a lambda returning such an interface, are only available - /// to Windows Store apps. - /// For more information, see . - /// - /**/ - template - __declspec(noinline) // Ask for no inlining so that the _ReturnAddress intrinsic gives us the expected result - explicit task(_Ty _Param) - { - details::_ValidateTaskConstructorArgs(_Param); - - _M_unitTask._CreateImpl(Concurrency::cancellation_token::none()._GetImplValue()); - // Do not move the next line out of this function. It is important that _ReturnAddress() evaluate to the the call site of the task constructor. - _M_unitTask._SetTaskCreationAddressHint(_ReturnAddress()); - - _TaskInitMaybeFunctor(_Param, details::_IsCallable(_Param, 0, 0, 0)); - } -#endif - /// - /// Constructs a task object. - /// - /// - /// The type of the parameter from which the task is to be constructed. - /// - /// - /// The parameter from which the task is to be constructed. This could be a lambda, a function object, a task_completion_event<result_type> - /// object, or a Windows::Foundation::IAsyncInfo if you are using tasks in your Windows Store app. The lambda or function - /// object should be a type equivalent to std::function<X(void)>, where X can be a variable of type result_type, - /// task<result_type>, or a Windows::Foundation::IAsyncInfo in Windows Store apps. - /// - /// - /// The cancellation token to associate with this task. A task created without a cancellation token cannot be canceled. It implicitly receives - /// the token cancellation_token::none(). - /// - /// - /// The default constructor for a task is only present in order to allow tasks to be used within containers. - /// A default constructed task cannot be used until you assign a valid task to it. Methods such as get, wait or then - /// will throw an invalid_argument exception when called on a default constructed task. - /// A task that is created from a task_completion_event will complete (and have its continuations scheduled) when the task - /// completion event is set. - /// The version of the constructor that takes a cancellation token creates a task that can be canceled using the - /// cancellation_token_source the token was obtained from. Tasks created without a cancellation token are not cancelable. - /// Tasks created from a Windows::Foundation::IAsyncInfo interface or a lambda that returns an IAsyncInfo interface - /// reach their terminal state when the enclosed Windows Runtime asynchronous operation or action completes. Similarly, tasks created - /// from a lamda that returns a task<result_type> reach their terminal state when the inner task reaches its terminal state, - /// and not when the lamda returns. - /// task behaves like a smart pointer and is safe to pass around by value. It can be accessed by multiple threads - /// without the need for locks. - /// The constructor overloads that take a Windows::Foundation::IAsyncInfo interface or a lambda returning such an interface, are only available - /// to Windows Store apps. - /// For more information, see . 
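For the task<void> specialization introduced above, value-based continuations of a void task take no argument, and a continuation may reintroduce a result part-way down the chain. A sketch with the standard API:

#include <ppltasks.h>
#include <iostream>
using namespace concurrency;

void void_task_sketch()
{
    task<void> first([] { std::cout << "step one\n"; });

    first.then([] { std::cout << "step two\n"; })
         .then([] { return 3; })                            // now a task<int>
         .then([](int n) { std::cout << "step " << n << '\n'; })
         .wait();
}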
- /// - /**/ - template - __declspec(noinline) // Ask for no inlining so that the _ReturnAddress intrinsic gives us the expected result -#if _MSC_VER >= 1800 - explicit task(_Ty _Param, const task_options& _TaskOptions = task_options()) -#else - explicit task(_Ty _Param, Concurrency::cancellation_token _CancellationToken) -#endif - { - details::_ValidateTaskConstructorArgs(_Param); -#if _MSC_VER >= 1800 - _M_unitTask._CreateImpl(_TaskOptions.get_cancellation_token()._GetImplValue(), _TaskOptions.get_scheduler()); -#else - _M_unitTask._CreateImpl(_CancellationToken._GetImplValue()); -#endif - // Do not move the next line out of this function. It is important that _ReturnAddress() evaluate to the the call site of the task constructor. -#if _MSC_VER >= 1800 - _M_unitTask._SetTaskCreationCallstack(details::_get_internal_task_options(_TaskOptions)._M_hasPresetCreationCallstack ? details::_get_internal_task_options(_TaskOptions)._M_presetCreationCallstack : _CAPTURE_CALLSTACK()); -#else - _M_unitTask._SetTaskCreationAddressHint(_ReturnAddress()); -#endif - _TaskInitMaybeFunctor(_Param, details::_IsCallable(_Param, 0, 0, 0)); - } - - /// - /// Constructs a task object. - /// - /// - /// The source task object. - /// - /// - /// The default constructor for a task is only present in order to allow tasks to be used within containers. - /// A default constructed task cannot be used until you assign a valid task to it. Methods such as get, wait or then - /// will throw an invalid_argument exception when called on a default constructed task. - /// A task that is created from a task_completion_event will complete (and have its continuations scheduled) when the task - /// completion event is set. - /// The version of the constructor that takes a cancellation token creates a task that can be canceled using the - /// cancellation_token_source the token was obtained from. Tasks created without a cancellation token are not cancelable. - /// Tasks created from a Windows::Foundation::IAsyncInfo interface or a lambda that returns an IAsyncInfo interface - /// reach their terminal state when the enclosed Windows Runtime asynchronous operation or action completes. Similarly, tasks created - /// from a lamda that returns a task<result_type> reach their terminal state when the inner task reaches its terminal state, - /// and not when the lamda returns. - /// task behaves like a smart pointer and is safe to pass around by value. It can be accessed by multiple threads - /// without the need for locks. - /// The constructor overloads that take a Windows::Foundation::IAsyncInfo interface or a lambda returning such an interface, are only available - /// to Windows Store apps. - /// For more information, see . - /// - /**/ - task(const task& _Other) : _M_unitTask(_Other._M_unitTask){} - - /// - /// Constructs a task object. - /// - /// - /// The source task object. - /// - /// - /// The default constructor for a task is only present in order to allow tasks to be used within containers. - /// A default constructed task cannot be used until you assign a valid task to it. Methods such as get, wait or then - /// will throw an invalid_argument exception when called on a default constructed task. - /// A task that is created from a task_completion_event will complete (and have its continuations scheduled) when the task - /// completion event is set. - /// The version of the constructor that takes a cancellation token creates a task that can be canceled using the - /// cancellation_token_source the token was obtained from. 
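A sketch of the cancellation behaviour documented for the constructor overload that takes a cancellation token, again using the standard concurrency API rather than this header's HRESULT variant. Cancelling the source up front makes the outcome deterministic for the illustration; in general, whether the body is skipped depends on when the cancellation is observed.

    #include <ppltasks.h>
    #include <iostream>

    void cancellable_task_sketch()
    {
        concurrency::cancellation_token_source cts;
        cts.cancel();                               // cancel before the task is created

        auto t = concurrency::task<void>([] {
            // Cooperative work would poll concurrency::is_task_cancellation_requested()
            // and call concurrency::cancel_current_task() to acknowledge a cancellation.
        }, cts.get_token());

        try {
            t.get();                                // a cancelled task surfaces task_canceled from get()
        } catch (const concurrency::task_canceled&) {
            std::cout << "task was cancelled\n";
        }
    }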
Tasks created without a cancellation token are not cancelable. - /// Tasks created from a Windows::Foundation::IAsyncInfo interface or a lambda that returns an IAsyncInfo interface - /// reach their terminal state when the enclosed Windows Runtime asynchronous operation or action completes. Similarly, tasks created - /// from a lamda that returns a task<result_type> reach their terminal state when the inner task reaches its terminal state, - /// and not when the lamda returns. - /// task behaves like a smart pointer and is safe to pass around by value. It can be accessed by multiple threads - /// without the need for locks. - /// The constructor overloads that take a Windows::Foundation::IAsyncInfo interface or a lambda returning such an interface, are only available - /// to Windows Store apps. - /// For more information, see . - /// - /**/ - task(task&& _Other) : _M_unitTask(std::move(_Other._M_unitTask)) {} - - /// - /// Replaces the contents of one task object with another. - /// - /// - /// The source task object. - /// - /// - /// As task behaves like a smart pointer, after a copy assignment, this task objects represents the same - /// actual task as does. - /// - /**/ - task& operator=(const task& _Other) - { - if (this != &_Other) - { - _M_unitTask = _Other._M_unitTask; - } - return *this; - } - - /// - /// Replaces the contents of one task object with another. - /// - /// - /// The source task object. - /// - /// - /// As task behaves like a smart pointer, after a copy assignment, this task objects represents the same - /// actual task as does. - /// - /**/ - task& operator=(task&& _Other) - { - if (this != &_Other) - { - _M_unitTask = std::move(_Other._M_unitTask); - } - return *this; - } -#if _MSC_VER < 1800 - /// - /// Adds a continuation task to this task. - /// - /// - /// The type of the function object that will be invoked by this task. - /// - /// - /// The continuation function to execute when this task completes. This continuation function must take as input - /// a variable of either result_type or task<result_type>, where result_type is the type - /// of the result this task produces. - /// - /// - /// The newly created continuation task. The result type of the returned task is determined by what returns. - /// - /// - /// The overloads of then that take a lambda or functor that returns a Windows::Foundation::IAsyncInfo interface, are only available - /// to Windows Store apps. - /// For more information on how to use task continuations to compose asynchronous work, see . - /// - /**/ - template - __declspec(noinline) // Ask for no inlining so that the _ReturnAddress intrinsic gives us the expected result - auto then(const _Function& _Func) const -> typename details::_ContinuationTypeTraits<_Function, void>::_TaskOfType - { - auto _ContinuationTask = _M_unitTask._ThenImpl(_Func, nullptr, task_continuation_context::use_default()); - // Do not move the next line out of this function. It is important that _ReturnAddress() evaluate to the the call site of then. - _ContinuationTask._SetTaskCreationAddressHint(_ReturnAddress()); - return _ContinuationTask; - } -#endif - /// - /// Adds a continuation task to this task. - /// - /// - /// The type of the function object that will be invoked by this task. - /// - /// - /// The continuation function to execute when this task completes. This continuation function must take as input - /// a variable of either result_type or task<result_type>, where result_type is the type - /// of the result this task produces. 
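The copy and move members documented here are cheap handle operations: copies share one underlying task. A small sketch of that smart-pointer behaviour, assuming the standard concurrency::task API is equivalent for this variant:

    #include <ppltasks.h>
    #include <cassert>
    #include <utility>

    void task_handle_sketch()
    {
        auto a = concurrency::task<int>([] { return 42; });
        concurrency::task<int> b = a;               // copy: b and a refer to the same underlying task
        concurrency::task<int> c;
        c = std::move(a);                           // move assignment transfers the handle

        assert(b.get() == 42);                      // get() may be called through any copy,
        assert(c.get() == 42);                      // from any thread, without additional locking
    }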
- /// - /// - /// The cancellation token to associate with the continuation task. A continuation task that is created without a cancellation token will inherit - /// the token of its antecedent task. - /// - /// - /// The newly created continuation task. The result type of the returned task is determined by what returns. - /// - /// - /// The overloads of then that take a lambda or functor that returns a Windows::Foundation::IAsyncInfo interface, are only available - /// to Windows Store apps. - /// For more information on how to use task continuations to compose asynchronous work, see . - /// - /**/ - template - __declspec(noinline) // Ask for no inlining so that the _ReturnAddress intrinsic gives us the expected result -#if _MSC_VER >= 1800 - auto then(const _Function& _Func, task_options _TaskOptions = task_options()) const -> typename details::_ContinuationTypeTraits<_Function, void>::_TaskOfType - { - details::_get_internal_task_options(_TaskOptions)._set_creation_callstack(_CAPTURE_CALLSTACK()); - return _M_unitTask._ThenImpl(_Func, _TaskOptions); - } -#else - auto then(const _Function& _Func, Concurrency::cancellation_token _CancellationToken) const -> typename details::_ContinuationTypeTraits<_Function, void>::_TaskOfType - { - auto _ContinuationTask = _M_unitTask._ThenImpl(_Func, _CancellationToken._GetImplValue(), task_continuation_context::use_default()); - // Do not move the next line out of this function. It is important that _ReturnAddress() evaluate to the the call site of then. - _ContinuationTask._SetTaskCreationAddressHint(_ReturnAddress()); - return _ContinuationTask; - } - /// - /// Adds a continuation task to this task. - /// - /// - /// The type of the function object that will be invoked by this task. - /// - /// - /// The continuation function to execute when this task completes. This continuation function must take as input - /// a variable of either result_type or task<result_type>, where result_type is the type - /// of the result this task produces. - /// - /// - /// A variable that specifies where the continuation should execute. This variable is only useful when used in a - /// Windows Store app. For more information, see task_continuation_context - /// - /// - /// The newly created continuation task. The result type of the returned task is determined by what returns. - /// - /// - /// The overloads of then that take a lambda or functor that returns a Windows::Foundation::IAsyncInfo interface, are only available - /// to Windows Store apps. - /// For more information on how to use task continuations to compose asynchronous work, see . - /// - /**/ - template - __declspec(noinline) // Ask for no inlining so that the _ReturnAddress intrinsic gives us the expected result - auto then(const _Function& _Func, task_continuation_context _ContinuationContext) const -> typename details::_ContinuationTypeTraits<_Function, void>::_TaskOfType - { - auto _ContinuationTask = _M_unitTask._ThenImpl(_Func, nullptr, _ContinuationContext); - // Do not move the next line out of this function. It is important that _ReturnAddress() evaluate to the the call site of then. - _ContinuationTask._SetTaskCreationAddressHint(_ReturnAddress()); - return _ContinuationTask; - - } -#endif - /// - /// Adds a continuation task to this task. - /// - /// - /// The type of the function object that will be invoked by this task. - /// - /// - /// The continuation function to execute when this task completes. 
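The overloads of then shown above accept a task_continuation_context to control where the continuation runs; outside of a Windows Store app the arbitrary context is the usual choice. A short sketch using the standard API:

    #include <ppltasks.h>
    #include <iostream>

    void continuation_context_sketch()
    {
        concurrency::cancellation_token_source cts;

        auto t = concurrency::create_task([] { return 7; })
            .then([](int v) { return v * 2; },
                  cts.get_token(),                  // overload taking both a token and a context
                  concurrency::task_continuation_context::use_arbitrary());

        std::cout << t.get() << "\n";               // prints 14
    }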
This continuation function must take as input - /// a variable of either result_type or task<result_type>, where result_type is the type - /// of the result this task produces. - /// - /// - /// The cancellation token to associate with the continuation task. A continuation task that is created without a cancellation token will inherit - /// the token of its antecedent task. - /// - /// - /// A variable that specifies where the continuation should execute. This variable is only useful when used in a - /// Windows Store app. For more information, see task_continuation_context - /// - /// - /// The newly created continuation task. The result type of the returned task is determined by what returns. - /// - /// - /// The overloads of then that take a lambda or functor that returns a Windows::Foundation::IAsyncInfo interface, are only available - /// to Windows Store apps. - /// For more information on how to use task continuations to compose asynchronous work, see . - /// - /**/ - template - __declspec(noinline) // Ask for no inlining so that the _ReturnAddress intrinsic gives us the expected result -#if _MSC_VER >= 1800 - auto then(const _Function& _Func, Concurrency::cancellation_token _CancellationToken, task_continuation_context _ContinuationContext) const -> typename details::_ContinuationTypeTraits<_Function, void>::_TaskOfType - { - task_options _TaskOptions(_CancellationToken, _ContinuationContext); - details::_get_internal_task_options(_TaskOptions)._set_creation_callstack(_CAPTURE_CALLSTACK()); - return _M_unitTask._ThenImpl(_Func, _TaskOptions); - } -#else - auto then(const _Function& _Func, Concurrency::cancellation_token _CancellationToken, task_continuation_context _ContinuationContext) const -> typename details::_ContinuationTypeTraits<_Function, void>::_TaskOfType - { - auto _ContinuationTask = _M_unitTask._ThenImpl(_Func, _CancellationToken._GetImplValue(), _ContinuationContext); - // Do not move the next line out of this function. It is important that _ReturnAddress() evaluate to the the call site of then. - _ContinuationTask._SetTaskCreationAddressHint(_ReturnAddress()); - return _ContinuationTask; - } -#endif - - /// - /// Waits for this task to reach a terminal state. It is possible for wait to execute the task inline, if all of the tasks - /// dependencies are satisfied, and it has not already been picked up for execution by a background worker. - /// - /// - /// A task_status value which could be either completed or canceled. If the task encountered an exception - /// during execution, or an exception was propagated to it from an antecedent task, wait will throw that exception. - /// - /**/ - task_status wait() const - { - return _M_unitTask.wait(); - } - - /// - /// Returns the result this task produced. If the task is not in a terminal state, a call to get will wait for the task to - /// finish. This method does not return a value when called on a task with a result_type of void. - /// - /// - /// If the task is canceled, a call to get will throw a task_canceled exception. If the task - /// encountered an different exception or an exception was propagated to it from an antecedent task, a call to get will throw that exception. - /// - /**/ - void get() const - { - _M_unitTask.get(); - } -#if _MSC_VER >= 1800 - - /// - /// Determines if the task is completed. - /// - /// - /// True if the task has completed, false otherwise. - /// - /// - /// The function returns true if the task is completed or canceled (with or without user exception). 
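wait, get and is_done observe the terminal state, and a task-based continuation is the usual place to observe cancellation or an exception without letting it escape. A sketch with the standard API; is_done corresponds to the _MSC_VER >= 1800 section above.

    #include <ppltasks.h>
    #include <iostream>
    #include <stdexcept>

    void observe_result_sketch()
    {
        auto done = concurrency::create_task([]() -> int { throw std::runtime_error("boom"); })
            .then([](concurrency::task<int> t) {    // task-based continuation sees the antecedent task
                try { std::cout << t.get() << "\n"; }
                catch (const std::exception& e) { std::cout << "failed: " << e.what() << "\n"; }
            });

        done.wait();                                // returns task_status: completed or canceled
        std::cout << std::boolalpha << done.is_done() << "\n";
    }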
- /// - bool is_done() const - { - return _M_unitTask.is_done(); - } - - /// - /// Returns the scheduler for this task - /// - /// - /// A pointer to the scheduler - /// - Concurrency::scheduler_ptr scheduler() const - { - return _M_unitTask.scheduler(); - } -#endif - /// - /// Determines whether the task unwraps a Windows Runtime IAsyncInfo interface or is descended from such a task. - /// - /// - /// true if the task unwraps an IAsyncInfo interface or is descended from such a task, false otherwise. - /// - /**/ - bool is_apartment_aware() const - { - return _M_unitTask.is_apartment_aware(); - } - - /// - /// Determines whether two task objects represent the same internal task. - /// - /// - /// true if the objects refer to the same underlying task, and false otherwise. - /// - /**/ - bool operator==(const task& _Rhs) const - { - return (_M_unitTask == _Rhs._M_unitTask); - } - - /// - /// Determines whether two task objects represent different internal tasks. - /// - /// - /// true if the objects refer to different underlying tasks, and false otherwise. - /// - /**/ - bool operator!=(const task& _Rhs) const - { - return !operator==(_Rhs); - } - - /// - /// Create an underlying task implementation. - /// -#if _MSC_VER >= 1800 - void _CreateImpl(Concurrency::details::_CancellationTokenState * _Ct, Concurrency::scheduler_ptr _Scheduler) - { - _M_unitTask._CreateImpl(_Ct, _Scheduler); - } -#else - void _CreateImpl(Concurrency::details::_CancellationTokenState * _Ct) - { - _M_unitTask._CreateImpl(_Ct); - } -#endif - - /// - /// Return the underlying implementation for this task. - /// - const details::_Task_ptr::_Type & _GetImpl() const - { - return _M_unitTask._M_Impl; - } - - /// - /// Set the implementation of the task to be the supplied implementaion. - /// - void _SetImpl(const details::_Task_ptr::_Type & _Impl) - { - _M_unitTask._SetImpl(_Impl); - } - - /// - /// Set the implementation of the task to be the supplied implementaion using a move instead of a copy. - /// - void _SetImpl(details::_Task_ptr::_Type && _Impl) - { - _M_unitTask._SetImpl(std::move(_Impl)); - } - - /// - /// Sets a property determining whether the task is apartment aware. - /// - void _SetAsync(bool _Async = true) - { - _M_unitTask._SetAsync(_Async); - } - - /// - /// Sets a field in the task impl to the return address for calls to the task constructors and the then method. - /// -#if _MSC_VER >= 1800 - void _SetTaskCreationCallstack(const details::_TaskCreationCallstack &_callstack) - { - _M_unitTask._SetTaskCreationCallstack(_callstack); - } -#else - void _SetTaskCreationAddressHint(void* _Address) - { - _M_unitTask._SetTaskCreationAddressHint(_Address); - } -#endif - - /// - /// An internal version of then that takes additional flags and executes the continuation inline. Used for runtime internal continuations only. 
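operator== compares task identity, not results: two handles are equal only when they wrap the same internal task. A short sketch:

    #include <ppltasks.h>
    #include <cassert>

    void task_identity_sketch()
    {
        auto a = concurrency::task<void>([] {});
        auto b = a;                                 // same underlying implementation
        auto c = concurrency::task<void>([] {});    // a separately created task

        assert(a == b);
        assert(a != c);
        a.wait();
        c.wait();
    }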
- /// - template -#if _MSC_VER >= 1800 - auto _Then(const _Function& _Func, Concurrency::details::_CancellationTokenState *_PTokenState, - details::_TaskInliningMode _InliningMode = Concurrency::details::_ForceInline) const -> typename details::_ContinuationTypeTraits<_Function, void>::_TaskOfType - { - // inherit from antecedent - auto _Scheduler = _GetImpl()->_GetScheduler(); - - return _M_unitTask._ThenImpl(_Func, _PTokenState, task_continuation_context::use_default(), _Scheduler, _CAPTURE_CALLSTACK(), _InliningMode); - } -#else - auto _Then(const _Function& _Func, Concurrency::details::_CancellationTokenState *_PTokenState, - bool _Aggregating, details::_TaskInliningMode _InliningMode = Concurrency::details::_ForceInline) const -> typename details::_ContinuationTypeTraits<_Function, void>::_TaskOfType - { - return _M_unitTask._ThenImpl(_Func, _PTokenState, task_continuation_context::use_default(), _Aggregating, _InliningMode); - } -#endif - -private: - template friend class task; - template friend class task_completion_event; - - /// - /// Initializes a task using a task completion event. - /// - void _TaskInitNoFunctor(task_completion_event& _Event) - { - _M_unitTask._TaskInitNoFunctor(_Event._M_unitEvent); - } - /// - /// Initializes a task using an asynchronous action IAsyncAction* - /// - void _TaskInitNoFunctor(ABI::Windows::Foundation::IAsyncAction* _AsyncAction) - { - _M_unitTask._TaskInitAsyncOp(Microsoft::WRL::Make(_AsyncAction).Get()); - } - - /// - /// Initializes a task using an asynchronous action with progress IAsyncActionWithProgress<_P>* - /// - template - void _TaskInitNoFunctor(ABI::Windows::Foundation::IAsyncActionWithProgress<_P>* _AsyncActionWithProgress) - { - _M_unitTask._TaskInitAsyncOp(Microsoft::WRL::Make>(_AsyncActionWithProgress).Get()); - } - /// - /// Initializes a task using a callable object. - /// - template - void _TaskInitMaybeFunctor(_Function & _Func, std::true_type) - { - _M_unitTask._TaskInitWithFunctor(_Func); - } - - /// - /// Initializes a task using a non-callable object. - /// - template - void _TaskInitMaybeFunctor(_T & _Param, std::false_type) - { - _TaskInitNoFunctor(_Param); - } - - // The void task contains a task of a dummy type so common code can be used for tasks with void and non-void results. - task _M_unitTask; -}; - -namespace details -{ - - /// - /// The following type traits are used for the create_task function. - /// - - // Unwrap task - template - _Ty _GetUnwrappedType(task<_Ty>); - - // Unwrap all supported types - template - auto _GetUnwrappedReturnType(_Ty _Arg, int) -> decltype(_GetUnwrappedType(_Arg)); - // fallback - template - _Ty _GetUnwrappedReturnType(_Ty, ...); - - /// - /// _GetTaskType functions will retrieve task type T in task[T](Arg), - /// for given constructor argument Arg and its property "callable". - /// It will automatically unwrap argument to get the final return type if necessary. 
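The _GetUnwrappedType/_GetUnwrappedReturnType helpers implement the unwrapping rule the public documentation relies on: a callable that returns task<T> still produces a task<T>, never a task<task<T>>. A sketch that makes the deduced types visible, assuming the standard create_task behaves the same way:

    #include <ppltasks.h>
    #include <type_traits>

    void unwrapping_sketch()
    {
        auto t1 = concurrency::create_task([] { return 1; });      // functor returns int      -> task<int>
        auto t2 = concurrency::create_task([] {                    // functor returns task<int>
            return concurrency::create_task([] { return 2; });     // ... still deduced as task<int>
        });

        static_assert(std::is_same<decltype(t1), concurrency::task<int>>::value, "plain result");
        static_assert(std::is_same<decltype(t2), concurrency::task<int>>::value, "unwrapped result");
    }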
- /// - - // Non-Callable - template - _Ty _GetTaskType(task_completion_event<_Ty>, std::false_type); - - // Non-Callable - template - auto _GetTaskType(_Ty _NonFunc, std::false_type) -> decltype(_GetUnwrappedType(_NonFunc)); - - // Callable - template - auto _GetTaskType(_Ty _Func, std::true_type) -> decltype(_GetUnwrappedReturnType(stdx::declval<_FunctionTypeTraits<_Ty, void>::_FuncRetType>(), 0)); - - // Special callable returns void - void _GetTaskType(std::function, std::true_type); - struct _BadArgType{}; - - template - auto _FilterValidTaskType(_Ty _Param, int) -> decltype(_GetTaskType(_Param, _IsCallable<_ReturnType>(_Param, 0, 0, 0))); - - template - _BadArgType _FilterValidTaskType(_Ty _Param, ...); - - template - struct _TaskTypeFromParam - { - typedef decltype(_FilterValidTaskType<_ReturnType>(stdx::declval<_Ty>(), 0)) _Type; - }; -} - - -/// -/// Creates a PPL task object. create_task can be used anywhere you would have used a task constructor. -/// It is provided mainly for convenience, because it allows use of the auto keyword while creating tasks. -/// -/// -/// The type of the parameter from which the task is to be constructed. -/// -/// -/// The parameter from which the task is to be constructed. This could be a lambda or function object, a task_completion_event -/// object, a different task object, or a Windows::Foundation::IAsyncInfo interface if you are using tasks in your Windows Store app. -/// -/// -/// A new task of type T, that is inferred from . -/// -/// -/// The first overload behaves like a task constructor that takes a single parameter. -/// The second overload associates the cancellation token provided with the newly created task. If you use this overload you are not -/// allowed to pass in a different task object as the first parameter. -/// The type of the returned task is inferred from the first parameter to the function. If is a task_completion_event<T>, -/// a task<T>, or a functor that returns either type T or task<T>, the type of the created task is task<T>. -/// In a Windows Store app, if is of type Windows::Foundation::IAsyncOperation<T>^ or -/// Windows::Foundation::IAsyncOperationWithProgress<T,P>^, or a functor that returns either of those types, the created task will be of type task<T>. -/// If is of type Windows::Foundation::IAsyncAction^ or Windows::Foundation::IAsyncActionWithProgress<P>^, or a functor -/// that returns either of those types, the created task will have type task<void>. -/// -/// -/// -/**/ -template -__declspec(noinline) -#if _MSC_VER >= 1800 -auto create_task(_Ty _Param, task_options _TaskOptions = task_options()) -> task::_Type> -#else -auto create_task(_Ty _Param) -> task::_Type> -#endif -{ - static_assert(!std::is_same::_Type, details::_BadArgType>::value, - "incorrect argument for create_task; can be a callable object, an asynchronous operation, or a task_completion_event" - ); -#if _MSC_VER >= 1800 - details::_get_internal_task_options(_TaskOptions)._set_creation_callstack(_CAPTURE_CALLSTACK()); - task::_Type> _CreatedTask(_Param, _TaskOptions); -#else - task::_Type> _CreatedTask(_Param); - // Ideally we would like to forceinline create_task, but __forceinline does nothing on debug builds. Therefore, we ask for no inlining - // and overwrite the creation address hint set by the task constructor. DO NOT REMOVE this next line from create_task. It is - // essential that _ReturnAddress() evaluate to the instruction right after the call to create_task in client code. 
- _CreatedTask._SetTaskCreationAddressHint(_ReturnAddress()); -#endif - return _CreatedTask; -} - -/// -/// Creates a PPL task object. create_task can be used anywhere you would have used a task constructor. -/// It is provided mainly for convenience, because it allows use of the auto keyword while creating tasks. -/// -/// -/// The type of the parameter from which the task is to be constructed. -/// -/// -/// The parameter from which the task is to be constructed. This could be a lambda or function object, a task_completion_event -/// object, a different task object, or a Windows::Foundation::IAsyncInfo interface if you are using tasks in your Windows Store app. -/// -/// -/// The cancellation token to associate with the task. When the source for this token is canceled, cancellation will be requested on the task. -/// -/// -/// A new task of type T, that is inferred from . -/// -/// -/// The first overload behaves like a task constructor that takes a single parameter. -/// The second overload associates the cancellation token provided with the newly created task. If you use this overload you are not -/// allowed to pass in a different task object as the first parameter. -/// The type of the returned task is inferred from the first parameter to the function. If is a task_completion_event<T>, -/// a task<T>, or a functor that returns either type T or task<T>, the type of the created task is task<T>. -/// In a Windows Store app, if is of type Windows::Foundation::IAsyncOperation<T>^ or -/// Windows::Foundation::IAsyncOperationWithProgress<T,P>^, or a functor that returns either of those types, the created task will be of type task<T>. -/// If is of type Windows::Foundation::IAsyncAction^ or Windows::Foundation::IAsyncActionWithProgress<P>^, or a functor -/// that returns either of those types, the created task will have type task<void>. -/// -/// -/// -/**/ -#if _MSC_VER >= 1800 -template -__declspec(noinline) -task<_ReturnType> create_task(const task<_ReturnType>& _Task) -{ - task<_ReturnType> _CreatedTask(_Task); - return _CreatedTask; -} -#else -template -__declspec(noinline) -auto create_task(_Ty _Param, Concurrency::cancellation_token _Token) -> task::_Type> -{ - static_assert(!std::is_same::_Type, details::_BadArgType>::value, - "incorrect argument for create_task; can be a callable object, an asynchronous operation, or a task_completion_event" - ); - task::_Type> _CreatedTask(_Param, _Token); - // Ideally we would like to forceinline create_task, but __forceinline does nothing on debug builds. Therefore, we ask for no inlining - // and overwrite the creation address hint set by the task constructor. DO NOT REMOVE this next line from create_task. It is - // essential that _ReturnAddress() evaluate to the instruction right after the call to create_task in client code. 
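create_task is a thin convenience wrapper over the task constructors: the task type is inferred from the argument, which is what makes auto pleasant at the call site, and a task_completion_event is one of the accepted argument kinds. A sketch of both uses with the standard concurrency API:

    #include <ppltasks.h>
    #include <iostream>
    #include <string>

    void create_task_sketch()
    {
        // Type inferred from the callable: task<std::string>.
        auto greeting = concurrency::create_task([] { return std::string("hello"); });
        greeting.then([](std::string s) { std::cout << s << "\n"; }).wait();

        // A task created from a task_completion_event completes only once the event is set,
        // typically from a callback running on another thread.
        concurrency::task_completion_event<int> tce;
        auto waiter = concurrency::create_task(tce)
            .then([](int v) { std::cout << "event delivered " << v << "\n"; });
        tce.set(123);
        waiter.wait();
    }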
- _CreatedTask._SetTaskCreationAddressHint(_ReturnAddress()); - return _CreatedTask; -} -#endif - -namespace details -{ - template - task*>()))>::type> _To_task_helper(ABI::Windows::Foundation::IAsyncOperation<_T>* op) - { - return task<_T>(op); - } - - template - task*>()))>::type> _To_task_helper(ABI::Windows::Foundation::IAsyncOperationWithProgress<_T, _Progress>* op) - { - return task<_T>(op); - } - - inline task _To_task_helper(ABI::Windows::Foundation::IAsyncAction* op) - { - return task(op); - } - - template - task _To_task_helper(ABI::Windows::Foundation::IAsyncActionWithProgress<_Progress>* op) - { - return task(op); - } - - template - class _ProgressDispatcherBase - { - public: - - virtual ~_ProgressDispatcherBase() - { - } - - virtual void _Report(const _ProgressType& _Val) = 0; - }; - - template - class _ProgressDispatcher : public _ProgressDispatcherBase<_ProgressType> - { - public: - - virtual ~_ProgressDispatcher() - { - } - - _ProgressDispatcher(_ClassPtrType _Ptr) : _M_ptr(_Ptr) - { - } - - virtual void _Report(const _ProgressType& _Val) - { - _M_ptr->_FireProgress(_Val); - } - - private: - - _ClassPtrType _M_ptr; - }; -} // namespace details - - -/// -/// The progress reporter class allows reporting progress notifications of a specific type. Each progress_reporter object is bound -/// to a particular asynchronous action or operation. -/// -/// -/// The payload type of each progress notification reported through the progress reporter. -/// -/// -/// This type is only available to Windows Store apps. -/// -/// -/**/ -template -class progress_reporter -{ - typedef std::shared_ptr> _PtrType; - -public: - - /// - /// Sends a progress report to the asynchronous action or operation to which this progress reporter is bound. - /// - /// - /// The payload to report through a progress notification. 
- /// - /**/ - void report(const _ProgressType& _Val) const - { - _M_dispatcher->_Report(_Val); - } - - template - static progress_reporter _CreateReporter(_ClassPtrType _Ptr) - { - progress_reporter _Reporter; - details::_ProgressDispatcherBase<_ProgressType> *_PDispatcher = new details::_ProgressDispatcher<_ProgressType, _ClassPtrType>(_Ptr); - _Reporter._M_dispatcher = _PtrType(_PDispatcher); - return _Reporter; - } - progress_reporter() {} - -private: - progress_reporter(details::_ProgressReporterCtorArgType); - - _PtrType _M_dispatcher; -}; - -namespace details -{ - // - // maps internal definitions for AsyncStatus and defines states that are not client visible - // - enum _AsyncStatusInternal - { - _AsyncCreated = -1, // externally invisible - // client visible states (must match AsyncStatus exactly) - _AsyncStarted = ABI::Windows::Foundation::AsyncStatus::Started, // 0 - _AsyncCompleted = ABI::Windows::Foundation::AsyncStatus::Completed, // 1 - _AsyncCanceled = ABI::Windows::Foundation::AsyncStatus::Canceled, // 2 - _AsyncError = ABI::Windows::Foundation::AsyncStatus::Error, // 3 - // non-client visible internal states - _AsyncCancelPending, - _AsyncClosed, - _AsyncUndefined - }; - - // - // designates whether the "GetResults" method returns a single result (after complete fires) or multiple results - // (which are progressively consumable between Start state and before Close is called) - // - enum _AsyncResultType - { - SingleResult = 0x0001, - MultipleResults = 0x0002 - }; - - template - struct _ProgressTypeTraits - { - static const bool _TakesProgress = false; - typedef void _ProgressType; - }; - - template - struct _ProgressTypeTraits> - { - static const bool _TakesProgress = true; - typedef typename _T _ProgressType; - }; - - template::value, bool bTakesProgress = _ProgressTypeTraits<_T>::_TakesProgress> - struct _TokenTypeTraits - { - static const bool _TakesToken = false; - typedef typename _T _ReturnType; - }; - - template - struct _TokenTypeTraits<_T, false, true> - { - static const bool _TakesToken = false; - typedef void _ReturnType; - }; - - template - struct _TokenTypeTraits<_T, true, false> - { - static const bool _TakesToken = true; - typedef void _ReturnType; - }; - - template::_ArgumentCount> - struct _CAFunctorOptions - { - static const bool _TakesProgress = false; - static const bool _TakesToken = false; - typedef void _ProgressType; - typedef void _ReturnType; - }; - - template - struct _CAFunctorOptions<_T, 1> - { - private: - - typedef typename _FunctorTypeTraits<_T>::_Argument1Type _Argument1Type; - - public: - - static const bool _TakesProgress = _ProgressTypeTraits<_Argument1Type>::_TakesProgress; - static const bool _TakesToken = _TokenTypeTraits<_Argument1Type>::_TakesToken; - typedef typename _ProgressTypeTraits<_Argument1Type>::_ProgressType _ProgressType; - typedef typename _TokenTypeTraits<_Argument1Type>::_ReturnType _ReturnType; - }; - - template - struct _CAFunctorOptions<_T, 2> - { - private: - - typedef typename _FunctorTypeTraits<_T>::_Argument1Type _Argument1Type; - typedef typename _FunctorTypeTraits<_T>::_Argument2Type _Argument2Type; - - public: - - static const bool _TakesProgress = _ProgressTypeTraits<_Argument1Type>::_TakesProgress; - static const bool _TakesToken = !_TakesProgress ? 
true : _TokenTypeTraits<_Argument2Type>::_TakesToken; - typedef typename _ProgressTypeTraits<_Argument1Type>::_ProgressType _ProgressType; - typedef typename _TokenTypeTraits<_Argument2Type>::_ReturnType _ReturnType; - }; - - template - struct _CAFunctorOptions<_T, 3> - { - private: - - typedef typename _FunctorTypeTraits<_T>::_Argument1Type _Argument1Type; - - public: - - static const bool _TakesProgress = true; - static const bool _TakesToken = true; - typedef typename _ProgressTypeTraits<_Argument1Type>::_ProgressType _ProgressType; - typedef typename _FunctorTypeTraits<_T>::_Argument3Type _ReturnType; - }; - - class _Zip - { - }; - - // *************************************************************************** - // Async Operation Task Generators - // - - // - // Functor returns an IAsyncInfo - result needs to be wrapped in a task: - // - template - struct _SelectorTaskGenerator - { -#if _MSC_VER >= 1800 - template - static task<_ReturnType> _GenerateTask_0(const _Function& _Func, Concurrency::cancellation_token_source _Cts, _ReturnType* _pRet, const _TaskCreationCallstack & _callstack) - { - task_options _taskOptinos(_Cts.get_token()); - details::_get_internal_task_options(_taskOptinos)._set_creation_callstack(_callstack); - return task<_ReturnType>(_Func(_pRet), _taskOptinos); - } - - template - static task<_ReturnType> _GenerateTask_1C(const _Function& _Func, Concurrency::cancellation_token_source _Cts, _ReturnType* _pRet, const _TaskCreationCallstack & _callstack) - { - task_options _taskOptinos(_Cts.get_token()); - details::_get_internal_task_options(_taskOptinos)._set_creation_callstack(_callstack); - return task<_ReturnType>(_Func(_Cts.get_token(), _pRet), _taskOptinos); - } - - template - static task<_ReturnType> _GenerateTask_1P(const _Function& _Func, const _ProgressObject& _Progress, Concurrency::cancellation_token_source _Cts, _ReturnType* _pRet, const _TaskCreationCallstack & _callstack) - { - task_options _taskOptinos(_Cts.get_token()); - details::_get_internal_task_options(_taskOptinos)._set_creation_callstack(_callstack); - return task<_ReturnType>(_Func(_Progress, _pRet), _taskOptinos); - } - - template - static task<_ReturnType> _GenerateTask_2PC(const _Function& _Func, const _ProgressObject& _Progress, Concurrency::cancellation_token_source _Cts, _ReturnType* _pRet, const _TaskCreationCallstack & _callstack) - { - task_options _taskOptinos(_Cts.get_token()); - details::_get_internal_task_options(_taskOptinos)._set_creation_callstack(_callstack); - return task<_ReturnType>(_Func(_Progress, _Cts.get_token(), _pRet), _taskOptinos); - } -#else - template - static task<_ReturnType> _GenerateTask_0(const _Function& _Func, Concurrency::cancellation_token_source _Cts, _ReturnType* _pRet) - { - return task<_ReturnType>(_Func(_pRet), _Cts.get_token()); - } - - template - static task<_ReturnType> _GenerateTask_1C(const _Function& _Func, Concurrency::cancellation_token_source _Cts, _ReturnType* _pRet) - { - return task<_ReturnType>(_Func(_Cts.get_token(), _pRet), _Cts.get_token()); - } - - template - static task<_ReturnType> _GenerateTask_1P(const _Function& _Func, const _ProgressObject& _Progress, Concurrency::cancellation_token_source _Cts, _ReturnType* _pRet) - { - return task<_ReturnType>(_Func(_Progress, _pRet), _Cts.get_token()); - } - - template - static task<_ReturnType> _GenerateTask_2PC(const _Function& _Func, const _ProgressObject& _Progress, Concurrency::cancellation_token_source _Cts, _ReturnType* _pRet) - { - return task<_ReturnType>(_Func(_Progress, 
_Cts.get_token(), _pRet), _Cts.get_token()); - } -#endif - }; - - template - struct _SelectorTaskGenerator<_AsyncSelector, void> - { -#if _MSC_VER >= 1800 - template - static task _GenerateTask_0(const _Function& _Func, Concurrency::cancellation_token_source _Cts, const _TaskCreationCallstack & _callstack) - { - task_options _taskOptinos(_Cts.get_token()); - details::_get_internal_task_options(_taskOptinos)._set_creation_callstack(_callstack); - return task(_Func(), _taskOptinos); - } - - template - static task _GenerateTask_1C(const _Function& _Func, Concurrency::cancellation_token_source _Cts, const _TaskCreationCallstack & _callstack) - { - task_options _taskOptinos(_Cts.get_token()); - details::_get_internal_task_options(_taskOptinos)._set_creation_callstack(_callstack); - return task(_Func(_Cts.get_token()), _taskOptinos); - } - - template - static task _GenerateTask_1P(const _Function& _Func, const _ProgressObject& _Progress, Concurrency::cancellation_token_source _Cts, const _TaskCreationCallstack & _callstack) - { - task_options _taskOptinos(_Cts.get_token()); - details::_get_internal_task_options(_taskOptinos)._set_creation_callstack(_callstack); - return task(_Func(_Progress), _taskOptinos); - } - - template - static task _GenerateTask_2PC(const _Function& _Func, const _ProgressObject& _Progress, Concurrency::cancellation_token_source _Cts, const _TaskCreationCallstack & _callstack) - { - task_options _taskOptinos(_Cts.get_token()); - details::_get_internal_task_options(_taskOptinos)._set_creation_callstack(_callstack); - return task(_Func(_Progress, _Cts.get_token()), _taskOptinos); - } -#else - template - static task _GenerateTask_0(const _Function& _Func, Concurrency::cancellation_token_source _Cts) - { - return task(_Func(), _Cts.get_token()); - } - - template - static task _GenerateTask_1C(const _Function& _Func, Concurrency::cancellation_token_source _Cts) - { - return task(_Func(_Cts.get_token()), _Cts.get_token()); - } - - template - static task _GenerateTask_1P(const _Function& _Func, const _ProgressObject& _Progress, Concurrency::cancellation_token_source _Cts) - { - return task(_Func(_Progress), _Cts.get_token()); - } - - template - static task _GenerateTask_2PC(const _Function& _Func, const _ProgressObject& _Progress, Concurrency::cancellation_token_source _Cts) - { - return task(_Func(_Progress, _Cts.get_token()), _Cts.get_token()); - } -#endif - }; - -#if _MSC_VER < 1800 - // For create_async lambdas that return a (non-task) result, we oversubscriber the current task for the duration of the - // lambda. 
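The generator specializations in this region all follow one recipe for a functor that produces a plain result: run the user functor (under the oversubscription guard introduced in the comment above), hand the result back through an out-parameter, and bind the new task to the token of the cancellation_token_source. A standalone sketch of that recipe with the standard API; userFunc here is a hypothetical stand-in for the HRESULT-style functors this header accepts.

    #include <ppltasks.h>
    #include <Windows.h>
    #include <memory>
    #include <stdexcept>

    concurrency::task<int> wrap_result_functor(concurrency::cancellation_token_source cts)
    {
        // Hypothetical user functor in this header's style: HRESULT status, result via out-parameter.
        auto userFunc = [](int* out) -> HRESULT { *out = 5; return S_OK; };

        auto result = std::make_shared<int>(0);
        return concurrency::task<int>([userFunc, result]() -> int {
            HRESULT hr = userFunc(result.get());
            if (FAILED(hr))
                throw std::runtime_error("functor failed");    // simplified; the real generators propagate hr
            return *result;
        }, cts.get_token());                                   // the task stays cancellable through cts
    }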
- struct _Task_generator_oversubscriber - { - _Task_generator_oversubscriber() - { - Concurrency::details::_Context::_Oversubscribe(true); - } - - ~_Task_generator_oversubscriber() - { - Concurrency::details::_Context::_Oversubscribe(false); - } - }; -#endif - - // - // Functor returns a result - it needs to be wrapped in a task: - // - template - struct _SelectorTaskGenerator - { -#if _MSC_VER >= 1800 - -#pragma warning(push) -#pragma warning(disable: 4702) - template - static task<_ReturnType> _GenerateTask_0(const _Function& _Func, Concurrency::cancellation_token_source _Cts, _ReturnType* _pRet, const _TaskCreationCallstack & _callstack) - { - task_options _taskOptinos(_Cts.get_token()); - details::_get_internal_task_options(_taskOptinos)._set_creation_callstack(_callstack); - return task<_ReturnType>([=](_ReturnType* retVal) -> HRESULT { - Concurrency::details::_Task_generator_oversubscriber_t _Oversubscriber; - (_Oversubscriber); - HRESULT hr = _Func(_pRet); - retVal = _pRet; - return hr; - }, _taskOptinos); - } -#pragma warning(pop) - - template - static task<_ReturnType> _GenerateTask_1C(const _Function& _Func, Concurrency::cancellation_token_source _Cts, _ReturnType* _pRet, const _TaskCreationCallstack & _callstack) - { - task_options _taskOptinos(_Cts.get_token()); - details::_get_internal_task_options(_taskOptinos)._set_creation_callstack(_callstack); - return task<_ReturnType>([=](_ReturnType* retVal) -> HRESULT { - Concurrency::details::_Task_generator_oversubscriber_t _Oversubscriber; - (_Oversubscriber); - HRESULT hr = _Func(_Cts.get_token(), _pRet); - retVal = _pRet; - return hr; - }, _taskOptinos); - } - - template - static task<_ReturnType> _GenerateTask_1P(const _Function& _Func, const _ProgressObject& _Progress, Concurrency::cancellation_token_source _Cts, _ReturnType* _pRet, const _TaskCreationCallstack & _callstack) - { - task_options _taskOptinos(_Cts.get_token()); - details::_get_internal_task_options(_taskOptinos)._set_creation_callstack(_callstack); - return task<_ReturnType>([=](_ReturnType* retVal) -> HRESULT { - Concurrency::details::_Task_generator_oversubscriber_t _Oversubscriber; - (_Oversubscriber); - HRESULT hr = _Func(_Progress, _pRet); - retVal = _pRet; - return hr; - }, _taskOptinos); - } - - template - static task<_ReturnType> _GenerateTask_2PC(const _Function& _Func, const _ProgressObject& _Progress, Concurrency::cancellation_token_source _Cts, _ReturnType* _pRet, const _TaskCreationCallstack & _callstack) - { - task_options _taskOptinos(_Cts.get_token()); - details::_get_internal_task_options(_taskOptinos)._set_creation_callstack(_callstack); - return task<_ReturnType>([=](_ReturnType* retVal) -> HRESULT { - Concurrency::details::_Task_generator_oversubscriber_t _Oversubscriber; - (_Oversubscriber); - HRESULT hr = _Func(_Progress, _Cts.get_token(), _pRet); - retVal = _pRet; - return hr; - }, _taskOptinos); - } -#else - template - static task<_ReturnType> _GenerateTask_0(const _Function& _Func, Concurrency::cancellation_token_source _Cts, _ReturnType* _pRet) - { - return task<_ReturnType>([=](_ReturnType* retVal) -> HRESULT { - _Task_generator_oversubscriber _Oversubscriber; - HRESULT hr = _Func(_pRet); - retVal = _pRet; - return hr; - }, _Cts.get_token()); - } - - template - static task<_ReturnType> _GenerateTask_1C(const _Function& _Func, Concurrency::cancellation_token_source _Cts, _ReturnType* _pRet) - { - return task<_ReturnType>([=](_ReturnType* retVal) -> HRESULT { - _Task_generator_oversubscriber _Oversubscriber; - HRESULT hr = 
_Func(_Cts.get_token(), _pRet); - retVal = _pRet; - return hr; - }, _Cts.get_token()); - } - - template - static task<_ReturnType> _GenerateTask_1P(const _Function& _Func, const _ProgressObject& _Progress, Concurrency::cancellation_token_source _Cts, _ReturnType* _pRet) - { - return task<_ReturnType>([=](_ReturnType* retVal) -> HRESULT { - _Task_generator_oversubscriber _Oversubscriber; - HRESULT hr = _Func(_Progress, _pRet); - retVal = _pRet; - return hr; - }, _Cts.get_token()); - } - - template - static task<_ReturnType> _GenerateTask_2PC(const _Function& _Func, const _ProgressObject& _Progress, Concurrency::cancellation_token_source _Cts, _ReturnType* _pRet) - { - return task<_ReturnType>([=](_ReturnType* retVal) -> HRESULT { - _Task_generator_oversubscriber _Oversubscriber; - HRESULT hr = _Func(_Progress, _Cts.get_token(), _pRet); - retVal = _pRet; - return hr; - }, _Cts.get_token()); - } -#endif - }; - - template<> - struct _SelectorTaskGenerator - { -#if _MSC_VER >= 1800 - template - static task _GenerateTask_0(const _Function& _Func, Concurrency::cancellation_token_source _Cts, const _TaskCreationCallstack & _callstack) - { - task_options _taskOptinos(_Cts.get_token()); - details::_get_internal_task_options(_taskOptinos)._set_creation_callstack(_callstack); - return task([=]() -> HRESULT { - Concurrency::details::_Task_generator_oversubscriber_t _Oversubscriber; - (_Oversubscriber); - return _Func(); - }, _taskOptinos); - } - - template - static task _GenerateTask_1C(const _Function& _Func, Concurrency::cancellation_token_source _Cts, const _TaskCreationCallstack & _callstack) - { - task_options _taskOptinos(_Cts.get_token()); - details::_get_internal_task_options(_taskOptinos)._set_creation_callstack(_callstack); - return task([=]() -> HRESULT { - Concurrency::details::_Task_generator_oversubscriber_t _Oversubscriber; - (_Oversubscriber); - return _Func(_Cts.get_token()); - }, _taskOptinos); - } - - template - static task _GenerateTask_1P(const _Function& _Func, const _ProgressObject& _Progress, Concurrency::cancellation_token_source _Cts, const _TaskCreationCallstack & _callstack) - { - task_options _taskOptinos(_Cts.get_token()); - details::_get_internal_task_options(_taskOptinos)._set_creation_callstack(_callstack); - return task([=]() -> HRESULT { - Concurrency::details::_Task_generator_oversubscriber_t _Oversubscriber; - (_Oversubscriber); - return _Func(_Progress); - }, _taskOptinos); - } - - template - static task _GenerateTask_2PC(const _Function& _Func, const _ProgressObject& _Progress, Concurrency::cancellation_token_source _Cts, const _TaskCreationCallstack & _callstack) - { - task_options _taskOptinos(_Cts.get_token()); - details::_get_internal_task_options(_taskOptinos)._set_creation_callstack(_callstack); - return task([=]() -> HRESULT { - Concurrency::details::_Task_generator_oversubscriber_t _Oversubscriber; - (_Oversubscriber); - return _Func(_Progress, _Cts.get_token()); - }, _taskOptinos); - } -#else - template - static task _GenerateTask_0(const _Function& _Func, Concurrency::cancellation_token_source _Cts) - { - return task([=]() -> HRESULT { - _Task_generator_oversubscriber _Oversubscriber; - return _Func(); - }, _Cts.get_token()); - } - - template - static task _GenerateTask_1C(const _Function& _Func, Concurrency::cancellation_token_source _Cts) - { - return task([=]() -> HRESULT { - _Task_generator_oversubscriber _Oversubscriber; - return _Func(_Cts.get_token()); - }, _Cts.get_token()); - } - - template - static task _GenerateTask_1P(const _Function& _Func, 
const _ProgressObject& _Progress, Concurrency::cancellation_token_source _Cts) - { - return task([=]() -> HRESULT { - _Task_generator_oversubscriber _Oversubscriber; - return _Func(_Progress); - }, _Cts.get_token()); - } - - template - static task _GenerateTask_2PC(const _Function& _Func, const _ProgressObject& _Progress, Concurrency::cancellation_token_source _Cts) - { - return task([=]() -> HRESULT { - _Task_generator_oversubscriber _Oversubscriber; - return _Func(_Progress, _Cts.get_token()); - }, _Cts.get_token()); - } -#endif - }; - - // - // Functor returns a task - the task can directly be returned: - // - template - struct _SelectorTaskGenerator - { - template -#if _MSC_VER >= 1800 - static task<_ReturnType> _GenerateTask_0(const _Function& _Func, Concurrency::cancellation_token_source _Cts, _ReturnType* _pRet, const _TaskCreationCallstack & _callstack) -#else - static task<_ReturnType> _GenerateTask_0(const _Function& _Func, Concurrency::cancellation_token_source _Cts, _ReturnType* _pRet) -#endif - { - task<_ReturnType> _task; - _Func(&_task); - return _task; - } - - template -#if _MSC_VER >= 1800 - static task<_ReturnType> _GenerateTask_1C(const _Function& _Func, Concurrency::cancellation_token_source _Cts, _ReturnType* _pRet, const _TaskCreationCallstack & _callstack) -#else - static task<_ReturnType> _GenerateTask_1C(const _Function& _Func, Concurrency::cancellation_token_source _Cts, _ReturnType* _pRet) -#endif - { - task<_ReturnType> _task; - _Func(_Cts.get_token(), &_task); - return _task; - } - - template -#if _MSC_VER >= 1800 - static task<_ReturnType> _GenerateTask_1P(const _Function& _Func, const _ProgressObject& _Progress, Concurrency::cancellation_token_source _Cts, _ReturnType* _pRet, const _TaskCreationCallstack & _callstack) -#else - static task<_ReturnType> _GenerateTask_1P(const _Function& _Func, const _ProgressObject& _Progress, Concurrency::cancellation_token_source _Cts, _ReturnType* _pRet) -#endif - { - task<_ReturnType> _task; - _Func(_Progress, &_task); - return _task; - } - - template -#if _MSC_VER >= 1800 - static task<_ReturnType> _GenerateTask_2PC(const _Function& _Func, const _ProgressObject& _Progress, Concurrency::cancellation_token_source _Cts, _ReturnType* _pRet, const _TaskCreationCallstack & _callstack) -#else - static task<_ReturnType> _GenerateTask_2PC(const _Function& _Func, const _ProgressObject& _Progress, Concurrency::cancellation_token_source _Cts, _ReturnType* _pRet) -#endif - { - task<_ReturnType> _task; - _Func(_Progress, _Cts.get_token(), &_task); - return _task; - } - }; - - template<> - struct _SelectorTaskGenerator - { - template -#if _MSC_VER >= 1800 - static task _GenerateTask_0(const _Function& _Func, Concurrency::cancellation_token_source _Cts, const _TaskCreationCallstack & _callstack) -#else - static task _GenerateTask_0(const _Function& _Func, Concurrency::cancellation_token_source _Cts) -#endif - { - task _task; - _Func(&_task); - return _task; - } - - template -#if _MSC_VER >= 1800 - static task _GenerateTask_1C(const _Function& _Func, Concurrency::cancellation_token_source _Cts, const _TaskCreationCallstack & _callstack) -#else - static task _GenerateTask_1C(const _Function& _Func, Concurrency::cancellation_token_source _Cts) -#endif - { - task _task; - _Func(_Cts.get_token(), &_task); - return _task; - } - - template -#if _MSC_VER >= 1800 - static task _GenerateTask_1P(const _Function& _Func, const _ProgressObject& _Progress, Concurrency::cancellation_token_source _Cts, const _TaskCreationCallstack & _callstack) -#else - 
static task _GenerateTask_1P(const _Function& _Func, const _ProgressObject& _Progress, Concurrency::cancellation_token_source _Cts) -#endif - { - task _task; - _Func(_Progress, &_task); - return _task; - } - - template -#if _MSC_VER >= 1800 - static task _GenerateTask_2PC(const _Function& _Func, const _ProgressObject& _Progress, Concurrency::cancellation_token_source _Cts, const _TaskCreationCallstack & _callstack) -#else - static task _GenerateTask_2PC(const _Function& _Func, const _ProgressObject& _Progress, Concurrency::cancellation_token_source _Cts) -#endif - { - task _task; - _Func(_Progress, _Cts.get_token(), &_task); - return _task; - } - }; - - template - struct _TaskGenerator - { - }; - - template - struct _TaskGenerator<_Generator, false, false> - { -#if _MSC_VER >= 1800 - template - static auto _GenerateTaskNoRet(const _Function& _Func, _ClassPtr _Ptr, Concurrency::cancellation_token_source _Cts, const _TaskCreationCallstack & _callstack) - -> decltype(_Generator::_GenerateTask_0(_Func, _Cts, _callstack)) - { - (void)_Ptr; - return _Generator::_GenerateTask_0(_Func, _Cts, _callstack); - } - - template - static auto _GenerateTask(const _Function& _Func, _ClassPtr _Ptr, Concurrency::cancellation_token_source _Cts, _ReturnType* _pRet, const _TaskCreationCallstack & _callstack) - -> decltype(_Generator::_GenerateTask_0(_Func, _Cts, _pRet, _callstack)) - { - return _Generator::_GenerateTask_0(_Func, _Cts, _pRet, _callstack); - } -#else - template - static auto _GenerateTaskNoRet(const _Function& _Func, _ClassPtr _Ptr, Concurrency::cancellation_token_source _Cts) - -> decltype(_Generator::_GenerateTask_0(_Func, _Cts)) - { - (void)_Ptr; - return _Generator::_GenerateTask_0(_Func, _Cts); - } - - template - static auto _GenerateTask(const _Function& _Func, _ClassPtr _Ptr, Concurrency::cancellation_token_source _Cts, _ReturnType* _pRet) - -> decltype(_Generator::_GenerateTask_0(_Func, _Cts, _pRet)) - { - return _Generator::_GenerateTask_0(_Func, _Cts, _pRet); - } -#endif - }; - - template - struct _TaskGenerator<_Generator, true, false> - { -#if _MSC_VER >= 1800 - template - static auto _GenerateTaskNoRet(const _Function& _Func, _ClassPtr _Ptr, Concurrency::cancellation_token_source _Cts, const _TaskCreationCallstack & _callstack) - -> decltype(_Generator::_GenerateTask_0(_Func, _Cts, _callstack)) - { - return _Generator::_GenerateTask_1C(_Func, _Cts, _callstack); - } - - template - static auto _GenerateTask(const _Function& _Func, _ClassPtr _Ptr, Concurrency::cancellation_token_source _Cts, _ReturnType* _pRet, const _TaskCreationCallstack & _callstack) - -> decltype(_Generator::_GenerateTask_0(_Func, _Cts, _pRet, _callstack)) - { - return _Generator::_GenerateTask_1C(_Func, _Cts, _pRet, _callstack); - } -#else - template - static auto _GenerateTaskNoRet(const _Function& _Func, _ClassPtr _Ptr, Concurrency::cancellation_token_source _Cts) - -> decltype(_Generator::_GenerateTask_0(_Func, _Cts)) - { - return _Generator::_GenerateTask_1C(_Func, _Cts); - } - - template - static auto _GenerateTask(const _Function& _Func, _ClassPtr _Ptr, Concurrency::cancellation_token_source _Cts, _ReturnType* _pRet) - -> decltype(_Generator::_GenerateTask_0(_Func, _Cts, _pRet)) - { - return _Generator::_GenerateTask_1C(_Func, _Cts, _pRet); - } -#endif - }; - - template - struct _TaskGenerator<_Generator, false, true> - { -#if _MSC_VER >= 1800 - template - static auto _GenerateTaskNoRet(const _Function& _Func, _ClassPtr _Ptr, Concurrency::cancellation_token_source _Cts, const _TaskCreationCallstack & 
_callstack) - -> decltype(_Generator::_GenerateTask_0(_Func, _Cts, _callstack)) - { - return _Generator::_GenerateTask_1P(_Func, progress_reporter<_ProgressType>::_CreateReporter(_Ptr), _Cts, _callstack); - } - - template - static auto _GenerateTask(const _Function& _Func, _ClassPtr _Ptr, Concurrency::cancellation_token_source _Cts, _ReturnType* _pRet, const _TaskCreationCallstack & _callstack) - -> decltype(_Generator::_GenerateTask_0(_Func, _Cts, _pRet, _callstack)) - { - return _Generator::_GenerateTask_1P(_Func, progress_reporter<_ProgressType>::_CreateReporter(_Ptr), _Cts, _pRet, _callstack); - } -#else - template - static auto _GenerateTaskNoRet(const _Function& _Func, _ClassPtr _Ptr, Concurrency::cancellation_token_source _Cts) - -> decltype(_Generator::_GenerateTask_0(_Func, _Cts)) - { - return _Generator::_GenerateTask_1P(_Func, progress_reporter<_ProgressType>::_CreateReporter(_Ptr), _Cts); - } - - template - static auto _GenerateTask(const _Function& _Func, _ClassPtr _Ptr, Concurrency::cancellation_token_source _Cts, _ReturnType* _pRet) - -> decltype(_Generator::_GenerateTask_0(_Func, _Cts, _pRet)) - { - return _Generator::_GenerateTask_1P(_Func, progress_reporter<_ProgressType>::_CreateReporter(_Ptr), _Cts, _pRet); - } -#endif - }; - - template - struct _TaskGenerator<_Generator, true, true> - { -#if _MSC_VER >= 1800 - template - static auto _GenerateTaskNoRet(const _Function& _Func, _ClassPtr _Ptr, Concurrency::cancellation_token_source _Cts, const _TaskCreationCallstack & _callstack) - -> decltype(_Generator::_GenerateTask_0(_Func, _Cts, _callstack)) - { - return _Generator::_GenerateTask_2PC(_Func, progress_reporter<_ProgressType>::_CreateReporter(_Ptr), _Cts, _callstack); - } - - template - static auto _GenerateTask(const _Function& _Func, _ClassPtr _Ptr, Concurrency::cancellation_token_source _Cts, _ReturnType* _pRet, const _TaskCreationCallstack & _callstack) - -> decltype(_Generator::_GenerateTask_0(_Func, _Cts, _pRet, _callstack)) - { - return _Generator::_GenerateTask_2PC(_Func, progress_reporter<_ProgressType>::_CreateReporter(_Ptr), _Cts, _pRet, _callstack); - } -#else - template - static auto _GenerateTaskNoRet(const _Function& _Func, _ClassPtr _Ptr, Concurrency::cancellation_token_source _Cts) - -> decltype(_Generator::_GenerateTask_0(_Func, _Cts)) - { - return _Generator::_GenerateTask_2PC(_Func, progress_reporter<_ProgressType>::_CreateReporter(_Ptr), _Cts); - } - - template - static auto _GenerateTask(const _Function& _Func, _ClassPtr _Ptr, Concurrency::cancellation_token_source _Cts, _ReturnType* _pRet) - -> decltype(_Generator::_GenerateTask_0(_Func, _Cts, _pRet)) - { - return _Generator::_GenerateTask_2PC(_Func, progress_reporter<_ProgressType>::_CreateReporter(_Ptr), _Cts, _pRet); - } -#endif - }; - - // *************************************************************************** - // Async Operation Attributes Classes - // - // These classes are passed through the hierarchy of async base classes in order to hold multiple attributes of a given async construct in - // a single container. An attribute class must define: - // - // Mandatory: - // ------------------------- - // - // _AsyncBaseType : The Windows Runtime interface which is being implemented. - // _CompletionDelegateType : The Windows Runtime completion delegate type for the interface. - // _ProgressDelegateType : If _TakesProgress is true, the Windows Runtime progress delegate type for the interface. If it is false, an empty Windows Runtime type. 
- // _ReturnType : The return type of the async construct (void for actions / non-void for operations) - // - // _TakesProgress : An indication as to whether or not - // - // _Generate_Task : A function adapting the user's function into what's necessary to produce the appropriate task - // - // Optional: - // ------------------------- - // - - template - struct _AsyncAttributes - { - }; - - template - struct _AsyncAttributes<_Function, _ProgressType, _ReturnType, _TaskTraits, _TakesToken, true> - { - typedef typename ABI::Windows::Foundation::IAsyncOperationWithProgress<_ReturnType, _ProgressType> _AsyncBaseType; - typedef typename ABI::Windows::Foundation::IAsyncOperationProgressHandler<_ReturnType, _ProgressType> _ProgressDelegateType; - typedef typename ABI::Windows::Foundation::IAsyncOperationWithProgressCompletedHandler<_ReturnType, _ProgressType> _CompletionDelegateType; - typedef typename _ReturnType _ReturnType; - typedef typename ABI::Windows::Foundation::Internal::GetAbiType()))>::type _ReturnType_abi; - typedef typename _ProgressType _ProgressType; - typedef typename ABI::Windows::Foundation::Internal::GetAbiType()))>::type _ProgressType_abi; - typedef typename _TaskTraits::_AsyncKind _AsyncKind; - typedef typename _SelectorTaskGenerator<_AsyncKind, _ReturnType> _SelectorTaskGenerator; - typedef typename _TaskGenerator<_SelectorTaskGenerator, _TakesToken, true> _TaskGenerator; - - static const bool _TakesProgress = true; - static const bool _TakesToken = _TakesToken; - - template -#if _MSC_VER >= 1800 - static task _Generate_Task(const _Function& _Func, _ClassPtr _Ptr, Concurrency::cancellation_token_source _Cts, _ReturnType* _pRet, const _TaskCreationCallstack & _callstack) - { - return _TaskGenerator::_GenerateTask<_Function, _ClassPtr, _ProgressType_abi, _ReturnType>(_Func, _Ptr, _Cts, _pRet, _callstack); - } -#else - static task _Generate_Task(const _Function& _Func, _ClassPtr _Ptr, Concurrency::cancellation_token_source _Cts, _ReturnType* _pRet) - { - return _TaskGenerator::_GenerateTask<_Function, _ClassPtr, _ProgressType_abi, _ReturnType>(_Func, _Ptr, _Cts, _pRet); - } -#endif - }; - - template - struct _AsyncAttributes<_Function, _ProgressType, _ReturnType, _TaskTraits, _TakesToken, false> - { - typedef typename ABI::Windows::Foundation::IAsyncOperation<_ReturnType> _AsyncBaseType; - typedef _Zip _ProgressDelegateType; - typedef typename ABI::Windows::Foundation::IAsyncOperationCompletedHandler<_ReturnType> _CompletionDelegateType; - typedef typename _ReturnType _ReturnType; - typedef typename ABI::Windows::Foundation::Internal::GetAbiType()))>::type _ReturnType_abi; - typedef typename _TaskTraits::_AsyncKind _AsyncKind; - typedef typename _SelectorTaskGenerator<_AsyncKind, _ReturnType> _SelectorTaskGenerator; - typedef typename _TaskGenerator<_SelectorTaskGenerator, _TakesToken, false> _TaskGenerator; - - static const bool _TakesProgress = false; - static const bool _TakesToken = _TakesToken; - - template -#if _MSC_VER >= 1800 - static task _Generate_Task(const _Function& _Func, _ClassPtr _Ptr, Concurrency::cancellation_token_source _Cts, _ReturnType* _pRet, const _TaskCreationCallstack & _callstack) - { - return _TaskGenerator::_GenerateTask<_Function, _ClassPtr, _ProgressType, _ReturnType>(_Func, _Ptr, _Cts, _pRet, _callstack); - } -#else - static task _Generate_Task(const _Function& _Func, _ClassPtr _Ptr, Concurrency::cancellation_token_source _Cts, _ReturnType* _pRet) - { - return _TaskGenerator::_GenerateTask<_Function, _ClassPtr, _ProgressType, _ReturnType>(_Func, 
_Ptr, _Cts, _pRet); - } -#endif - }; - - template - struct _AsyncAttributes<_Function, _ProgressType, void, _TaskTraits, _TakesToken, true> - { - typedef typename ABI::Windows::Foundation::IAsyncActionWithProgress<_ProgressType> _AsyncBaseType; - typedef typename ABI::Windows::Foundation::IAsyncActionProgressHandler<_ProgressType> _ProgressDelegateType; - typedef typename ABI::Windows::Foundation::IAsyncActionWithProgressCompletedHandler<_ProgressType> _CompletionDelegateType; - typedef void _ReturnType; - typedef void _ReturnType_abi; - typedef typename _ProgressType _ProgressType; - typedef typename ABI::Windows::Foundation::Internal::GetAbiType()))>::type _ProgressType_abi; - typedef typename _TaskTraits::_AsyncKind _AsyncKind; - typedef typename _SelectorTaskGenerator<_AsyncKind, _ReturnType> _SelectorTaskGenerator; - typedef typename _TaskGenerator<_SelectorTaskGenerator, _TakesToken, true> _TaskGenerator; - - static const bool _TakesProgress = true; - static const bool _TakesToken = _TakesToken; - -#if _MSC_VER >= 1800 - template - static task _Generate_Task(const _Function& _Func, _ClassPtr _Ptr, Concurrency::cancellation_token_source _Cts, const _TaskCreationCallstack & _callstack) - { - return _TaskGenerator::_GenerateTaskNoRet<_Function, _ClassPtr, _ProgressType_abi>(_Func, _Ptr, _Cts, _callstack); - } - template - static task> _Generate_Task(const _Function& _Func, _ClassPtr _Ptr, Concurrency::cancellation_token_source _Cts, _ReturnType* _pRet, const _TaskCreationCallstack & _callstack) - { - return _TaskGenerator::_GenerateTask<_Function, _ClassPtr, _ProgressType_abi>(_Func, _Ptr, _Cts, _pRet, _callstack); - } -#else - template - static task _Generate_Task(const _Function& _Func, _ClassPtr _Ptr, Concurrency::cancellation_token_source _Cts) - { - return _TaskGenerator::_GenerateTaskNoRet<_Function, _ClassPtr, _ProgressType_abi>(_Func, _Ptr, _Cts); - } - template - static task> _Generate_Task(const _Function& _Func, _ClassPtr _Ptr, Concurrency::cancellation_token_source _Cts, _ReturnType* _pRet) - { - return _TaskGenerator::_GenerateTask<_Function, _ClassPtr, _ProgressType_abi>(_Func, _Ptr, _Cts, _pRet); - } -#endif - }; - - template - struct _AsyncAttributes<_Function, _ProgressType, void, _TaskTraits, _TakesToken, false> - { - typedef typename ABI::Windows::Foundation::IAsyncAction _AsyncBaseType; - typedef _Zip _ProgressDelegateType; - typedef typename ABI::Windows::Foundation::IAsyncActionCompletedHandler _CompletionDelegateType; - typedef void _ReturnType; - typedef void _ReturnType_abi; - typedef typename _TaskTraits::_AsyncKind _AsyncKind; - typedef typename _SelectorTaskGenerator<_AsyncKind, _ReturnType> _SelectorTaskGenerator; - typedef typename _TaskGenerator<_SelectorTaskGenerator, _TakesToken, false> _TaskGenerator; - - static const bool _TakesProgress = false; - static const bool _TakesToken = _TakesToken; - -#if _MSC_VER >= 1800 - template - static task _Generate_Task(const _Function& _Func, _ClassPtr _Ptr, Concurrency::cancellation_token_source _Cts, const _TaskCreationCallstack & _callstack) - { - return _TaskGenerator::_GenerateTaskNoRet<_Function, _ClassPtr, _ProgressType>(_Func, _Ptr, _Cts, _callstack); - } - template - static task> _Generate_Task(const _Function& _Func, _ClassPtr _Ptr, Concurrency::cancellation_token_source _Cts, _ReturnType* _pRet, const _TaskCreationCallstack & _callstack) - { - return _TaskGenerator::_GenerateTask<_Function, _ClassPtr, _ProgressType>(_Func, _Ptr, _Cts, _pRet, _callstack); - } -#else - template - static task 
_Generate_Task(const _Function& _Func, _ClassPtr _Ptr, Concurrency::cancellation_token_source _Cts) - { - return _TaskGenerator::_GenerateTaskNoRet<_Function, _ClassPtr, _ProgressType>(_Func, _Ptr, _Cts); - } - template - static task> _Generate_Task(const _Function& _Func, _ClassPtr _Ptr, Concurrency::cancellation_token_source _Cts, _ReturnType* _pRet) - { - return _TaskGenerator::_GenerateTask<_Function, _ClassPtr, _ProgressType>(_Func, _Ptr, _Cts, _pRet); - } -#endif - }; - - template - struct _AsyncLambdaTypeTraits - { - typedef typename _Unhat::_ReturnType>::_Value _ReturnType; - typedef typename _FunctorTypeTraits<_Function>::_Argument1Type _Argument1Type; - typedef typename _CAFunctorOptions<_Function>::_ProgressType _ProgressType; - - static const bool _TakesProgress = _CAFunctorOptions<_Function>::_TakesProgress; - static const bool _TakesToken = _CAFunctorOptions<_Function>::_TakesToken; - - typedef typename _TaskTypeTraits<_ReturnType> _TaskTraits; - typedef typename _AsyncAttributes<_Function, _ProgressType, typename _TaskTraits::_TaskRetType, _TaskTraits, _TakesToken, _TakesProgress> _AsyncAttributes; - }; - // *************************************************************************** - // AsyncInfo (and completion) Layer: - // -#ifndef RUNTIMECLASS_Concurrency_winrt_details__AsyncInfoBase_DEFINED -#define RUNTIMECLASS_Concurrency_winrt_details__AsyncInfoBase_DEFINED - extern const __declspec(selectany) WCHAR RuntimeClass_Concurrency_winrt_details__AsyncInfoBase[] = L"Concurrency_winrt.details._AsyncInfoBase"; -#endif - - // - // Internal base class implementation for async operations (based on internal Windows representation for ABI level async operations) - // - template < typename _Attributes, _AsyncResultType resultType = SingleResult > - class _AsyncInfoBase abstract : public Microsoft::WRL::RuntimeClass< - Microsoft::WRL::RuntimeClassFlags< Microsoft::WRL::RuntimeClassType::WinRt>, Microsoft::WRL::Implements> - { - InspectableClass(RuntimeClass_Concurrency_winrt_details__AsyncInfoBase, BaseTrust) - public: - _AsyncInfoBase() : - _M_currentStatus(_AsyncStatusInternal::_AsyncCreated), - _M_errorCode(S_OK), - _M_completeDelegate(nullptr), - _M_CompleteDelegateAssigned(0), - _M_CallbackMade(0) - { -#if _MSC_VER < 1800 - _M_id = Concurrency::details::_GetNextAsyncId(); -#else - _M_id = Concurrency::details::platform::GetNextAsyncId(); -#endif - } - public: - virtual STDMETHODIMP GetResults(typename _Attributes::_ReturnType_abi* results) - { - (void)results; - return E_UNEXPECTED; - } - - virtual STDMETHODIMP get_Id(unsigned int* id) - { - HRESULT hr = _CheckValidStateForAsyncInfoCall(); - if (FAILED(hr)) return hr; - if (!id) return E_POINTER; - *id = _M_id; - return S_OK; - } - - virtual STDMETHODIMP put_Id(unsigned int id) - { - HRESULT hr = _CheckValidStateForAsyncInfoCall(); - if (FAILED(hr)) return hr; - - if (id == 0) - { - return E_INVALIDARG; - } - else if (_M_currentStatus != _AsyncStatusInternal::_AsyncCreated) - { - return E_ILLEGAL_METHOD_CALL; - } - - _M_id = id; - return S_OK; - } - virtual STDMETHODIMP get_Status(ABI::Windows::Foundation::AsyncStatus* status) - { - HRESULT hr = _CheckValidStateForAsyncInfoCall(); - if (FAILED(hr)) return hr; - if (!status) return E_POINTER; - - _AsyncStatusInternal _Current = _M_currentStatus; - // - // Map our internal cancel pending to cancelled. 
This way "pending cancelled" looks to the outside as "cancelled" but - // can still transition to "completed" if the operation completes without acknowledging the cancellation request - // - switch (_Current) - { - case _AsyncCancelPending: - _Current = _AsyncCanceled; - break; - case _AsyncCreated: - _Current = _AsyncStarted; - break; - default: - break; - } - - *status = static_cast(_Current); - return S_OK; - } - - virtual STDMETHODIMP get_ErrorCode(HRESULT* errorCode) - { - HRESULT hr = _CheckValidStateForAsyncInfoCall(); - if (FAILED(hr)) return hr; - if (!hr) return hr; - *errorCode = _M_errorCode; - return S_OK; - } - - virtual STDMETHODIMP get_Progress(typename _Attributes::_ProgressDelegateType** _ProgressHandler) - { - return _GetOnProgress(_ProgressHandler); - } - - virtual STDMETHODIMP put_Progress(typename _Attributes::_ProgressDelegateType* _ProgressHandler) - { - return _PutOnProgress(_ProgressHandler); - } - - virtual STDMETHODIMP Cancel() - { - if (_TransitionToState(_AsyncCancelPending)) - { - _OnCancel(); - } - return S_OK; - } - - virtual STDMETHODIMP Close() - { - if (_TransitionToState(_AsyncClosed)) - { - _OnClose(); - } - else - { - if (_M_currentStatus != _AsyncClosed) // Closed => Closed transition is just ignored - { - return E_ILLEGAL_STATE_CHANGE; - } - } - return S_OK; - } - - virtual STDMETHODIMP get_Completed(typename _Attributes::_CompletionDelegateType** _CompleteHandler) - { - _CheckValidStateForDelegateCall(); - if (!_CompleteHandler) return E_POINTER; - *_CompleteHandler = _M_completeDelegate.Get(); - return S_OK; - } - - virtual STDMETHODIMP put_Completed(typename _Attributes::_CompletionDelegateType* _CompleteHandler) - { - _CheckValidStateForDelegateCall(); - // this delegate property is "write once" - if (InterlockedIncrement(&_M_CompleteDelegateAssigned) == 1) - { - _M_completeDelegateContext = _ContextCallback::_CaptureCurrent(); - _M_completeDelegate = _CompleteHandler; - // Guarantee that the write of _M_completeDelegate is ordered with respect to the read of state below - // as perceived from _FireCompletion on another thread. 
- MemoryBarrier(); - if (_IsTerminalState()) - { - _FireCompletion(); - } - } - else - { - return E_ILLEGAL_DELEGATE_ASSIGNMENT; - } - return S_OK; - } - - protected: - // _Start - this is not externally visible since async operations "hot start" before returning to the caller - STDMETHODIMP _Start() - { - if (_TransitionToState(_AsyncStarted)) - { - _OnStart(); - } - else - { - return E_ILLEGAL_STATE_CHANGE; - } - return S_OK; - } - - HRESULT _FireCompletion() - { - HRESULT hr = S_OK; - _TryTransitionToCompleted(); - - // we guarantee that completion can only ever be fired once - if (_M_completeDelegate != nullptr && InterlockedIncrement(&_M_CallbackMade) == 1) - { - hr = _M_completeDelegateContext._CallInContext([=]() -> HRESULT { - ABI::Windows::Foundation::AsyncStatus status; - HRESULT hr; - if (SUCCEEDED(hr = this->get_Status(&status))) - _M_completeDelegate->Invoke((_Attributes::_AsyncBaseType*)this, status); - _M_completeDelegate = nullptr; - return hr; - }); - } - return hr; - } - - virtual STDMETHODIMP _GetOnProgress(typename _Attributes::_ProgressDelegateType** _ProgressHandler) - { - (void)_ProgressHandler; - return E_UNEXPECTED; - } - - virtual STDMETHODIMP _PutOnProgress(typename _Attributes::_ProgressDelegateType* _ProgressHandler) - { - (void)_ProgressHandler; - return E_UNEXPECTED; - } - - - bool _TryTransitionToCompleted() - { - return _TransitionToState(_AsyncStatusInternal::_AsyncCompleted); - } - - bool _TryTransitionToCancelled() - { - return _TransitionToState(_AsyncStatusInternal::_AsyncCanceled); - } - - bool _TryTransitionToError(const HRESULT error) - { - _InterlockedCompareExchange(reinterpret_cast(&_M_errorCode), error, S_OK); - return _TransitionToState(_AsyncStatusInternal::_AsyncError); - } - - // This method checks to see if the delegate properties can be - // modified in the current state and generates the appropriate - // error hr in the case of violation. - inline HRESULT _CheckValidStateForDelegateCall() - { - if (_M_currentStatus == _AsyncClosed) - { - return E_ILLEGAL_METHOD_CALL; - } - return S_OK; - } - - // This method checks to see if results can be collected in the - // current state and generates the appropriate error hr in - // the case of a violation. - inline HRESULT _CheckValidStateForResultsCall() - { - _AsyncStatusInternal _Current = _M_currentStatus; - - if (_Current == _AsyncError) - { - return _M_errorCode; - } -#pragma warning(push) -#pragma warning(disable: 4127) // Conditional expression is constant - // single result illegal before transition to Completed or Cancelled state - if (resultType == SingleResult) -#pragma warning(pop) - { - if (_Current != _AsyncCompleted) - { - return E_ILLEGAL_METHOD_CALL; - } - } - // multiple results can be called after Start has been called and before/after Completed - else if (_Current != _AsyncStarted && - _Current != _AsyncCancelPending && - _Current != _AsyncCanceled && - _Current != _AsyncCompleted) - { - return E_ILLEGAL_METHOD_CALL; - } - return S_OK; - } - - // This method can be called by derived classes periodically to determine - // whether the asynchronous operation should continue processing or should - // be halted. - inline bool _ContinueAsyncOperation() - { - return _M_currentStatus == _AsyncStarted; - } - - // These two methods are used to allow the async worker implementation do work on - // state transitions. No real "work" should be done in these methods. In other words - // they should not block for a long time on UI timescales. 
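    // The task adaptation layer below supplies the concrete hooks:
    // _AsyncTaskGeneratorThunk::_OnStart creates the wrapped task from the user's
    // functor, _AsyncTaskThunk::_OnCancel cancels the internal
    // cancellation_token_source, and _AsyncTaskThunk::_OnClose is a no-op.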
- virtual void _OnStart() = 0; - virtual void _OnClose() = 0; - virtual void _OnCancel() = 0; - - private: - - // This method is used to check if calls to the AsyncInfo properties - // (id, status, errorcode) are legal in the current state. It also - // generates the appropriate error hr to return in the case of an - // illegal call. - inline HRESULT _CheckValidStateForAsyncInfoCall() - { - _AsyncStatusInternal _Current = _M_currentStatus; - if (_Current == _AsyncClosed) - { - return E_ILLEGAL_METHOD_CALL; - } - else if (_Current == _AsyncCreated) - { - return E_ASYNC_OPERATION_NOT_STARTED; - } - return S_OK; - } - - inline bool _TransitionToState(const _AsyncStatusInternal _NewState) - { - _AsyncStatusInternal _Current = _M_currentStatus; - - // This enforces the valid state transitions of the asynchronous worker object - // state machine. - switch (_NewState) - { - case _AsyncStatusInternal::_AsyncStarted: - if (_Current != _AsyncCreated) - { - return false; - } - break; - case _AsyncStatusInternal::_AsyncCompleted: - if (_Current != _AsyncStarted && _Current != _AsyncCancelPending) - { - return false; - } - break; - case _AsyncStatusInternal::_AsyncCancelPending: - if (_Current != _AsyncStarted) - { - return false; - } - break; - case _AsyncStatusInternal::_AsyncCanceled: - if (_Current != _AsyncStarted && _Current != _AsyncCancelPending) - { - return false; - } - break; - case _AsyncStatusInternal::_AsyncError: - if (_Current != _AsyncStarted && _Current != _AsyncCancelPending) - { - return false; - } - break; - case _AsyncStatusInternal::_AsyncClosed: - if (!_IsTerminalState(_Current)) - { - return false; - } - break; - default: - return false; - break; - } - - // attempt the transition to the new state - // Note: if currentStatus_ == _Current, then there was no intervening write - // by the async work object and the swap succeeded. - _AsyncStatusInternal _RetState = static_cast<_AsyncStatusInternal>( - _InterlockedCompareExchange(reinterpret_cast(&_M_currentStatus), - _NewState, - static_cast(_Current))); - - // ICE returns the former state, if the returned state and the - // state we captured at the beginning of this method are the same, - // the swap succeeded. 
- return (_RetState == _Current); - } - - inline bool _IsTerminalState() - { - return _IsTerminalState(_M_currentStatus); - } - - inline bool _IsTerminalState(_AsyncStatusInternal status) - { - return (status == _AsyncError || - status == _AsyncCanceled || - status == _AsyncCompleted || - status == _AsyncClosed); - } - - private: - - _ContextCallback _M_completeDelegateContext; - Microsoft::WRL::ComPtr _M_completeDelegate; //ComPtr cannot be volatile as it does not have volatile accessors - _AsyncStatusInternal volatile _M_currentStatus; - HRESULT volatile _M_errorCode; - unsigned int _M_id; - long volatile _M_CompleteDelegateAssigned; - long volatile _M_CallbackMade; - }; - - // *************************************************************************** - // Progress Layer (optional): - // - - template< typename _Attributes, bool _HasProgress, _AsyncResultType _ResultType = SingleResult > - class _AsyncProgressBase abstract : public _AsyncInfoBase<_Attributes, _ResultType> - { - }; - - template< typename _Attributes, _AsyncResultType _ResultType> - class _AsyncProgressBase<_Attributes, true, _ResultType> abstract : public _AsyncInfoBase<_Attributes, _ResultType> - { - public: - - _AsyncProgressBase() : _AsyncInfoBase<_Attributes, _ResultType>(), - _M_progressDelegate(nullptr) - { - } - - virtual STDMETHODIMP _GetOnProgress(typename _Attributes::_ProgressDelegateType** _ProgressHandler) override - { - HRESULT hr = _CheckValidStateForDelegateCall(); - if (FAILED(hr)) return hr; - *_ProgressHandler = _M_progressDelegate; - return S_OK; - } - - virtual STDMETHODIMP _PutOnProgress(typename _Attributes::_ProgressDelegateType* _ProgressHandler) override - { - HRESULT hr = _CheckValidStateForDelegateCall(); - if (FAILED(hr)) return hr; - _M_progressDelegate = _ProgressHandler; - _M_progressDelegateContext = _ContextCallback::_CaptureCurrent(); - return S_OK; - } - - public: - - void _FireProgress(const typename _Attributes::_ProgressType_abi& _ProgressValue) - { - if (_M_progressDelegate != nullptr) - { - _M_progressDelegateContext._CallInContext([=]() -> HRESULT { - _M_progressDelegate->Invoke((_Attributes::_AsyncBaseType*)this, _ProgressValue); - return S_OK; - }); - } - } - - private: - - _ContextCallback _M_progressDelegateContext; - typename _Attributes::_ProgressDelegateType* _M_progressDelegate; - }; - - template - class _AsyncBaseProgressLayer abstract : public _AsyncProgressBase<_Attributes, _Attributes::_TakesProgress, _ResultType> - { - }; - - // *************************************************************************** - // Task Adaptation Layer: - // - - // - // _AsyncTaskThunkBase provides a bridge between IAsync and task. - // - template - class _AsyncTaskThunkBase abstract : public _AsyncBaseProgressLayer<_Attributes> - { - public: - - //AsyncAction* - virtual STDMETHODIMP GetResults() - { - HRESULT hr = _CheckValidStateForResultsCall(); - if (FAILED(hr)) return hr; - _M_task.get(); - return S_OK; - } - public: - typedef task<_ReturnType> _TaskType; - - _AsyncTaskThunkBase(const _TaskType& _Task) - : _M_task(_Task) - { - } - - _AsyncTaskThunkBase() - { - } -#if _MSC_VER < 1800 - void _SetTaskCreationAddressHint(void* _SourceAddressHint) - { - if (!(std::is_same<_Attributes::_AsyncKind, _TypeSelectorAsyncTask>::value)) - { - // Overwrite the creation address with the return address of create_async unless the - // lambda returned a task. If the create async lambda returns a task, that task is reused and - // we want to preserve its creation address hint. 
- _M_task._SetTaskCreationAddressHint(_SourceAddressHint); - } - } -#endif - protected: - virtual void _OnStart() override - { - _M_task.then([=](_TaskType _Antecedent) -> HRESULT { - try - { - _Antecedent.get(); - } - catch (Concurrency::task_canceled&) - { - _TryTransitionToCancelled(); - } - catch (IRestrictedErrorInfo*& _Ex) - { - HRESULT hr; - HRESULT _hr; - hr = _Ex->GetErrorDetails(NULL, &_hr, NULL, NULL); - if (SUCCEEDED(hr)) hr = _hr; - _TryTransitionToError(hr); - } - catch (...) - { - _TryTransitionToError(E_FAIL); - } - return _FireCompletion(); - }); - } - - protected: - _TaskType _M_task; - Concurrency::cancellation_token_source _M_cts; - }; - - template - class _AsyncTaskReturn abstract : public _AsyncTaskThunkBase<_Attributes, _Return> - { - public: - //AsyncOperation* - virtual STDMETHODIMP GetResults(_ReturnType* results) - { - HRESULT hr = _CheckValidStateForResultsCall(); - if (FAILED(hr)) return hr; - _M_task.get(); - *results = _M_results; - return S_OK; - } - template -#if _MSC_VER >= 1800 - void DoCreateTask(_Function _func, const _TaskCreationCallstack & _callstack) - { - _M_task = _Attributes::_Generate_Task(_func, this, _M_cts, &_M_results, _callstack); - } -#else - void DoCreateTask(_Function _func) - { - _M_task = _Attributes::_Generate_Task(_func, this, _M_cts, &_M_results); - } -#endif - protected: - _ReturnType _M_results; - }; - - template - class _AsyncTaskReturn<_Attributes, _ReturnType, void> abstract : public _AsyncTaskThunkBase<_Attributes, void> - { - public: - template -#if _MSC_VER >= 1800 - void DoCreateTask(_Function _func, const _TaskCreationCallstack & _callstack) - { - _M_task = _Attributes::_Generate_Task(_func, this, _M_cts, _callstack); - } -#else - void DoCreateTask(_Function _func) - { - _M_task = _Attributes::_Generate_Task(_func, this, _M_cts); - } -#endif - }; - - template - class _AsyncTaskReturn<_Attributes, void, task> abstract : public _AsyncTaskThunkBase<_Attributes, task> - { - public: - template -#if _MSC_VER >= 1800 - void DoCreateTask(_Function _func, const _TaskCreationCallstack & _callstack) - { - _M_task = _Attributes::_Generate_Task(_func, this, _M_cts, &_M_results, _callstack); - } -#else - void DoCreateTask(_Function _func) - { - _M_task = _Attributes::_Generate_Task(_func, this, _M_cts, &_M_results); - } -#endif - protected: - task _M_results; - }; - - template - class _AsyncTaskThunk : public _AsyncTaskReturn<_Attributes, typename _Attributes::_ReturnType_abi, typename _Attributes::_ReturnType> - { - public: - - _AsyncTaskThunk(const _TaskType& _Task) : - _AsyncTaskThunkBase(_Task) - { - } - - _AsyncTaskThunk() - { - } - - protected: - - virtual void _OnClose() override - { - } - - virtual void _OnCancel() override - { - _M_cts.cancel(); - } - }; - - // *************************************************************************** - // Async Creation Layer: - // - template - class _AsyncTaskGeneratorThunk : public _AsyncTaskThunk::_AsyncAttributes> - { - public: - - typedef typename _AsyncLambdaTypeTraits<_Function>::_AsyncAttributes _Attributes; - typedef typename _AsyncTaskThunk<_Attributes> _Base; - typedef typename _Attributes::_AsyncBaseType _AsyncBaseType; - -#if _MSC_VER >= 1800 - _AsyncTaskGeneratorThunk(const _Function& _Func, const _TaskCreationCallstack &_callstack) : _M_func(_Func), _M_creationCallstack(_callstack) -#else - _AsyncTaskGeneratorThunk(const _Function& _Func) : _M_func(_Func) -#endif - { - // Virtual call here is safe as the class is declared 'sealed' - _Start(); - } - - protected: - - // - // The 
only thing we must do different from the base class is we must spin the hot task on transition from Created->Started. Otherwise, - // let the base thunk handle everything. - // - - virtual void _OnStart() override - { - // - // Call the appropriate task generator to actually produce a task of the expected type. This might adapt the user lambda for progress reports, - // wrap the return result in a task, or allow for direct return of a task depending on the form of the lambda. - // -#if _MSC_VER >= 1800 - DoCreateTask<_Function>(_M_func, _M_creationCallstack); -#else - DoCreateTask<_Function>(_M_func); -#endif - _Base::_OnStart(); - } - - virtual void _OnCancel() override - { - _Base::_OnCancel(); - } - - private: -#if _MSC_VER >= 1800 - _TaskCreationCallstack _M_creationCallstack; -#endif - _Function _M_func; - }; -} // namespace details - -/// -/// Creates a Windows Runtime asynchronous construct based on a user supplied lambda or function object. The return type of create_async is -/// one of either IAsyncAction^, IAsyncActionWithProgress<TProgress>^, IAsyncOperation<TResult>^, or -/// IAsyncOperationWithProgress<TResult, TProgress>^ based on the signature of the lambda passed to the method. -/// -/// -/// The lambda or function object from which to create a Windows Runtime asynchronous construct. -/// -/// -/// An asynchronous construct represented by an IAsyncAction^, IAsyncActionWithProgress<TProgress>^, IAsyncOperation<TResult>^, or an -/// IAsyncOperationWithProgress<TResult, TProgress>^. The interface returned depends on the signature of the lambda passed into the function. -/// -/// -/// The return type of the lambda determines whether the construct is an action or an operation. -/// Lambdas that return void cause the creation of actions. Lambdas that return a result of type TResult cause the creation of -/// operations of TResult. -/// The lambda may also return a task<TResult> which encapsulates the aysnchronous work within itself or is the continuation of -/// a chain of tasks that represent the asynchronous work. In this case, the lambda itself is executed inline, since the tasks are the ones that -/// execute asynchronously, and the return type of the lambda is unwrapped to produce the asynchronous construct returned by create_async. -/// This implies that a lambda that returns a task<void> will cause the creation of actions, and a lambda that returns a task<TResult> will -/// cause the creation of operations of TResult. -/// The lambda may take either zero, one or two arguments. The valid arguments are progress_reporter<TProgress> and -/// cancellation_token, in that order if both are used. A lambda without arguments causes the creation of an asynchronous construct without -/// the capability for progress reporting. A lambda that takes a progress_reporter<TProgress> will cause create_async to return an asynchronous -/// construct which reports progress of type TProgress each time the report method of the progress_reporter object is called. A lambda that -/// takes a cancellation_token may use that token to check for cancellation, or pass it to tasks that it creates so that cancellation of the -/// asynchronous construct causes cancellation of those tasks. -/// If the body of the lambda or function object returns a result (and not a task<TResult>), the lamdba will be executed -/// asynchronously within the process MTA in the context of a task the Runtime implicitly creates for it. The IAsyncInfo::Cancel method will -/// cause cancellation of the implicit task. 
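A minimal usage sketch of the mapping described in these remarks, written against the
classic Concurrency::create_async from <ppltasks.h> (compiled as C++/CX). The ABI-level
variant declared below adapts HRESULT-returning functors, so the exact functor signatures
it accepts differ; the sketch only illustrates how the lambda shape selects the returned
interface.

    #include <ppltasks.h>
    using namespace concurrency;

    void create_async_shapes()
    {
        // void lambda                         -> IAsyncAction^
        auto action = create_async([]() { });

        // lambda returning int                -> IAsyncOperation<int>^
        auto operation = create_async([]() { return 42; });

        // progress_reporter<double> argument  -> IAsyncOperationWithProgress<int, double>^
        auto with_progress = create_async([](progress_reporter<double> reporter) {
            reporter.report(0.5);
            return 42;
        });

        // cancellation_token argument         -> IAsyncAction^ that observes cancellation
        auto cancelable = create_async([](cancellation_token token) {
            while (!token.is_canceled()) { /* do a slice of cooperative work */ }
        });
    }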
-/// If the body of the lambda returns a task, the lamba executes inline, and by declaring the lambda to take an argument of type -/// cancellation_token you can trigger cancellation of any tasks you create within the lambda by passing that token in when you create them. -/// You may also use the register_callback method on the token to cause the Runtime to invoke a callback when you call IAsyncInfo::Cancel on -/// the async operation or action produced.. -/// This function is only available to Windows Store apps. -/// -/// -/// -/// -/**/ -template -__declspec(noinline) // Ask for no inlining so that the _ReturnAddress intrinsic gives us the expected result -details::_AsyncTaskGeneratorThunk<_Function>* create_async(const _Function& _Func) -{ - static_assert(std::is_same(_Func, 0, 0, 0, 0, 0, 0, 0, 0)), std::true_type>::value, - "argument to create_async must be a callable object taking zero, one, two or three arguments"); -#if _MSC_VER >= 1800 - Microsoft::WRL::ComPtr> _AsyncInfo = Microsoft::WRL::Make>(_Func, _CAPTURE_CALLSTACK()); -#else - Microsoft::WRL::ComPtr> _AsyncInfo = Microsoft::WRL::Make>(_Func); - _AsyncInfo->_SetTaskCreationAddressHint(_ReturnAddress()); -#endif - return _AsyncInfo.Detach(); -} - -namespace details -{ -#if _MSC_VER < 1800 - // Internal API which retrieves the next async id. - _CRTIMP2 unsigned int __cdecl _GetNextAsyncId(); -#endif - // Helper struct for when_all operators to know when tasks have completed - template - struct _RunAllParam - { - _RunAllParam() : _M_completeCount(0), _M_numTasks(0) - { - } - - void _Resize(size_t _Len, bool _SkipVector = false) - { - _M_numTasks = _Len; - if (!_SkipVector) -#if _MSC_VER >= 1800 - { - _M_vector._Result.resize(_Len); - } -#else - _M_vector.resize(_Len); - _M_contexts.resize(_Len); -#endif - } - - task_completion_event<_Unit_type> _M_completed; - atomic_size_t _M_completeCount; -#if _MSC_VER >= 1800 - _ResultHolder > _M_vector; - _ResultHolder<_Type> _M_mergeVal; -#else - std::vector<_Type> _M_vector; - std::vector<_ContextCallback> _M_contexts; - _Type _M_mergeVal; -#endif - size_t _M_numTasks; - }; - -#if _MSC_VER >= 1800 - template - struct _RunAllParam > - { - _RunAllParam() : _M_completeCount(0), _M_numTasks(0) - { - } - - void _Resize(size_t _Len, bool _SkipVector = false) - { - _M_numTasks = _Len; - - if (!_SkipVector) - { - _M_vector.resize(_Len); - } - } - - task_completion_event<_Unit_type> _M_completed; - std::vector<_ResultHolder > > _M_vector; - atomic_size_t _M_completeCount; - size_t _M_numTasks; - }; -#endif - - // Helper struct specialization for void - template<> -#if _MSC_VER >= 1800 - struct _RunAllParam<_Unit_type> -#else - struct _RunAllParam -#endif - { - _RunAllParam() : _M_completeCount(0), _M_numTasks(0) - { - } - - void _Resize(size_t _Len) - { - _M_numTasks = _Len; - } - - task_completion_event<_Unit_type> _M_completed; - atomic_size_t _M_completeCount; - size_t _M_numTasks; - }; - - inline void _JoinAllTokens_Add(const Concurrency::cancellation_token_source& _MergedSrc, Concurrency::details::_CancellationTokenState *_PJoinedTokenState) - { - if (_PJoinedTokenState != nullptr && _PJoinedTokenState != Concurrency::details::_CancellationTokenState::_None()) - { - Concurrency::cancellation_token _T = Concurrency::cancellation_token::_FromImpl(_PJoinedTokenState); - _T.register_callback([=](){ - _MergedSrc.cancel(); - }); - } - } - - template - void _WhenAllContinuationWrapper(_RunAllParam<_ElementType>* _PParam, _Function _Func, task<_TaskType>& _Task) - { - if 
(_Task._GetImpl()->_IsCompleted()) - { - _Func(); -#if _MSC_VER >= 1800 - if (Concurrency::details::atomic_increment(_PParam->_M_completeCount) == _PParam->_M_numTasks) -#else - if (_InterlockedIncrementSizeT(&_PParam->_M_completeCount) == _PParam->_M_numTasks) -#endif - { - // Inline execute its direct continuation, the _ReturnTask - _PParam->_M_completed.set(_Unit_type()); - // It's safe to delete it since all usage of _PParam in _ReturnTask has been finished. - delete _PParam; - } - } - else - { - _CONCRT_ASSERT(_Task._GetImpl()->_IsCanceled()); - if (_Task._GetImpl()->_HasUserException()) - { - // _Cancel will return false if the TCE is already canceled with or without exception - _PParam->_M_completed._Cancel(_Task._GetImpl()->_GetExceptionHolder()); - } - else - { - _PParam->_M_completed._Cancel(); - } -#if _MSC_VER >= 1800 - if (Concurrency::details::atomic_increment(_PParam->_M_completeCount) == _PParam->_M_numTasks) -#else - if (_InterlockedIncrementSizeT(&_PParam->_M_completeCount) == _PParam->_M_numTasks) -#endif - { - delete _PParam; - } - } - } - - template - struct _WhenAllImpl - { -#if _MSC_VER >= 1800 - static task> _Perform(const task_options& _TaskOptions, _Iterator _Begin, _Iterator _End) -#else - static task> _Perform(Concurrency::details::_CancellationTokenState *_PTokenState, _Iterator _Begin, _Iterator _End) -#endif - { -#if _MSC_VER >= 1800 - Concurrency::details::_CancellationTokenState *_PTokenState = _TaskOptions.has_cancellation_token() ? _TaskOptions.get_cancellation_token()._GetImplValue() : nullptr; -#endif - auto _PParam = new _RunAllParam<_ElementType>(); - Concurrency::cancellation_token_source _MergedSource; - - // Step1: Create task completion event. -#if _MSC_VER >= 1800 - task_options _Options(_TaskOptions); - _Options.set_cancellation_token(_MergedSource.get_token()); - task<_Unit_type> _All_tasks_completed(_PParam->_M_completed, _Options); -#else - task<_Unit_type> _All_tasks_completed(_PParam->_M_completed, _MergedSource.get_token()); -#endif - // The return task must be created before step 3 to enforce inline execution. - auto _ReturnTask = _All_tasks_completed._Then([=](_Unit_type, std::vector<_ElementType>* retVal) -> HRESULT { -#if _MSC_VER >= 1800 - * retVal = _PParam->_M_vector.Get(); -#else - auto _Result = _PParam->_M_vector; // copy by value - - size_t _Index = 0; - for (auto _It = _Result.begin(); _It != _Result.end(); ++_It) - { - *_It = _ResultContext<_ElementType>::_GetValue(*_It, _PParam->_M_contexts[_Index++], false); - } - *retVal = _Result; -#endif - return S_OK; -#if _MSC_VER >= 1800 - }, nullptr); -#else - }, nullptr, true); -#endif - // Step2: Combine and check tokens, and count elements in range. - if (_PTokenState) - { - details::_JoinAllTokens_Add(_MergedSource, _PTokenState); - _PParam->_Resize(static_cast(std::distance(_Begin, _End))); - } - else - { - size_t _TaskNum = 0; - for (auto _PTask = _Begin; _PTask != _End; ++_PTask) - { - _TaskNum++; - details::_JoinAllTokens_Add(_MergedSource, _PTask->_GetImpl()->_M_pTokenState); - } - _PParam->_Resize(_TaskNum); - } - - // Step3: Check states of previous tasks. 
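    // Flow of the remainder of this function: an empty range completes the event
    // immediately; otherwise each input task gets a continuation that stores its
    // result into slot _Index of _PParam->_M_vector and calls
    // _WhenAllContinuationWrapper, which sets _M_completed when the last task
    // finishes, or cancels it (propagating any stored exception) if a task was
    // canceled or threw.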
- if (_Begin == _End) - { - _PParam->_M_completed.set(_Unit_type()); - delete _PParam; - } - else - { - size_t _Index = 0; - for (auto _PTask = _Begin; _PTask != _End; ++_PTask) - { - if (_PTask->is_apartment_aware()) - { - _ReturnTask._SetAsync(); - } - - _PTask->_Then([_PParam, _Index](task<_ElementType> _ResultTask) -> HRESULT { - -#if _MSC_VER >= 1800 - // Dev10 compiler bug - typedef _ElementType _ElementTypeDev10; - auto _PParamCopy = _PParam; - auto _IndexCopy = _Index; - auto _Func = [_PParamCopy, _IndexCopy, &_ResultTask](){ - _PParamCopy->_M_vector._Result[_IndexCopy] = _ResultTask._GetImpl()->_GetResult(); - }; -#else - auto _Func = [_PParam, _Index, &_ResultTask](){ - _PParam->_M_vector[_Index] = _ResultTask._GetImpl()->_GetResult(); - _PParam->_M_contexts[_Index] = _ResultContext<_ElementType>::_GetContext(false); - }; -#endif - _WhenAllContinuationWrapper(_PParam, _Func, _ResultTask); - return S_OK; -#if _MSC_VER >= 1800 - }, Concurrency::details::_CancellationTokenState::_None()); -#else - }, Concurrency::details::_CancellationTokenState::_None(), false); -#endif - - _Index++; - } - } - - return _ReturnTask; - } - }; - - template - struct _WhenAllImpl, _Iterator> - { -#if _MSC_VER >= 1800 - static task> _Perform(const task_options& _TaskOptions, _Iterator _Begin, _Iterator _End) -#else - static task> _Perform(Concurrency::details::_CancellationTokenState *_PTokenState, _Iterator _Begin, _Iterator _End) -#endif - { -#if _MSC_VER >= 1800 - Concurrency::details::_CancellationTokenState *_PTokenState = _TaskOptions.has_cancellation_token() ? _TaskOptions.get_cancellation_token()._GetImplValue() : nullptr; -#endif - auto _PParam = new _RunAllParam>(); - Concurrency::cancellation_token_source _MergedSource; - - // Step1: Create task completion event. -#if _MSC_VER >= 1800 - task_options _Options(_TaskOptions); - _Options.set_cancellation_token(_MergedSource.get_token()); - task<_Unit_type> _All_tasks_completed(_PParam->_M_completed, _Options); -#else - task<_Unit_type> _All_tasks_completed(_PParam->_M_completed, _MergedSource.get_token()); -#endif - // The return task must be created before step 3 to enforce inline execution. - auto _ReturnTask = _All_tasks_completed._Then([=](_Unit_type, std::vector<_ElementType>* retVal) -> HRESULT { - _CONCRT_ASSERT(_PParam->_M_completeCount == _PParam->_M_numTasks); - std::vector<_ElementType> _Result; - for (size_t _I = 0; _I < _PParam->_M_numTasks; _I++) - { -#if _MSC_VER >= 1800 - const std::vector<_ElementType>& _Vec = _PParam->_M_vector[_I].Get(); -#else - std::vector<_ElementType>& _Vec = _PParam->_M_vector[_I]; - - for (auto _It = _Vec.begin(); _It != _Vec.end(); ++_It) - { - *_It = _ResultContext<_ElementType>::_GetValue(*_It, _PParam->_M_contexts[_I], false); - } -#endif - _Result.insert(_Result.end(), _Vec.begin(), _Vec.end()); - } - *retVal = _Result; - return S_OK; -#if _MSC_VER >= 1800 - }, nullptr); -#else - }, nullptr, true); -#endif - - // Step2: Combine and check tokens, and count elements in range. - if (_PTokenState) - { - details::_JoinAllTokens_Add(_MergedSource, _PTokenState); - _PParam->_Resize(static_cast(std::distance(_Begin, _End))); - } - else - { - size_t _TaskNum = 0; - for (auto _PTask = _Begin; _PTask != _End; ++_PTask) - { - _TaskNum++; - details::_JoinAllTokens_Add(_MergedSource, _PTask->_GetImpl()->_M_pTokenState); - } - _PParam->_Resize(_TaskNum); - } - - // Step3: Check states of previous tasks. 
- if (_Begin == _End) - { - _PParam->_M_completed.set(_Unit_type()); - delete _PParam; - } - else - { - size_t _Index = 0; - for (auto _PTask = _Begin; _PTask != _End; ++_PTask) - { - if (_PTask->is_apartment_aware()) - { - _ReturnTask._SetAsync(); - } - - _PTask->_Then([_PParam, _Index](task> _ResultTask) -> HRESULT { -#if _MSC_VER >= 1800 - // Dev10 compiler bug - typedef _ElementType _ElementTypeDev10; - auto _PParamCopy = _PParam; - auto _IndexCopy = _Index; - auto _Func = [_PParamCopy, _IndexCopy, &_ResultTask]() { - _PParamCopy->_M_vector[_IndexCopy].Set(_ResultTask._GetImpl()->_GetResult()); - }; -#else - auto _Func = [_PParam, _Index, &_ResultTask]() { - _PParam->_M_vector[_Index] = _ResultTask._GetImpl()->_GetResult(); - _PParam->_M_contexts[_Index] = _ResultContext<_ElementType>::_GetContext(false); - }; -#endif - _WhenAllContinuationWrapper(_PParam, _Func, _ResultTask); - return S_OK; -#if _MSC_VER >= 1800 - }, Concurrency::details::_CancellationTokenState::_None()); -#else - }, Concurrency::details::_CancellationTokenState::_None(), false); -#endif - - _Index++; - } - } - - return _ReturnTask; - } - }; - - template - struct _WhenAllImpl - { -#if _MSC_VER >= 1800 - static task _Perform(const task_options& _TaskOptions, _Iterator _Begin, _Iterator _End) -#else - static task _Perform(Concurrency::details::_CancellationTokenState *_PTokenState, _Iterator _Begin, _Iterator _End) -#endif - { -#if _MSC_VER >= 1800 - Concurrency::details::_CancellationTokenState *_PTokenState = _TaskOptions.has_cancellation_token() ? _TaskOptions.get_cancellation_token()._GetImplValue() : nullptr; -#endif - auto _PParam = new _RunAllParam<_Unit_type>(); - Concurrency::cancellation_token_source _MergedSource; - - // Step1: Create task completion event. -#if _MSC_VER >= 1800 - task_options _Options(_TaskOptions); - _Options.set_cancellation_token(_MergedSource.get_token()); - task<_Unit_type> _All_tasks_completed(_PParam->_M_completed, _Options); -#else - task<_Unit_type> _All_tasks_completed(_PParam->_M_completed, _MergedSource.get_token()); -#endif - // The return task must be created before step 3 to enforce inline execution. - auto _ReturnTask = _All_tasks_completed._Then([=](_Unit_type) -> HRESULT { return S_OK; -#if _MSC_VER >= 1800 - }, nullptr); -#else - }, nullptr, false); -#endif - - // Step2: Combine and check tokens, and count elements in range. - if (_PTokenState) - { - details::_JoinAllTokens_Add(_MergedSource, _PTokenState); - _PParam->_Resize(static_cast(std::distance(_Begin, _End))); - } - else - { - size_t _TaskNum = 0; - for (auto _PTask = _Begin; _PTask != _End; ++_PTask) - { - _TaskNum++; - details::_JoinAllTokens_Add(_MergedSource, _PTask->_GetImpl()->_M_pTokenState); - } - _PParam->_Resize(_TaskNum); - } - - // Step3: Check states of previous tasks. 
- if (_Begin == _End) - { - _PParam->_M_completed.set(_Unit_type()); - delete _PParam; - } - else - { - for (auto _PTask = _Begin; _PTask != _End; ++_PTask) - { - if (_PTask->is_apartment_aware()) - { - _ReturnTask._SetAsync(); - } - - _PTask->_Then([_PParam](task _ResultTask) -> HRESULT { - - auto _Func = []() -> HRESULT { return S_OK; }; - _WhenAllContinuationWrapper(_PParam, _Func, _ResultTask); - return S_OK; -#if _MSC_VER >= 1800 - }, Concurrency::details::_CancellationTokenState::_None()); -#else - }, Concurrency::details::_CancellationTokenState::_None(), false); -#endif - } - } - - return _ReturnTask; - } - }; - - template - task> _WhenAllVectorAndValue(const task>& _VectorTask, const task<_ReturnType>& _ValueTask, - bool _OutputVectorFirst) - { - auto _PParam = new _RunAllParam<_ReturnType>(); - Concurrency::cancellation_token_source _MergedSource; - - // Step1: Create task completion event. - task<_Unit_type> _All_tasks_completed(_PParam->_M_completed, _MergedSource.get_token()); - // The return task must be created before step 3 to enforce inline execution. - auto _ReturnTask = _All_tasks_completed._Then([=](_Unit_type, std::vector<_ReturnType>* retVal) -> HRESULT { - _CONCRT_ASSERT(_PParam->_M_completeCount == 2); -#if _MSC_VER >= 1800 - auto _Result = _PParam->_M_vector.Get(); // copy by value - auto _mergeVal = _PParam->_M_mergeVal.Get(); -#else - auto _Result = _PParam->_M_vector; // copy by value - for (auto _It = _Result.begin(); _It != _Result.end(); ++_It) - { - *_It = _ResultContext<_ReturnType>::_GetValue(*_It, _PParam->_M_contexts[0], false); - } -#endif - - if (_OutputVectorFirst == true) - { -#if _MSC_VER >= 1800 - _Result.push_back(_mergeVal); -#else - _Result.push_back(_ResultContext<_ReturnType>::_GetValue(_PParam->_M_mergeVal, _PParam->_M_contexts[1], false)); -#endif - } - else - { -#if _MSC_VER >= 1800 - _Result.insert(_Result.begin(), _mergeVal); -#else - _Result.insert(_Result.begin(), _ResultContext<_ReturnType>::_GetValue(_PParam->_M_mergeVal, _PParam->_M_contexts[1], false)); -#endif - } - *retVal = _Result; - return S_OK; - }, nullptr, true); - - // Step2: Combine and check tokens. - _JoinAllTokens_Add(_MergedSource, _VectorTask._GetImpl()->_M_pTokenState); - _JoinAllTokens_Add(_MergedSource, _ValueTask._GetImpl()->_M_pTokenState); - - // Step3: Check states of previous tasks. 
- _PParam->_Resize(2, true); - - if (_VectorTask.is_apartment_aware() || _ValueTask.is_apartment_aware()) - { - _ReturnTask._SetAsync(); - } - _VectorTask._Then([_PParam](task> _ResultTask) -> HRESULT { -#if _MSC_VER >= 1800 - // Dev10 compiler bug - typedef _ReturnType _ReturnTypeDev10; - auto _PParamCopy = _PParam; - auto _Func = [_PParamCopy, &_ResultTask]() { - auto _ResultLocal = _ResultTask._GetImpl()->_GetResult(); - _PParamCopy->_M_vector.Set(_ResultLocal); - }; -#else - auto _Func = [_PParam, &_ResultTask]() { - _PParam->_M_vector = _ResultTask._GetImpl()->_GetResult(); - _PParam->_M_contexts[0] = _ResultContext<_ReturnType>::_GetContext(false); - }; -#endif - - _WhenAllContinuationWrapper(_PParam, _Func, _ResultTask); - return S_OK; -#if _MSC_VER >= 1800 - }, _CancellationTokenState::_None()); -#else - }, _CancellationTokenState::_None(), false); -#endif - _ValueTask._Then([_PParam](task<_ReturnType> _ResultTask) -> HRESULT { -#if _MSC_VER >= 1800 - // Dev10 compiler bug - typedef _ReturnType _ReturnTypeDev10; - auto _PParamCopy = _PParam; - auto _Func = [_PParamCopy, &_ResultTask]() { - auto _ResultLocal = _ResultTask._GetImpl()->_GetResult(); - _PParamCopy->_M_mergeVal.Set(_ResultLocal); - }; -#else - auto _Func = [_PParam, &_ResultTask]() { - _PParam->_M_mergeVal = _ResultTask._GetImpl()->_GetResult(); - _PParam->_M_contexts[1] = _ResultContext<_ReturnType>::_GetContext(false); - }; -#endif - _WhenAllContinuationWrapper(_PParam, _Func, _ResultTask); - return S_OK; -#if _MSC_VER >= 1800 - }, _CancellationTokenState::_None()); -#else - }, _CancellationTokenState::_None(), false); -#endif - - return _ReturnTask; - } -} // namespace details - -#if _MSC_VER < 1800 -/// -/// Creates a task that will complete successfully when all of the tasks supplied as arguments complete successfully. -/// -/// -/// The type of the input iterator. -/// -/// -/// The position of the first element in the range of elements to be combined into the resulting task. -/// -/// -/// The position of the first element beyond the range of elements to be combined into the resulting task. -/// -/// -/// A task that completes sucessfully when all of the input tasks have completed successfully. If the input tasks are of type T, -/// the output of this function will be a task<std::vector<T>>. If the input tasks are of type void the output -/// task will also be a task<void>. -/// -/// -/// If one of the tasks is canceled or throws an exception, the returned task will complete early, in the canceled state, and the exception, -/// if one is encoutered, will be thrown if you call get() or wait() on that task. -/// -/// -/**/ -template -auto when_all(_Iterator _Begin, _Iterator _End) --> decltype (details::_WhenAllImpl::value_type::result_type, _Iterator>::_Perform(nullptr, _Begin, _End)) -{ - typedef typename std::iterator_traits<_Iterator>::value_type::result_type _ElementType; - return details::_WhenAllImpl<_ElementType, _Iterator>::_Perform(nullptr, _Begin, _End); -} -#endif - -/// -/// Creates a task that will complete successfully when all of the tasks supplied as arguments complete successfully. -/// -/// -/// The type of the input iterator. -/// -/// -/// The position of the first element in the range of elements to be combined into the resulting task. -/// -/// -/// The position of the first element beyond the range of elements to be combined into the resulting task. -/// -/// -/// The cancellation token which controls cancellation of the returned task. 
If you do not provide a cancellation token, the resulting -/// task will be created with a token that is a combination of all the cancelable tokens (tokens created by methods other than -/// cancellation_token::none()of the tasks supplied. -/// -/// -/// A task that completes sucessfully when all of the input tasks have completed successfully. If the input tasks are of type T, -/// the output of this function will be a task<std::vector<T>>. If the input tasks are of type void the output -/// task will also be a task<void>. -/// -/// -/// If one of the tasks is canceled or throws an exception, the returned task will complete early, in the canceled state, and the exception, -/// if one is encoutered, will be thrown if you call get() or wait() on that task. -/// -/// -/**/ -template -#if _MSC_VER >= 1800 -auto when_all(_Iterator _Begin, _Iterator _End, const task_options& _TaskOptions = task_options()) --> decltype (details::_WhenAllImpl::value_type::result_type, _Iterator>::_Perform(_TaskOptions, _Begin, _End)) -{ - typedef typename std::iterator_traits<_Iterator>::value_type::result_type _ElementType; - return details::_WhenAllImpl<_ElementType, _Iterator>::_Perform(_TaskOptions, _Begin, _End); -} -#else -auto when_all(_Iterator _Begin, _Iterator _End, Concurrency::cancellation_token _CancellationToken) --> decltype (details::_WhenAllImpl::value_type::result_type, _Iterator>::_Perform(_CancellationToken._GetImplValue(), _Begin, _End)) -{ - typedef typename std::iterator_traits<_Iterator>::value_type::result_type _ElementType; - return details::_WhenAllImpl<_ElementType, _Iterator>::_Perform(_CancellationToken._GetImplValue(), _Begin, _End); -} -#endif - -/// -/// Creates a task that will complete successfully when both of the tasks supplied as arguments complete successfully. -/// -/// -/// The type of the returned task. -/// -/// -/// The first task to combine into the resulting task. -/// -/// -/// The second task to combine into the resulting task. -/// -/// -/// A task that completes successfully when both of the input tasks have completed successfully. If the input tasks are of type T, -/// the output of this function will be a task<std::vector<T>>. If the input tasks are of type void the output -/// task will also be a task<void>. -/// To allow for a construct of the sort taskA && taskB && taskC, which are combined in pairs, the && operator -/// produces a task<std::vector<T>> if either one or both of the tasks are of type task<std::vector<T>>. -/// -/// -/// If one of the tasks is canceled or throws an exception, the returned task will complete early, in the canceled state, and the exception, -/// if one is encoutered, will be thrown if you call get() or wait() on that task. -/// -/// -/**/ -template -task> operator&&(const task<_ReturnType> & _Lhs, const task<_ReturnType> & _Rhs) -{ - task<_ReturnType> _PTasks[2] = { _Lhs, _Rhs }; - return when_all(_PTasks, _PTasks + 2); -} - -/// -/// Creates a task that will complete successfully when both of the tasks supplied as arguments complete successfully. -/// -/// -/// The type of the returned task. -/// -/// -/// The first task to combine into the resulting task. -/// -/// -/// The second task to combine into the resulting task. -/// -/// -/// A task that completes successfully when both of the input tasks have completed successfully. If the input tasks are of type T, -/// the output of this function will be a task<std::vector<T>>. If the input tasks are of type void the output -/// task will also be a task<void>. 
-/// To allow for a construct of the sort taskA && taskB && taskC, which are combined in pairs, the && operator -/// produces a task<std::vector<T>> if either one or both of the tasks are of type task<std::vector<T>>. -/// -/// -/// If one of the tasks is canceled or throws an exception, the returned task will complete early, in the canceled state, and the exception, -/// if one is encoutered, will be thrown if you call get() or wait() on that task. -/// -/// -/**/ -template -task> operator&&(const task> & _Lhs, const task<_ReturnType> & _Rhs) -{ - return details::_WhenAllVectorAndValue(_Lhs, _Rhs, true); -} - -/// -/// Creates a task that will complete successfully when both of the tasks supplied as arguments complete successfully. -/// -/// -/// The type of the returned task. -/// -/// -/// The first task to combine into the resulting task. -/// -/// -/// The second task to combine into the resulting task. -/// -/// -/// A task that completes successfully when both of the input tasks have completed successfully. If the input tasks are of type T, -/// the output of this function will be a task<std::vector<T>>. If the input tasks are of type void the output -/// task will also be a task<void>. -/// To allow for a construct of the sort taskA && taskB && taskC, which are combined in pairs, the && operator -/// produces a task<std::vector<T>> if either one or both of the tasks are of type task<std::vector<T>>. -/// -/// -/// If one of the tasks is canceled or throws an exception, the returned task will complete early, in the canceled state, and the exception, -/// if one is encoutered, will be thrown if you call get() or wait() on that task. -/// -/// -/**/ -template -task> operator&&(const task<_ReturnType> & _Lhs, const task> & _Rhs) -{ - return details::_WhenAllVectorAndValue(_Rhs, _Lhs, false); -} - -/// -/// Creates a task that will complete successfully when both of the tasks supplied as arguments complete successfully. -/// -/// -/// The type of the returned task. -/// -/// -/// The first task to combine into the resulting task. -/// -/// -/// The second task to combine into the resulting task. -/// -/// -/// A task that completes successfully when both of the input tasks have completed successfully. If the input tasks are of type T, -/// the output of this function will be a task<std::vector<T>>. If the input tasks are of type void the output -/// task will also be a task<void>. -/// To allow for a construct of the sort taskA && taskB && taskC, which are combined in pairs, the && operator -/// produces a task<std::vector<T>> if either one or both of the tasks are of type task<std::vector<T>>. -/// -/// -/// If one of the tasks is canceled or throws an exception, the returned task will complete early, in the canceled state, and the exception, -/// if one is encoutered, will be thrown if you call get() or wait() on that task. -/// -/// -/**/ -template -task> operator&&(const task> & _Lhs, const task> & _Rhs) -{ - task> _PTasks[2] = { _Lhs, _Rhs }; - return when_all(_PTasks, _PTasks + 2); -} - -/// -/// Creates a task that will complete successfully when both of the tasks supplied as arguments complete successfully. -/// -/// -/// The type of the returned task. -/// -/// -/// The first task to combine into the resulting task. -/// -/// -/// The second task to combine into the resulting task. -/// -/// -/// A task that completes successfully when both of the input tasks have completed successfully. 
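For illustration, a minimal sketch of the range form of when_all and the && composition
defined above, written against the standard concurrency::task from <ppltasks.h>; the
HRESULT-based variant in this header documents the same semantics but takes differently
shaped functors. Names here are illustrative only.

    #include <ppltasks.h>
    #include <vector>
    using namespace concurrency;

    void when_all_sketch()
    {
        std::vector<task<int>> tasks;
        tasks.push_back(task<int>([]() { return 1; }));
        tasks.push_back(task<int>([]() { return 2; }));

        // Range form: yields every result once all inputs have completed.
        task<std::vector<int>> all = when_all(tasks.begin(), tasks.end());

        // Operator form: equivalent to when_all over the two operands.
        task<std::vector<int>> both = tasks[0] && tasks[1];

        all.then([](std::vector<int> results) { /* use results[0], results[1] */ });
    }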
If the input tasks are of type T, -/// the output of this function will be a task<std::vector<T>>. If the input tasks are of type void the output -/// task will also be a task<void>. -/// To allow for a construct of the sort taskA && taskB && taskC, which are combined in pairs, the && operator -/// produces a task<std::vector<T>> if either one or both of the tasks are of type task<std::vector<T>>. -/// -/// -/// If one of the tasks is canceled or throws an exception, the returned task will complete early, in the canceled state, and the exception, -/// if one is encoutered, will be thrown if you call get() or wait() on that task. -/// -/// -/**/ -inline task operator&&(const task & _Lhs, const task & _Rhs) -{ - task _PTasks[2] = { _Lhs, _Rhs }; - return when_all(_PTasks, _PTasks + 2); -} - -namespace details -{ - // Helper struct for when_any operators to know when tasks have completed - template - struct _RunAnyParam - { - _RunAnyParam() : _M_completeCount(0), _M_numTasks(0), _M_exceptionRelatedToken(nullptr), _M_fHasExplicitToken(false) - { - } - ~_RunAnyParam() - { - if (Concurrency::details::_CancellationTokenState::_IsValid(_M_exceptionRelatedToken)) - _M_exceptionRelatedToken->_Release(); - } - task_completion_event<_CompletionType> _M_Completed; - Concurrency::cancellation_token_source _M_cancellationSource; - Concurrency::details::_CancellationTokenState* _M_exceptionRelatedToken; - atomic_size_t _M_completeCount; - size_t _M_numTasks; - bool _M_fHasExplicitToken; - }; - - template - void _WhenAnyContinuationWrapper(_RunAnyParam<_CompletionType> * _PParam, const _Function & _Func, task<_TaskType>& _Task) - { - bool _IsTokenCancled = !_PParam->_M_fHasExplicitToken && _Task._GetImpl()->_M_pTokenState != Concurrency::details::_CancellationTokenState::_None() && _Task._GetImpl()->_M_pTokenState->_IsCanceled(); - if (_Task._GetImpl()->_IsCompleted() && !_IsTokenCancled) - { - _Func(); -#if _MSC_VER >= 1800 - if (Concurrency::details::atomic_increment(_PParam->_M_completeCount) == _PParam->_M_numTasks) -#else - if (_InterlockedIncrementSizeT(&_PParam->_M_completeCount) == _PParam->_M_numTasks) -#endif - { - delete _PParam; - } - } - else - { - _CONCRT_ASSERT(_Task._GetImpl()->_IsCanceled() || _IsTokenCancled); - if (_Task._GetImpl()->_HasUserException() && !_IsTokenCancled) - { - if (_PParam->_M_Completed._StoreException(_Task._GetImpl()->_GetExceptionHolder())) - { - // This can only enter once. - _PParam->_M_exceptionRelatedToken = _Task._GetImpl()->_M_pTokenState; - _CONCRT_ASSERT(_PParam->_M_exceptionRelatedToken); - // Deref token will be done in the _PParam destructor. - if (_PParam->_M_exceptionRelatedToken != Concurrency::details::_CancellationTokenState::_None()) - { - _PParam->_M_exceptionRelatedToken->_Reference(); - } - } - } - -#if _MSC_VER >= 1800 - if (Concurrency::details::atomic_increment(_PParam->_M_completeCount) == _PParam->_M_numTasks) -#else - if (_InterlockedIncrementSizeT(&_PParam->_M_completeCount) == _PParam->_M_numTasks) -#endif - { - // If no one has be completed so far, we need to make some final cancellation decision. - if (!_PParam->_M_Completed._IsTriggered()) - { - // If we already explicit token, we can skip the token join part. 
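    // That is: when the caller supplied an explicit cancellation token the merged
    // source is already tied to it, so no join is needed; otherwise the token
    // captured from the task that threw (if any), or the current task's token, is
    // joined into the merged source before the completion event is cancelled below.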
- if (!_PParam->_M_fHasExplicitToken) - { - if (_PParam->_M_exceptionRelatedToken) - { - details::_JoinAllTokens_Add(_PParam->_M_cancellationSource, _PParam->_M_exceptionRelatedToken); - } - else - { - // If haven't captured any exception token yet, there was no exception for all those tasks, - // so just pick a random token (current one) for normal cancellation. - details::_JoinAllTokens_Add(_PParam->_M_cancellationSource, _Task._GetImpl()->_M_pTokenState); - } - } - // Do exception cancellation or normal cancellation based on whether it has stored exception. - _PParam->_M_Completed._Cancel(); - } - delete _PParam; - } - } - } - - template - struct _WhenAnyImpl - { -#if _MSC_VER >= 1800 - static task> _Perform(const task_options& _TaskOptions, _Iterator _Begin, _Iterator _End) -#else - static task> _Perform(Concurrency::details::_CancellationTokenState *_PTokenState, _Iterator _Begin, _Iterator _End) -#endif - { - if (_Begin == _End) - { - throw Concurrency::invalid_operation("when_any(begin, end) cannot be called on an empty container."); - } -#if _MSC_VER >= 1800 - Concurrency::details::_CancellationTokenState *_PTokenState = _TaskOptions.has_cancellation_token() ? _TaskOptions.get_cancellation_token()._GetImplValue() : nullptr; -#endif - auto _PParam = new _RunAnyParam, Concurrency::details::_CancellationTokenState *>>(); - - if (_PTokenState) - { - details::_JoinAllTokens_Add(_PParam->_M_cancellationSource, _PTokenState); - _PParam->_M_fHasExplicitToken = true; - } -#if _MSC_VER >= 1800 - task_options _Options(_TaskOptions); - _Options.set_cancellation_token(_PParam->_M_cancellationSource.get_token()); - task, Concurrency::details::_CancellationTokenState *>> _Any_tasks_completed(_PParam->_M_Completed, _Options); -#else - task, Concurrency::details::_CancellationTokenState *>> _Any_tasks_completed(_PParam->_M_Completed, _PParam->_M_cancellationSource.get_token()); - _Any_tasks_completed._GetImpl()->_M_fRuntimeAggregate = true; -#endif - // Keep a copy ref to the token source - auto _CancellationSource = _PParam->_M_cancellationSource; - - _PParam->_M_numTasks = static_cast(std::distance(_Begin, _End)); - size_t index = 0; - for (auto _PTask = _Begin; _PTask != _End; ++_PTask) - { - if (_PTask->is_apartment_aware()) - { - _Any_tasks_completed._SetAsync(); - } - - _PTask->_Then([_PParam, index](task<_ElementType> _ResultTask) -> HRESULT { -#if _MSC_VER >= 1800 - auto _PParamCopy = _PParam; // Dev10 - auto _IndexCopy = index; // Dev10 - auto _Func = [&_ResultTask, _PParamCopy, _IndexCopy]() { - _PParamCopy->_M_Completed.set(std::make_pair(std::make_pair(_ResultTask._GetImpl()->_GetResult(), _IndexCopy), _ResultTask._GetImpl()->_M_pTokenState)); - }; -#else - auto _Func = [&_ResultTask, _PParam, index]() { - _PParam->_M_Completed.set(std::make_pair(std::make_pair(_ResultTask._GetImpl()->_GetResult(), index), _ResultTask._GetImpl()->_M_pTokenState)); - }; -#endif - _WhenAnyContinuationWrapper(_PParam, _Func, _ResultTask); - return S_OK; -#if _MSC_VER >= 1800 - }, Concurrency::details::_CancellationTokenState::_None()); -#else - }, Concurrency::details::_CancellationTokenState::_None(), false); -#endif - index++; - } - - // All _Any_tasks_completed._SetAsync() must be finished before this return continuation task being created. 
- return _Any_tasks_completed._Then([=](std::pair, Concurrency::details::_CancellationTokenState *> _Result, std::pair<_ElementType, size_t>* retVal) -> HRESULT { - _CONCRT_ASSERT(_Result.second); - if (!_PTokenState) - { - details::_JoinAllTokens_Add(_CancellationSource, _Result.second); - } - *retVal = _Result.first; - return S_OK; -#if _MSC_VER >= 1800 - }, nullptr); -#else - }, nullptr, true); -#endif - } - }; - - template - struct _WhenAnyImpl - { -#if _MSC_VER >= 1800 - static task _Perform(const task_options& _TaskOptions, _Iterator _Begin, _Iterator _End) -#else - static task _Perform(Concurrency::details::_CancellationTokenState *_PTokenState, _Iterator _Begin, _Iterator _End) -#endif - { - if (_Begin == _End) - { - throw Concurrency::invalid_operation("when_any(begin, end) cannot be called on an empty container."); - } -#if _MSC_VER >= 1800 - Concurrency::details::_CancellationTokenState *_PTokenState = _TaskOptions.has_cancellation_token() ? _TaskOptions.get_cancellation_token()._GetImplValue() : nullptr; -#endif - auto _PParam = new _RunAnyParam>(); - - if (_PTokenState) - { - details::_JoinAllTokens_Add(_PParam->_M_cancellationSource, _PTokenState); - _PParam->_M_fHasExplicitToken = true; - } - -#if _MSC_VER >= 1800 - task_options _Options(_TaskOptions); - _Options.set_cancellation_token(_PParam->_M_cancellationSource.get_token()); - task> _Any_tasks_completed(_PParam->_M_Completed, _Options); -#else - task> _Any_tasks_completed(_PParam->_M_Completed, _PParam->_M_cancellationSource.get_token()); -#endif - // Keep a copy ref to the token source - auto _CancellationSource = _PParam->_M_cancellationSource; - - _PParam->_M_numTasks = static_cast(std::distance(_Begin, _End)); - size_t index = 0; - for (auto _PTask = _Begin; _PTask != _End; ++_PTask) - { - if (_PTask->is_apartment_aware()) - { - _Any_tasks_completed._SetAsync(); - } - - _PTask->_Then([_PParam, index](task _ResultTask) -> HRESULT { -#if _MSC_VER >= 1800 - auto _PParamCopy = _PParam; // Dev10 - auto _IndexCopy = index; // Dev10 - auto _Func = [&_ResultTask, _PParamCopy, _IndexCopy]() { - _PParamCopy->_M_Completed.set(std::make_pair(_IndexCopy, _ResultTask._GetImpl()->_M_pTokenState)); - }; -#else - auto _Func = [&_ResultTask, _PParam, index]() { - _PParam->_M_Completed.set(std::make_pair(index, _ResultTask._GetImpl()->_M_pTokenState)); - }; -#endif - _WhenAnyContinuationWrapper(_PParam, _Func, _ResultTask); - return S_OK; -#if _MSC_VER >= 1800 - }, Concurrency::details::_CancellationTokenState::_None()); -#else - }, Concurrency::details::_CancellationTokenState::_None(), false); -#endif - - index++; - } - - // All _Any_tasks_completed._SetAsync() must be finished before this return continuation task being created. - return _Any_tasks_completed._Then([=](std::pair _Result, size_t* retVal) -> HRESULT { - _CONCRT_ASSERT(_Result.second); - if (!_PTokenState) - { - details::_JoinAllTokens_Add(_CancellationSource, _Result.second); - } - *retVal = _Result.first; - return S_OK; -#if _MSC_VER >= 1800 - }, nullptr); -#else - }, nullptr, false); -#endif - } - }; -} // namespace details - -/// -/// Creates a task that will complete successfully when any of the tasks supplied as arguments completes successfully. -/// -/// -/// The type of the input iterator. -/// -/// -/// The position of the first element in the range of elements to be combined into the resulting task. -/// -/// -/// The position of the first element beyond the range of elements to be combined into the resulting task. 
-/// -/// -/// A task that completes successfully when any one of the input tasks has completed successfully. If the input tasks are of type T, -/// the output of this function will be a task<std::pair<T, size_t>>>, where the first element of the pair is the result -/// of the completing task, and the second element is the index of the task that finished. If the input tasks are of type void -/// the output is a task<size_t>, where the result is the index of the completing task. -/// -/// -/**/ -template -#if _MSC_VER >= 1800 -auto when_any(_Iterator _Begin, _Iterator _End, const task_options& _TaskOptions = task_options()) --> decltype (details::_WhenAnyImpl::value_type::result_type, _Iterator>::_Perform(_TaskOptions, _Begin, _End)) -{ - typedef typename std::iterator_traits<_Iterator>::value_type::result_type _ElementType; - return details::_WhenAnyImpl<_ElementType, _Iterator>::_Perform(_TaskOptions, _Begin, _End); -} -#else -auto when_any(_Iterator _Begin, _Iterator _End) --> decltype (details::_WhenAnyImpl::value_type::result_type, _Iterator>::_Perform(nullptr, _Begin, _End)) -{ - typedef typename std::iterator_traits<_Iterator>::value_type::result_type _ElementType; - return details::_WhenAnyImpl<_ElementType, _Iterator>::_Perform(nullptr, _Begin, _End); -} -#endif - -/// -/// Creates a task that will complete successfully when any of the tasks supplied as arguments completes successfully. -/// -/// -/// The type of the input iterator. -/// -/// -/// The position of the first element in the range of elements to be combined into the resulting task. -/// -/// -/// The position of the first element beyond the range of elements to be combined into the resulting task. -/// -/// -/// The cancellation token which controls cancellation of the returned task. If you do not provide a cancellation token, the resulting -/// task will receive the cancellation token of the task that causes it to complete. -/// -/// -/// A task that completes successfully when any one of the input tasks has completed successfully. If the input tasks are of type T, -/// the output of this function will be a task<std::pair<T, size_t>>>, where the first element of the pair is the result -/// of the completing task, and the second element is the index of the task that finished. If the input tasks are of type void -/// the output is a task<size_t>, where the result is the index of the completing task. -/// -/// -/**/ -template -auto when_any(_Iterator _Begin, _Iterator _End, Concurrency::cancellation_token _CancellationToken) --> decltype (details::_WhenAnyImpl::value_type::result_type, _Iterator>::_Perform(_CancellationToken._GetImplValue(), _Begin, _End)) -{ - typedef typename std::iterator_traits<_Iterator>::value_type::result_type _ElementType; - return details::_WhenAnyImpl<_ElementType, _Iterator>::_Perform(_CancellationToken._GetImplValue(), _Begin, _End); -} - -/// -/// Creates a task that will complete successfully when either of the tasks supplied as arguments completes successfully. -/// -/// -/// The type of the returned task. -/// -/// -/// The first task to combine into the resulting task. -/// -/// -/// The second task to combine into the resulting task. -/// -/// -/// A task that completes sucessfully when either of the input tasks has completed successfully. If the input tasks are of type T, -/// the output of this function will be a task<std::vector<T>. If the input tasks are of type void the output task -/// will also be a task<void>. 
-/// To allow for a construct of the sort taskA || taskB && taskC, which are combined in pairs, with && taking precedence -/// over ||, the operator|| produces a task<std::vector<T>> if one of the tasks is of type task<std::vector<T>> -/// and the other one is of type task<T>. -/// -/// -/// If both of the tasks are canceled or throw exceptions, the returned task will complete in the canceled state, and one of the exceptions, -/// if any are encountered, will be thrown when you call get() or wait() on that task. -/// -/// -/**/ -template -task<_ReturnType> operator||(const task<_ReturnType> & _Lhs, const task<_ReturnType> & _Rhs) -{ -#if _MSC_VER >= 1800 - auto _PParam = new details::_RunAnyParam>(); - - task> _Any_tasks_completed(_PParam->_M_Completed, _PParam->_M_cancellationSource.get_token()); - // Chain the return continuation task here to ensure it will get inline execution when _M_Completed.set is called, - // So that _PParam can be used before it getting deleted. - auto _ReturnTask = _Any_tasks_completed._Then([=](std::pair<_ReturnType, size_t> _Ret, _ReturnType* retVal) -> HRESULT { - _CONCRT_ASSERT(_Ret.second); - details::_JoinAllTokens_Add(_PParam->_M_cancellationSource, reinterpret_cast(_Ret.second)); - *retVal = _Ret.first; - return S_OK; - }, nullptr); -#else - auto _PParam = new details::_RunAnyParam>(); - - task> _Any_tasks_completed(_PParam->_M_Completed, _PParam->_M_cancellationSource.get_token()); - // Chain the return continuation task here to ensure it will get inline execution when _M_Completed.set is called, - // So that _PParam can be used before it getting deleted. - auto _ReturnTask = _Any_tasks_completed._Then([=](std::pair<_ReturnType, Concurrency::details::_CancellationTokenState *> _Ret, _ReturnType* retVal) -> HRESULT { - _CONCRT_ASSERT(_Ret.second); - details::_JoinAllTokens_Add(_PParam->_M_cancellationSource, _Ret.second); - *retVal = _Ret.first; - return S_OK; - }, nullptr, false); -#endif - if (_Lhs.is_apartment_aware() || _Rhs.is_apartment_aware()) - { - _ReturnTask._SetAsync(); - } - - _PParam->_M_numTasks = 2; - auto _Continuation = [_PParam](task<_ReturnType> _ResultTask) -> HRESULT { -#if _MSC_VER >= 1800 - // Dev10 compiler bug - auto _PParamCopy = _PParam; - auto _Func = [&_ResultTask, _PParamCopy]() { - _PParamCopy->_M_Completed.set(std::make_pair(_ResultTask._GetImpl()->_GetResult(), reinterpret_cast(_ResultTask._GetImpl()->_M_pTokenState))); - }; -#else - auto _Func = [&_ResultTask, _PParam]() { - _PParam->_M_Completed.set(std::make_pair(_ResultTask._GetImpl()->_GetResult(), _ResultTask._GetImpl()->_M_pTokenState)); - }; -#endif - _WhenAnyContinuationWrapper(_PParam, _Func, _ResultTask); - return S_OK; - }; - -#if _MSC_VER >= 1800 - _Lhs._Then(_Continuation, Concurrency::details::_CancellationTokenState::_None()); - _Rhs._Then(_Continuation, Concurrency::details::_CancellationTokenState::_None()); -#else - _Lhs._Then(_Continuation, Concurrency::details::_CancellationTokenState::_None(), false); - _Rhs._Then(_Continuation, Concurrency::details::_CancellationTokenState::_None(), false); -#endif - return _ReturnTask; -} - -/// -/// Creates a task that will complete successfully when any of the tasks supplied as arguments completes successfully. -/// -/// -/// The type of the returned task. -/// -/// -/// The first task to combine into the resulting task. -/// -/// -/// The second task to combine into the resulting task. -/// -/// -/// A task that completes sucessfully when either of the input tasks has completed successfully. 
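    // A minimal usage sketch of these combinators (illustrative only; taskA, taskB and taskC stand for
    // pre-existing task<int> objects and are not names defined in this header):
    //
    //     task<int>              firstOfTwo = taskA || taskB;   // completes with whichever value arrives first
    //     task<std::vector<int>> bothOfTwo  = taskA && taskB;   // completes with both results
    //
    //     std::vector<task<int>> group = { taskA, taskB, taskC };
    //     auto firstOfGroup = when_any(group.begin(), group.end());  // task<std::pair<int, size_t>>: value + index
    //     auto allOfGroup   = when_all(group.begin(), group.end());  // task<std::vector<int>>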
If the input tasks are of type T, -/// the output of this function will be a task<std::vector<T>. If the input tasks are of type void the output task -/// will also be a task<void>. -/// To allow for a construct of the sort taskA || taskB && taskC, which are combined in pairs, with && taking precedence -/// over ||, the operator|| produces a task<std::vector<T>> if one of the tasks is of type task<std::vector<T>> -/// and the other one is of type task<T>. -/// -/// -/// If both of the tasks are canceled or throw exceptions, the returned task will complete in the canceled state, and one of the exceptions, -/// if any are encountered, will be thrown when you call get() or wait() on that task. -/// -/// -/**/ -template -task> operator||(const task> & _Lhs, const task<_ReturnType> & _Rhs) -{ - auto _PParam = new details::_RunAnyParam, Concurrency::details::_CancellationTokenState *>>(); - - task, Concurrency::details::_CancellationTokenState *>> _Any_tasks_completed(_PParam->_M_Completed, _PParam->_M_cancellationSource.get_token()); -#if _MSC_VER < 1800 - _Any_tasks_completed._GetImpl()->_M_fRuntimeAggregate = true; -#endif - // Chain the return continuation task here to ensure it will get inline execution when _M_Completed.set is called, - // So that _PParam can be used before it getting deleted. - auto _ReturnTask = _Any_tasks_completed._Then([=](std::pair, Concurrency::details::_CancellationTokenState *> _Ret, std::vector<_ReturnType>* retVal) -> HRESULT { - _CONCRT_ASSERT(_Ret.second); - details::_JoinAllTokens_Add(_PParam->_M_cancellationSource, _Ret.second); - *retVal = _Ret.first; - return S_OK; - }, nullptr, true); - - if (_Lhs.is_apartment_aware() || _Rhs.is_apartment_aware()) - { - _ReturnTask._SetAsync(); - } - - _PParam->_M_numTasks = 2; - _Lhs._Then([_PParam](task> _ResultTask) -> HRESULT { -#if _MSC_VER >= 1800 - // Dev10 compiler bug - auto _PParamCopy = _PParam; - auto _Func = [&_ResultTask, _PParamCopy]() { - auto _Result = _ResultTask._GetImpl()->_GetResult(); - _PParamCopy->_M_Completed.set(std::make_pair(_Result, _ResultTask._GetImpl()->_M_pTokenState)); - }; -#else - auto _Func = [&_ResultTask, _PParam]() { - std::vector<_ReturnType> _Result = _ResultTask._GetImpl()->_GetResult(); - _PParam->_M_Completed.set(std::make_pair(_Result, _ResultTask._GetImpl()->_M_pTokenState)); - }; -#endif - _WhenAnyContinuationWrapper(_PParam, _Func, _ResultTask); - return S_OK; -#if _MSC_VER >= 1800 - }, Concurrency::details::_CancellationTokenState::_None()); -#else - }, Concurrency::details::_CancellationTokenState::_None(), false); -#endif - _Rhs._Then([_PParam](task<_ReturnType> _ResultTask) -> HRESULT { -#if _MSC_VER >= 1800 - // Dev10 compiler bug - typedef _ReturnType _ReturnTypeDev10; - auto _PParamCopy = _PParam; - auto _Func = [&_ResultTask, _PParamCopy]() { - auto _Result = _ResultTask._GetImpl()->_GetResult(); - - std::vector<_ReturnTypeDev10> _Vec; - _Vec.push_back(_Result); - _PParamCopy->_M_Completed.set(std::make_pair(_Vec, _ResultTask._GetImpl()->_M_pTokenState)); - }; -#else - auto _Func = [&_ResultTask, _PParam]() { - _ReturnType _Result = _ResultTask._GetImpl()->_GetResult(); - - std::vector<_ReturnType> _Vec; - _Vec.push_back(_Result); - _PParam->_M_Completed.set(std::make_pair(_Vec, _ResultTask._GetImpl()->_M_pTokenState)); - }; -#endif - _WhenAnyContinuationWrapper(_PParam, _Func, _ResultTask); - return S_OK; -#if _MSC_VER >= 1800 - }, Concurrency::details::_CancellationTokenState::_None()); -#else - }, Concurrency::details::_CancellationTokenState::_None(), false); 
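    // Note on the two continuations above: the vector-producing side forwards its result unchanged, while the
    // single-value side wraps its result in a one-element std::vector before setting the completion event, so
    // both branches race to complete the same task<std::vector<_ReturnType>>.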
-#endif - return _ReturnTask; -} - -/// -/// Creates a task that will complete successfully when any of the tasks supplied as arguments completes successfully. -/// -/// -/// The type of the returned task. -/// -/// -/// The first task to combine into the resulting task. -/// -/// -/// The second task to combine into the resulting task. -/// -/// -/// A task that completes sucessfully when either of the input tasks has completed successfully. If the input tasks are of type T, -/// the output of this function will be a task<std::vector<T>. If the input tasks are of type void the output task -/// will also be a task<void>. -/// To allow for a construct of the sort taskA || taskB && taskC, which are combined in pairs, with && taking precedence -/// over ||, the operator|| produces a task<std::vector<T>> if one of the tasks is of type task<std::vector<T>> -/// and the other one is of type task<T>. -/// -/// -/// If both of the tasks are canceled or throw exceptions, the returned task will complete in the canceled state, and one of the exceptions, -/// if any are encountered, will be thrown when you call get() or wait() on that task. -/// -/// -/**/ -template -task> operator||(const task<_ReturnType> & _Lhs, const task> & _Rhs) -{ - return _Rhs || _Lhs; -} - -/// -/// Creates a task that will complete successfully when any of the tasks supplied as arguments completes successfully. -/// -/// -/// The type of the returned task. -/// -/// -/// The first task to combine into the resulting task. -/// -/// -/// The second task to combine into the resulting task. -/// -/// -/// A task that completes sucessfully when either of the input tasks has completed successfully. If the input tasks are of type T, -/// the output of this function will be a task<std::vector<T>. If the input tasks are of type void the output task -/// will also be a task<void>. -/// To allow for a construct of the sort taskA || taskB && taskC, which are combined in pairs, with && taking precedence -/// over ||, the operator|| produces a task<std::vector<T>> if one of the tasks is of type task<std::vector<T>> -/// and the other one is of type task<T>. -/// -/// -/// If both of the tasks are canceled or throw exceptions, the returned task will complete in the canceled state, and one of the exceptions, -/// if any are encountered, will be thrown when you call get() or wait() on that task. -/// -/// -/**/ -inline task operator||(const task & _Lhs, const task & _Rhs) -{ - auto _PParam = new details::_RunAnyParam>(); - - task> _Any_task_completed(_PParam->_M_Completed, _PParam->_M_cancellationSource.get_token()); - // Chain the return continuation task here to ensure it will get inline execution when _M_Completed.set is called, - // So that _PParam can be used before it getting deleted. - auto _ReturnTask = _Any_task_completed._Then([=](std::pair _Ret) -> HRESULT { - _CONCRT_ASSERT(_Ret.second); - details::_JoinAllTokens_Add(_PParam->_M_cancellationSource, _Ret.second); - return S_OK; -#if _MSC_VER >= 1800 - }, nullptr); -#else - }, nullptr, false); -#endif - - if (_Lhs.is_apartment_aware() || _Rhs.is_apartment_aware()) - { - _ReturnTask._SetAsync(); - } - - _PParam->_M_numTasks = 2; - auto _Continuation = [_PParam](task _ResultTask) mutable -> HRESULT { - // Dev10 compiler needs this. 
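        // ("Dev10" refers to Visual Studio 2010; as with the other combinators in this file, the nested lambda
        //  captures a local copy of _PParam rather than the enclosing lambda's capture, which works around a
        //  VS2010 lambda-capture bug.)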
- auto _PParam1 = _PParam; - auto _Func = [&_ResultTask, _PParam1]() { - _PParam1->_M_Completed.set(std::make_pair(details::_Unit_type(), _ResultTask._GetImpl()->_M_pTokenState)); - }; - _WhenAnyContinuationWrapper(_PParam, _Func, _ResultTask); - return S_OK; - }; - -#if _MSC_VER >= 1800 - _Lhs._Then(_Continuation, Concurrency::details::_CancellationTokenState::_None()); - _Rhs._Then(_Continuation, Concurrency::details::_CancellationTokenState::_None()); -#else - _Lhs._Then(_Continuation, Concurrency::details::_CancellationTokenState::_None(), false); - _Rhs._Then(_Continuation, Concurrency::details::_CancellationTokenState::_None(), false); -#endif - - return _ReturnTask; -} - -#if _MSC_VER >= 1800 -template -task<_Ty> task_from_result(_Ty _Param, const task_options& _TaskOptions = task_options()) -{ - task_completion_event<_Ty> _Tce; - _Tce.set(_Param); - return create_task<_Ty>(_Tce, _TaskOptions); -} - -// Work around VS 2010 compiler bug -#if _MSC_VER == 1600 -inline task task_from_result(bool _Param) -{ - task_completion_event _Tce; - _Tce.set(_Param); - return create_task(_Tce, task_options()); -} -#endif -inline task task_from_result(const task_options& _TaskOptions = task_options()) -{ - task_completion_event _Tce; - _Tce.set(); - return create_task(_Tce, _TaskOptions); -} - -template -task<_TaskType> task_from_exception(_ExType _Exception, const task_options& _TaskOptions = task_options()) -{ - task_completion_event<_TaskType> _Tce; - _Tce.set_exception(_Exception); - return create_task<_TaskType>(_Tce, _TaskOptions); -} - -namespace details -{ - /// - /// A convenient extension to Concurrency: loop until a condition is no longer met - /// - /// - /// A function representing the body of the loop. It will be invoked at least once and - /// then repetitively as long as it returns true. 
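    /// A minimal usage sketch (illustrative; pump_one_frame is a placeholder for any callable that returns a
    /// task resolving to true while more work remains):
    ///
    ///     details::do_while([]() { return pump_one_frame(); });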
- /// - inline - task do_while(std::function(void)> func) - { - task first = func(); - return first.then([=](bool guard, task* retVal) -> HRESULT { - if (guard) - *retVal = do_while(func); - else - *retVal = first; - return S_OK; - }); - } - -} // namespace details -#endif - -} // namespace Concurrency_winrt - -namespace concurrency_winrt = Concurrency_winrt; - -#pragma pop_macro("new") -#pragma warning(pop) -#pragma pack(pop) -#endif - -#endif diff --git a/modules/highgui/src/precomp.hpp b/modules/highgui/src/precomp.hpp index c9517783f9..796af39768 100644 --- a/modules/highgui/src/precomp.hpp +++ b/modules/highgui/src/precomp.hpp @@ -95,16 +95,18 @@ #define CV_WINDOW_MAGIC_VAL 0x00420042 #define CV_TRACKBAR_MAGIC_VAL 0x00420043 -//Yannick Verdie 2010 +//Yannick Verdie 2010, Max Kostin 2015 void cvSetModeWindow_W32(const char* name, double prop_value); void cvSetModeWindow_GTK(const char* name, double prop_value); void cvSetModeWindow_CARBON(const char* name, double prop_value); void cvSetModeWindow_COCOA(const char* name, double prop_value); +void cvSetModeWindow_WinRT(const char* name, double prop_value); double cvGetModeWindow_W32(const char* name); double cvGetModeWindow_GTK(const char* name); double cvGetModeWindow_CARBON(const char* name); double cvGetModeWindow_COCOA(const char* name); +double cvGetModeWindow_WinRT(const char* name); double cvGetPropWindowAutoSize_W32(const char* name); double cvGetPropWindowAutoSize_GTK(const char* name); diff --git a/modules/highgui/src/window.cpp b/modules/highgui/src/window.cpp index cda019102c..03d446dd01 100644 --- a/modules/highgui/src/window.cpp +++ b/modules/highgui/src/window.cpp @@ -65,7 +65,10 @@ CV_IMPL void cvSetWindowProperty(const char* name, int prop_id, double prop_valu cvSetModeWindow_CARBON(name,prop_value); #elif defined (HAVE_COCOA) cvSetModeWindow_COCOA(name,prop_value); + #elif defined (WINRT) + cvSetModeWindow_WinRT(name, prop_value); #endif + break; case CV_WND_PROP_AUTOSIZE: @@ -104,6 +107,8 @@ CV_IMPL double cvGetWindowProperty(const char* name, int prop_id) return cvGetModeWindow_CARBON(name); #elif defined (HAVE_COCOA) return cvGetModeWindow_COCOA(name); + #elif defined (WINRT) + return cvGetModeWindow_WinRT(name); #else return -1; #endif @@ -481,11 +486,12 @@ int cv::createButton(const String&, ButtonCallback, void*, int , bool ) #endif -#if defined(HAVE_WIN32UI) // see window_w32.cpp +#if defined (HAVE_WIN32UI) // see window_w32.cpp #elif defined (HAVE_GTK) // see window_gtk.cpp #elif defined (HAVE_COCOA) // see window_carbon.cpp #elif defined (HAVE_CARBON) -#elif defined (HAVE_QT) //YV see window_QT.cpp +#elif defined (HAVE_QT) // see window_QT.cpp +#elif defined (WINRT) && !defined (WINRT_8_0) // see window_winrt.cpp #else diff --git a/modules/highgui/src/window_w32.cpp b/modules/highgui/src/window_w32.cpp index a644f31bae..957f01a845 100644 --- a/modules/highgui/src/window_w32.cpp +++ b/modules/highgui/src/window_w32.cpp @@ -1874,21 +1874,38 @@ static void showSaveDialog(CvWindow* window) ofn.lStructSize = sizeof(ofn); #endif ofn.hwndOwner = window->hwnd; - ofn.lpstrFilter = "Portable Network Graphics files (*.png)\0*.png\0" - "JPEG files (*.jpeg;*.jpg;*.jpe)\0*.jpeg;*.jpg;*.jpe\0" + ofn.lpstrFilter = +#ifdef HAVE_PNG + "Portable Network Graphics files (*.png)\0*.png\0" +#endif "Windows bitmap (*.bmp;*.dib)\0*.bmp;*.dib\0" +#ifdef HAVE_JPEG + "JPEG files (*.jpeg;*.jpg;*.jpe)\0*.jpeg;*.jpg;*.jpe\0" +#endif +#ifdef HAVE_TIFF "TIFF Files (*.tiff;*.tif)\0*.tiff;*.tif\0" +#endif +#ifdef HAVE_JASPER "JPEG-2000 files 
(*.jp2)\0*.jp2\0" +#endif +#ifdef HAVE_WEBP "WebP files (*.webp)\0*.webp\0" +#endif "Portable image format (*.pbm;*.pgm;*.ppm;*.pxm;*.pnm)\0*.pbm;*.pgm;*.ppm;*.pxm;*.pnm\0" +#ifdef HAVE_OPENEXR "OpenEXR Image files (*.exr)\0*.exr\0" +#endif "Radiance HDR (*.hdr;*.pic)\0*.hdr;*.pic\0" "Sun raster files (*.sr;*.ras)\0*.sr;*.ras\0" "All Files (*.*)\0*.*\0"; ofn.lpstrFile = szFileName; ofn.nMaxFile = MAX_PATH; ofn.Flags = OFN_EXPLORER | OFN_PATHMUSTEXIST | OFN_OVERWRITEPROMPT | OFN_NOREADONLYRETURN | OFN_NOCHANGEDIR; +#ifdef HAVE_PNG ofn.lpstrDefExt = "png"; +#else + ofn.lpstrDefExt = "bmp"; +#endif if (GetSaveFileName(&ofn)) { diff --git a/modules/highgui/src/window_winrt.cpp b/modules/highgui/src/window_winrt.cpp new file mode 100644 index 0000000000..ba81b51c64 --- /dev/null +++ b/modules/highgui/src/window_winrt.cpp @@ -0,0 +1,268 @@ +// highgui to XAML bridge for OpenCV + +// Copyright (c) Microsoft Open Technologies, Inc. +// All rights reserved. +// +// (3 - clause BSD License) +// +// Redistribution and use in source and binary forms, with or without modification, are permitted provided that +// the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the +// following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +// following disclaimer in the documentation and/or other materials provided with the distribution. +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or +// promote products derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED +// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +// PARTICULAR PURPOSE ARE DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY +// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT(INCLUDING +// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. + +#include "precomp.hpp" + +#include +#include +#include +#include +#include +#include +#include +#include + +#define CV_WINRT_NO_GUI_ERROR( funcname ) \ +{ \ + cvError( CV_StsNotImplemented, funcname, \ + "The function is not implemented. 
", \ + __FILE__, __LINE__ ); \ +} + +#define CV_ERROR( Code, Msg ) \ +{ \ + cvError( (Code), cvFuncName, Msg, __FILE__, __LINE__ ); \ +}; + +/********************************** WinRT Specific API Implementation ******************************************/ + +// Initializes or overrides container contents with default XAML markup structure +void cv::winrt_initContainer(::Windows::UI::Xaml::Controls::Panel^ _container) +{ + HighguiBridge::getInstance().setContainer(_container); +} + +/********************************** API Implementation *********************************************************/ + +CV_IMPL void cvShowImage(const char* name, const CvArr* arr) +{ + CV_FUNCNAME("cvShowImage"); + + __BEGIN__; + + CvMat stub, *image; + + if (!name) + CV_ERROR(CV_StsNullPtr, "NULL name"); + + CvWindow* window = HighguiBridge::getInstance().namedWindow(name); + + if (!window || !arr) + return; + + CV_CALL(image = cvGetMat(arr, &stub)); + + //TODO: use approach from window_w32.cpp or cv::Mat(.., .., CV_8UC4) + // and cvtColor(.., .., CV_BGR2BGRA) to convert image here + // than beforehand. + + window->updateImage(image); + HighguiBridge::getInstance().showWindow(window); + + __END__; +} + +CV_IMPL int cvNamedWindow(const char* name, int flags) +{ + CV_FUNCNAME("cvNamedWindow"); + + if (!name) + CV_ERROR(CV_StsNullPtr, "NULL name"); + + HighguiBridge::getInstance().namedWindow(name); + + return CV_OK; +} + +CV_IMPL void cvDestroyWindow(const char* name) +{ + CV_FUNCNAME("cvDestroyWindow"); + + if (!name) + CV_ERROR(CV_StsNullPtr, "NULL name string"); + + HighguiBridge::getInstance().destroyWindow(name); +} + +CV_IMPL void cvDestroyAllWindows() +{ + HighguiBridge::getInstance().destroyAllWindows(); +} + +CV_IMPL int cvCreateTrackbar2(const char* trackbar_name, const char* window_name, + int* val, int count, CvTrackbarCallback2 on_notify, void* userdata) +{ + CV_FUNCNAME("cvCreateTrackbar2"); + + int pos = 0; + + if (!window_name || !trackbar_name) + CV_ERROR(CV_StsNullPtr, "NULL window or trackbar name"); + + if (count < 0) + CV_ERROR(CV_StsOutOfRange, "Bad trackbar max value"); + + CvWindow* window = HighguiBridge::getInstance().namedWindow(window_name); + + if (!window) + { + CV_ERROR(CV_StsNullPtr, "NULL window"); + } + + window->createSlider(trackbar_name, val, count, on_notify, userdata); + + return CV_OK; +} + +CV_IMPL void cvSetTrackbarPos(const char* trackbar_name, const char* window_name, int pos) +{ + CV_FUNCNAME("cvSetTrackbarPos"); + + CvTrackbar* trackbar = 0; + + if (trackbar_name == 0 || window_name == 0) + CV_ERROR(CV_StsNullPtr, "NULL trackbar or window name"); + + CvWindow* window = HighguiBridge::getInstance().findWindowByName(window_name); + if (window) + trackbar = window->findTrackbarByName(trackbar_name); + + if (trackbar) + trackbar->setPosition(pos); +} + +CV_IMPL void cvSetTrackbarMax(const char* trackbar_name, const char* window_name, int maxval) +{ + CV_FUNCNAME("cvSetTrackbarMax"); + + if (maxval >= 0) + { + if (trackbar_name == 0 || window_name == 0) + CV_ERROR(CV_StsNullPtr, "NULL trackbar or window name"); + + CvTrackbar* trackbar = HighguiBridge::getInstance().findTrackbarByName(trackbar_name, window_name); + + if (trackbar) + trackbar->setMaxPosition(maxval); + } +} + +CV_IMPL int cvGetTrackbarPos(const char* trackbar_name, const char* window_name) +{ + int pos = -1; + + CV_FUNCNAME("cvGetTrackbarPos"); + + if (trackbar_name == 0 || window_name == 0) + CV_ERROR(CV_StsNullPtr, "NULL trackbar or window name"); + + CvTrackbar* trackbar = 
HighguiBridge::getInstance().findTrackbarByName(trackbar_name, window_name); + + if (trackbar) + pos = trackbar->getPosition(); + + return pos; +} + +/********************************** Not YET implemented API ****************************************************/ + +CV_IMPL int cvWaitKey(int delay) +{ + CV_WINRT_NO_GUI_ERROR("cvWaitKey"); + + // see https://msdn.microsoft.com/en-us/library/windows/desktop/ms724411(v=vs.85).aspx + int time0 = GetTickCount64(); + + for (;;) + { + CvWindow* window; + + if (delay <= 0) + { + // TODO: implement appropriate logic here + } + } +} + +CV_IMPL void cvSetMouseCallback(const char* window_name, CvMouseCallback on_mouse, void* param) +{ + CV_WINRT_NO_GUI_ERROR("cvSetMouseCallback"); + + CV_FUNCNAME("cvSetMouseCallback"); + + if (!window_name) + CV_ERROR(CV_StsNullPtr, "NULL window name"); + + CvWindow* window = HighguiBridge::getInstance().findWindowByName(window_name); + if (!window) + return; + + // TODO: implement appropriate logic here +} + +/********************************** Disabled or not supported API **********************************************/ + +CV_IMPL void cvMoveWindow(const char* name, int x, int y) +{ + CV_WINRT_NO_GUI_ERROR("cvMoveWindow"); +} + +CV_IMPL void cvResizeWindow(const char* name, int width, int height) +{ + CV_WINRT_NO_GUI_ERROR("cvResizeWindow"); +} + +CV_IMPL int cvInitSystem(int, char**) +{ + CV_WINRT_NO_GUI_ERROR("cvInitSystem"); + return CV_StsNotImplemented; +} + +CV_IMPL void* cvGetWindowHandle(const char*) +{ + CV_WINRT_NO_GUI_ERROR("cvGetWindowHandle"); + return (void*) CV_StsNotImplemented; +} + +CV_IMPL const char* cvGetWindowName(void*) +{ + CV_WINRT_NO_GUI_ERROR("cvGetWindowName"); + return (const char*) CV_StsNotImplemented; +} + +void cvSetModeWindow_WinRT(const char* name, double prop_value) { + CV_WINRT_NO_GUI_ERROR("cvSetModeWindow"); +} + +double cvGetModeWindow_WinRT(const char* name) { + CV_WINRT_NO_GUI_ERROR("cvGetModeWindow"); + return CV_StsNotImplemented; +} + +CV_IMPL int cvStartWindowThread() { + CV_WINRT_NO_GUI_ERROR("cvStartWindowThread"); + return CV_StsNotImplemented; +} \ No newline at end of file diff --git a/modules/highgui/src/window_winrt_bridge.cpp b/modules/highgui/src/window_winrt_bridge.cpp new file mode 100644 index 0000000000..60421d681f --- /dev/null +++ b/modules/highgui/src/window_winrt_bridge.cpp @@ -0,0 +1,346 @@ +// highgui to XAML bridge for OpenCV + +// Copyright (c) Microsoft Open Technologies, Inc. +// All rights reserved. +// +// (3 - clause BSD License) +// +// Redistribution and use in source and binary forms, with or without modification, are permitted provided that +// the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the +// following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +// following disclaimer in the documentation and/or other materials provided with the distribution. +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or +// promote products derived from this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED +// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +// PARTICULAR PURPOSE ARE DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY +// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT(INCLUDING +// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. + +#include "opencv2\highgui\highgui_winrt.hpp" +#include "window_winrt_bridge.hpp" + +#include +#include // Windows::Storage::Streams::IBufferByteAccess + +using namespace Microsoft::WRL; // ComPtr +using namespace Windows::Storage::Streams; // IBuffer +using namespace Windows::UI::Xaml; +using namespace Windows::UI::Xaml::Controls; +using namespace Windows::UI::Xaml::Media::Imaging; + +using namespace ::std; + +/***************************** Constants ****************************************/ + +// Default markup for the container content allowing for proper components placement +const Platform::String^ CvWindow::markupContent = +"\n" \ +" \n" \ +" \n" \ +" \n" \ +" \n" \ +" \n" \ +""; + +const double CvWindow::sliderDefaultWidth = 100; + +/***************************** HighguiBridge class ******************************/ + +HighguiBridge& HighguiBridge::getInstance() +{ + static HighguiBridge instance; + return instance; +} + +void HighguiBridge::setContainer(Windows::UI::Xaml::Controls::Panel^ container) +{ + this->container = container; +} + +CvWindow* HighguiBridge::findWindowByName(cv::String name) +{ + auto search = windowsMap->find(name); + if (search != windowsMap->end()) { + return search->second; + } + + return nullptr; +} + +CvTrackbar* HighguiBridge::findTrackbarByName(cv::String trackbar_name, cv::String window_name) +{ + CvWindow* window = findWindowByName(window_name); + + if (window) + return window->findTrackbarByName(trackbar_name); + + return nullptr; +} + +Platform::String^ HighguiBridge::convertString(cv::String name) +{ + auto data = name.c_str(); + int bufferSize = MultiByteToWideChar(CP_UTF8, 0, data, -1, nullptr, 0); + auto wide = std::make_unique(bufferSize); + if (0 == MultiByteToWideChar(CP_UTF8, 0, data, -1, wide.get(), bufferSize)) + return nullptr; + + std::wstring* stdStr = new std::wstring(wide.get()); + return ref new Platform::String(stdStr->c_str()); +} + +void HighguiBridge::cleanContainer() +{ + container->Children->Clear(); +} + +void HighguiBridge::showWindow(CvWindow* window) +{ + currentWindow = window; + cleanContainer(); + HighguiBridge::getInstance().container->Children->Append(window->getPage()); +} + +CvWindow* HighguiBridge::namedWindow(cv::String name) { + + CvWindow* window = HighguiBridge::getInstance().findWindowByName(name.c_str()); + if (!window) + { + window = createWindow(name); + } + + return window; +} + +void HighguiBridge::destroyWindow(cv::String name) +{ + auto window = windowsMap->find(name); + if (window != windowsMap->end()) + { + // Check if deleted window is the one currently displayed + // and clear container if this is the case + if (window->second == currentWindow) + { + cleanContainer(); + } + + windowsMap->erase(window); + } +} + +void 
HighguiBridge::destroyAllWindows() +{ + cleanContainer(); + windowsMap->clear(); +} + +CvWindow* HighguiBridge::createWindow(cv::String name) +{ + CvWindow* window = new CvWindow(name); + windowsMap->insert(std::pair(name, window)); + + return window; +} + +/***************************** CvTrackbar class *********************************/ + +CvTrackbar::CvTrackbar(cv::String name, Slider^ slider, CvWindow* parent) : name(name), slider(slider), parent(parent) {} + +CvTrackbar::~CvTrackbar() {} + +void CvTrackbar::setPosition(double pos) +{ + if (pos < 0) + pos = 0; + + if (pos > slider->Maximum) + pos = slider->Maximum; + + slider->Value = pos; +} + +void CvTrackbar::setMaxPosition(double pos) +{ + if (pos < 0) + pos = 0; + + slider->Maximum = pos; +} + +void CvTrackbar::setSlider(Slider^ slider) { + if (slider) + this->slider = slider; +} + +double CvTrackbar::getPosition() +{ + return slider->Value; +} + +double CvTrackbar::getMaxPosition() +{ + return slider->Maximum; +} + +Slider^ CvTrackbar::getSlider() +{ + return slider; +} + +/***************************** CvWindow class ***********************************/ + +CvWindow::CvWindow(cv::String name, int flags) : name(name) +{ + this->page = (Page^)Windows::UI::Xaml::Markup::XamlReader::Load(const_cast(markupContent)); + this->sliderMap = new std::map(); + + sliderPanel = (Panel^)page->FindName("cvTrackbar"); + imageControl = (Image^)page->FindName("cvImage"); + buttonPanel = (Panel^)page->FindName("cvButton"); + + // Required to adapt controls to the size of the image. + // System calculates image control width first, after that we can + // update other controls + imageControl->Loaded += ref new Windows::UI::Xaml::RoutedEventHandler( + [=](Platform::Object^ sender, + Windows::UI::Xaml::RoutedEventArgs^ e) + { + // Need to update sliders with appropriate width + for (auto iter = sliderMap->begin(); iter != sliderMap->end(); ++iter) { + iter->second->getSlider()->Width = imageControl->ActualWidth; + } + + // Need to update buttons with appropriate width + // TODO: implement when adding buttons + }); + +} + +CvWindow::~CvWindow() {} + +void CvWindow::createSlider(cv::String name, int* val, int count, CvTrackbarCallback2 on_notify, void* userdata) +{ + CvTrackbar* trackbar = findTrackbarByName(name); + + // Creating slider if name is new or reusing the existing one + Slider^ slider = !trackbar ? ref new Slider() : trackbar->getSlider(); + + slider->Header = HighguiBridge::getInstance().convertString(name); + + // Making slider the same size as the image control or setting minimal size. + // This is added to cover potential edge cases because: + // 1. Fist clause will not be true until the second call to any container-updating API + // e.g. cv::createTrackbar, cv:imshow or cv::namedWindow + // 2. Second clause will work but should be immediately overridden by Image->Loaded callback, + // see CvWindow ctor. + if (this->imageControl->ActualWidth > 0) { + // One would use double.NaN for auto-stretching but there is no such constant in C++/CX + // see https://msdn.microsoft.com/en-us/library/windows/apps/windows.ui.xaml.frameworkelement.width + slider->Width = this->imageControl->ActualWidth; + } else { + // This value would never be used/seen on the screen unless there is something wrong with the image. + // Although this code actually gets called, slider width will be overridden in the callback after + // Image control is loaded. See callback implementation in CvWindow ctor. 
+ slider->Width = sliderDefaultWidth; + } + slider->Value = *val; + slider->Maximum = count; + slider->Visibility = Windows::UI::Xaml::Visibility::Visible; + slider->Margin = Windows::UI::Xaml::ThicknessHelper::FromLengths(10, 10, 10, 0); + slider->HorizontalAlignment = Windows::UI::Xaml::HorizontalAlignment::Left; + + if (!trackbar) + { + if (!sliderPanel) return; + + // Adding slider to the list for current window + CvTrackbar* trackbar = new CvTrackbar(name, slider, this); + trackbar->callback = on_notify; + slider->ValueChanged += + ref new Controls::Primitives::RangeBaseValueChangedEventHandler( + [=](Platform::Object^ sender, + Windows::UI::Xaml::Controls::Primitives::RangeBaseValueChangedEventArgs^ e) + { + Slider^ slider = (Slider^)sender; + trackbar->callback(slider->Value, nullptr); + }); + this->sliderMap->insert(std::pair(name, trackbar)); + + // Adding slider to the window + sliderPanel->Children->Append(slider); + } +} + +CvTrackbar* CvWindow::findTrackbarByName(cv::String name) +{ + auto search = sliderMap->find(name); + if (search != sliderMap->end()) { + return search->second; + } + + return nullptr; +} + +void CvWindow::updateImage(CvMat* src) +{ + if (!imageControl) return; + + this->imageData = src; + this->imageWidth = src->width; + + // Create the WriteableBitmap + WriteableBitmap^ bitmap = ref new WriteableBitmap(src->cols, src->rows); + + // Get access to the pixels + IBuffer^ buffer = bitmap->PixelBuffer; + unsigned char* dstPixels; + + // Obtain IBufferByteAccess + ComPtr pBufferByteAccess; + ComPtr pBuffer((IInspectable*)buffer); + pBuffer.As(&pBufferByteAccess); + + // Get pointer to pixel bytes + pBufferByteAccess->Buffer(&dstPixels); + memcpy(dstPixels, src->data.ptr, CV_ELEM_SIZE(src->type) * src->cols*src->rows); + + // Set the bitmap to the Image element + imageControl->Source = bitmap; +} + +Page^ CvWindow::getPage() +{ + return page; +} + +//TODO: prototype, not in use yet +void CvWindow::createButton(cv::String name) +{ + if (!buttonPanel) return; + + Button^ b = ref new Button(); + b->Content = HighguiBridge::getInstance().convertString(name); + b->Width = 260; + b->Height = 80; + b->Click += ref new Windows::UI::Xaml::RoutedEventHandler( + [=](Platform::Object^ sender, + Windows::UI::Xaml::RoutedEventArgs^ e) + { + Button^ button = (Button^)sender; + // TODO: more logic here... + }); + + buttonPanel->Children->Append(b); +} + +// end \ No newline at end of file diff --git a/modules/highgui/src/window_winrt_bridge.hpp b/modules/highgui/src/window_winrt_bridge.hpp new file mode 100644 index 0000000000..d19dd29c82 --- /dev/null +++ b/modules/highgui/src/window_winrt_bridge.hpp @@ -0,0 +1,233 @@ +// highgui to XAML bridge for OpenCV + +// Copyright (c) Microsoft Open Technologies, Inc. +// All rights reserved. +// +// (3 - clause BSD License) +// +// Redistribution and use in source and binary forms, with or without modification, are permitted provided that +// the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the +// following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +// following disclaimer in the documentation and/or other materials provided with the distribution. +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or +// promote products derived from this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED +// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +// PARTICULAR PURPOSE ARE DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY +// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT(INCLUDING +// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. + +#pragma once + +#include +#include + +using namespace Windows::UI::Xaml::Controls; + +class CvWindow; +class CvTrackbar; + +class HighguiBridge +{ +public: + + /** @brief Instantiates a Highgui singleton (Meyers type). + + The function Instantiates a Highgui singleton (Meyers type) and returns reference to that instance. + */ + static HighguiBridge& getInstance(); + + /** @brief Finds window by name and returns the reference to it. + + @param name Name of the window. + + The function finds window by name and returns the reference to it. Returns nullptr + if window with specified name is not found or name argument is null. + */ + CvWindow* findWindowByName(cv::String name); + + /** @brief Returns reference to the trackbar(slider) registered within window with a provided name. + + @param name Name of the window. + + The function returns reference to the trackbar(slider) registered within window with a provided name. + Returns nullptr if trackbar with specified name is not found or window reference is nullptr. + */ + CvTrackbar* findTrackbarByName(cv::String trackbarName, cv::String windowName); + + /** @brief Converts cv::String to Platform::String. + + @param name String to convert. + + The function converts cv::String to Platform::String. + Returns nullptr if conversion fails. + */ + Platform::String^ convertString(cv::String name); + + /** @brief Creates window if there is no window with this name, otherwise returns existing window. + + @param name Window name. + + The function creates window if there is no window with this name, otherwise returns existing window. + */ + CvWindow* namedWindow(cv::String name); + + /** @brief Shows provided window. + + The function shows provided window: makes provided window current, removes current container + contents and shows current window by putting it as a container content. + */ + void showWindow(CvWindow* window); + + /** @brief Destroys window if there exists window with this name, otherwise does nothing. + + @param name Window name. + + The function destroys window if there exists window with this name, otherwise does nothing. + If window being destroyed is the current one, it will be hidden by clearing the window container. + */ + void destroyWindow(cv::String name); + + /** @brief Destroys all windows. + + The function destroys all windows. + */ + void destroyAllWindows(); + + /** @brief Assigns container used to display windows. + + @param _container Container reference. + + The function assigns container used to display windows. 
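        A minimal wiring sketch from a XAML app (illustrative; imagePanel and the asset path are placeholders,
        not names defined by this bridge):

            cv::winrt_initContainer(imagePanel);     // forwards here via HighguiBridge::setContainer
            cv::namedWindow("lena");
            cv::Mat bgra;
            cv::cvtColor(cv::imread("Assets\\lena.jpg"), bgra, cv::COLOR_BGR2BGRA);
            cv::imshow("lena", bgra);                // rendered into imagePanel as a WriteableBitmap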
+ */ + void setContainer(Windows::UI::Xaml::Controls::Panel^ _container); + +private: + + // Meyers singleton + HighguiBridge(const HighguiBridge &); + void operator=(HighguiBridge &); + HighguiBridge() { + windowsMap = new std::map(); + }; + + /** @brief Creates window if there is no window with this name. + + @param name Window name. + + The function creates window if there is no window with this name. + */ + CvWindow* createWindow(cv::String name); + + /** @brief Cleans current container contents. + + The function cleans current container contents. + */ + void cleanContainer(); + + // see https://msdn.microsoft.com/en-US/library/windows/apps/xaml/hh700103.aspx + // see https://msdn.microsoft.com/ru-ru/library/windows.foundation.collections.aspx + std::map* windowsMap; + CvWindow* currentWindow; + + // Holds current container/content to manipulate with + Windows::UI::Xaml::Controls::Panel^ container; +}; + +class CvTrackbar +{ +public: + CvTrackbar(cv::String name, Slider^ slider, CvWindow* parent); + ~CvTrackbar(); + + double getPosition(); + void setPosition(double pos); + double getMaxPosition(); + void setMaxPosition(double pos); + Slider^ getSlider(); + void setSlider(Slider^ pos); + + CvTrackbarCallback2 callback; + +private: + cv::String name; + Slider^ slider; + CvWindow* parent; +}; + +class CvWindow +{ +public: + CvWindow(cv::String name, int flag = CV_WINDOW_NORMAL); + ~CvWindow(); + + /** @brief NOTE: prototype. + + Should create button if there is no button with this name already. + */ + void createButton(cv::String name); + + /** @brief Creates slider if there is no slider with this name already. + + The function creates slider if there is no slider with this name already OR resets + provided values for the existing one. + */ + void createSlider(cv::String name, int* val, int count, CvTrackbarCallback2 on_notify, void* userdata); + + /** @brief Updates window image. + + @param src Image data object reference. + + The function updates window image. If argument is null or image control is not found - does nothing. + */ + void updateImage(CvMat* arr); + + /** @brief Returns reference to the trackbar(slider) registered within provided window. + + @param name Name of the window. + + The function returns reference to the trackbar(slider) registered within provided window. + Returns nullptr if trackbar with specified name is not found or window reference is nullptr. + */ + CvTrackbar* findTrackbarByName(cv::String name); + Page^ getPage(); + +private: + cv::String name; + + // Holds image data in CV format + CvMat* imageData; + + // Map of all sliders assigned to this window + std::map* sliderMap; + + // Window contents holder + Page^ page; + + // Image control displayed by this window + Image^ imageControl; + + // Container for sliders + Panel^ sliderPanel; + + // Container for buttons + // TODO: prototype, not available via API + Panel^ buttonPanel; + + // Holds image width to arrange other UI elements. 
+ // Required since imageData->width value gets recalculated when processing + int imageWidth; + + // Default markup for the container content allowing for proper components placement + static const Platform::String^ markupContent; + + // Default Slider size, fallback solution for unexpected edge cases + static const double sliderDefaultWidth; +}; \ No newline at end of file diff --git a/modules/imgcodecs/include/opencv2/imgcodecs.hpp b/modules/imgcodecs/include/opencv2/imgcodecs.hpp index 91e44fb869..b7c8633cef 100644 --- a/modules/imgcodecs/include/opencv2/imgcodecs.hpp +++ b/modules/imgcodecs/include/opencv2/imgcodecs.hpp @@ -131,6 +131,14 @@ returns an empty matrix ( Mat::data==NULL ). Currently, the following file forma */ CV_EXPORTS_W Mat imread( const String& filename, int flags = IMREAD_COLOR ); +/** @brief Loads and resizes down an image from a file. +@anchor imread_reduced +@param filename Name of file to be loaded. +@param flags Flag that can take values of @ref cv::ImreadModes +@param scale_denom + */ +CV_EXPORTS_W Mat imread_reduced( const String& filename, int flags = IMREAD_COLOR, int scale_denom=1 ); + /** @brief Loads a multi-page image from a file. (see imread for details.) @param filename Name of file to be loaded. diff --git a/modules/imgcodecs/src/grfmt_base.cpp b/modules/imgcodecs/src/grfmt_base.cpp index 267cb31b57..cda8b10b5e 100644 --- a/modules/imgcodecs/src/grfmt_base.cpp +++ b/modules/imgcodecs/src/grfmt_base.cpp @@ -52,6 +52,7 @@ BaseImageDecoder::BaseImageDecoder() m_width = m_height = 0; m_type = -1; m_buf_supported = false; + m_scale_denom = 1; } bool BaseImageDecoder::setSource( const String& filename ) @@ -81,6 +82,13 @@ bool BaseImageDecoder::checkSignature( const String& signature ) const return signature.size() >= len && memcmp( signature.c_str(), m_signature.c_str(), len ) == 0; } +int BaseImageDecoder::setScale( const int& scale_denom ) +{ + int temp = m_scale_denom; + m_scale_denom = scale_denom; + return temp; +} + ImageDecoder BaseImageDecoder::newDecoder() const { return ImageDecoder(); diff --git a/modules/imgcodecs/src/grfmt_base.hpp b/modules/imgcodecs/src/grfmt_base.hpp index dcb75b0dcd..88e3ca7c1c 100644 --- a/modules/imgcodecs/src/grfmt_base.hpp +++ b/modules/imgcodecs/src/grfmt_base.hpp @@ -67,6 +67,7 @@ public: virtual bool setSource( const String& filename ); virtual bool setSource( const Mat& buf ); + virtual int setScale( const int& scale_denom ); virtual bool readHeader() = 0; virtual bool readData( Mat& img ) = 0; @@ -81,6 +82,7 @@ protected: int m_width; // width of the image ( filled by readHeader ) int m_height; // height of the image ( filled by readHeader ) int m_type; + int m_scale_denom; String m_filename; String m_signature; Mat m_buf; diff --git a/modules/imgcodecs/src/grfmt_jpeg.cpp b/modules/imgcodecs/src/grfmt_jpeg.cpp index d6272e00db..14a2b415dc 100644 --- a/modules/imgcodecs/src/grfmt_jpeg.cpp +++ b/modules/imgcodecs/src/grfmt_jpeg.cpp @@ -242,8 +242,12 @@ bool JpegDecoder::readHeader() { jpeg_read_header( &state->cinfo, TRUE ); - m_width = state->cinfo.image_width; - m_height = state->cinfo.image_height; + state->cinfo.scale_num=1; + state->cinfo.scale_denom = m_scale_denom; + m_scale_denom=1; // trick! to know which decoder used scale_denom see imread_ + jpeg_calc_output_dimensions(&state->cinfo); + m_width = state->cinfo.output_width; + m_height = state->cinfo.output_height; m_type = state->cinfo.num_components > 1 ? 
CV_8UC3 : CV_8UC1; result = true; } @@ -391,7 +395,7 @@ int my_jpeg_load_dht (struct jpeg_decompress_struct *info, unsigned char *dht, bool JpegDecoder::readData( Mat& img ) { - bool result = false; + volatile bool result = false; int step = (int)img.step; bool color = img.channels() > 1; @@ -553,7 +557,7 @@ bool JpegEncoder::write( const Mat& img, const std::vector& params ) fileWrapper() : f(0) {} ~fileWrapper() { if(f) fclose(f); } }; - bool result = false; + volatile bool result = false; fileWrapper fw; int width = img.cols, height = img.rows; diff --git a/modules/imgcodecs/src/grfmt_png.cpp b/modules/imgcodecs/src/grfmt_png.cpp index 19d3f52caf..95a605f63f 100644 --- a/modules/imgcodecs/src/grfmt_png.cpp +++ b/modules/imgcodecs/src/grfmt_png.cpp @@ -224,7 +224,7 @@ bool PngDecoder::readHeader() bool PngDecoder::readData( Mat& img ) { - bool result = false; + volatile bool result = false; AutoBuffer _buffer(m_height); uchar** buffer = _buffer; int color = img.channels() > 1; @@ -342,10 +342,10 @@ bool PngEncoder::write( const Mat& img, const std::vector& params ) { png_structp png_ptr = png_create_write_struct( PNG_LIBPNG_VER_STRING, 0, 0, 0 ); png_infop info_ptr = 0; - FILE* f = 0; + FILE * volatile f = 0; int y, width = img.cols, height = img.rows; int depth = img.depth(), channels = img.channels(); - bool result = false; + volatile bool result = false; AutoBuffer buffer; if( depth != CV_8U && depth != CV_16U ) diff --git a/modules/imgcodecs/src/grfmt_sunras.cpp b/modules/imgcodecs/src/grfmt_sunras.cpp index 6cbefef3c7..34e5c4e837 100644 --- a/modules/imgcodecs/src/grfmt_sunras.cpp +++ b/modules/imgcodecs/src/grfmt_sunras.cpp @@ -96,7 +96,7 @@ bool SunRasterDecoder::readHeader() (m_encoding == RAS_OLD || m_encoding == RAS_STANDARD || (m_type == RAS_BYTE_ENCODED && m_bpp == 8) || m_type == RAS_FORMAT_RGB) && ((m_maptype == RMT_NONE && m_maplength == 0) || - (m_maptype == RMT_EQUAL_RGB && m_maplength <= palSize && m_bpp <= 8))) + (m_maptype == RMT_EQUAL_RGB && m_maplength <= palSize && m_maplength > 0 && m_bpp <= 8))) { memset( m_palette, 0, sizeof(m_palette)); diff --git a/modules/imgcodecs/src/ios_conversions.mm b/modules/imgcodecs/src/ios_conversions.mm index 43268734c9..8f2b4e84fc 100644 --- a/modules/imgcodecs/src/ios_conversions.mm +++ b/modules/imgcodecs/src/ios_conversions.mm @@ -66,6 +66,10 @@ UIImage* MatToUIImage(const cv::Mat& image) { CGDataProviderRef provider = CGDataProviderCreateWithCFData((__bridge CFDataRef)data); + // Preserve alpha transparency, if exists + bool alpha = image.channels() == 4; + CGBitmapInfo bitmapInfo = (alpha ? 
kCGImageAlphaLast : kCGImageAlphaNone) | kCGBitmapByteOrderDefault; + // Creating CGImage from cv::Mat CGImageRef imageRef = CGImageCreate(image.cols, image.rows, @@ -73,8 +77,7 @@ UIImage* MatToUIImage(const cv::Mat& image) { 8 * image.elemSize(), image.step.p[0], colorSpace, - kCGImageAlphaNone| - kCGBitmapByteOrderDefault, + bitmapInfo, provider, NULL, false, diff --git a/modules/imgcodecs/src/loadsave.cpp b/modules/imgcodecs/src/loadsave.cpp index 383c25a2b3..a7bf46f427 100644 --- a/modules/imgcodecs/src/loadsave.cpp +++ b/modules/imgcodecs/src/loadsave.cpp @@ -234,10 +234,11 @@ enum { LOAD_CVMAT=0, LOAD_IMAGE=1, LOAD_MAT=2 }; * LOAD_MAT=2 * } * @param[in] mat Reference to C++ Mat object (If LOAD_MAT) + * @param[in] scale_denom Scale value * */ static void* -imread_( const String& filename, int flags, int hdrtype, Mat* mat=0 ) +imread_( const String& filename, int flags, int hdrtype, Mat* mat=0, int scale_denom=1 ) { IplImage* image = 0; CvMat *matrix = 0; @@ -261,6 +262,9 @@ imread_( const String& filename, int flags, int hdrtype, Mat* mat=0 ) return 0; } + /// set the scale_denom in the driver + decoder->setScale( scale_denom ); + /// set the filename in the driver decoder->setSource(filename); @@ -316,6 +320,12 @@ imread_( const String& filename, int flags, int hdrtype, Mat* mat=0 ) return 0; } + int testdecoder = decoder->setScale( scale_denom ); // if decoder is JpegDecoder then testdecoder will be 1 + if( (scale_denom > 1 ) & ( testdecoder > 1 ) ) + { + resize(*mat,*mat,Size(size.width/scale_denom,size.height/scale_denom)); + } + return hdrtype == LOAD_CVMAT ? (void*)matrix : hdrtype == LOAD_IMAGE ? (void*)image : (void*)mat; } @@ -411,6 +421,27 @@ Mat imread( const String& filename, int flags ) return img; } +/** + * Read an image and resize it + * + * This function merely calls the actual implementation above and returns itself. + * + * @param[in] filename File to load + * @param[in] flags Flags you wish to set. + * @param[in] scale_denom Scale value +*/ +Mat imread_reduced( const String& filename, int flags, int scale_denom ) +{ + /// create the basic container + Mat img; + + /// load the data + imread_( filename, flags, LOAD_MAT, &img, scale_denom ); + + /// return a reference to the data + return img; +} + /** * Read a multi-page image * diff --git a/modules/imgcodecs/src/precomp.hpp b/modules/imgcodecs/src/precomp.hpp index a5bbb41918..101f01577c 100644 --- a/modules/imgcodecs/src/precomp.hpp +++ b/modules/imgcodecs/src/precomp.hpp @@ -47,6 +47,7 @@ #include "opencv2/core/utility.hpp" #include "opencv2/core/private.hpp" +#include "opencv2/imgproc.hpp" #include "opencv2/imgproc/imgproc_c.h" #include "opencv2/imgcodecs/imgcodecs_c.h" diff --git a/modules/imgproc/include/opencv2/imgproc.hpp b/modules/imgproc/include/opencv2/imgproc.hpp index 5c18545bce..baa81cf0ba 100644 --- a/modules/imgproc/include/opencv2/imgproc.hpp +++ b/modules/imgproc/include/opencv2/imgproc.hpp @@ -1369,6 +1369,28 @@ CV_EXPORTS_W void Sobel( InputArray src, OutputArray dst, int ddepth, double scale = 1, double delta = 0, int borderType = BORDER_DEFAULT ); +/** @brief Calculates the first order image derivative in both x and y using a Sobel operator + +Equivalent to calling: + +@code +Sobel( src, dx, CV_16SC1, 1, 0, 3 ); +Sobel( src, dy, CV_16SC1, 0, 1, 3 ); +@endcode + +@param src input image. +@param dx output image with first-order derivative in x. +@param dy output image with first-order derivative in y. +@param ksize size of Sobel kernel. It must be 3. 
+@param borderType pixel extrapolation method, see cv::BorderTypes + +@sa Sobel + */ + +CV_EXPORTS_W void spatialGradient( InputArray src, OutputArray dx, + OutputArray dy, int ksize = 3, + int borderType = BORDER_DEFAULT ); + /** @brief Calculates the first x- or y- image derivative using Scharr operator. The function computes the first x- or y- spatial image derivative using the Scharr operator. The @@ -1689,6 +1711,7 @@ See the line detection example below: #include using namespace cv; + using namespace std; int main(int argc, char** argv) { @@ -1774,18 +1797,19 @@ Example: : #include using namespace cv; + using namespace std; int main(int argc, char** argv) { Mat img, gray; - if( argc != 2 && !(img=imread(argv[1], 1)).data) + if( argc != 2 || !(img=imread(argv[1], 1)).data) return -1; cvtColor(img, gray, COLOR_BGR2GRAY); // smooth it, otherwise a lot of false circles may be detected GaussianBlur( gray, gray, Size(9, 9), 2, 2 ); vector circles; HoughCircles(gray, circles, HOUGH_GRADIENT, - 2, gray->rows/4, 200, 100 ); + 2, gray.rows/4, 200, 100 ); for( size_t i = 0; i < circles.size(); i++ ) { Point center(cvRound(circles[i][0]), cvRound(circles[i][1])); @@ -1797,6 +1821,8 @@ Example: : } namedWindow( "circles", 1 ); imshow( "circles", img ); + + waitKey(0); return 0; } @endcode diff --git a/modules/imgproc/perf/perf_spatialgradient.cpp b/modules/imgproc/perf/perf_spatialgradient.cpp new file mode 100644 index 0000000000..0f9479abd9 --- /dev/null +++ b/modules/imgproc/perf/perf_spatialgradient.cpp @@ -0,0 +1,35 @@ +#include "perf_precomp.hpp" + +using namespace std; +using namespace cv; +using namespace perf; +using namespace testing; +using std::tr1::make_tuple; +using std::tr1::get; + +typedef std::tr1::tuple Size_Ksize_BorderType_t; +typedef perf::TestBaseWithParam Size_Ksize_BorderType; + +PERF_TEST_P( Size_Ksize_BorderType, spatialGradient, + Combine( + SZ_ALL_HD, + Values( 3 ), + Values( BORDER_DEFAULT, BORDER_REPLICATE ) + ) +) +{ + Size size = std::tr1::get<0>(GetParam()); + int ksize = std::tr1::get<1>(GetParam()); + int borderType = std::tr1::get<2>(GetParam()); + + Mat src(size, CV_8UC1); + Mat dx(size, CV_16SC1); + Mat dy(size, CV_16SC1); + + declare.in(src, WARMUP_RNG).out(dx, dy); + + TEST_CYCLE() spatialGradient(src, dx, dy, ksize, borderType); + + SANITY_CHECK(dx); + SANITY_CHECK(dy); +} diff --git a/modules/imgproc/src/accum.cpp b/modules/imgproc/src/accum.cpp index 23dc4576ba..8792e85d04 100644 --- a/modules/imgproc/src/accum.cpp +++ b/modules/imgproc/src/accum.cpp @@ -843,6 +843,70 @@ static bool ocl_accumulate( InputArray _src, InputArray _src2, InputOutputArray } +#if defined(HAVE_IPP) +namespace cv +{ +static bool ipp_accumulate(InputArray _src, InputOutputArray _dst, InputArray _mask) +{ + int stype = _src.type(), sdepth = CV_MAT_DEPTH(stype), scn = CV_MAT_CN(stype); + int dtype = _dst.type(), ddepth = CV_MAT_DEPTH(dtype); + + Mat src = _src.getMat(), dst = _dst.getMat(), mask = _mask.getMat(); + + if (src.dims <= 2 || (src.isContinuous() && dst.isContinuous() && (mask.empty() || mask.isContinuous()))) + { + typedef IppStatus (CV_STDCALL * ippiAdd)(const void * pSrc, int srcStep, Ipp32f * pSrcDst, int srcdstStep, IppiSize roiSize); + typedef IppStatus (CV_STDCALL * ippiAddMask)(const void * pSrc, int srcStep, const Ipp8u * pMask, int maskStep, Ipp32f * pSrcDst, + int srcDstStep, IppiSize roiSize); + ippiAdd ippFunc = 0; + ippiAddMask ippFuncMask = 0; + + if (mask.empty()) + { + CV_SUPPRESS_DEPRECATED_START + ippFunc = sdepth == CV_8U && ddepth == CV_32F ? 
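A minimal usage sketch for the new spatialGradient() declared above ("input.png" is a hypothetical file name):

@code
#include <opencv2/imgproc.hpp>
#include <opencv2/imgcodecs.hpp>

int main()
{
    cv::Mat gray = cv::imread("input.png", cv::IMREAD_GRAYSCALE);
    if (gray.empty())
        return 1;

    cv::Mat dx, dy;
    cv::spatialGradient(gray, dx, dy);   // both CV_16SC1; defaults: ksize = 3, BORDER_DEFAULT

    // Combine the two derivatives; convert to float first.
    cv::Mat fx, fy, mag;
    dx.convertTo(fx, CV_32F);
    dy.convertTo(fy, CV_32F);
    cv::magnitude(fx, fy, mag);
    return 0;
}
@endcode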
(ippiAdd)ippiAdd_8u32f_C1IR : + sdepth == CV_16U && ddepth == CV_32F ? (ippiAdd)ippiAdd_16u32f_C1IR : + sdepth == CV_32F && ddepth == CV_32F ? (ippiAdd)ippiAdd_32f_C1IR : 0; + CV_SUPPRESS_DEPRECATED_END + } + else if (scn == 1) + { + ippFuncMask = sdepth == CV_8U && ddepth == CV_32F ? (ippiAddMask)ippiAdd_8u32f_C1IMR : + sdepth == CV_16U && ddepth == CV_32F ? (ippiAddMask)ippiAdd_16u32f_C1IMR : + sdepth == CV_32F && ddepth == CV_32F ? (ippiAddMask)ippiAdd_32f_C1IMR : 0; + } + + if (ippFunc || ippFuncMask) + { + IppStatus status = ippStsErr; + + Size size = src.size(); + int srcstep = (int)src.step, dststep = (int)dst.step, maskstep = (int)mask.step; + if (src.isContinuous() && dst.isContinuous() && mask.isContinuous()) + { + srcstep = static_cast(src.total() * src.elemSize()); + dststep = static_cast(dst.total() * dst.elemSize()); + maskstep = static_cast(mask.total() * mask.elemSize()); + size.width = static_cast(src.total()); + size.height = 1; + } + size.width *= scn; + + if (ippFunc) + status = ippFunc(src.ptr(), srcstep, dst.ptr(), dststep, ippiSize(size.width, size.height)); + else if(ippFuncMask) + status = ippFuncMask(src.ptr(), srcstep, mask.ptr(), maskstep, + dst.ptr(), dststep, ippiSize(size.width, size.height)); + + if (status >= 0) + return true; + } + } + return false; +} +} +#endif + void cv::accumulate( InputArray _src, InputOutputArray _dst, InputArray _mask ) { int stype = _src.type(), sdepth = CV_MAT_DEPTH(stype), scn = CV_MAT_CN(stype); @@ -854,66 +918,11 @@ void cv::accumulate( InputArray _src, InputOutputArray _dst, InputArray _mask ) CV_OCL_RUN(_src.dims() <= 2 && _dst.isUMat(), ocl_accumulate(_src, noArray(), _dst, 0.0, _mask, ACCUMULATE)) + CV_IPP_RUN((_src.dims() <= 2 || (_src.isContinuous() && _dst.isContinuous() && (_mask.empty() || _mask.isContinuous()))), + ipp_accumulate(_src, _dst, _mask)); + Mat src = _src.getMat(), dst = _dst.getMat(), mask = _mask.getMat(); -#if defined HAVE_IPP - CV_IPP_CHECK() - { - if (src.dims <= 2 || (src.isContinuous() && dst.isContinuous() && (mask.empty() || mask.isContinuous()))) - { - typedef IppStatus (CV_STDCALL * ippiAdd)(const void * pSrc, int srcStep, Ipp32f * pSrcDst, int srcdstStep, IppiSize roiSize); - typedef IppStatus (CV_STDCALL * ippiAddMask)(const void * pSrc, int srcStep, const Ipp8u * pMask, int maskStep, Ipp32f * pSrcDst, - int srcDstStep, IppiSize roiSize); - ippiAdd ippFunc = 0; - ippiAddMask ippFuncMask = 0; - - if (mask.empty()) - { - CV_SUPPRESS_DEPRECATED_START - ippFunc = sdepth == CV_8U && ddepth == CV_32F ? (ippiAdd)ippiAdd_8u32f_C1IR : - sdepth == CV_16U && ddepth == CV_32F ? (ippiAdd)ippiAdd_16u32f_C1IR : - sdepth == CV_32F && ddepth == CV_32F ? (ippiAdd)ippiAdd_32f_C1IR : 0; - CV_SUPPRESS_DEPRECATED_END - } - else if (scn == 1) - { - ippFuncMask = sdepth == CV_8U && ddepth == CV_32F ? (ippiAddMask)ippiAdd_8u32f_C1IMR : - sdepth == CV_16U && ddepth == CV_32F ? (ippiAddMask)ippiAdd_16u32f_C1IMR : - sdepth == CV_32F && ddepth == CV_32F ? 
(ippiAddMask)ippiAdd_32f_C1IMR : 0; - } - - if (ippFunc || ippFuncMask) - { - IppStatus status = ippStsNoErr; - - Size size = src.size(); - int srcstep = (int)src.step, dststep = (int)dst.step, maskstep = (int)mask.step; - if (src.isContinuous() && dst.isContinuous() && mask.isContinuous()) - { - srcstep = static_cast(src.total() * src.elemSize()); - dststep = static_cast(dst.total() * dst.elemSize()); - maskstep = static_cast(mask.total() * mask.elemSize()); - size.width = static_cast(src.total()); - size.height = 1; - } - size.width *= scn; - - if (mask.empty()) - status = ippFunc(src.ptr(), srcstep, dst.ptr(), dststep, ippiSize(size.width, size.height)); - else - status = ippFuncMask(src.ptr(), srcstep, mask.ptr(), maskstep, - dst.ptr(), dststep, ippiSize(size.width, size.height)); - - if (status >= 0) - { - CV_IMPL_ADD(CV_IMPL_IPP); - return; - } - setIppErrorStatus(); - } - } - } -#endif int fidx = getAccTabIdx(sdepth, ddepth); AccFunc func = fidx >= 0 ? accTab[fidx] : 0; @@ -928,6 +937,68 @@ void cv::accumulate( InputArray _src, InputOutputArray _dst, InputArray _mask ) func(ptrs[0], ptrs[1], ptrs[2], len, scn); } +#if defined(HAVE_IPP) +namespace cv +{ +static bool ipp_accumulate_square(InputArray _src, InputOutputArray _dst, InputArray _mask) +{ + int stype = _src.type(), sdepth = CV_MAT_DEPTH(stype), scn = CV_MAT_CN(stype); + int dtype = _dst.type(), ddepth = CV_MAT_DEPTH(dtype); + + Mat src = _src.getMat(), dst = _dst.getMat(), mask = _mask.getMat(); + + if (src.dims <= 2 || (src.isContinuous() && dst.isContinuous() && (mask.empty() || mask.isContinuous()))) + { + typedef IppStatus (CV_STDCALL * ippiAddSquare)(const void * pSrc, int srcStep, Ipp32f * pSrcDst, int srcdstStep, IppiSize roiSize); + typedef IppStatus (CV_STDCALL * ippiAddSquareMask)(const void * pSrc, int srcStep, const Ipp8u * pMask, int maskStep, Ipp32f * pSrcDst, + int srcDstStep, IppiSize roiSize); + ippiAddSquare ippFunc = 0; + ippiAddSquareMask ippFuncMask = 0; + + if (mask.empty()) + { + ippFunc = sdepth == CV_8U && ddepth == CV_32F ? (ippiAddSquare)ippiAddSquare_8u32f_C1IR : + sdepth == CV_16U && ddepth == CV_32F ? (ippiAddSquare)ippiAddSquare_16u32f_C1IR : + sdepth == CV_32F && ddepth == CV_32F ? (ippiAddSquare)ippiAddSquare_32f_C1IR : 0; + } + else if (scn == 1) + { + ippFuncMask = sdepth == CV_8U && ddepth == CV_32F ? (ippiAddSquareMask)ippiAddSquare_8u32f_C1IMR : + sdepth == CV_16U && ddepth == CV_32F ? (ippiAddSquareMask)ippiAddSquare_16u32f_C1IMR : + sdepth == CV_32F && ddepth == CV_32F ? 
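The accumulate* rewrites above all follow the same shape: the previously inline IPP code moves into a static ipp_* helper that returns true on success and false to fall back, and the helper is wired in with CV_IPP_RUN ahead of the generic implementation. A simplified in-tree sketch of that pattern (myFilter/ipp_fastPath are illustrative names; as I read the macro, CV_IPP_RUN returns from the enclosing function when the guarded call succeeds, and is a no-op in non-IPP builds):

@code
#include "precomp.hpp"   // in-tree: brings in CV_IPP_RUN and the IPP declarations

static bool ipp_fastPath(const cv::Mat& src, cv::Mat& dst)
{
    // try the IPP primitive here; on any unsupported layout just report failure
    (void)src; (void)dst;
    return false;                        // false -> caller continues with the generic code
}

void myFilter(const cv::Mat& src, cv::Mat& dst)
{
    CV_IPP_RUN(src.isContinuous(), ipp_fastPath(src, dst));

    // generic implementation, reached only when the IPP helper was
    // skipped (condition false / IPP disabled) or returned false
    src.copyTo(dst);
}
@endcode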
(ippiAddSquareMask)ippiAddSquare_32f_C1IMR : 0; + } + + if (ippFunc || ippFuncMask) + { + IppStatus status = ippStsErr; + + Size size = src.size(); + int srcstep = (int)src.step, dststep = (int)dst.step, maskstep = (int)mask.step; + if (src.isContinuous() && dst.isContinuous() && mask.isContinuous()) + { + srcstep = static_cast(src.total() * src.elemSize()); + dststep = static_cast(dst.total() * dst.elemSize()); + maskstep = static_cast(mask.total() * mask.elemSize()); + size.width = static_cast(src.total()); + size.height = 1; + } + size.width *= scn; + + if (ippFunc) + status = ippFunc(src.ptr(), srcstep, dst.ptr(), dststep, ippiSize(size.width, size.height)); + else if(ippFuncMask) + status = ippFuncMask(src.ptr(), srcstep, mask.ptr(), maskstep, + dst.ptr(), dststep, ippiSize(size.width, size.height)); + + if (status >= 0) + return true; + } + } + return false; +} +} +#endif + void cv::accumulateSquare( InputArray _src, InputOutputArray _dst, InputArray _mask ) { int stype = _src.type(), sdepth = CV_MAT_DEPTH(stype), scn = CV_MAT_CN(stype); @@ -939,65 +1010,11 @@ void cv::accumulateSquare( InputArray _src, InputOutputArray _dst, InputArray _m CV_OCL_RUN(_src.dims() <= 2 && _dst.isUMat(), ocl_accumulate(_src, noArray(), _dst, 0.0, _mask, ACCUMULATE_SQUARE)) + CV_IPP_RUN((_src.dims() <= 2 || (_src.isContinuous() && _dst.isContinuous() && (_mask.empty() || _mask.isContinuous()))), + ipp_accumulate_square(_src, _dst, _mask)); + Mat src = _src.getMat(), dst = _dst.getMat(), mask = _mask.getMat(); -#if defined(HAVE_IPP) - CV_IPP_CHECK() - { - if (src.dims <= 2 || (src.isContinuous() && dst.isContinuous() && (mask.empty() || mask.isContinuous()))) - { - typedef IppStatus (CV_STDCALL * ippiAddSquare)(const void * pSrc, int srcStep, Ipp32f * pSrcDst, int srcdstStep, IppiSize roiSize); - typedef IppStatus (CV_STDCALL * ippiAddSquareMask)(const void * pSrc, int srcStep, const Ipp8u * pMask, int maskStep, Ipp32f * pSrcDst, - int srcDstStep, IppiSize roiSize); - ippiAddSquare ippFunc = 0; - ippiAddSquareMask ippFuncMask = 0; - - if (mask.empty()) - { - ippFunc = sdepth == CV_8U && ddepth == CV_32F ? (ippiAddSquare)ippiAddSquare_8u32f_C1IR : - sdepth == CV_16U && ddepth == CV_32F ? (ippiAddSquare)ippiAddSquare_16u32f_C1IR : - sdepth == CV_32F && ddepth == CV_32F ? (ippiAddSquare)ippiAddSquare_32f_C1IR : 0; - } - else if (scn == 1) - { - ippFuncMask = sdepth == CV_8U && ddepth == CV_32F ? (ippiAddSquareMask)ippiAddSquare_8u32f_C1IMR : - sdepth == CV_16U && ddepth == CV_32F ? (ippiAddSquareMask)ippiAddSquare_16u32f_C1IMR : - sdepth == CV_32F && ddepth == CV_32F ? 
(ippiAddSquareMask)ippiAddSquare_32f_C1IMR : 0; - } - - if (ippFunc || ippFuncMask) - { - IppStatus status = ippStsNoErr; - - Size size = src.size(); - int srcstep = (int)src.step, dststep = (int)dst.step, maskstep = (int)mask.step; - if (src.isContinuous() && dst.isContinuous() && mask.isContinuous()) - { - srcstep = static_cast(src.total() * src.elemSize()); - dststep = static_cast(dst.total() * dst.elemSize()); - maskstep = static_cast(mask.total() * mask.elemSize()); - size.width = static_cast(src.total()); - size.height = 1; - } - size.width *= scn; - - if (mask.empty()) - status = ippFunc(src.ptr(), srcstep, dst.ptr(), dststep, ippiSize(size.width, size.height)); - else - status = ippFuncMask(src.ptr(), srcstep, mask.ptr(), maskstep, - dst.ptr(), dststep, ippiSize(size.width, size.height)); - - if (status >= 0) - { - CV_IMPL_ADD(CV_IMPL_IPP); - return; - } - setIppErrorStatus(); - } - } - } -#endif - int fidx = getAccTabIdx(sdepth, ddepth); AccFunc func = fidx >= 0 ? accSqrTab[fidx] : 0; CV_Assert( func != 0 ); @@ -1011,6 +1028,74 @@ void cv::accumulateSquare( InputArray _src, InputOutputArray _dst, InputArray _m func(ptrs[0], ptrs[1], ptrs[2], len, scn); } +#if defined(HAVE_IPP) +namespace cv +{ +static bool ipp_accumulate_product(InputArray _src1, InputArray _src2, + InputOutputArray _dst, InputArray _mask) +{ + int stype = _src1.type(), sdepth = CV_MAT_DEPTH(stype), scn = CV_MAT_CN(stype); + int dtype = _dst.type(), ddepth = CV_MAT_DEPTH(dtype); + + Mat src1 = _src1.getMat(), src2 = _src2.getMat(), dst = _dst.getMat(), mask = _mask.getMat(); + + if (src1.dims <= 2 || (src1.isContinuous() && src2.isContinuous() && dst.isContinuous())) + { + typedef IppStatus (CV_STDCALL * ippiAddProduct)(const void * pSrc1, int src1Step, const void * pSrc2, + int src2Step, Ipp32f * pSrcDst, int srcDstStep, IppiSize roiSize); + typedef IppStatus (CV_STDCALL * ippiAddProductMask)(const void * pSrc1, int src1Step, const void * pSrc2, int src2Step, + const Ipp8u * pMask, int maskStep, Ipp32f * pSrcDst, int srcDstStep, IppiSize roiSize); + ippiAddProduct ippFunc = 0; + ippiAddProductMask ippFuncMask = 0; + + if (mask.empty()) + { + ippFunc = sdepth == CV_8U && ddepth == CV_32F ? (ippiAddProduct)ippiAddProduct_8u32f_C1IR : + sdepth == CV_16U && ddepth == CV_32F ? (ippiAddProduct)ippiAddProduct_16u32f_C1IR : + sdepth == CV_32F && ddepth == CV_32F ? (ippiAddProduct)ippiAddProduct_32f_C1IR : 0; + } + else if (scn == 1) + { + ippFuncMask = sdepth == CV_8U && ddepth == CV_32F ? (ippiAddProductMask)ippiAddProduct_8u32f_C1IMR : + sdepth == CV_16U && ddepth == CV_32F ? (ippiAddProductMask)ippiAddProduct_16u32f_C1IMR : + sdepth == CV_32F && ddepth == CV_32F ? 
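One detail the helpers above share: when all the Mats involved are continuous, the whole buffer is handed to IPP as a single 1 x total() ROI with step = total()*elemSize(), which is also what lets them accept arrays with more than two dimensions. In isolation the arithmetic looks like this (the callers additionally multiply the width by the channel count because the IPP functions used are single-channel):

@code
#include <opencv2/core.hpp>

void roiFromContinuous(const cv::Mat& m)
{
    CV_Assert(m.isContinuous());
    int roiWidth  = static_cast<int>(m.total());                     // every pixel in one "row"
    int roiHeight = 1;
    int step      = static_cast<int>(m.total() * m.elemSize());      // the row spans the whole buffer
    (void)roiWidth; (void)roiHeight; (void)step;                      // pass these to the IPP primitive
}
@endcode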
(ippiAddProductMask)ippiAddProduct_32f_C1IMR : 0; + } + + if (ippFunc || ippFuncMask) + { + IppStatus status = ippStsErr; + + Size size = src1.size(); + int src1step = (int)src1.step, src2step = (int)src2.step, dststep = (int)dst.step, maskstep = (int)mask.step; + if (src1.isContinuous() && src2.isContinuous() && dst.isContinuous() && mask.isContinuous()) + { + src1step = static_cast(src1.total() * src1.elemSize()); + src2step = static_cast(src2.total() * src2.elemSize()); + dststep = static_cast(dst.total() * dst.elemSize()); + maskstep = static_cast(mask.total() * mask.elemSize()); + size.width = static_cast(src1.total()); + size.height = 1; + } + size.width *= scn; + + if (ippFunc) + status = ippFunc(src1.ptr(), src1step, src2.ptr(), src2step, dst.ptr(), + dststep, ippiSize(size.width, size.height)); + else if(ippFuncMask) + status = ippFuncMask(src1.ptr(), src1step, src2.ptr(), src2step, mask.ptr(), maskstep, + dst.ptr(), dststep, ippiSize(size.width, size.height)); + + if (status >= 0) + return true; + } + } + return false; +} +} +#endif + + + void cv::accumulateProduct( InputArray _src1, InputArray _src2, InputOutputArray _dst, InputArray _mask ) { @@ -1024,68 +1109,11 @@ void cv::accumulateProduct( InputArray _src1, InputArray _src2, CV_OCL_RUN(_src1.dims() <= 2 && _dst.isUMat(), ocl_accumulate(_src1, _src2, _dst, 0.0, _mask, ACCUMULATE_PRODUCT)) + CV_IPP_RUN( (_src1.dims() <= 2 || (_src1.isContinuous() && _src2.isContinuous() && _dst.isContinuous())), + ipp_accumulate_product(_src1, _src2, _dst, _mask)); + Mat src1 = _src1.getMat(), src2 = _src2.getMat(), dst = _dst.getMat(), mask = _mask.getMat(); -#if defined(HAVE_IPP) - CV_IPP_CHECK() - { - if (src1.dims <= 2 || (src1.isContinuous() && src2.isContinuous() && dst.isContinuous())) - { - typedef IppStatus (CV_STDCALL * ippiAddProduct)(const void * pSrc1, int src1Step, const void * pSrc2, - int src2Step, Ipp32f * pSrcDst, int srcDstStep, IppiSize roiSize); - typedef IppStatus (CV_STDCALL * ippiAddProductMask)(const void * pSrc1, int src1Step, const void * pSrc2, int src2Step, - const Ipp8u * pMask, int maskStep, Ipp32f * pSrcDst, int srcDstStep, IppiSize roiSize); - ippiAddProduct ippFunc = 0; - ippiAddProductMask ippFuncMask = 0; - - if (mask.empty()) - { - ippFunc = sdepth == CV_8U && ddepth == CV_32F ? (ippiAddProduct)ippiAddProduct_8u32f_C1IR : - sdepth == CV_16U && ddepth == CV_32F ? (ippiAddProduct)ippiAddProduct_16u32f_C1IR : - sdepth == CV_32F && ddepth == CV_32F ? (ippiAddProduct)ippiAddProduct_32f_C1IR : 0; - } - else if (scn == 1) - { - ippFuncMask = sdepth == CV_8U && ddepth == CV_32F ? (ippiAddProductMask)ippiAddProduct_8u32f_C1IMR : - sdepth == CV_16U && ddepth == CV_32F ? (ippiAddProductMask)ippiAddProduct_16u32f_C1IMR : - sdepth == CV_32F && ddepth == CV_32F ? 
(ippiAddProductMask)ippiAddProduct_32f_C1IMR : 0; - } - - if (ippFunc || ippFuncMask) - { - IppStatus status = ippStsNoErr; - - Size size = src1.size(); - int src1step = (int)src1.step, src2step = (int)src2.step, dststep = (int)dst.step, maskstep = (int)mask.step; - if (src1.isContinuous() && src2.isContinuous() && dst.isContinuous() && mask.isContinuous()) - { - src1step = static_cast(src1.total() * src1.elemSize()); - src2step = static_cast(src2.total() * src2.elemSize()); - dststep = static_cast(dst.total() * dst.elemSize()); - maskstep = static_cast(mask.total() * mask.elemSize()); - size.width = static_cast(src1.total()); - size.height = 1; - } - size.width *= scn; - - if (mask.empty()) - status = ippFunc(src1.ptr(), src1step, src2.ptr(), src2step, dst.ptr(), - dststep, ippiSize(size.width, size.height)); - else - status = ippFuncMask(src1.ptr(), src1step, src2.ptr(), src2step, mask.ptr(), maskstep, - dst.ptr(), dststep, ippiSize(size.width, size.height)); - - if (status >= 0) - { - CV_IMPL_ADD(CV_IMPL_IPP); - return; - } - setIppErrorStatus(); - } - } - } -#endif - int fidx = getAccTabIdx(sdepth, ddepth); AccProdFunc func = fidx >= 0 ? accProdTab[fidx] : 0; CV_Assert( func != 0 ); @@ -1099,6 +1127,71 @@ void cv::accumulateProduct( InputArray _src1, InputArray _src2, func(ptrs[0], ptrs[1], ptrs[2], ptrs[3], len, scn); } +#if defined(HAVE_IPP) +namespace cv +{ +static bool ipp_accumulate_weighted( InputArray _src, InputOutputArray _dst, + double alpha, InputArray _mask ) +{ + int stype = _src.type(), sdepth = CV_MAT_DEPTH(stype), scn = CV_MAT_CN(stype); + int dtype = _dst.type(), ddepth = CV_MAT_DEPTH(dtype); + + Mat src = _src.getMat(), dst = _dst.getMat(), mask = _mask.getMat(); + + if (src.dims <= 2 || (src.isContinuous() && dst.isContinuous() && mask.isContinuous())) + { + typedef IppStatus (CV_STDCALL * ippiAddWeighted)(const void * pSrc, int srcStep, Ipp32f * pSrcDst, int srcdstStep, + IppiSize roiSize, Ipp32f alpha); + typedef IppStatus (CV_STDCALL * ippiAddWeightedMask)(const void * pSrc, int srcStep, const Ipp8u * pMask, + int maskStep, Ipp32f * pSrcDst, + int srcDstStep, IppiSize roiSize, Ipp32f alpha); + ippiAddWeighted ippFunc = 0; + ippiAddWeightedMask ippFuncMask = 0; + + if (mask.empty()) + { + ippFunc = sdepth == CV_8U && ddepth == CV_32F ? (ippiAddWeighted)ippiAddWeighted_8u32f_C1IR : + sdepth == CV_16U && ddepth == CV_32F ? (ippiAddWeighted)ippiAddWeighted_16u32f_C1IR : + sdepth == CV_32F && ddepth == CV_32F ? (ippiAddWeighted)ippiAddWeighted_32f_C1IR : 0; + } + else if (scn == 1) + { + ippFuncMask = sdepth == CV_8U && ddepth == CV_32F ? (ippiAddWeightedMask)ippiAddWeighted_8u32f_C1IMR : + sdepth == CV_16U && ddepth == CV_32F ? (ippiAddWeightedMask)ippiAddWeighted_16u32f_C1IMR : + sdepth == CV_32F && ddepth == CV_32F ? 
(ippiAddWeightedMask)ippiAddWeighted_32f_C1IMR : 0; + } + + if (ippFunc || ippFuncMask) + { + IppStatus status = ippStsErr; + + Size size = src.size(); + int srcstep = (int)src.step, dststep = (int)dst.step, maskstep = (int)mask.step; + if (src.isContinuous() && dst.isContinuous() && mask.isContinuous()) + { + srcstep = static_cast(src.total() * src.elemSize()); + dststep = static_cast(dst.total() * dst.elemSize()); + maskstep = static_cast(mask.total() * mask.elemSize()); + size.width = static_cast((int)src.total()); + size.height = 1; + } + size.width *= scn; + + if (ippFunc) + status = ippFunc(src.ptr(), srcstep, dst.ptr(), dststep, ippiSize(size.width, size.height), (Ipp32f)alpha); + else if(ippFuncMask) + status = ippFuncMask(src.ptr(), srcstep, mask.ptr(), maskstep, + dst.ptr(), dststep, ippiSize(size.width, size.height), (Ipp32f)alpha); + + if (status >= 0) + return true; + } + } + return false; +} +} +#endif + void cv::accumulateWeighted( InputArray _src, InputOutputArray _dst, double alpha, InputArray _mask ) { @@ -1111,66 +1204,11 @@ void cv::accumulateWeighted( InputArray _src, InputOutputArray _dst, CV_OCL_RUN(_src.dims() <= 2 && _dst.isUMat(), ocl_accumulate(_src, noArray(), _dst, alpha, _mask, ACCUMULATE_WEIGHTED)) + CV_IPP_RUN((_src.dims() <= 2 || (_src.isContinuous() && _dst.isContinuous() && _mask.isContinuous())), ipp_accumulate_weighted(_src, _dst, alpha, _mask)); + + Mat src = _src.getMat(), dst = _dst.getMat(), mask = _mask.getMat(); -#if defined(HAVE_IPP) - CV_IPP_CHECK() - { - if (src.dims <= 2 || (src.isContinuous() && dst.isContinuous() && mask.isContinuous())) - { - typedef IppStatus (CV_STDCALL * ippiAddWeighted)(const void * pSrc, int srcStep, Ipp32f * pSrcDst, int srcdstStep, - IppiSize roiSize, Ipp32f alpha); - typedef IppStatus (CV_STDCALL * ippiAddWeightedMask)(const void * pSrc, int srcStep, const Ipp8u * pMask, - int maskStep, Ipp32f * pSrcDst, - int srcDstStep, IppiSize roiSize, Ipp32f alpha); - ippiAddWeighted ippFunc = 0; - ippiAddWeightedMask ippFuncMask = 0; - - if (mask.empty()) - { - ippFunc = sdepth == CV_8U && ddepth == CV_32F ? (ippiAddWeighted)ippiAddWeighted_8u32f_C1IR : - sdepth == CV_16U && ddepth == CV_32F ? (ippiAddWeighted)ippiAddWeighted_16u32f_C1IR : - sdepth == CV_32F && ddepth == CV_32F ? (ippiAddWeighted)ippiAddWeighted_32f_C1IR : 0; - } - else if (scn == 1) - { - ippFuncMask = sdepth == CV_8U && ddepth == CV_32F ? (ippiAddWeightedMask)ippiAddWeighted_8u32f_C1IMR : - sdepth == CV_16U && ddepth == CV_32F ? (ippiAddWeightedMask)ippiAddWeighted_16u32f_C1IMR : - sdepth == CV_32F && ddepth == CV_32F ? 
(ippiAddWeightedMask)ippiAddWeighted_32f_C1IMR : 0; - } - - if (ippFunc || ippFuncMask) - { - IppStatus status = ippStsNoErr; - - Size size = src.size(); - int srcstep = (int)src.step, dststep = (int)dst.step, maskstep = (int)mask.step; - if (src.isContinuous() && dst.isContinuous() && mask.isContinuous()) - { - srcstep = static_cast(src.total() * src.elemSize()); - dststep = static_cast(dst.total() * dst.elemSize()); - maskstep = static_cast(mask.total() * mask.elemSize()); - size.width = static_cast((int)src.total()); - size.height = 1; - } - size.width *= scn; - - if (mask.empty()) - status = ippFunc(src.ptr(), srcstep, dst.ptr(), dststep, ippiSize(size.width, size.height), (Ipp32f)alpha); - else - status = ippFuncMask(src.ptr(), srcstep, mask.ptr(), maskstep, - dst.ptr(), dststep, ippiSize(size.width, size.height), (Ipp32f)alpha); - - if (status >= 0) - { - CV_IMPL_ADD(CV_IMPL_IPP); - return; - } - setIppErrorStatus(); - } - } - } -#endif int fidx = getAccTabIdx(sdepth, ddepth); AccWFunc func = fidx >= 0 ? accWTab[fidx] : 0; diff --git a/modules/imgproc/src/color.cpp b/modules/imgproc/src/color.cpp index f8bc095891..33ab41f4f8 100644 --- a/modules/imgproc/src/color.cpp +++ b/modules/imgproc/src/color.cpp @@ -1674,7 +1674,9 @@ struct RGB2Gray bool haveSIMD; }; -#else +#endif // CV_SSE2 + +#if !CV_NEON && !CV_SSE4_1 template<> struct RGB2Gray { @@ -1698,7 +1700,7 @@ template<> struct RGB2Gray int coeffs[3]; }; -#endif +#endif // !CV_NEON && !CV_SSE4_1 ///////////////////////////////////// RGB <-> YCrCb ////////////////////////////////////// @@ -7323,7 +7325,597 @@ static bool ocl_cvtColor( InputArray _src, OutputArray _dst, int code, int dcn ) #endif -}//namespace cv +#ifdef HAVE_IPP +static bool ipp_cvtColor( Mat &src, OutputArray _dst, int code, int dcn ) +{ + int stype = src.type(); + int scn = CV_MAT_CN(stype), depth = CV_MAT_DEPTH(stype); + + Mat dst; + Size sz = src.size(); + + switch( code ) + { +#if IPP_VERSION_MAJOR >= 7 + case CV_BGR2BGRA: case CV_RGB2BGRA: case CV_BGRA2BGR: + case CV_RGBA2BGR: case CV_RGB2BGR: case CV_BGRA2RGBA: + CV_Assert( scn == 3 || scn == 4 ); + dcn = code == CV_BGR2BGRA || code == CV_RGB2BGRA || code == CV_BGRA2RGBA ? 
4 : 3; + _dst.create( sz, CV_MAKETYPE(depth, dcn)); + dst = _dst.getMat(); + + if( code == CV_BGR2BGRA) + { + if ( CvtColorIPPLoop(src, dst, IPPReorderFunctor(ippiSwapChannelsC3C4RTab[depth], 0, 1, 2)) ) + return true; + } + else if( code == CV_BGRA2BGR ) + { + if ( CvtColorIPPLoop(src, dst, IPPGeneralFunctor(ippiCopyAC4C3RTab[depth])) ) + return true; + } + else if( code == CV_BGR2RGBA ) + { + if( CvtColorIPPLoop(src, dst, IPPReorderFunctor(ippiSwapChannelsC3C4RTab[depth], 2, 1, 0)) ) + return true; + } + else if( code == CV_RGBA2BGR ) + { + if( CvtColorIPPLoop(src, dst, IPPReorderFunctor(ippiSwapChannelsC4C3RTab[depth], 2, 1, 0)) ) + return true; + } + else if( code == CV_RGB2BGR ) + { + if( CvtColorIPPLoopCopy(src, dst, IPPReorderFunctor(ippiSwapChannelsC3RTab[depth], 2, 1, 0)) ) + return true; + } +#if IPP_VERSION_X100 >= 801 + else if( code == CV_RGBA2BGRA ) + { + if( CvtColorIPPLoopCopy(src, dst, IPPReorderFunctor(ippiSwapChannelsC4RTab[depth], 2, 1, 0)) ) + return true; + } +#endif + return false; +#endif + +#if 0 // breaks OCL accuracy tests + case CV_BGR2BGR565: case CV_BGR2BGR555: case CV_RGB2BGR565: case CV_RGB2BGR555: + case CV_BGRA2BGR565: case CV_BGRA2BGR555: case CV_RGBA2BGR565: case CV_RGBA2BGR555: + CV_Assert( (scn == 3 || scn == 4) && depth == CV_8U ); + _dst.create(sz, CV_8UC2); + dst = _dst.getMat(); + + CV_SUPPRESS_DEPRECATED_START + + if (code == CV_BGR2BGR565 && scn == 3) + { + if (CvtColorIPPLoop(src, dst, IPPGeneralFunctor((ippiGeneralFunc)ippiBGRToBGR565_8u16u_C3R))) + return true; + } + else if (code == CV_BGRA2BGR565 && scn == 4) + { + if (CvtColorIPPLoopCopy(src, dst, + IPPReorderGeneralFunctor(ippiSwapChannelsC4C3RTab[depth], + (ippiGeneralFunc)ippiBGRToBGR565_8u16u_C3R, 0, 1, 2, depth))) + return true; + } + else if (code == CV_RGB2BGR565 && scn == 3) + { + if( CvtColorIPPLoopCopy(src, dst, IPPReorderGeneralFunctor(ippiSwapChannelsC3RTab[depth], + (ippiGeneralFunc)ippiBGRToBGR565_8u16u_C3R, 2, 1, 0, depth)) ) + return true; + } + else if (code == CV_RGBA2BGR565 && scn == 4) + { + if( CvtColorIPPLoopCopy(src, dst, IPPReorderGeneralFunctor(ippiSwapChannelsC4C3RTab[depth], + (ippiGeneralFunc)ippiBGRToBGR565_8u16u_C3R, 2, 1, 0, depth)) ) + return true; + } + CV_SUPPRESS_DEPRECATED_END + return false; +#endif + + case CV_BGR5652BGR: case CV_BGR5552BGR: case CV_BGR5652RGB: case CV_BGR5552RGB: + case CV_BGR5652BGRA: case CV_BGR5552BGRA: case CV_BGR5652RGBA: case CV_BGR5552RGBA: + if(dcn <= 0) dcn = (code==CV_BGR5652BGRA || code==CV_BGR5552BGRA || code==CV_BGR5652RGBA || code==CV_BGR5552RGBA) ? 
4 : 3; + CV_Assert( (dcn == 3 || dcn == 4) && scn == 2 && depth == CV_8U ); + _dst.create(sz, CV_MAKETYPE(depth, dcn)); + dst = _dst.getMat(); + + CV_SUPPRESS_DEPRECATED_START + if (code == CV_BGR5652BGR && dcn == 3) + { + if (CvtColorIPPLoop(src, dst, IPPGeneralFunctor((ippiGeneralFunc)ippiBGR565ToBGR_16u8u_C3R))) + return true; + } + else if (code == CV_BGR5652RGB && dcn == 3) + { + if (CvtColorIPPLoop(src, dst, IPPGeneralReorderFunctor((ippiGeneralFunc)ippiBGR565ToBGR_16u8u_C3R, + ippiSwapChannelsC3RTab[depth], 2, 1, 0, depth))) + return true; + } + else if (code == CV_BGR5652BGRA && dcn == 4) + { + if (CvtColorIPPLoop(src, dst, IPPGeneralReorderFunctor((ippiGeneralFunc)ippiBGR565ToBGR_16u8u_C3R, + ippiSwapChannelsC3C4RTab[depth], 0, 1, 2, depth))) + return true; + } + else if (code == CV_BGR5652RGBA && dcn == 4) + { + if (CvtColorIPPLoop(src, dst, IPPGeneralReorderFunctor((ippiGeneralFunc)ippiBGR565ToBGR_16u8u_C3R, + ippiSwapChannelsC3C4RTab[depth], 2, 1, 0, depth))) + return true; + } + CV_SUPPRESS_DEPRECATED_END + return false; + +#if IPP_VERSION_MAJOR >= 7 + case CV_BGR2GRAY: case CV_BGRA2GRAY: case CV_RGB2GRAY: case CV_RGBA2GRAY: + CV_Assert( scn == 3 || scn == 4 ); + _dst.create(sz, CV_MAKETYPE(depth, 1)); + dst = _dst.getMat(); + + if( code == CV_BGR2GRAY && depth == CV_32F ) + { + if( CvtColorIPPLoop(src, dst, IPPColor2GrayFunctor(ippiColor2GrayC3Tab[depth])) ) + return true; + } + else if( code == CV_RGB2GRAY && depth == CV_32F ) + { + if( CvtColorIPPLoop(src, dst, IPPGeneralFunctor(ippiRGB2GrayC3Tab[depth])) ) + return true; + } + else if( code == CV_BGRA2GRAY && depth == CV_32F ) + { + if( CvtColorIPPLoop(src, dst, IPPColor2GrayFunctor(ippiColor2GrayC4Tab[depth])) ) + return true; + } + else if( code == CV_RGBA2GRAY && depth == CV_32F ) + { + if( CvtColorIPPLoop(src, dst, IPPGeneralFunctor(ippiRGB2GrayC4Tab[depth])) ) + return true; + } + return false; + + case CV_GRAY2BGR: case CV_GRAY2BGRA: + if( dcn <= 0 ) dcn = (code==CV_GRAY2BGRA) ? 4 : 3; + CV_Assert( scn == 1 && (dcn == 3 || dcn == 4)); + _dst.create(sz, CV_MAKETYPE(depth, dcn)); + dst = _dst.getMat(); + + if( code == CV_GRAY2BGR ) + { + if( CvtColorIPPLoop(src, dst, IPPGray2BGRFunctor(ippiCopyP3C3RTab[depth])) ) + return true; + } + else if( code == CV_GRAY2BGRA ) + { + if( CvtColorIPPLoop(src, dst, IPPGray2BGRAFunctor(ippiCopyP3C3RTab[depth], ippiSwapChannelsC3C4RTab[depth], depth)) ) + return true; + } + return false; +#endif + +#if 0 + case CV_BGR2YCrCb: case CV_RGB2YCrCb: + case CV_BGR2YUV: case CV_RGB2YUV: + { + CV_Assert( scn == 3 || scn == 4 ); + static const float yuv_f[] = { 0.114f, 0.587f, 0.299f, 0.492f, 0.877f }; + static const int yuv_i[] = { B2Y, G2Y, R2Y, 8061, 14369 }; + const float* coeffs_f = code == CV_BGR2YCrCb || code == CV_RGB2YCrCb ? 0 : yuv_f; + const int* coeffs_i = code == CV_BGR2YCrCb || code == CV_RGB2YCrCb ? 
0 : yuv_i; + + _dst.create(sz, CV_MAKETYPE(depth, 3)); + dst = _dst.getMat(); + + if (code == CV_RGB2YUV && scn == 3 && depth == CV_8U) + { + if (CvtColorIPPLoop(src, dst, IPPGeneralFunctor((ippiGeneralFunc)ippiRGBToYUV_8u_C3R))) + return true; + } + else if (code == CV_BGR2YUV && scn == 3 && depth == CV_8U) + { + if (CvtColorIPPLoop(src, dst, IPPReorderGeneralFunctor(ippiSwapChannelsC3RTab[depth], + (ippiGeneralFunc)ippiRGBToYUV_8u_C3R, 2, 1, 0, depth))) + return true; + } + else if (code == CV_RGB2YUV && scn == 4 && depth == CV_8U) + { + if (CvtColorIPPLoop(src, dst, IPPReorderGeneralFunctor(ippiSwapChannelsC4C3RTab[depth], + (ippiGeneralFunc)ippiRGBToYUV_8u_C3R, 0, 1, 2, depth))) + return true; + } + else if (code == CV_BGR2YUV && scn == 4 && depth == CV_8U) + { + if (CvtColorIPPLoop(src, dst, IPPReorderGeneralFunctor(ippiSwapChannelsC4C3RTab[depth], + (ippiGeneralFunc)ippiRGBToYUV_8u_C3R, 2, 1, 0, depth))) + return true; + } + return false; + } +#endif + +#if 0 + case CV_YCrCb2BGR: case CV_YCrCb2RGB: + case CV_YUV2BGR: case CV_YUV2RGB: + { + if( dcn <= 0 ) dcn = 3; + CV_Assert( scn == 3 && (dcn == 3 || dcn == 4) ); + static const float yuv_f[] = { 2.032f, -0.395f, -0.581f, 1.140f }; + static const int yuv_i[] = { 33292, -6472, -9519, 18678 }; + const float* coeffs_f = code == CV_YCrCb2BGR || code == CV_YCrCb2RGB ? 0 : yuv_f; + const int* coeffs_i = code == CV_YCrCb2BGR || code == CV_YCrCb2RGB ? 0 : yuv_i; + + _dst.create(sz, CV_MAKETYPE(depth, dcn)); + dst = _dst.getMat(); + + if (code == CV_YUV2RGB && dcn == 3 && depth == CV_8U) + { + if (CvtColorIPPLoop(src, dst, IPPGeneralFunctor((ippiGeneralFunc)ippiYUVToRGB_8u_C3R))) + return true; + } + else if (code == CV_YUV2BGR && dcn == 3 && depth == CV_8U) + { + if (CvtColorIPPLoop(src, dst, IPPGeneralReorderFunctor((ippiGeneralFunc)ippiYUVToRGB_8u_C3R, + ippiSwapChannelsC3RTab[depth], 2, 1, 0, depth))) + return true; + } + else if (code == CV_YUV2RGB && dcn == 4 && depth == CV_8U) + { + if (CvtColorIPPLoop(src, dst, IPPGeneralReorderFunctor((ippiGeneralFunc)ippiYUVToRGB_8u_C3R, + ippiSwapChannelsC3C4RTab[depth], 0, 1, 2, depth))) + return true; + } + else if (code == CV_YUV2BGR && dcn == 4 && depth == CV_8U) + { + if (CvtColorIPPLoop(src, dst, IPPGeneralReorderFunctor((ippiGeneralFunc)ippiYUVToRGB_8u_C3R, + ippiSwapChannelsC3C4RTab[depth], 2, 1, 0, depth))) + return true; + } + return false; + } +#endif + +#if IPP_VERSION_MAJOR >= 7 + case CV_BGR2XYZ: case CV_RGB2XYZ: + CV_Assert( scn == 3 || scn == 4 ); + _dst.create(sz, CV_MAKETYPE(depth, 3)); + dst = _dst.getMat(); + + if( code == CV_BGR2XYZ && scn == 3 && depth != CV_32F ) + { + if( CvtColorIPPLoopCopy(src, dst, IPPReorderGeneralFunctor(ippiSwapChannelsC3RTab[depth], ippiRGB2XYZTab[depth], 2, 1, 0, depth)) ) + return true; + } + else if( code == CV_BGR2XYZ && scn == 4 && depth != CV_32F ) + { + if( CvtColorIPPLoop(src, dst, IPPReorderGeneralFunctor(ippiSwapChannelsC4C3RTab[depth], ippiRGB2XYZTab[depth], 2, 1, 0, depth)) ) + return true; + } + else if( code == CV_RGB2XYZ && scn == 3 && depth != CV_32F ) + { + if( CvtColorIPPLoopCopy(src, dst, IPPGeneralFunctor(ippiRGB2XYZTab[depth])) ) + return true; + } + else if( code == CV_RGB2XYZ && scn == 4 && depth != CV_32F ) + { + if( CvtColorIPPLoop(src, dst, IPPReorderGeneralFunctor(ippiSwapChannelsC4C3RTab[depth], ippiRGB2XYZTab[depth], 0, 1, 2, depth)) ) + return true; + } + return false; +#endif + +#if IPP_VERSION_MAJOR >= 7 + case CV_XYZ2BGR: case CV_XYZ2RGB: + if( dcn <= 0 ) dcn = 3; + CV_Assert( scn == 3 && (dcn == 3 || dcn == 4) ); + + 
_dst.create(sz, CV_MAKETYPE(depth, dcn)); + dst = _dst.getMat(); + + if( code == CV_XYZ2BGR && dcn == 3 && depth != CV_32F ) + { + if( CvtColorIPPLoopCopy(src, dst, IPPGeneralReorderFunctor(ippiXYZ2RGBTab[depth], ippiSwapChannelsC3RTab[depth], 2, 1, 0, depth)) ) + return true; + } + else if( code == CV_XYZ2BGR && dcn == 4 && depth != CV_32F ) + { + if( CvtColorIPPLoop(src, dst, IPPGeneralReorderFunctor(ippiXYZ2RGBTab[depth], ippiSwapChannelsC3C4RTab[depth], 2, 1, 0, depth)) ) + return true; + } + if( code == CV_XYZ2RGB && dcn == 3 && depth != CV_32F ) + { + if( CvtColorIPPLoopCopy(src, dst, IPPGeneralFunctor(ippiXYZ2RGBTab[depth])) ) + return true; + } + else if( code == CV_XYZ2RGB && dcn == 4 && depth != CV_32F ) + { + if( CvtColorIPPLoop(src, dst, IPPGeneralReorderFunctor(ippiXYZ2RGBTab[depth], ippiSwapChannelsC3C4RTab[depth], 0, 1, 2, depth)) ) + return true; + } + return false; +#endif + +#if IPP_VERSION_MAJOR >= 7 + case CV_BGR2HSV: case CV_RGB2HSV: case CV_BGR2HSV_FULL: case CV_RGB2HSV_FULL: + case CV_BGR2HLS: case CV_RGB2HLS: case CV_BGR2HLS_FULL: case CV_RGB2HLS_FULL: + { + CV_Assert( (scn == 3 || scn == 4) && (depth == CV_8U || depth == CV_32F) ); + _dst.create(sz, CV_MAKETYPE(depth, 3)); + dst = _dst.getMat(); + + if( depth == CV_8U || depth == CV_16U ) + { +#if 0 // breaks OCL accuracy tests + if( code == CV_BGR2HSV_FULL && scn == 3 ) + { + if( CvtColorIPPLoopCopy(src, dst, IPPReorderGeneralFunctor(ippiSwapChannelsC3RTab[depth], ippiRGB2HSVTab[depth], 2, 1, 0, depth)) ) + return true; + } + else if( code == CV_BGR2HSV_FULL && scn == 4 ) + { + if( CvtColorIPPLoop(src, dst, IPPReorderGeneralFunctor(ippiSwapChannelsC4C3RTab[depth], ippiRGB2HSVTab[depth], 2, 1, 0, depth)) ) + return true; + } + else if( code == CV_RGB2HSV_FULL && scn == 4 ) + { + if( CvtColorIPPLoop(src, dst, IPPReorderGeneralFunctor(ippiSwapChannelsC4C3RTab[depth], ippiRGB2HSVTab[depth], 0, 1, 2, depth)) ) + return true; + } else +#endif + if( code == CV_RGB2HSV_FULL && scn == 3 && depth == CV_16U ) + { + if( CvtColorIPPLoopCopy(src, dst, IPPGeneralFunctor(ippiRGB2HSVTab[depth])) ) + return true; + } + else if( code == CV_BGR2HLS_FULL && scn == 3 ) + { + if( CvtColorIPPLoopCopy(src, dst, IPPReorderGeneralFunctor(ippiSwapChannelsC3RTab[depth], ippiRGB2HLSTab[depth], 2, 1, 0, depth)) ) + return true; + } + else if( code == CV_BGR2HLS_FULL && scn == 4 ) + { + if( CvtColorIPPLoop(src, dst, IPPReorderGeneralFunctor(ippiSwapChannelsC4C3RTab[depth], ippiRGB2HLSTab[depth], 2, 1, 0, depth)) ) + return true; + } + else if( code == CV_RGB2HLS_FULL && scn == 3 ) + { + if( CvtColorIPPLoopCopy(src, dst, IPPGeneralFunctor(ippiRGB2HLSTab[depth])) ) + return true; + } + else if( code == CV_RGB2HLS_FULL && scn == 4 ) + { + if( CvtColorIPPLoop(src, dst, IPPReorderGeneralFunctor(ippiSwapChannelsC4C3RTab[depth], ippiRGB2HLSTab[depth], 0, 1, 2, depth)) ) + return true; + } + } + return false; + } +#endif + +#if IPP_VERSION_MAJOR >= 7 + case CV_HSV2BGR: case CV_HSV2RGB: case CV_HSV2BGR_FULL: case CV_HSV2RGB_FULL: + case CV_HLS2BGR: case CV_HLS2RGB: case CV_HLS2BGR_FULL: case CV_HLS2RGB_FULL: + { + if( dcn <= 0 ) dcn = 3; + CV_Assert( scn == 3 && (dcn == 3 || dcn == 4) && (depth == CV_8U || depth == CV_32F) ); + _dst.create(sz, CV_MAKETYPE(depth, dcn)); + dst = _dst.getMat(); + + if( depth == CV_8U || depth == CV_16U ) + { + if( code == CV_HSV2BGR_FULL && dcn == 3 ) + { + if( CvtColorIPPLoopCopy(src, dst, IPPGeneralReorderFunctor(ippiHSV2RGBTab[depth], ippiSwapChannelsC3RTab[depth], 2, 1, 0, depth)) ) + return true; + } + else if( code == 
CV_HSV2BGR_FULL && dcn == 4 ) + { + if( CvtColorIPPLoop(src, dst, IPPGeneralReorderFunctor(ippiHSV2RGBTab[depth], ippiSwapChannelsC3C4RTab[depth], 2, 1, 0, depth)) ) + return true; + } + else if( code == CV_HSV2RGB_FULL && dcn == 3 ) + { + if( CvtColorIPPLoopCopy(src, dst, IPPGeneralFunctor(ippiHSV2RGBTab[depth])) ) + return true; + } + else if( code == CV_HSV2RGB_FULL && dcn == 4 ) + { + if( CvtColorIPPLoop(src, dst, IPPGeneralReorderFunctor(ippiHSV2RGBTab[depth], ippiSwapChannelsC3C4RTab[depth], 0, 1, 2, depth)) ) + return true; + } + else if( code == CV_HLS2BGR_FULL && dcn == 3 ) + { + if( CvtColorIPPLoopCopy(src, dst, IPPGeneralReorderFunctor(ippiHLS2RGBTab[depth], ippiSwapChannelsC3RTab[depth], 2, 1, 0, depth)) ) + return true; + } + else if( code == CV_HLS2BGR_FULL && dcn == 4 ) + { + if( CvtColorIPPLoop(src, dst, IPPGeneralReorderFunctor(ippiHLS2RGBTab[depth], ippiSwapChannelsC3C4RTab[depth], 2, 1, 0, depth)) ) + return true; + } + else if( code == CV_HLS2RGB_FULL && dcn == 3 ) + { + if( CvtColorIPPLoopCopy(src, dst, IPPGeneralFunctor(ippiHLS2RGBTab[depth])) ) + return true; + } + else if( code == CV_HLS2RGB_FULL && dcn == 4 ) + { + if( CvtColorIPPLoop(src, dst, IPPGeneralReorderFunctor(ippiHLS2RGBTab[depth], ippiSwapChannelsC3C4RTab[depth], 0, 1, 2, depth)) ) + return true; + } + } + return false; + } +#endif + +#if 0 + case CV_BGR2Lab: case CV_RGB2Lab: case CV_LBGR2Lab: case CV_LRGB2Lab: + case CV_BGR2Luv: case CV_RGB2Luv: case CV_LBGR2Luv: case CV_LRGB2Luv: + { + CV_Assert( (scn == 3 || scn == 4) && (depth == CV_8U || depth == CV_32F) ); + bool srgb = code == CV_BGR2Lab || code == CV_RGB2Lab || + code == CV_BGR2Luv || code == CV_RGB2Luv; + + _dst.create(sz, CV_MAKETYPE(depth, 3)); + dst = _dst.getMat(); + + if (code == CV_LBGR2Lab && scn == 3 && depth == CV_8U) + { + if (CvtColorIPPLoop(src, dst, IPPGeneralFunctor((ippiGeneralFunc)ippiBGRToLab_8u_C3R))) + return true; + } + else if (code == CV_LBGR2Lab && scn == 4 && depth == CV_8U) + { + if (CvtColorIPPLoop(src, dst, IPPReorderGeneralFunctor(ippiSwapChannelsC4C3RTab[depth], + (ippiGeneralFunc)ippiBGRToLab_8u_C3R, 0, 1, 2, depth))) + return true; + } + else + if (code == CV_LRGB2Lab && scn == 3 && depth == CV_8U) // slower than OpenCV + { + if (CvtColorIPPLoop(src, dst, IPPReorderGeneralFunctor(ippiSwapChannelsC3RTab[depth], + (ippiGeneralFunc)ippiBGRToLab_8u_C3R, 2, 1, 0, depth))) + return true; + } + else if (code == CV_LRGB2Lab && scn == 4 && depth == CV_8U) // slower than OpenCV + { + if (CvtColorIPPLoop(src, dst, IPPReorderGeneralFunctor(ippiSwapChannelsC4C3RTab[depth], + (ippiGeneralFunc)ippiBGRToLab_8u_C3R, 2, 1, 0, depth))) + return true; + } + else if (code == CV_LRGB2Luv && scn == 3) + { + if (CvtColorIPPLoop(src, dst, IPPGeneralFunctor(ippiRGBToLUVTab[depth]))) + return true; + } + else if (code == CV_LRGB2Luv && scn == 4) + { + if (CvtColorIPPLoop(src, dst, IPPReorderGeneralFunctor(ippiSwapChannelsC4C3RTab[depth], + ippiRGBToLUVTab[depth], 0, 1, 2, depth))) + return true; + } + else if (code == CV_LBGR2Luv && scn == 3) + { + if (CvtColorIPPLoop(src, dst, IPPReorderGeneralFunctor(ippiSwapChannelsC3RTab[depth], + ippiRGBToLUVTab[depth], 2, 1, 0, depth))) + return true; + } + else if (code == CV_LBGR2Luv && scn == 4) + { + if (CvtColorIPPLoop(src, dst, IPPReorderGeneralFunctor(ippiSwapChannelsC4C3RTab[depth], + ippiRGBToLUVTab[depth], 2, 1, 0, depth))) + return true; + } + return false; + } +#endif + +#if 0 + case CV_Lab2BGR: case CV_Lab2RGB: case CV_Lab2LBGR: case CV_Lab2LRGB: + case CV_Luv2BGR: case CV_Luv2RGB: case 
CV_Luv2LBGR: case CV_Luv2LRGB: + { + if( dcn <= 0 ) dcn = 3; + CV_Assert( scn == 3 && (dcn == 3 || dcn == 4) && (depth == CV_8U || depth == CV_32F) ); + bool srgb = code == CV_Lab2BGR || code == CV_Lab2RGB || + code == CV_Luv2BGR || code == CV_Luv2RGB; + + _dst.create(sz, CV_MAKETYPE(depth, dcn)); + dst = _dst.getMat(); + + if( code == CV_Lab2LBGR && dcn == 3 && depth == CV_8U) + { + if( CvtColorIPPLoop(src, dst, IPPGeneralFunctor((ippiGeneralFunc)ippiLabToBGR_8u_C3R)) ) + return true; + } + else if( code == CV_Lab2LBGR && dcn == 4 && depth == CV_8U ) + { + if( CvtColorIPPLoop(src, dst, IPPGeneralReorderFunctor((ippiGeneralFunc)ippiLabToBGR_8u_C3R, + ippiSwapChannelsC3C4RTab[depth], 0, 1, 2, depth)) ) + return true; + } + if( code == CV_Lab2LRGB && dcn == 3 && depth == CV_8U ) + { + if( CvtColorIPPLoop(src, dst, IPPGeneralReorderFunctor((ippiGeneralFunc)ippiLabToBGR_8u_C3R, + ippiSwapChannelsC3RTab[depth], 2, 1, 0, depth)) ) + return true; + } + else if( code == CV_Lab2LRGB && dcn == 4 && depth == CV_8U ) + { + if( CvtColorIPPLoop(src, dst, IPPGeneralReorderFunctor((ippiGeneralFunc)ippiLabToBGR_8u_C3R, + ippiSwapChannelsC3C4RTab[depth], 2, 1, 0, depth)) ) + return true; + } + if( code == CV_Luv2LRGB && dcn == 3 ) + { + if( CvtColorIPPLoop(src, dst, IPPGeneralFunctor(ippiLUVToRGBTab[depth])) ) + return true; + } + else if( code == CV_Luv2LRGB && dcn == 4 ) + { + if( CvtColorIPPLoop(src, dst, IPPGeneralReorderFunctor(ippiLUVToRGBTab[depth], + ippiSwapChannelsC3C4RTab[depth], 0, 1, 2, depth)) ) + return true; + } + if( code == CV_Luv2LBGR && dcn == 3 ) + { + if( CvtColorIPPLoop(src, dst, IPPGeneralReorderFunctor(ippiLUVToRGBTab[depth], + ippiSwapChannelsC3RTab[depth], 2, 1, 0, depth)) ) + return true; + } + else if( code == CV_Luv2LBGR && dcn == 4 ) + { + if( CvtColorIPPLoop(src, dst, IPPGeneralReorderFunctor(ippiLUVToRGBTab[depth], + ippiSwapChannelsC3C4RTab[depth], 2, 1, 0, depth)) ) + return true; + } + return false; + } +#endif + + case CV_YUV2GRAY_420: + { + if (dcn <= 0) dcn = 1; + + CV_Assert( dcn == 1 ); + CV_Assert( sz.width % 2 == 0 && sz.height % 3 == 0 && depth == CV_8U ); + + Size dstSz(sz.width, sz.height * 2 / 3); + _dst.create(dstSz, CV_MAKETYPE(depth, dcn)); + dst = _dst.getMat(); + + if (ippStsNoErr == ippiCopy_8u_C1R(src.data, (int)src.step, dst.data, (int)dst.step, + ippiSize(dstSz.width, dstSz.height))) + return true; + return false; + } + + case CV_RGBA2mRGBA: + { + if (dcn <= 0) dcn = 4; + CV_Assert( scn == 4 && dcn == 4 ); + + _dst.create(sz, CV_MAKETYPE(depth, dcn)); + dst = _dst.getMat(); + + if( depth == CV_8U ) + { + if (CvtColorIPPLoop(src, dst, IPPGeneralFunctor((ippiGeneralFunc)ippiAlphaPremul_8u_AC4R))) + return true; + return false; + } + + return false; + } + + default: + return false; + } +} +#endif +} ////////////////////////////////////////////////////////////////////////////////////////// // The main function // @@ -7339,9 +7931,10 @@ void cv::cvtColor( InputArray _src, OutputArray _dst, int code, int dcn ) Mat src = _src.getMat(), dst; Size sz = src.size(); - CV_Assert( depth == CV_8U || depth == CV_16U || depth == CV_32F ); + CV_IPP_RUN(true, ipp_cvtColor(src, _dst, code, dcn)); + switch( code ) { case CV_BGR2BGRA: case CV_RGB2BGRA: case CV_BGRA2BGR: @@ -7353,68 +7946,6 @@ void cv::cvtColor( InputArray _src, OutputArray _dst, int code, int dcn ) _dst.create( sz, CV_MAKETYPE(depth, dcn)); dst = _dst.getMat(); -#if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7) - CV_IPP_CHECK() - { - if( code == CV_BGR2BGRA) - { - if ( CvtColorIPPLoop(src, dst, 
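For reference, the new dispatch in cv::cvtColor() above tries ipp_cvtColor() first (via CV_IPP_RUN(true, ...)) and falls through to the existing per-code loops whenever that helper returns false, so callers should see no behavioural change. A minimal call that exercises one of the accelerated branches (32-bit float BGR to gray):

@code
#include <opencv2/imgproc.hpp>

int main()
{
    cv::Mat bgr(480, 640, CV_32FC3, cv::Scalar(0.1f, 0.5f, 0.9f));
    cv::Mat gray;
    cv::cvtColor(bgr, gray, cv::COLOR_BGR2GRAY);
    return gray.type() == CV_32FC1 ? 0 : 1;
}
@endcode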
IPPReorderFunctor(ippiSwapChannelsC3C4RTab[depth], 0, 1, 2)) ) - { - CV_IMPL_ADD(CV_IMPL_IPP|CV_IMPL_MT); - return; - } - setIppErrorStatus(); - } - else if( code == CV_BGRA2BGR ) - { - if ( CvtColorIPPLoop(src, dst, IPPGeneralFunctor(ippiCopyAC4C3RTab[depth])) ) - { - CV_IMPL_ADD(CV_IMPL_IPP|CV_IMPL_MT); - return; - } - setIppErrorStatus(); - } - else if( code == CV_BGR2RGBA ) - { - if( CvtColorIPPLoop(src, dst, IPPReorderFunctor(ippiSwapChannelsC3C4RTab[depth], 2, 1, 0)) ) - { - CV_IMPL_ADD(CV_IMPL_IPP|CV_IMPL_MT); - return; - } - setIppErrorStatus(); - } - else if( code == CV_RGBA2BGR ) - { - if( CvtColorIPPLoop(src, dst, IPPReorderFunctor(ippiSwapChannelsC4C3RTab[depth], 2, 1, 0)) ) - { - CV_IMPL_ADD(CV_IMPL_IPP|CV_IMPL_MT); - return; - } - setIppErrorStatus(); - } - else if( code == CV_RGB2BGR ) - { - if( CvtColorIPPLoopCopy(src, dst, IPPReorderFunctor(ippiSwapChannelsC3RTab[depth], 2, 1, 0)) ) - { - CV_IMPL_ADD(CV_IMPL_IPP|CV_IMPL_MT); - return; - } - setIppErrorStatus(); - } -#if IPP_VERSION_X100 >= 801 - else if( code == CV_RGBA2BGRA ) - { - if( CvtColorIPPLoopCopy(src, dst, IPPReorderFunctor(ippiSwapChannelsC4RTab[depth], 2, 1, 0)) ) - { - CV_IMPL_ADD(CV_IMPL_IPP|CV_IMPL_MT); - return; - } - setIppErrorStatus(); - } -#endif - } -#endif - if( depth == CV_8U ) { #ifdef HAVE_TEGRA_OPTIMIZATION @@ -7435,55 +7966,6 @@ void cv::cvtColor( InputArray _src, OutputArray _dst, int code, int dcn ) _dst.create(sz, CV_8UC2); dst = _dst.getMat(); -#if defined(HAVE_IPP) && 0 // breaks OCL accuracy tests - CV_IPP_CHECK() - { - CV_SUPPRESS_DEPRECATED_START - - if (code == CV_BGR2BGR565 && scn == 3) - { - if (CvtColorIPPLoop(src, dst, IPPGeneralFunctor((ippiGeneralFunc)ippiBGRToBGR565_8u16u_C3R))) - { - CV_IMPL_ADD(CV_IMPL_IPP|CV_IMPL_MT); - return; - } - setIppErrorStatus(); - } - else if (code == CV_BGRA2BGR565 && scn == 4) - { - if (CvtColorIPPLoopCopy(src, dst, - IPPReorderGeneralFunctor(ippiSwapChannelsC4C3RTab[depth], - (ippiGeneralFunc)ippiBGRToBGR565_8u16u_C3R, 0, 1, 2, depth))) - { - CV_IMPL_ADD(CV_IMPL_IPP|CV_IMPL_MT); - return; - } - setIppErrorStatus(); - } - else if (code == CV_RGB2BGR565 && scn == 3) - { - if( CvtColorIPPLoopCopy(src, dst, IPPReorderGeneralFunctor(ippiSwapChannelsC3RTab[depth], - (ippiGeneralFunc)ippiBGRToBGR565_8u16u_C3R, 2, 1, 0, depth)) ) - { - CV_IMPL_ADD(CV_IMPL_IPP|CV_IMPL_MT); - return; - } - setIppErrorStatus(); - } - else if (code == CV_RGBA2BGR565 && scn == 4) - { - if( CvtColorIPPLoopCopy(src, dst, IPPReorderGeneralFunctor(ippiSwapChannelsC4C3RTab[depth], - (ippiGeneralFunc)ippiBGRToBGR565_8u16u_C3R, 2, 1, 0, depth)) ) - { - CV_IMPL_ADD(CV_IMPL_IPP|CV_IMPL_MT); - return; - } - setIppErrorStatus(); - } - CV_SUPPRESS_DEPRECATED_END - } -#endif - #ifdef HAVE_TEGRA_OPTIMIZATION if(code == CV_BGR2BGR565 || code == CV_BGRA2BGR565 || code == CV_RGB2BGR565 || code == CV_RGBA2BGR565) if(tegra::useTegra() && tegra::cvtRGB2RGB565(src, dst, code == CV_RGB2BGR565 || code == CV_RGBA2BGR565 ? 
0 : 2)) @@ -7505,53 +7987,6 @@ void cv::cvtColor( InputArray _src, OutputArray _dst, int code, int dcn ) _dst.create(sz, CV_MAKETYPE(depth, dcn)); dst = _dst.getMat(); -#ifdef HAVE_IPP - CV_IPP_CHECK() - { - CV_SUPPRESS_DEPRECATED_START - if (code == CV_BGR5652BGR && dcn == 3) - { - if (CvtColorIPPLoop(src, dst, IPPGeneralFunctor((ippiGeneralFunc)ippiBGR565ToBGR_16u8u_C3R))) - { - CV_IMPL_ADD(CV_IMPL_IPP|CV_IMPL_MT); - return; - } - setIppErrorStatus(); - } - else if (code == CV_BGR5652RGB && dcn == 3) - { - if (CvtColorIPPLoop(src, dst, IPPGeneralReorderFunctor((ippiGeneralFunc)ippiBGR565ToBGR_16u8u_C3R, - ippiSwapChannelsC3RTab[depth], 2, 1, 0, depth))) - { - CV_IMPL_ADD(CV_IMPL_IPP|CV_IMPL_MT); - return; - } - setIppErrorStatus(); - } - else if (code == CV_BGR5652BGRA && dcn == 4) - { - if (CvtColorIPPLoop(src, dst, IPPGeneralReorderFunctor((ippiGeneralFunc)ippiBGR565ToBGR_16u8u_C3R, - ippiSwapChannelsC3C4RTab[depth], 0, 1, 2, depth))) - { - CV_IMPL_ADD(CV_IMPL_IPP|CV_IMPL_MT); - return; - } - setIppErrorStatus(); - } - else if (code == CV_BGR5652RGBA && dcn == 4) - { - if (CvtColorIPPLoop(src, dst, IPPGeneralReorderFunctor((ippiGeneralFunc)ippiBGR565ToBGR_16u8u_C3R, - ippiSwapChannelsC3C4RTab[depth], 2, 1, 0, depth))) - { - CV_IMPL_ADD(CV_IMPL_IPP|CV_IMPL_MT); - return; - } - setIppErrorStatus(); - } - CV_SUPPRESS_DEPRECATED_END - } -#endif - CvtColorLoop(src, dst, RGB5x52RGB(dcn, code == CV_BGR5652BGR || code == CV_BGR5552BGR || code == CV_BGR5652BGRA || code == CV_BGR5552BGRA ? 0 : 2, // blue idx @@ -7565,48 +8000,6 @@ void cv::cvtColor( InputArray _src, OutputArray _dst, int code, int dcn ) _dst.create(sz, CV_MAKETYPE(depth, 1)); dst = _dst.getMat(); -#if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7) - CV_IPP_CHECK() - { - if( code == CV_BGR2GRAY && depth == CV_32F ) - { - if( CvtColorIPPLoop(src, dst, IPPColor2GrayFunctor(ippiColor2GrayC3Tab[depth])) ) - { - CV_IMPL_ADD(CV_IMPL_IPP|CV_IMPL_MT); - return; - } - setIppErrorStatus(); - } - else if( code == CV_RGB2GRAY && depth == CV_32F ) - { - if( CvtColorIPPLoop(src, dst, IPPGeneralFunctor(ippiRGB2GrayC3Tab[depth])) ) - { - CV_IMPL_ADD(CV_IMPL_IPP|CV_IMPL_MT); - return; - } - setIppErrorStatus(); - } - else if( code == CV_BGRA2GRAY && depth == CV_32F ) - { - if( CvtColorIPPLoop(src, dst, IPPColor2GrayFunctor(ippiColor2GrayC4Tab[depth])) ) - { - CV_IMPL_ADD(CV_IMPL_IPP|CV_IMPL_MT); - return; - } - setIppErrorStatus(); - } - else if( code == CV_RGBA2GRAY && depth == CV_32F ) - { - if( CvtColorIPPLoop(src, dst, IPPGeneralFunctor(ippiRGB2GrayC4Tab[depth])) ) - { - CV_IMPL_ADD(CV_IMPL_IPP|CV_IMPL_MT); - return; - } - setIppErrorStatus(); - } - } -#endif - bidx = code == CV_BGR2GRAY || code == CV_BGRA2GRAY ? 
0 : 2; if( depth == CV_8U ) @@ -7637,31 +8030,6 @@ void cv::cvtColor( InputArray _src, OutputArray _dst, int code, int dcn ) _dst.create(sz, CV_MAKETYPE(depth, dcn)); dst = _dst.getMat(); -#if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7) - CV_IPP_CHECK() - { - if( code == CV_GRAY2BGR ) - { - if( CvtColorIPPLoop(src, dst, IPPGray2BGRFunctor(ippiCopyP3C3RTab[depth])) ) - { - CV_IMPL_ADD(CV_IMPL_IPP|CV_IMPL_MT); - return; - } - setIppErrorStatus(); - } - else if( code == CV_GRAY2BGRA ) - { - if( CvtColorIPPLoop(src, dst, IPPGray2BGRAFunctor(ippiCopyP3C3RTab[depth], ippiSwapChannelsC3C4RTab[depth], depth)) ) - { - CV_IMPL_ADD(CV_IMPL_IPP|CV_IMPL_MT); - return; - } - setIppErrorStatus(); - } - } -#endif - - if( depth == CV_8U ) { #ifdef HAVE_TEGRA_OPTIMIZATION @@ -7697,51 +8065,6 @@ void cv::cvtColor( InputArray _src, OutputArray _dst, int code, int dcn ) _dst.create(sz, CV_MAKETYPE(depth, 3)); dst = _dst.getMat(); -#if defined HAVE_IPP && 0 - CV_IPP_CHECK() - { - if (code == CV_RGB2YUV && scn == 3 && depth == CV_8U) - { - if (CvtColorIPPLoop(src, dst, IPPGeneralFunctor((ippiGeneralFunc)ippiRGBToYUV_8u_C3R))) - { - CV_IMPL_ADD(CV_IMPL_IPP|CV_IMPL_MT); - return; - } - setIppErrorStatus(); - } - else if (code == CV_BGR2YUV && scn == 3 && depth == CV_8U) - { - if (CvtColorIPPLoop(src, dst, IPPReorderGeneralFunctor(ippiSwapChannelsC3RTab[depth], - (ippiGeneralFunc)ippiRGBToYUV_8u_C3R, 2, 1, 0, depth))) - { - CV_IMPL_ADD(CV_IMPL_IPP|CV_IMPL_MT); - return; - } - setIppErrorStatus(); - } - else if (code == CV_RGB2YUV && scn == 4 && depth == CV_8U) - { - if (CvtColorIPPLoop(src, dst, IPPReorderGeneralFunctor(ippiSwapChannelsC4C3RTab[depth], - (ippiGeneralFunc)ippiRGBToYUV_8u_C3R, 0, 1, 2, depth))) - { - CV_IMPL_ADD(CV_IMPL_IPP|CV_IMPL_MT); - return; - } - setIppErrorStatus(); - } - else if (code == CV_BGR2YUV && scn == 4 && depth == CV_8U) - { - if (CvtColorIPPLoop(src, dst, IPPReorderGeneralFunctor(ippiSwapChannelsC4C3RTab[depth], - (ippiGeneralFunc)ippiRGBToYUV_8u_C3R, 2, 1, 0, depth))) - { - CV_IMPL_ADD(CV_IMPL_IPP|CV_IMPL_MT); - return; - } - setIppErrorStatus(); - } - } -#endif - if( depth == CV_8U ) { #ifdef HAVE_TEGRA_OPTIMIZATION @@ -7771,51 +8094,6 @@ void cv::cvtColor( InputArray _src, OutputArray _dst, int code, int dcn ) _dst.create(sz, CV_MAKETYPE(depth, dcn)); dst = _dst.getMat(); -#if defined HAVE_IPP && 0 - CV_IPP_CHECK() - { - if (code == CV_YUV2RGB && dcn == 3 && depth == CV_8U) - { - if (CvtColorIPPLoop(src, dst, IPPGeneralFunctor((ippiGeneralFunc)ippiYUVToRGB_8u_C3R))) - { - CV_IMPL_ADD(CV_IMPL_IPP|CV_IMPL_MT); - return; - } - setIppErrorStatus(); - } - else if (code == CV_YUV2BGR && dcn == 3 && depth == CV_8U) - { - if (CvtColorIPPLoop(src, dst, IPPGeneralReorderFunctor((ippiGeneralFunc)ippiYUVToRGB_8u_C3R, - ippiSwapChannelsC3RTab[depth], 2, 1, 0, depth))) - { - CV_IMPL_ADD(CV_IMPL_IPP|CV_IMPL_MT); - return; - } - setIppErrorStatus(); - } - else if (code == CV_YUV2RGB && dcn == 4 && depth == CV_8U) - { - if (CvtColorIPPLoop(src, dst, IPPGeneralReorderFunctor((ippiGeneralFunc)ippiYUVToRGB_8u_C3R, - ippiSwapChannelsC3C4RTab[depth], 0, 1, 2, depth))) - { - CV_IMPL_ADD(CV_IMPL_IPP|CV_IMPL_MT); - return; - } - setIppErrorStatus(); - } - else if (code == CV_YUV2BGR && dcn == 4 && depth == CV_8U) - { - if (CvtColorIPPLoop(src, dst, IPPGeneralReorderFunctor((ippiGeneralFunc)ippiYUVToRGB_8u_C3R, - ippiSwapChannelsC3C4RTab[depth], 2, 1, 0, depth))) - { - CV_IMPL_ADD(CV_IMPL_IPP|CV_IMPL_MT); - return; - } - setIppErrorStatus(); - } - } -#endif - if( depth == CV_8U ) CvtColorLoop(src, dst, 
YCrCb2RGB_i(dcn, bidx, coeffs_i)); else if( depth == CV_16U ) @@ -7832,48 +8110,6 @@ void cv::cvtColor( InputArray _src, OutputArray _dst, int code, int dcn ) _dst.create(sz, CV_MAKETYPE(depth, 3)); dst = _dst.getMat(); -#if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7) - CV_IPP_CHECK() - { - if( code == CV_BGR2XYZ && scn == 3 && depth != CV_32F ) - { - if( CvtColorIPPLoopCopy(src, dst, IPPReorderGeneralFunctor(ippiSwapChannelsC3RTab[depth], ippiRGB2XYZTab[depth], 2, 1, 0, depth)) ) - { - CV_IMPL_ADD(CV_IMPL_IPP|CV_IMPL_MT); - return; - } - setIppErrorStatus(); - } - else if( code == CV_BGR2XYZ && scn == 4 && depth != CV_32F ) - { - if( CvtColorIPPLoop(src, dst, IPPReorderGeneralFunctor(ippiSwapChannelsC4C3RTab[depth], ippiRGB2XYZTab[depth], 2, 1, 0, depth)) ) - { - CV_IMPL_ADD(CV_IMPL_IPP|CV_IMPL_MT); - return; - } - setIppErrorStatus(); - } - else if( code == CV_RGB2XYZ && scn == 3 && depth != CV_32F ) - { - if( CvtColorIPPLoopCopy(src, dst, IPPGeneralFunctor(ippiRGB2XYZTab[depth])) ) - { - CV_IMPL_ADD(CV_IMPL_IPP|CV_IMPL_MT); - return; - } - setIppErrorStatus(); - } - else if( code == CV_RGB2XYZ && scn == 4 && depth != CV_32F ) - { - if( CvtColorIPPLoop(src, dst, IPPReorderGeneralFunctor(ippiSwapChannelsC4C3RTab[depth], ippiRGB2XYZTab[depth], 0, 1, 2, depth)) ) - { - CV_IMPL_ADD(CV_IMPL_IPP|CV_IMPL_MT); - return; - } - setIppErrorStatus(); - } - } -#endif - if( depth == CV_8U ) CvtColorLoop(src, dst, RGB2XYZ_i(scn, bidx, 0)); else if( depth == CV_16U ) @@ -7890,48 +8126,6 @@ void cv::cvtColor( InputArray _src, OutputArray _dst, int code, int dcn ) _dst.create(sz, CV_MAKETYPE(depth, dcn)); dst = _dst.getMat(); -#if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7) - CV_IPP_CHECK() - { - if( code == CV_XYZ2BGR && dcn == 3 && depth != CV_32F ) - { - if( CvtColorIPPLoopCopy(src, dst, IPPGeneralReorderFunctor(ippiXYZ2RGBTab[depth], ippiSwapChannelsC3RTab[depth], 2, 1, 0, depth)) ) - { - CV_IMPL_ADD(CV_IMPL_IPP|CV_IMPL_MT); - return; - } - setIppErrorStatus(); - } - else if( code == CV_XYZ2BGR && dcn == 4 && depth != CV_32F ) - { - if( CvtColorIPPLoop(src, dst, IPPGeneralReorderFunctor(ippiXYZ2RGBTab[depth], ippiSwapChannelsC3C4RTab[depth], 2, 1, 0, depth)) ) - { - CV_IMPL_ADD(CV_IMPL_IPP|CV_IMPL_MT); - return; - } - setIppErrorStatus(); - } - if( code == CV_XYZ2RGB && dcn == 3 && depth != CV_32F ) - { - if( CvtColorIPPLoopCopy(src, dst, IPPGeneralFunctor(ippiXYZ2RGBTab[depth])) ) - { - CV_IMPL_ADD(CV_IMPL_IPP|CV_IMPL_MT); - return; - } - setIppErrorStatus(); - } - else if( code == CV_XYZ2RGB && dcn == 4 && depth != CV_32F ) - { - if( CvtColorIPPLoop(src, dst, IPPGeneralReorderFunctor(ippiXYZ2RGBTab[depth], ippiSwapChannelsC3C4RTab[depth], 0, 1, 2, depth)) ) - { - CV_IMPL_ADD(CV_IMPL_IPP|CV_IMPL_MT); - return; - } - setIppErrorStatus(); - } - } -#endif - if( depth == CV_8U ) CvtColorLoop(src, dst, XYZ2RGB_i(dcn, bidx, 0)); else if( depth == CV_16U ) @@ -7952,89 +8146,6 @@ void cv::cvtColor( InputArray _src, OutputArray _dst, int code, int dcn ) _dst.create(sz, CV_MAKETYPE(depth, 3)); dst = _dst.getMat(); -#if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7) - CV_IPP_CHECK() - { - if( depth == CV_8U || depth == CV_16U ) - { -#if 0 // breaks OCL accuracy tests - if( code == CV_BGR2HSV_FULL && scn == 3 ) - { - if( CvtColorIPPLoopCopy(src, dst, IPPReorderGeneralFunctor(ippiSwapChannelsC3RTab[depth], ippiRGB2HSVTab[depth], 2, 1, 0, depth)) ) - { - CV_IMPL_ADD(CV_IMPL_IPP|CV_IMPL_MT); - return; - } - setIppErrorStatus(); - } - else if( code == CV_BGR2HSV_FULL && scn == 4 ) - { - if( 
CvtColorIPPLoop(src, dst, IPPReorderGeneralFunctor(ippiSwapChannelsC4C3RTab[depth], ippiRGB2HSVTab[depth], 2, 1, 0, depth)) ) - { - CV_IMPL_ADD(CV_IMPL_IPP|CV_IMPL_MT); - return; - } - setIppErrorStatus(); - } - else if( code == CV_RGB2HSV_FULL && scn == 4 ) - { - if( CvtColorIPPLoop(src, dst, IPPReorderGeneralFunctor(ippiSwapChannelsC4C3RTab[depth], ippiRGB2HSVTab[depth], 0, 1, 2, depth)) ) - { - CV_IMPL_ADD(CV_IMPL_IPP|CV_IMPL_MT); - return; - } - setIppErrorStatus(); - } else -#endif - if( code == CV_RGB2HSV_FULL && scn == 3 && depth == CV_16U ) - { - if( CvtColorIPPLoopCopy(src, dst, IPPGeneralFunctor(ippiRGB2HSVTab[depth])) ) - { - CV_IMPL_ADD(CV_IMPL_IPP|CV_IMPL_MT); - return; - } - setIppErrorStatus(); - } - else if( code == CV_BGR2HLS_FULL && scn == 3 ) - { - if( CvtColorIPPLoopCopy(src, dst, IPPReorderGeneralFunctor(ippiSwapChannelsC3RTab[depth], ippiRGB2HLSTab[depth], 2, 1, 0, depth)) ) - { - CV_IMPL_ADD(CV_IMPL_IPP|CV_IMPL_MT); - return; - } - setIppErrorStatus(); - } - else if( code == CV_BGR2HLS_FULL && scn == 4 ) - { - if( CvtColorIPPLoop(src, dst, IPPReorderGeneralFunctor(ippiSwapChannelsC4C3RTab[depth], ippiRGB2HLSTab[depth], 2, 1, 0, depth)) ) - { - CV_IMPL_ADD(CV_IMPL_IPP|CV_IMPL_MT); - return; - } - setIppErrorStatus(); - } - else if( code == CV_RGB2HLS_FULL && scn == 3 ) - { - if( CvtColorIPPLoopCopy(src, dst, IPPGeneralFunctor(ippiRGB2HLSTab[depth])) ) - { - CV_IMPL_ADD(CV_IMPL_IPP|CV_IMPL_MT); - return; - } - setIppErrorStatus(); - } - else if( code == CV_RGB2HLS_FULL && scn == 4 ) - { - if( CvtColorIPPLoop(src, dst, IPPReorderGeneralFunctor(ippiSwapChannelsC4C3RTab[depth], ippiRGB2HLSTab[depth], 0, 1, 2, depth)) ) - { - CV_IMPL_ADD(CV_IMPL_IPP|CV_IMPL_MT); - return; - } - setIppErrorStatus(); - } - } - } -#endif - if( code == CV_BGR2HSV || code == CV_RGB2HSV || code == CV_BGR2HSV_FULL || code == CV_RGB2HSV_FULL ) { @@ -8070,87 +8181,6 @@ void cv::cvtColor( InputArray _src, OutputArray _dst, int code, int dcn ) _dst.create(sz, CV_MAKETYPE(depth, dcn)); dst = _dst.getMat(); -#if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7) - CV_IPP_CHECK() - { - if( depth == CV_8U || depth == CV_16U ) - { - if( code == CV_HSV2BGR_FULL && dcn == 3 ) - { - if( CvtColorIPPLoopCopy(src, dst, IPPGeneralReorderFunctor(ippiHSV2RGBTab[depth], ippiSwapChannelsC3RTab[depth], 2, 1, 0, depth)) ) - { - CV_IMPL_ADD(CV_IMPL_IPP|CV_IMPL_MT); - return; - } - setIppErrorStatus(); - } - else if( code == CV_HSV2BGR_FULL && dcn == 4 ) - { - if( CvtColorIPPLoop(src, dst, IPPGeneralReorderFunctor(ippiHSV2RGBTab[depth], ippiSwapChannelsC3C4RTab[depth], 2, 1, 0, depth)) ) - { - CV_IMPL_ADD(CV_IMPL_IPP|CV_IMPL_MT); - return; - } - setIppErrorStatus(); - } - else if( code == CV_HSV2RGB_FULL && dcn == 3 ) - { - if( CvtColorIPPLoopCopy(src, dst, IPPGeneralFunctor(ippiHSV2RGBTab[depth])) ) - { - CV_IMPL_ADD(CV_IMPL_IPP|CV_IMPL_MT); - return; - } - setIppErrorStatus(); - } - else if( code == CV_HSV2RGB_FULL && dcn == 4 ) - { - if( CvtColorIPPLoop(src, dst, IPPGeneralReorderFunctor(ippiHSV2RGBTab[depth], ippiSwapChannelsC3C4RTab[depth], 0, 1, 2, depth)) ) - { - CV_IMPL_ADD(CV_IMPL_IPP|CV_IMPL_MT); - return; - } - setIppErrorStatus(); - } - else if( code == CV_HLS2BGR_FULL && dcn == 3 ) - { - if( CvtColorIPPLoopCopy(src, dst, IPPGeneralReorderFunctor(ippiHLS2RGBTab[depth], ippiSwapChannelsC3RTab[depth], 2, 1, 0, depth)) ) - { - CV_IMPL_ADD(CV_IMPL_IPP|CV_IMPL_MT); - return; - } - setIppErrorStatus(); - } - else if( code == CV_HLS2BGR_FULL && dcn == 4 ) - { - if( CvtColorIPPLoop(src, dst, 
IPPGeneralReorderFunctor(ippiHLS2RGBTab[depth], ippiSwapChannelsC3C4RTab[depth], 2, 1, 0, depth)) ) - { - CV_IMPL_ADD(CV_IMPL_IPP|CV_IMPL_MT); - return; - } - setIppErrorStatus(); - } - else if( code == CV_HLS2RGB_FULL && dcn == 3 ) - { - if( CvtColorIPPLoopCopy(src, dst, IPPGeneralFunctor(ippiHLS2RGBTab[depth])) ) - { - CV_IMPL_ADD(CV_IMPL_IPP|CV_IMPL_MT); - return; - } - setIppErrorStatus(); - } - else if( code == CV_HLS2RGB_FULL && dcn == 4 ) - { - if( CvtColorIPPLoop(src, dst, IPPGeneralReorderFunctor(ippiHLS2RGBTab[depth], ippiSwapChannelsC3C4RTab[depth], 0, 1, 2, depth)) ) - { - CV_IMPL_ADD(CV_IMPL_IPP|CV_IMPL_MT); - return; - } - setIppErrorStatus(); - } - } - } -#endif - if( code == CV_HSV2BGR || code == CV_HSV2RGB || code == CV_HSV2BGR_FULL || code == CV_HSV2RGB_FULL ) { @@ -8181,91 +8211,6 @@ void cv::cvtColor( InputArray _src, OutputArray _dst, int code, int dcn ) _dst.create(sz, CV_MAKETYPE(depth, 3)); dst = _dst.getMat(); -#if defined HAVE_IPP && 0 - CV_IPP_CHECK() - { - if (code == CV_LBGR2Lab && scn == 3 && depth == CV_8U) - { - if (CvtColorIPPLoop(src, dst, IPPGeneralFunctor((ippiGeneralFunc)ippiBGRToLab_8u_C3R))) - { - CV_IMPL_ADD(CV_IMPL_IPP|CV_IMPL_MT); - return; - } - setIppErrorStatus(); - } - else if (code == CV_LBGR2Lab && scn == 4 && depth == CV_8U) - { - if (CvtColorIPPLoop(src, dst, IPPReorderGeneralFunctor(ippiSwapChannelsC4C3RTab[depth], - (ippiGeneralFunc)ippiBGRToLab_8u_C3R, 0, 1, 2, depth))) - { - CV_IMPL_ADD(CV_IMPL_IPP|CV_IMPL_MT); - return; - } - setIppErrorStatus(); - } - else - if (code == CV_LRGB2Lab && scn == 3 && depth == CV_8U) // slower than OpenCV - { - if (CvtColorIPPLoop(src, dst, IPPReorderGeneralFunctor(ippiSwapChannelsC3RTab[depth], - (ippiGeneralFunc)ippiBGRToLab_8u_C3R, 2, 1, 0, depth))) - { - CV_IMPL_ADD(CV_IMPL_IPP|CV_IMPL_MT); - return; - } - setIppErrorStatus(); - } - else if (code == CV_LRGB2Lab && scn == 4 && depth == CV_8U) // slower than OpenCV - { - if (CvtColorIPPLoop(src, dst, IPPReorderGeneralFunctor(ippiSwapChannelsC4C3RTab[depth], - (ippiGeneralFunc)ippiBGRToLab_8u_C3R, 2, 1, 0, depth))) - { - CV_IMPL_ADD(CV_IMPL_IPP|CV_IMPL_MT); - return; - } - setIppErrorStatus(); - } - else if (code == CV_LRGB2Luv && scn == 3) - { - if (CvtColorIPPLoop(src, dst, IPPGeneralFunctor(ippiRGBToLUVTab[depth]))) - { - CV_IMPL_ADD(CV_IMPL_IPP|CV_IMPL_MT); - return; - } - setIppErrorStatus(); - } - else if (code == CV_LRGB2Luv && scn == 4) - { - if (CvtColorIPPLoop(src, dst, IPPReorderGeneralFunctor(ippiSwapChannelsC4C3RTab[depth], - ippiRGBToLUVTab[depth], 0, 1, 2, depth))) - { - CV_IMPL_ADD(CV_IMPL_IPP|CV_IMPL_MT); - return; - } - setIppErrorStatus(); - } - else if (code == CV_LBGR2Luv && scn == 3) - { - if (CvtColorIPPLoop(src, dst, IPPReorderGeneralFunctor(ippiSwapChannelsC3RTab[depth], - ippiRGBToLUVTab[depth], 2, 1, 0, depth))) - { - CV_IMPL_ADD(CV_IMPL_IPP|CV_IMPL_MT); - return; - } - setIppErrorStatus(); - } - else if (code == CV_LBGR2Luv && scn == 4) - { - if (CvtColorIPPLoop(src, dst, IPPReorderGeneralFunctor(ippiSwapChannelsC4C3RTab[depth], - ippiRGBToLUVTab[depth], 2, 1, 0, depth))) - { - CV_IMPL_ADD(CV_IMPL_IPP|CV_IMPL_MT); - return; - } - setIppErrorStatus(); - } - } -#endif - if( code == CV_BGR2Lab || code == CV_RGB2Lab || code == CV_LBGR2Lab || code == CV_LRGB2Lab ) { @@ -8297,83 +8242,6 @@ void cv::cvtColor( InputArray _src, OutputArray _dst, int code, int dcn ) _dst.create(sz, CV_MAKETYPE(depth, dcn)); dst = _dst.getMat(); -#if defined HAVE_IPP && 0 - CV_IPP_CHECK() - { - if( code == CV_Lab2LBGR && dcn == 3 && depth == CV_8U) - { - 
if( CvtColorIPPLoop(src, dst, IPPGeneralFunctor((ippiGeneralFunc)ippiLabToBGR_8u_C3R)) ) - { - CV_IMPL_ADD(CV_IMPL_IPP|CV_IMPL_MT); - return; - } - setIppErrorStatus(); - } - else if( code == CV_Lab2LBGR && dcn == 4 && depth == CV_8U ) - { - if( CvtColorIPPLoop(src, dst, IPPGeneralReorderFunctor((ippiGeneralFunc)ippiLabToBGR_8u_C3R, - ippiSwapChannelsC3C4RTab[depth], 0, 1, 2, depth)) ) - { - CV_IMPL_ADD(CV_IMPL_IPP|CV_IMPL_MT); - return; - } - setIppErrorStatus(); - } - if( code == CV_Lab2LRGB && dcn == 3 && depth == CV_8U ) - { - if( CvtColorIPPLoop(src, dst, IPPGeneralReorderFunctor((ippiGeneralFunc)ippiLabToBGR_8u_C3R, - ippiSwapChannelsC3RTab[depth], 2, 1, 0, depth)) ) - { - CV_IMPL_ADD(CV_IMPL_IPP|CV_IMPL_MT); - return; - } - setIppErrorStatus(); - } - else if( code == CV_Lab2LRGB && dcn == 4 && depth == CV_8U ) - { - if( CvtColorIPPLoop(src, dst, IPPGeneralReorderFunctor((ippiGeneralFunc)ippiLabToBGR_8u_C3R, - ippiSwapChannelsC3C4RTab[depth], 2, 1, 0, depth)) ) - { - CV_IMPL_ADD(CV_IMPL_IPP|CV_IMPL_MT); - return; - } - setIppErrorStatus(); - } - if( code == CV_Luv2LRGB && dcn == 3 ) - { - if( CvtColorIPPLoop(src, dst, IPPGeneralFunctor(ippiLUVToRGBTab[depth])) ) - return; - } - else if( code == CV_Luv2LRGB && dcn == 4 ) - { - if( CvtColorIPPLoop(src, dst, IPPGeneralReorderFunctor(ippiLUVToRGBTab[depth], - ippiSwapChannelsC3C4RTab[depth], 0, 1, 2, depth)) ) - { - CV_IMPL_ADD(CV_IMPL_IPP|CV_IMPL_MT); - return; - } - } - if( code == CV_Luv2LBGR && dcn == 3 ) - { - if( CvtColorIPPLoop(src, dst, IPPGeneralReorderFunctor(ippiLUVToRGBTab[depth], - ippiSwapChannelsC3RTab[depth], 2, 1, 0, depth)) ) - { - CV_IMPL_ADD(CV_IMPL_IPP|CV_IMPL_MT); - return; - } - } - else if( code == CV_Luv2LBGR && dcn == 4 ) - { - if( CvtColorIPPLoop(src, dst, IPPGeneralReorderFunctor(ippiLUVToRGBTab[depth], - ippiSwapChannelsC3C4RTab[depth], 2, 1, 0, depth)) ) - { - CV_IMPL_ADD(CV_IMPL_IPP|CV_IMPL_MT); - return; - } - } - } -#endif - if( code == CV_Lab2BGR || code == CV_Lab2RGB || code == CV_Lab2LBGR || code == CV_Lab2LRGB ) { @@ -8481,18 +8349,6 @@ void cv::cvtColor( InputArray _src, OutputArray _dst, int code, int dcn ) Size dstSz(sz.width, sz.height * 2 / 3); _dst.create(dstSz, CV_MAKETYPE(depth, dcn)); dst = _dst.getMat(); -#if defined HAVE_IPP - CV_IPP_CHECK() - { - if (ippStsNoErr == ippiCopy_8u_C1R(src.data, (int)src.step, dst.data, (int)dst.step, - ippiSize(dstSz.width, dstSz.height))) - { - CV_IMPL_ADD(CV_IMPL_IPP); - return; - } - setIppErrorStatus(); - } -#endif src(Range(0, dstSz.height), Range::all()).copyTo(dst); } break; @@ -8582,17 +8438,6 @@ void cv::cvtColor( InputArray _src, OutputArray _dst, int code, int dcn ) if( depth == CV_8U ) { -#if defined(HAVE_IPP) - CV_IPP_CHECK() - { - if (CvtColorIPPLoop(src, dst, IPPGeneralFunctor((ippiGeneralFunc)ippiAlphaPremul_8u_AC4R))) - { - CV_IMPL_ADD(CV_IMPL_IPP|CV_IMPL_MT); - return; - } - setIppErrorStatus(); - } -#endif CvtColorLoop(src, dst, RGBA2mRGBA()); } else diff --git a/modules/imgproc/src/corner.cpp b/modules/imgproc/src/corner.cpp index 38f9676007..d9b3d943eb 100644 --- a/modules/imgproc/src/corner.cpp +++ b/modules/imgproc/src/corner.cpp @@ -523,16 +523,16 @@ static bool ocl_preCornerDetect( InputArray _src, OutputArray _dst, int ksize, i } -void cv::cornerMinEigenVal( InputArray _src, OutputArray _dst, int blockSize, int ksize, int borderType ) +#if defined(HAVE_IPP) +namespace cv { - CV_OCL_RUN(_src.dims() <= 2 && _dst.isUMat(), - ocl_cornerMinEigenValVecs(_src, _dst, blockSize, ksize, 0.0, borderType, MINEIGENVAL)) - +static bool 
ipp_cornerMinEigenVal( InputArray _src, OutputArray _dst, int blockSize, int ksize, int borderType ) +{ +#if IPP_VERSION_MAJOR >= 8 Mat src = _src.getMat(); _dst.create( src.size(), CV_32FC1 ); Mat dst = _dst.getMat(); -#if defined(HAVE_IPP) && (IPP_VERSION_MAJOR >= 8) - CV_IPP_CHECK() + { typedef IppStatus (CV_STDCALL * ippiMinEigenValGetBufferSize)(IppiSize, int, int, int*); typedef IppStatus (CV_STDCALL * ippiMinEigenVal)(const void*, int, Ipp32f*, int, IppiSize, IppiKernelType, int, int, Ipp8u*); @@ -583,28 +583,57 @@ void cv::cornerMinEigenVal( InputArray _src, OutputArray _dst, int blockSize, in if (ok >= 0) { CV_IMPL_ADD(CV_IMPL_IPP); - return; + return true; } } - setIppErrorStatus(); } } } +#else + CV_UNUSED(_src); CV_UNUSED(_dst); CV_UNUSED(blockSize); CV_UNUSED(borderType); #endif - cornerEigenValsVecs( src, dst, blockSize, ksize, MINEIGENVAL, 0, borderType ); + return false; } +} +#endif -void cv::cornerHarris( InputArray _src, OutputArray _dst, int blockSize, int ksize, double k, int borderType ) +void cv::cornerMinEigenVal( InputArray _src, OutputArray _dst, int blockSize, int ksize, int borderType ) { CV_OCL_RUN(_src.dims() <= 2 && _dst.isUMat(), - ocl_cornerMinEigenValVecs(_src, _dst, blockSize, ksize, k, borderType, HARRIS)) + ocl_cornerMinEigenValVecs(_src, _dst, blockSize, ksize, 0.0, borderType, MINEIGENVAL)) + +#ifdef HAVE_IPP + int kerSize = ksize; + if (ksize < 0) + { + kerSize = 3; + } + bool isolated = (borderType & BORDER_ISOLATED) != 0; + int borderTypeNI = borderType & ~BORDER_ISOLATED; +#endif + CV_IPP_RUN(((borderTypeNI == BORDER_REPLICATE && (!_src.isSubmatrix() || isolated)) && + (kerSize == 3 || kerSize == 5) && (blockSize == 3 || blockSize == 5)) && IPP_VERSION_MAJOR >= 8, + ipp_cornerMinEigenVal( _src, _dst, blockSize, ksize, borderType )); + Mat src = _src.getMat(); _dst.create( src.size(), CV_32FC1 ); Mat dst = _dst.getMat(); + cornerEigenValsVecs( src, dst, blockSize, ksize, MINEIGENVAL, 0, borderType ); +} + + +#if defined(HAVE_IPP) +namespace cv +{ +static bool ipp_cornerHarris( InputArray _src, OutputArray _dst, int blockSize, int ksize, double k, int borderType ) +{ #if IPP_VERSION_X100 >= 801 && 0 - CV_IPP_CHECK() + Mat src = _src.getMat(); + _dst.create( src.size(), CV_32FC1 ); + Mat dst = _dst.getMat(); + { int type = src.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type); int borderTypeNI = borderType & ~BORDER_ISOLATED; @@ -643,13 +672,37 @@ void cv::cornerHarris( InputArray _src, OutputArray _dst, int blockSize, int ksi if (status >= 0) { CV_IMPL_ADD(CV_IMPL_IPP); - return; + return true; } } - setIppErrorStatus(); } } +#else + CV_UNUSED(_src); CV_UNUSED(_dst); CV_UNUSED(blockSize); CV_UNUSED(ksize); CV_UNUSED(k); CV_UNUSED(borderType); #endif + return false; +} +} +#endif + +void cv::cornerHarris( InputArray _src, OutputArray _dst, int blockSize, int ksize, double k, int borderType ) +{ + CV_OCL_RUN(_src.dims() <= 2 && _dst.isUMat(), + ocl_cornerMinEigenValVecs(_src, _dst, blockSize, ksize, k, borderType, HARRIS)) + +#ifdef HAVE_IPP + int borderTypeNI = borderType & ~BORDER_ISOLATED; + bool isolated = (borderType & BORDER_ISOLATED) != 0; +#endif + CV_IPP_RUN(((ksize == 3 || ksize == 5) && (_src.type() == CV_8UC1 || _src.type() == CV_32FC1) && + (borderTypeNI == BORDER_CONSTANT || borderTypeNI == BORDER_REPLICATE) && CV_MAT_CN(_src.type()) == 1 && + (!_src.isSubmatrix() || isolated)) && IPP_VERSION_X100 >= 801 && 0, ipp_cornerHarris( _src, _dst, blockSize, ksize, k, borderType )); + + + Mat src = _src.getMat(); + _dst.create( 
src.size(), CV_32FC1 ); + Mat dst = _dst.getMat(); + cornerEigenValsVecs( src, dst, blockSize, ksize, HARRIS, k, borderType ); } diff --git a/modules/imgproc/src/deriv.cpp b/modules/imgproc/src/deriv.cpp index d1e3a0b129..482f4d365b 100644 --- a/modules/imgproc/src/deriv.cpp +++ b/modules/imgproc/src/deriv.cpp @@ -547,7 +547,39 @@ static bool IPPDerivSobel(InputArray _src, OutputArray _dst, int ddepth, int dx, } return false; } - +#ifdef HAVE_IPP +static bool ipp_sobel(InputArray _src, OutputArray _dst, int ddepth, int dx, int dy, int ksize, double scale, double delta, int borderType) +{ + if (ksize < 0) + { + if (IPPDerivScharr(_src, _dst, ddepth, dx, dy, scale, delta, borderType)) + { + CV_IMPL_ADD(CV_IMPL_IPP); + return true; + } + } + else if (0 < ksize) + { + if (IPPDerivSobel(_src, _dst, ddepth, dx, dy, ksize, scale, delta, borderType)) + { + CV_IMPL_ADD(CV_IMPL_IPP); + return true; + } + } + return false; +} +static bool ipp_scharr(InputArray _src, OutputArray _dst, int ddepth, int dx, int dy, double scale, double delta, int borderType) +{ +#if IPP_VERSION_MAJOR >= 7 + if (IPPDerivScharr(_src, _dst, ddepth, dx, dy, scale, delta, borderType)) + { + CV_IMPL_ADD(CV_IMPL_IPP); + return true; + } +#endif + return false; +} +#endif } #endif @@ -572,27 +604,10 @@ void cv::Sobel( InputArray _src, OutputArray _dst, int ddepth, int dx, int dy, } #endif -#ifdef HAVE_IPP - CV_IPP_CHECK() - { - if (ksize < 0) - { - if (IPPDerivScharr(_src, _dst, ddepth, dx, dy, scale, delta, borderType)) - { - CV_IMPL_ADD(CV_IMPL_IPP); - return; - } - } - else if (0 < ksize) - { - if (IPPDerivSobel(_src, _dst, ddepth, dx, dy, ksize, scale, delta, borderType)) - { - CV_IMPL_ADD(CV_IMPL_IPP); - return; - } - } - } -#endif + + CV_IPP_RUN(true, ipp_sobel(_src, _dst, ddepth, dx, dy, ksize, scale, delta, borderType)); + + int ktype = std::max(CV_32F, std::max(ddepth, sdepth)); Mat kx, ky; @@ -628,16 +643,10 @@ void cv::Scharr( InputArray _src, OutputArray _dst, int ddepth, int dx, int dy, } #endif -#if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7) - CV_IPP_CHECK() - { - if (IPPDerivScharr(_src, _dst, ddepth, dx, dy, scale, delta, borderType)) - { - CV_IMPL_ADD(CV_IMPL_IPP); - return; - } - } -#endif + + CV_IPP_RUN(true, ipp_scharr(_src, _dst, ddepth, dx, dy, scale, delta, borderType)); + + int ktype = std::max(CV_32F, std::max(ddepth, sdepth)); Mat kx, ky; @@ -799,33 +808,30 @@ static bool ocl_Laplacian5(InputArray _src, OutputArray _dst, #endif -void cv::Laplacian( InputArray _src, OutputArray _dst, int ddepth, int ksize, - double scale, double delta, int borderType ) +#if defined(HAVE_IPP) +namespace cv +{ +static bool ipp_Laplacian(InputArray _src, OutputArray _dst, int ddepth, int ksize, + double scale, double delta, int borderType) { int stype = _src.type(), sdepth = CV_MAT_DEPTH(stype), cn = CV_MAT_CN(stype); if (ddepth < 0) ddepth = sdepth; _dst.create( _src.size(), CV_MAKETYPE(ddepth, cn) ); -#ifdef HAVE_IPP - CV_IPP_CHECK() - { - if ((ksize == 3 || ksize == 5) && ((borderType & BORDER_ISOLATED) != 0 || !_src.isSubmatrix()) && - ((stype == CV_8UC1 && ddepth == CV_16S) || (ddepth == CV_32F && stype == CV_32FC1)) && !ocl::useOpenCL()) - { - int iscale = saturate_cast(scale), idelta = saturate_cast(delta); - bool floatScale = std::fabs(scale - iscale) > DBL_EPSILON, needScale = iscale != 1; - bool floatDelta = std::fabs(delta - idelta) > DBL_EPSILON, needDelta = delta != 0; - int borderTypeNI = borderType & ~BORDER_ISOLATED; - Mat src = _src.getMat(), dst = _dst.getMat(); + int iscale = saturate_cast(scale), 
idelta = saturate_cast(delta); + bool floatScale = std::fabs(scale - iscale) > DBL_EPSILON, needScale = iscale != 1; + bool floatDelta = std::fabs(delta - idelta) > DBL_EPSILON, needDelta = delta != 0; + int borderTypeNI = borderType & ~BORDER_ISOLATED; + Mat src = _src.getMat(), dst = _dst.getMat(); - if (src.data != dst.data) - { - Ipp32s bufsize; - IppStatus status = (IppStatus)-1; - IppiSize roisize = { src.cols, src.rows }; - IppiMaskSize masksize = ksize == 3 ? ippMskSize3x3 : ippMskSize5x5; - IppiBorderType borderTypeIpp = ippiGetBorderType(borderTypeNI); + if (src.data != dst.data) + { + Ipp32s bufsize; + IppStatus status = (IppStatus)-1; + IppiSize roisize = { src.cols, src.rows }; + IppiMaskSize masksize = ksize == 3 ? ippMskSize3x3 : ippMskSize5x5; + IppiBorderType borderTypeIpp = ippiGetBorderType(borderTypeNI); #define IPP_FILTER_LAPLACIAN(ippsrctype, ippdsttype, ippfavor) \ do \ @@ -839,39 +845,51 @@ void cv::Laplacian( InputArray _src, OutputArray _dst, int ddepth, int ksize, } \ } while ((void)0, 0) - CV_SUPPRESS_DEPRECATED_START - if (sdepth == CV_8U && ddepth == CV_16S && !floatScale && !floatDelta) - { - IPP_FILTER_LAPLACIAN(Ipp8u, Ipp16s, 8u16s); + CV_SUPPRESS_DEPRECATED_START + if (sdepth == CV_8U && ddepth == CV_16S && !floatScale && !floatDelta) + { + IPP_FILTER_LAPLACIAN(Ipp8u, Ipp16s, 8u16s); - if (needScale && status >= 0) - status = ippiMulC_16s_C1IRSfs((Ipp16s)iscale, dst.ptr(), (int)dst.step, roisize, 0); - if (needDelta && status >= 0) - status = ippiAddC_16s_C1IRSfs((Ipp16s)idelta, dst.ptr(), (int)dst.step, roisize, 0); - } - else if (sdepth == CV_32F && ddepth == CV_32F) - { - IPP_FILTER_LAPLACIAN(Ipp32f, Ipp32f, 32f); - - if (needScale && status >= 0) - status = ippiMulC_32f_C1IR((Ipp32f)scale, dst.ptr(), (int)dst.step, roisize); - if (needDelta && status >= 0) - status = ippiAddC_32f_C1IR((Ipp32f)delta, dst.ptr(), (int)dst.step, roisize); - } - CV_SUPPRESS_DEPRECATED_END - - if (status >= 0) - { - CV_IMPL_ADD(CV_IMPL_IPP); - return; - } - setIppErrorStatus(); - } + if (needScale && status >= 0) + status = ippiMulC_16s_C1IRSfs((Ipp16s)iscale, dst.ptr(), (int)dst.step, roisize, 0); + if (needDelta && status >= 0) + status = ippiAddC_16s_C1IRSfs((Ipp16s)idelta, dst.ptr(), (int)dst.step, roisize, 0); } -#undef IPP_FILTER_LAPLACIAN + else if (sdepth == CV_32F && ddepth == CV_32F) + { + IPP_FILTER_LAPLACIAN(Ipp32f, Ipp32f, 32f); + + if (needScale && status >= 0) + status = ippiMulC_32f_C1IR((Ipp32f)scale, dst.ptr(), (int)dst.step, roisize); + if (needDelta && status >= 0) + status = ippiAddC_32f_C1IR((Ipp32f)delta, dst.ptr(), (int)dst.step, roisize); + } + CV_SUPPRESS_DEPRECATED_END + + if (status >= 0) + return true; } + +#undef IPP_FILTER_LAPLACIAN + return false; +} +} #endif + +void cv::Laplacian( InputArray _src, OutputArray _dst, int ddepth, int ksize, + double scale, double delta, int borderType ) +{ + int stype = _src.type(), sdepth = CV_MAT_DEPTH(stype), cn = CV_MAT_CN(stype); + if (ddepth < 0) + ddepth = sdepth; + _dst.create( _src.size(), CV_MAKETYPE(ddepth, cn) ); + + CV_IPP_RUN((ksize == 3 || ksize == 5) && ((borderType & BORDER_ISOLATED) != 0 || !_src.isSubmatrix()) && + ((stype == CV_8UC1 && ddepth == CV_16S) || (ddepth == CV_32F && stype == CV_32FC1)) && (!cv::ocl::useOpenCL()), + ipp_Laplacian(_src, _dst, ddepth, ksize, scale, delta, borderType)); + + #ifdef HAVE_TEGRA_OPTIMIZATION if (tegra::useTegra() && scale == 1.0 && delta == 0) { diff --git a/modules/imgproc/src/distransform.cpp b/modules/imgproc/src/distransform.cpp index 
e6aac214c9..a6491086eb 100644 --- a/modules/imgproc/src/distransform.cpp +++ b/modules/imgproc/src/distransform.cpp @@ -438,7 +438,7 @@ static void getDistanceTransformMask( int maskType, float *metrics ) metrics[2] = 2.1969f; break; default: - CV_Error(CV_StsBadArg, "Uknown metric type"); + CV_Error(CV_StsBadArg, "Unknown metric type"); } } @@ -662,7 +662,7 @@ distanceATS_L1_8u( const Mat& src, Mat& dst ) // do right edge a = lut[dbase[width-1+dststep]]; - dbase[width-1] = (uchar)(MIN(a, dbase[width-1])); + a = dbase[width-1] = (uchar)(MIN(a, dbase[width-1])); for( x = width - 2; x >= 0; x-- ) { @@ -730,7 +730,7 @@ void cv::distanceTransform( InputArray _src, OutputArray _dst, OutputArray _labe float _mask[5] = {0}; if( maskSize != CV_DIST_MASK_3 && maskSize != CV_DIST_MASK_5 && maskSize != CV_DIST_MASK_PRECISE ) - CV_Error( CV_StsBadSize, "Mask size should be 3 or 5 or 0 (presize)" ); + CV_Error( CV_StsBadSize, "Mask size should be 3 or 5 or 0 (precise)" ); if( distType == CV_DIST_C || distType == CV_DIST_L1 ) maskSize = !need_labels ? CV_DIST_MASK_3 : CV_DIST_MASK_5; diff --git a/modules/imgproc/src/filter.cpp b/modules/imgproc/src/filter.cpp index f0b7ee79e5..10c8240a90 100644 --- a/modules/imgproc/src/filter.cpp +++ b/modules/imgproc/src/filter.cpp @@ -4555,6 +4555,96 @@ cv::Ptr cv::createLinearFilter( int _srcType, int _dstType, _rowBorderType, _columnBorderType, _borderValue ); } +#ifdef HAVE_IPP +namespace cv +{ +static bool ipp_filter2D( InputArray _src, OutputArray _dst, int ddepth, + InputArray _kernel, Point anchor0, + double delta, int borderType ) +{ +#if !HAVE_ICV + Mat src = _src.getMat(), kernel = _kernel.getMat(); + + if( ddepth < 0 ) + ddepth = src.depth(); + + _dst.create( src.size(), CV_MAKETYPE(ddepth, src.channels()) ); + Mat dst = _dst.getMat(); + Point anchor = normalizeAnchor(anchor0, kernel.size()); + + typedef IppStatus (CV_STDCALL * ippiFilterBorder)(const void * pSrc, int srcStep, void * pDst, int dstStep, IppiSize dstRoiSize, + IppiBorderType border, const void * borderValue, + const IppiFilterBorderSpec* pSpec, Ipp8u* pBuffer); + + int stype = src.type(), sdepth = CV_MAT_DEPTH(stype), cn = CV_MAT_CN(stype), + ktype = kernel.type(), kdepth = CV_MAT_DEPTH(ktype); + bool isolated = (borderType & BORDER_ISOLATED) != 0; + Point ippAnchor(kernel.cols >> 1, kernel.rows >> 1); + int borderTypeNI = borderType & ~BORDER_ISOLATED; + IppiBorderType ippBorderType = ippiGetBorderType(borderTypeNI); + + if (borderTypeNI == BORDER_CONSTANT || borderTypeNI == BORDER_REPLICATE) + { + ippiFilterBorder ippFunc = + stype == CV_8UC1 ? (ippiFilterBorder)ippiFilterBorder_8u_C1R : + stype == CV_8UC3 ? (ippiFilterBorder)ippiFilterBorder_8u_C3R : + stype == CV_8UC4 ? (ippiFilterBorder)ippiFilterBorder_8u_C4R : + stype == CV_16UC1 ? (ippiFilterBorder)ippiFilterBorder_16u_C1R : + stype == CV_16UC3 ? (ippiFilterBorder)ippiFilterBorder_16u_C3R : + stype == CV_16UC4 ? (ippiFilterBorder)ippiFilterBorder_16u_C4R : + stype == CV_16SC1 ? (ippiFilterBorder)ippiFilterBorder_16s_C1R : + stype == CV_16SC3 ? (ippiFilterBorder)ippiFilterBorder_16s_C3R : + stype == CV_16SC4 ? (ippiFilterBorder)ippiFilterBorder_16s_C4R : + stype == CV_32FC1 ? (ippiFilterBorder)ippiFilterBorder_32f_C1R : + stype == CV_32FC3 ? (ippiFilterBorder)ippiFilterBorder_32f_C3R : + stype == CV_32FC4 ? 
(ippiFilterBorder)ippiFilterBorder_32f_C4R : 0; + + if (sdepth == ddepth && (ktype == CV_16SC1 || ktype == CV_32FC1) && + ippFunc && (int)ippBorderType >= 0 && (!src.isSubmatrix() || isolated) && + std::fabs(delta - 0) < DBL_EPSILON && ippAnchor == anchor && dst.data != src.data) + { + IppiSize kernelSize = { kernel.cols, kernel.rows }, dstRoiSize = { dst.cols, dst.rows }; + IppDataType dataType = ippiGetDataType(ddepth), kernelType = ippiGetDataType(kdepth); + Ipp32s specSize = 0, bufsize = 0; + IppStatus status = (IppStatus)-1; + + if ((status = ippiFilterBorderGetSize(kernelSize, dstRoiSize, dataType, kernelType, cn, &specSize, &bufsize)) >= 0) + { + IppiFilterBorderSpec * spec = (IppiFilterBorderSpec *)ippMalloc(specSize); + Ipp8u * buffer = ippsMalloc_8u(bufsize); + Ipp32f borderValue[4] = { 0, 0, 0, 0 }; + + Mat reversedKernel; + flip(kernel, reversedKernel, -1); + + if ((kdepth == CV_32F && (status = ippiFilterBorderInit_32f((const Ipp32f *)reversedKernel.data, kernelSize, + dataType, cn, ippRndFinancial, spec)) >= 0 ) || + (kdepth == CV_16S && (status = ippiFilterBorderInit_16s((const Ipp16s *)reversedKernel.data, + kernelSize, 0, dataType, cn, ippRndFinancial, spec)) >= 0)) + { + status = ippFunc(src.data, (int)src.step, dst.data, (int)dst.step, dstRoiSize, + ippBorderType, borderValue, spec, buffer); + } + + ippsFree(buffer); + ippsFree(spec); + } + + if (status >= 0) + { + CV_IMPL_ADD(CV_IMPL_IPP); + return true; + } + } + } +#else + CV_UNUSED(_src); CV_UNUSED(_dst); CV_UNUSED(ddepth); CV_UNUSED(_kernel), CV_UNUSED(anchor0), CV_UNUSED(delta), CV_UNUSED(borderType); +#endif + return false; +} +} +#endif + void cv::filter2D( InputArray _src, OutputArray _dst, int ddepth, InputArray _kernel, Point anchor0, @@ -4579,77 +4669,8 @@ void cv::filter2D( InputArray _src, OutputArray _dst, int ddepth, Mat dst = _dst.getMat(); Point anchor = normalizeAnchor(anchor0, kernel.size()); -#if IPP_VERSION_X100 > 0 && !defined HAVE_IPP_ICV_ONLY - CV_IPP_CHECK() - { - typedef IppStatus (CV_STDCALL * ippiFilterBorder)(const void * pSrc, int srcStep, void * pDst, int dstStep, IppiSize dstRoiSize, - IppiBorderType border, const void * borderValue, - const IppiFilterBorderSpec* pSpec, Ipp8u* pBuffer); + CV_IPP_RUN(true, ipp_filter2D(_src, _dst, ddepth, _kernel, anchor0, delta, borderType)); - int stype = src.type(), sdepth = CV_MAT_DEPTH(stype), cn = CV_MAT_CN(stype), - ktype = kernel.type(), kdepth = CV_MAT_DEPTH(ktype); - bool isolated = (borderType & BORDER_ISOLATED) != 0; - Point ippAnchor(kernel.cols >> 1, kernel.rows >> 1); - int borderTypeNI = borderType & ~BORDER_ISOLATED; - IppiBorderType ippBorderType = ippiGetBorderType(borderTypeNI); - - if (borderTypeNI == BORDER_CONSTANT || borderTypeNI == BORDER_REPLICATE) - { - ippiFilterBorder ippFunc = - stype == CV_8UC1 ? (ippiFilterBorder)ippiFilterBorder_8u_C1R : - stype == CV_8UC3 ? (ippiFilterBorder)ippiFilterBorder_8u_C3R : - stype == CV_8UC4 ? (ippiFilterBorder)ippiFilterBorder_8u_C4R : - stype == CV_16UC1 ? (ippiFilterBorder)ippiFilterBorder_16u_C1R : - stype == CV_16UC3 ? (ippiFilterBorder)ippiFilterBorder_16u_C3R : - stype == CV_16UC4 ? (ippiFilterBorder)ippiFilterBorder_16u_C4R : - stype == CV_16SC1 ? (ippiFilterBorder)ippiFilterBorder_16s_C1R : - stype == CV_16SC3 ? (ippiFilterBorder)ippiFilterBorder_16s_C3R : - stype == CV_16SC4 ? (ippiFilterBorder)ippiFilterBorder_16s_C4R : - stype == CV_32FC1 ? (ippiFilterBorder)ippiFilterBorder_32f_C1R : - stype == CV_32FC3 ? (ippiFilterBorder)ippiFilterBorder_32f_C3R : - stype == CV_32FC4 ? 
(ippiFilterBorder)ippiFilterBorder_32f_C4R : 0; - - if (sdepth == ddepth && (ktype == CV_16SC1 || ktype == CV_32FC1) && - ippFunc && (int)ippBorderType >= 0 && (!src.isSubmatrix() || isolated) && - std::fabs(delta - 0) < DBL_EPSILON && ippAnchor == anchor && dst.data != src.data) - { - IppiSize kernelSize = { kernel.cols, kernel.rows }, dstRoiSize = { dst.cols, dst.rows }; - IppDataType dataType = ippiGetDataType(ddepth), kernelType = ippiGetDataType(kdepth); - Ipp32s specSize = 0, bufsize = 0; - IppStatus status = (IppStatus)-1; - - if ((status = ippiFilterBorderGetSize(kernelSize, dstRoiSize, dataType, kernelType, cn, &specSize, &bufsize)) >= 0) - { - IppiFilterBorderSpec * spec = (IppiFilterBorderSpec *)ippMalloc(specSize); - Ipp8u * buffer = ippsMalloc_8u(bufsize); - Ipp32f borderValue[4] = { 0, 0, 0, 0 }; - - Mat reversedKernel; - flip(kernel, reversedKernel, -1); - - if ((kdepth == CV_32F && (status = ippiFilterBorderInit_32f((const Ipp32f *)reversedKernel.data, kernelSize, - dataType, cn, ippRndFinancial, spec)) >= 0 ) || - (kdepth == CV_16S && (status = ippiFilterBorderInit_16s((const Ipp16s *)reversedKernel.data, - kernelSize, 0, dataType, cn, ippRndFinancial, spec)) >= 0)) - { - status = ippFunc(src.data, (int)src.step, dst.data, (int)dst.step, dstRoiSize, - ippBorderType, borderValue, spec, buffer); - } - - ippsFree(buffer); - ippsFree(spec); - } - - if (status >= 0) - { - CV_IMPL_ADD(CV_IMPL_IPP); - return; - } - setIppErrorStatus(); - } - } - } -#endif #ifdef HAVE_TEGRA_OPTIMIZATION if( tegra::useTegra() && tegra::filter2D(src, dst, kernel, anchor, delta, borderType) ) diff --git a/modules/imgproc/src/histogram.cpp b/modules/imgproc/src/histogram.cpp index ec8de4d815..d9c5edfd4b 100644 --- a/modules/imgproc/src/histogram.cpp +++ b/modules/imgproc/src/histogram.cpp @@ -1220,7 +1220,10 @@ private: } -void cv::calcHist( const Mat* images, int nimages, const int* channels, +#if defined(HAVE_IPP) +namespace cv +{ +static bool ipp_calchist(const Mat* images, int nimages, const int* channels, InputArray _mask, OutputArray _hist, int dims, const int* histSize, const float** ranges, bool uniform, bool accumulate ) { @@ -1228,13 +1231,10 @@ void cv::calcHist( const Mat* images, int nimages, const int* channels, CV_Assert(dims > 0 && histSize); - const uchar* const histdata = _hist.getMat().ptr(); _hist.create(dims, histSize, CV_32F); Mat hist = _hist.getMat(), ihist = hist; ihist.flags = (ihist.flags & ~CV_MAT_TYPE_MASK)|CV_32S; -#ifdef HAVE_IPP - CV_IPP_CHECK() { if (nimages == 1 && images[0].type() == CV_8UC1 && dims == 1 && channels && channels[0] == 0 && mask.empty() && images[0].dims <= 2 && @@ -1256,14 +1256,37 @@ void cv::calcHist( const Mat* images, int nimages, const int* channels, if (ok) { ihist.convertTo(hist, CV_32F); - CV_IMPL_ADD(CV_IMPL_IPP|CV_IMPL_MT); - return; + CV_IMPL_ADD(CV_IMPL_IPP); + return true; } - setIppErrorStatus(); } } + return false; +} +} #endif +void cv::calcHist( const Mat* images, int nimages, const int* channels, + InputArray _mask, OutputArray _hist, int dims, const int* histSize, + const float** ranges, bool uniform, bool accumulate ) +{ + + CV_IPP_RUN(nimages == 1 && images[0].type() == CV_8UC1 && dims == 1 && channels && + channels[0] == 0 && _mask.getMat().empty() && images[0].dims <= 2 && + !accumulate && uniform, + ipp_calchist(images, nimages, channels, + _mask, _hist, dims, histSize, + ranges, uniform, accumulate)); + + Mat mask = _mask.getMat(); + + CV_Assert(dims > 0 && histSize); + + const uchar* const histdata = _hist.getMat().ptr(); + 
_hist.create(dims, histSize, CV_32F); + Mat hist = _hist.getMat(), ihist = hist; + ihist.flags = (ihist.flags & ~CV_MAT_TYPE_MASK)|CV_32S; + if( !accumulate || histdata != hist.data ) hist = Scalar(0.); else diff --git a/modules/imgproc/src/imgwarp.cpp b/modules/imgproc/src/imgwarp.cpp index 760f3fb0a2..433e38f704 100644 --- a/modules/imgproc/src/imgwarp.cpp +++ b/modules/imgproc/src/imgwarp.cpp @@ -3092,7 +3092,32 @@ static bool ocl_resize( InputArray _src, OutputArray _dst, Size dsize, #endif +#if IPP_VERSION_X100 >= 701 +static bool ipp_resize_mt( Mat src, Mat dst, + double inv_scale_x, double inv_scale_y, int interpolation) +{ + int mode = -1; + if (interpolation == INTER_LINEAR && src.rows >= 2 && src.cols >= 2) + mode = ippLinear; + else if (interpolation == INTER_CUBIC && src.rows >= 4 && src.cols >= 4) + mode = ippCubic; + else + return false; + + bool ok = true; + Range range(0, src.rows); + IPPresizeInvoker invoker(src, dst, inv_scale_x, inv_scale_y, mode, &ok); + parallel_for_(range, invoker, dst.total()/(double)(1<<16)); + if( ok ) + return true; + + return false; } +#endif + +} + + ////////////////////////////////////////////////////////////////////////////////////////// @@ -3219,6 +3244,17 @@ void cv::resize( InputArray _src, OutputArray _dst, Size dsize, inv_scale_y = (double)dsize.height/ssize.height; } + + int type = _src.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type); + double scale_x = 1./inv_scale_x, scale_y = 1./inv_scale_y; + + int iscale_x = saturate_cast(scale_x); + int iscale_y = saturate_cast(scale_y); + + bool is_area_fast = std::abs(scale_x - iscale_x) < DBL_EPSILON && + std::abs(scale_y - iscale_y) < DBL_EPSILON; + + CV_OCL_RUN(_src.dims() <= 2 && _dst.isUMat() && _src.cols() > 10 && _src.rows() > 10, ocl_resize(_src, _dst, dsize, inv_scale_x, inv_scale_y, interpolation)) @@ -3231,53 +3267,23 @@ void cv::resize( InputArray _src, OutputArray _dst, Size dsize, return; #endif - int type = src.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type); - double scale_x = 1./inv_scale_x, scale_y = 1./inv_scale_y; - int k, sx, sy, dx, dy; +#ifdef HAVE_IPP + int mode = -1; + if (interpolation == INTER_LINEAR && _src.rows() >= 2 && _src.cols() >= 2) + mode = INTER_LINEAR; + else if (interpolation == INTER_CUBIC && _src.rows() >= 4 && _src.cols() >= 4) + mode = INTER_CUBIC; - int iscale_x = saturate_cast(scale_x); - int iscale_y = saturate_cast(scale_y); - - bool is_area_fast = std::abs(scale_x - iscale_x) < DBL_EPSILON && - std::abs(scale_y - iscale_y) < DBL_EPSILON; - -#if IPP_VERSION_X100 >= 701 - CV_IPP_CHECK() - { -#define IPP_RESIZE_EPS 1e-10 - - double ex = fabs((double)dsize.width / src.cols - inv_scale_x) / inv_scale_x; - double ey = fabs((double)dsize.height / src.rows - inv_scale_y) / inv_scale_y; - - if ( ((ex < IPP_RESIZE_EPS && ey < IPP_RESIZE_EPS && depth != CV_64F) || (ex == 0 && ey == 0 && depth == CV_64F)) && - (interpolation == INTER_LINEAR || interpolation == INTER_CUBIC) && - !(interpolation == INTER_LINEAR && is_area_fast && iscale_x == 2 && iscale_y == 2 && depth == CV_8U)) - { - int mode = -1; - if (interpolation == INTER_LINEAR && src.rows >= 2 && src.cols >= 2) - mode = ippLinear; - else if (interpolation == INTER_CUBIC && src.rows >= 4 && src.cols >= 4) - mode = ippCubic; - - if( mode >= 0 && (cn == 1 || cn == 3 || cn == 4) && - (depth == CV_16U || depth == CV_16S || depth == CV_32F || - (depth == CV_64F && mode == ippLinear))) - { - bool ok = true; - Range range(0, src.rows); - IPPresizeInvoker invoker(src, dst, inv_scale_x, 
inv_scale_y, mode, &ok); - parallel_for_(range, invoker, dst.total()/(double)(1<<16)); - if( ok ) - { - CV_IMPL_ADD(CV_IMPL_IPP|CV_IMPL_MT); - return; - } - setIppErrorStatus(); - } - } -#undef IPP_RESIZE_EPS - } + const double IPP_RESIZE_EPS = 1e-10; + double ex = fabs((double)dsize.width / _src.cols() - inv_scale_x) / inv_scale_x; + double ey = fabs((double)dsize.height / _src.rows() - inv_scale_y) / inv_scale_y; #endif + CV_IPP_RUN(IPP_VERSION_X100 >= 701 && ((ex < IPP_RESIZE_EPS && ey < IPP_RESIZE_EPS && depth != CV_64F) || (ex == 0 && ey == 0 && depth == CV_64F)) && + (interpolation == INTER_LINEAR || interpolation == INTER_CUBIC) && + !(interpolation == INTER_LINEAR && is_area_fast && iscale_x == 2 && iscale_y == 2 && depth == CV_8U) && + mode >= 0 && (cn == 1 || cn == 3 || cn == 4) && (depth == CV_16U || depth == CV_16S || depth == CV_32F || + (depth == CV_64F && mode == INTER_LINEAR)), ipp_resize_mt(src, dst, inv_scale_x, inv_scale_y, interpolation)) + if( interpolation == INTER_NEAREST ) { @@ -3285,6 +3291,9 @@ void cv::resize( InputArray _src, OutputArray _dst, Size dsize, return; } + int k, sx, sy, dx, dy; + + { // in case of scale_x && scale_y is equal to 2 // INTER_AREA (fast) also is equal to INTER_LINEAR diff --git a/modules/imgproc/src/morph.cpp b/modules/imgproc/src/morph.cpp index f2d971bea3..822df8e47d 100644 --- a/modules/imgproc/src/morph.cpp +++ b/modules/imgproc/src/morph.cpp @@ -1136,10 +1136,11 @@ private: Scalar borderValue; }; -#if IPP_VERSION_X100 >= 801 -static bool IPPMorphReplicate(int op, const Mat &src, Mat &dst, const Mat &kernel, +#ifdef HAVE_IPP +static bool ipp_MorphReplicate(int op, const Mat &src, Mat &dst, const Mat &kernel, const Size& ksize, const Point &anchor, bool rectKernel) { +#if IPP_VERSION_X100 >= 801 int type = src.type(); const Mat* _src = &src; Mat temp; @@ -1257,10 +1258,13 @@ static bool IPPMorphReplicate(int op, const Mat &src, Mat &dst, const Mat &kerne } #undef IPP_MORPH_CASE } +#else + CV_UNUSED(op); CV_UNUSED(src); CV_UNUSED(dst); CV_UNUSED(kernel); CV_UNUSED(ksize); CV_UNUSED(anchor); CV_UNUSED(rectKernel); +#endif return false; } -static bool IPPMorphOp(int op, InputArray _src, OutputArray _dst, +static bool ipp_MorphOp(int op, InputArray _src, OutputArray _dst, const Mat& _kernel, Point anchor, int iterations, int borderType, const Scalar &borderValue) { @@ -1331,7 +1335,7 @@ static bool IPPMorphOp(int op, InputArray _src, OutputArray _dst, if( iterations > 1 ) return false; - return IPPMorphReplicate( op, src, dst, kernel, ksize, anchor, rectKernel ); + return ipp_MorphReplicate( op, src, dst, kernel, ksize, anchor, rectKernel ); } #endif @@ -1711,16 +1715,7 @@ static void morphOp( int op, InputArray _src, OutputArray _dst, iterations = 1; } -#if IPP_VERSION_X100 >= 801 - CV_IPP_CHECK() - { - if( IPPMorphOp(op, _src, _dst, kernel, anchor, iterations, borderType, borderValue) ) - { - CV_IMPL_ADD(CV_IMPL_IPP); - return; - } - } -#endif + CV_IPP_RUN(IPP_VERSION_X100 >= 801, ipp_MorphOp(op, _src, _dst, kernel, anchor, iterations, borderType, borderValue)) Mat src = _src.getMat(); _dst.create( src.size(), src.type() ); diff --git a/modules/imgproc/src/precomp.hpp b/modules/imgproc/src/precomp.hpp index e71a0356c0..7a0cece2f2 100644 --- a/modules/imgproc/src/precomp.hpp +++ b/modules/imgproc/src/precomp.hpp @@ -49,6 +49,7 @@ #include "opencv2/imgproc/imgproc_c.h" #include "opencv2/core/private.hpp" #include "opencv2/core/ocl.hpp" +#include "opencv2/hal.hpp" #include #include diff --git a/modules/imgproc/src/pyramids.cpp 
b/modules/imgproc/src/pyramids.cpp index 4018e08f70..1d8c943333 100644 --- a/modules/imgproc/src/pyramids.cpp +++ b/modules/imgproc/src/pyramids.cpp @@ -1166,103 +1166,22 @@ static bool ocl_pyrUp( InputArray _src, OutputArray _dst, const Size& _dsz, int } -void cv::pyrDown( InputArray _src, OutputArray _dst, const Size& _dsz, int borderType ) +#if defined(HAVE_IPP) +namespace cv { - CV_Assert(borderType != BORDER_CONSTANT); - - CV_OCL_RUN(_src.dims() <= 2 && _dst.isUMat(), - ocl_pyrDown(_src, _dst, _dsz, borderType)) +static bool ipp_pyrdown( InputArray _src, OutputArray _dst, const Size& _dsz, int borderType ) +{ +#if IPP_VERSION_X100 >= 801 && 0 + Size dsz = _dsz.area() == 0 ? Size((_src.cols() + 1)/2, (_src.rows() + 1)/2) : _dsz; + bool isolated = (borderType & BORDER_ISOLATED) != 0; + int borderTypeNI = borderType & ~BORDER_ISOLATED; Mat src = _src.getMat(); - Size dsz = _dsz.area() == 0 ? Size((src.cols + 1)/2, (src.rows + 1)/2) : _dsz; _dst.create( dsz, src.type() ); Mat dst = _dst.getMat(); int depth = src.depth(); -#ifdef HAVE_TEGRA_OPTIMIZATION - if(borderType == BORDER_DEFAULT && tegra::useTegra() && tegra::pyrDown(src, dst)) - return; -#endif -#if IPP_VERSION_X100 >= 801 && 0 - CV_IPP_CHECK() - { - bool isolated = (borderType & BORDER_ISOLATED) != 0; - int borderTypeNI = borderType & ~BORDER_ISOLATED; - if (borderTypeNI == BORDER_DEFAULT && (!src.isSubmatrix() || isolated) && dsz == Size((src.cols + 1)/2, (src.rows + 1)/2)) - { - typedef IppStatus (CV_STDCALL * ippiPyrDown)(const void* pSrc, int srcStep, void* pDst, int dstStep, IppiSize srcRoi, Ipp8u* buffer); - int type = src.type(); - CV_SUPPRESS_DEPRECATED_START - ippiPyrDown pyrDownFunc = type == CV_8UC1 ? (ippiPyrDown) ippiPyrDown_Gauss5x5_8u_C1R : - type == CV_8UC3 ? (ippiPyrDown) ippiPyrDown_Gauss5x5_8u_C3R : - type == CV_32FC1 ? (ippiPyrDown) ippiPyrDown_Gauss5x5_32f_C1R : - type == CV_32FC3 ? (ippiPyrDown) ippiPyrDown_Gauss5x5_32f_C3R : 0; - CV_SUPPRESS_DEPRECATED_END - - if (pyrDownFunc) - { - int bufferSize; - IppiSize srcRoi = { src.cols, src.rows }; - IppDataType dataType = depth == CV_8U ? ipp8u : ipp32f; - CV_SUPPRESS_DEPRECATED_START - IppStatus ok = ippiPyrDownGetBufSize_Gauss5x5(srcRoi.width, dataType, src.channels(), &bufferSize); - CV_SUPPRESS_DEPRECATED_END - if (ok >= 0) - { - Ipp8u* buffer = ippsMalloc_8u(bufferSize); - ok = pyrDownFunc(src.data, (int) src.step, dst.data, (int) dst.step, srcRoi, buffer); - ippsFree(buffer); - - if (ok >= 0) - { - CV_IMPL_ADD(CV_IMPL_IPP); - return; - } - setIppErrorStatus(); - } - } - } - } -#endif - - PyrFunc func = 0; - if( depth == CV_8U ) - func = pyrDown_, PyrDownVec_32s8u>; - else if( depth == CV_16S ) - func = pyrDown_, PyrDownVec_32s16s >; - else if( depth == CV_16U ) - func = pyrDown_, PyrDownVec_32s16u >; - else if( depth == CV_32F ) - func = pyrDown_, PyrDownVec_32f>; - else if( depth == CV_64F ) - func = pyrDown_, PyrDownNoVec >; - else - CV_Error( CV_StsUnsupportedFormat, "" ); - - func( src, dst, borderType ); -} - -void cv::pyrUp( InputArray _src, OutputArray _dst, const Size& _dsz, int borderType ) -{ - CV_Assert(borderType == BORDER_DEFAULT); - - CV_OCL_RUN(_src.dims() <= 2 && _dst.isUMat(), - ocl_pyrUp(_src, _dst, _dsz, borderType)) - - Mat src = _src.getMat(); - Size dsz = _dsz.area() == 0 ? 
Size(src.cols*2, src.rows*2) : _dsz; - _dst.create( dsz, src.type() ); - Mat dst = _dst.getMat(); - int depth = src.depth(); - -#ifdef HAVE_TEGRA_OPTIMIZATION - if(borderType == BORDER_DEFAULT && tegra::useTegra() && tegra::pyrUp(src, dst)) - return; -#endif - -#if IPP_VERSION_X100 >= 801 && 0 - CV_IPP_CHECK() { bool isolated = (borderType & BORDER_ISOLATED) != 0; int borderTypeNI = borderType & ~BORDER_ISOLATED; @@ -1294,14 +1213,149 @@ void cv::pyrUp( InputArray _src, OutputArray _dst, const Size& _dsz, int borderT if (ok >= 0) { CV_IMPL_ADD(CV_IMPL_IPP); - return; + return true; } - setIppErrorStatus(); } } } } +#else + CV_UNUSED(_src); CV_UNUSED(_dst); CV_UNUSED(_dsz); CV_UNUSED(borderType); #endif + return false; +} +} +#endif + +void cv::pyrDown( InputArray _src, OutputArray _dst, const Size& _dsz, int borderType ) +{ + CV_Assert(borderType != BORDER_CONSTANT); + + CV_OCL_RUN(_src.dims() <= 2 && _dst.isUMat(), + ocl_pyrDown(_src, _dst, _dsz, borderType)) + + Mat src = _src.getMat(); + Size dsz = _dsz.area() == 0 ? Size((src.cols + 1)/2, (src.rows + 1)/2) : _dsz; + _dst.create( dsz, src.type() ); + Mat dst = _dst.getMat(); + int depth = src.depth(); + +#ifdef HAVE_TEGRA_OPTIMIZATION + if(borderType == BORDER_DEFAULT && tegra::useTegra() && tegra::pyrDown(src, dst)) + return; +#endif + +#ifdef HAVE_IPP + bool isolated = (borderType & BORDER_ISOLATED) != 0; + int borderTypeNI = borderType & ~BORDER_ISOLATED; +#endif + CV_IPP_RUN(borderTypeNI == BORDER_DEFAULT && (!_src.isSubmatrix() || isolated) && dsz == Size((_src.cols() + 1)/2, (_src.rows() + 1)/2), + ipp_pyrdown( _src, _dst, _dsz, borderType)); + + + PyrFunc func = 0; + if( depth == CV_8U ) + func = pyrDown_, PyrDownVec_32s8u>; + else if( depth == CV_16S ) + func = pyrDown_, PyrDownVec_32s16s >; + else if( depth == CV_16U ) + func = pyrDown_, PyrDownVec_32s16u >; + else if( depth == CV_32F ) + func = pyrDown_, PyrDownVec_32f>; + else if( depth == CV_64F ) + func = pyrDown_, PyrDownNoVec >; + else + CV_Error( CV_StsUnsupportedFormat, "" ); + + func( src, dst, borderType ); +} + + +#if defined(HAVE_IPP) +namespace cv +{ +static bool ipp_pyrup( InputArray _src, OutputArray _dst, const Size& _dsz, int borderType ) +{ +#if IPP_VERSION_X100 >= 801 && 0 + Size sz = _src.dims() <= 2 ? _src.size() : Size(); + Size dsz = _dsz.area() == 0 ? Size(_src.cols()*2, _src.rows()*2) : _dsz; + + Mat src = _src.getMat(); + _dst.create( dsz, src.type() ); + Mat dst = _dst.getMat(); + int depth = src.depth(); + + { + bool isolated = (borderType & BORDER_ISOLATED) != 0; + int borderTypeNI = borderType & ~BORDER_ISOLATED; + if (borderTypeNI == BORDER_DEFAULT && (!src.isSubmatrix() || isolated) && dsz == Size(src.cols*2, src.rows*2)) + { + typedef IppStatus (CV_STDCALL * ippiPyrUp)(const void* pSrc, int srcStep, void* pDst, int dstStep, IppiSize srcRoi, Ipp8u* buffer); + int type = src.type(); + CV_SUPPRESS_DEPRECATED_START + ippiPyrUp pyrUpFunc = type == CV_8UC1 ? (ippiPyrUp) ippiPyrUp_Gauss5x5_8u_C1R : + type == CV_8UC3 ? (ippiPyrUp) ippiPyrUp_Gauss5x5_8u_C3R : + type == CV_32FC1 ? (ippiPyrUp) ippiPyrUp_Gauss5x5_32f_C1R : + type == CV_32FC3 ? (ippiPyrUp) ippiPyrUp_Gauss5x5_32f_C3R : 0; + CV_SUPPRESS_DEPRECATED_END + + if (pyrUpFunc) + { + int bufferSize; + IppiSize srcRoi = { src.cols, src.rows }; + IppDataType dataType = depth == CV_8U ? 
ipp8u : ipp32f; + CV_SUPPRESS_DEPRECATED_START + IppStatus ok = ippiPyrUpGetBufSize_Gauss5x5(srcRoi.width, dataType, src.channels(), &bufferSize); + CV_SUPPRESS_DEPRECATED_END + if (ok >= 0) + { + Ipp8u* buffer = ippsMalloc_8u(bufferSize); + ok = pyrUpFunc(src.data, (int) src.step, dst.data, (int) dst.step, srcRoi, buffer); + ippsFree(buffer); + + if (ok >= 0) + { + CV_IMPL_ADD(CV_IMPL_IPP); + return true; + } + } + } + } + } +#else + CV_UNUSED(_src); CV_UNUSED(_dst); CV_UNUSED(_dsz); CV_UNUSED(borderType); +#endif + return false; +} +} +#endif + +void cv::pyrUp( InputArray _src, OutputArray _dst, const Size& _dsz, int borderType ) +{ + CV_Assert(borderType == BORDER_DEFAULT); + + CV_OCL_RUN(_src.dims() <= 2 && _dst.isUMat(), + ocl_pyrUp(_src, _dst, _dsz, borderType)) + + + Mat src = _src.getMat(); + Size dsz = _dsz.area() == 0 ? Size(src.cols*2, src.rows*2) : _dsz; + _dst.create( dsz, src.type() ); + Mat dst = _dst.getMat(); + int depth = src.depth(); + +#ifdef HAVE_TEGRA_OPTIMIZATION + if(borderType == BORDER_DEFAULT && tegra::useTegra() && tegra::pyrUp(src, dst)) + return; +#endif + +#ifdef HAVE_IPP + bool isolated = (borderType & BORDER_ISOLATED) != 0; + int borderTypeNI = borderType & ~BORDER_ISOLATED; +#endif + CV_IPP_RUN(borderTypeNI == BORDER_DEFAULT && (!_src.isSubmatrix() || isolated) && dsz == Size(_src.cols()*2, _src.rows()*2), + ipp_pyrup( _src, _dst, _dsz, borderType)); + PyrFunc func = 0; if( depth == CV_8U ) @@ -1320,28 +1374,19 @@ void cv::pyrUp( InputArray _src, OutputArray _dst, const Size& _dsz, int borderT func( src, dst, borderType ); } -void cv::buildPyramid( InputArray _src, OutputArrayOfArrays _dst, int maxlevel, int borderType ) + +#if 0 //#ifdef HAVE_IPP +namespace cv { - CV_Assert(borderType != BORDER_CONSTANT); - - if (_src.dims() <= 2 && _dst.isUMatVector()) - { - UMat src = _src.getUMat(); - _dst.create( maxlevel + 1, 1, 0 ); - _dst.getUMatRef(0) = src; - for( int i = 1; i <= maxlevel; i++ ) - pyrDown( _dst.getUMatRef(i-1), _dst.getUMatRef(i), Size(), borderType ); - return; - } - +static bool ipp_buildpyramid( InputArray _src, OutputArrayOfArrays _dst, int maxlevel, int borderType ) +{ +#if IPP_VERSION_X100 >= 801 && 0 Mat src = _src.getMat(); _dst.create( maxlevel + 1, 1, 0 ); _dst.getMatRef(0) = src; int i=1; -#if IPP_VERSION_X100 >= 801 && 0 - CV_IPP_CHECK() { bool isolated = (borderType & BORDER_ISOLATED) != 0; int borderTypeNI = borderType & ~BORDER_ISOLATED; @@ -1414,8 +1459,8 @@ void cv::buildPyramid( InputArray _src, OutputArrayOfArrays _dst, int maxlevel, if (ok < 0) { - setIppErrorStatus(); - break; + pyrFreeFunc(gPyr->pState); + return false; } else { @@ -1425,13 +1470,52 @@ void cv::buildPyramid( InputArray _src, OutputArrayOfArrays _dst, int maxlevel, pyrFreeFunc(gPyr->pState); } else - setIppErrorStatus(); - + { + ippiPyramidFree(gPyr); + return false; + } ippiPyramidFree(gPyr); } + return true; } + return false; } +#else + CV_UNUSED(_src); CV_UNUSED(_dst); CV_UNUSED(maxlevel); CV_UNUSED(borderType); #endif + return false; +} +} +#endif + +void cv::buildPyramid( InputArray _src, OutputArrayOfArrays _dst, int maxlevel, int borderType ) +{ + CV_Assert(borderType != BORDER_CONSTANT); + + if (_src.dims() <= 2 && _dst.isUMatVector()) + { + UMat src = _src.getUMat(); + _dst.create( maxlevel + 1, 1, 0 ); + _dst.getUMatRef(0) = src; + for( int i = 1; i <= maxlevel; i++ ) + pyrDown( _dst.getUMatRef(i-1), _dst.getUMatRef(i), Size(), borderType ); + return; + } + + Mat src = _src.getMat(); + _dst.create( maxlevel + 1, 1, 0 ); + _dst.getMatRef(0) = src; + 
+ int i=1; + +#if (IPP_VERSION_X100 >= 801 && 0) + bool isolated = (borderType & BORDER_ISOLATED) != 0; + int borderTypeNI = borderType & ~BORDER_ISOLATED; + CV_IPP_RUN(((IPP_VERSION_X100 >= 801 && 0) && (borderTypeNI == BORDER_DEFAULT && (!_src.isSubmatrix() || isolated))), + ipp_buildpyramid( _src, _dst, maxlevel, borderType)); +#endif + + for( ; i <= maxlevel; i++ ) pyrDown( _dst.getMatRef(i-1), _dst.getMatRef(i), Size(), borderType ); } diff --git a/modules/imgproc/src/smooth.cpp b/modules/imgproc/src/smooth.cpp index dbe8a6315a..012b90418e 100644 --- a/modules/imgproc/src/smooth.cpp +++ b/modules/imgproc/src/smooth.cpp @@ -1303,17 +1303,24 @@ cv::Ptr cv::createBoxFilter( int srcType, int dstType, Size ks srcType, dstType, sumType, borderType ); } - -void cv::boxFilter( InputArray _src, OutputArray _dst, int ddepth, +#if defined(HAVE_IPP) +namespace cv +{ +static bool ipp_boxfilter( InputArray _src, OutputArray _dst, int ddepth, Size ksize, Point anchor, bool normalize, int borderType ) { - CV_OCL_RUN(_dst.isUMat(), ocl_boxFilter(_src, _dst, ddepth, ksize, anchor, borderType, normalize)) - - Mat src = _src.getMat(); - int stype = src.type(), sdepth = CV_MAT_DEPTH(stype), cn = CV_MAT_CN(stype); + int stype = _src.type(), sdepth = CV_MAT_DEPTH(stype), cn = CV_MAT_CN(stype); if( ddepth < 0 ) ddepth = sdepth; + int ippBorderType = borderType & ~BORDER_ISOLATED; + Point ocvAnchor, ippAnchor; + ocvAnchor.x = anchor.x < 0 ? ksize.width / 2 : anchor.x; + ocvAnchor.y = anchor.y < 0 ? ksize.height / 2 : anchor.y; + ippAnchor.x = ksize.width / 2 - (ksize.width % 2 == 0 ? 1 : 0); + ippAnchor.y = ksize.height / 2 - (ksize.height % 2 == 0 ? 1 : 0); + + Mat src = _src.getMat(); _dst.create( src.size(), CV_MAKETYPE(ddepth, cn) ); Mat dst = _dst.getMat(); if( borderType != BORDER_CONSTANT && normalize && (borderType & BORDER_ISOLATED) != 0 ) @@ -1323,21 +1330,8 @@ void cv::boxFilter( InputArray _src, OutputArray _dst, int ddepth, if( src.cols == 1 ) ksize.width = 1; } -#ifdef HAVE_TEGRA_OPTIMIZATION - if ( tegra::useTegra() && tegra::box(src, dst, ksize, anchor, normalize, borderType) ) - return; -#endif -#if defined(HAVE_IPP) - CV_IPP_CHECK() { - int ippBorderType = borderType & ~BORDER_ISOLATED; - Point ocvAnchor, ippAnchor; - ocvAnchor.x = anchor.x < 0 ? ksize.width / 2 : anchor.x; - ocvAnchor.y = anchor.y < 0 ? ksize.height / 2 : anchor.y; - ippAnchor.x = ksize.width / 2 - (ksize.width % 2 == 0 ? 1 : 0); - ippAnchor.y = ksize.height / 2 - (ksize.height % 2 == 0 ? 
1 : 0); - if (normalize && !src.isSubmatrix() && ddepth == sdepth && (/*ippBorderType == BORDER_REPLICATE ||*/ /* returns ippStsStepErr: Step value is not valid */ ippBorderType == BORDER_CONSTANT) && ocvAnchor == ippAnchor && @@ -1361,10 +1355,9 @@ void cv::boxFilter( InputArray _src, OutputArray _dst, int ddepth, if (status >= 0) \ { \ CV_IMPL_ADD(CV_IMPL_IPP); \ - return; \ + return true; \ } \ } \ - setIppErrorStatus(); \ } while ((void)0, 0) if (stype == CV_8UC1) @@ -1399,13 +1392,57 @@ void cv::boxFilter( InputArray _src, OutputArray _dst, int ddepth, } #undef IPP_FILTER_BOX_BORDER } + return false; +} +} #endif + +void cv::boxFilter( InputArray _src, OutputArray _dst, int ddepth, + Size ksize, Point anchor, + bool normalize, int borderType ) +{ + CV_OCL_RUN(_dst.isUMat(), ocl_boxFilter(_src, _dst, ddepth, ksize, anchor, borderType, normalize)) + + Mat src = _src.getMat(); + int stype = src.type(), sdepth = CV_MAT_DEPTH(stype), cn = CV_MAT_CN(stype); + if( ddepth < 0 ) + ddepth = sdepth; + _dst.create( src.size(), CV_MAKETYPE(ddepth, cn) ); + Mat dst = _dst.getMat(); + if( borderType != BORDER_CONSTANT && normalize && (borderType & BORDER_ISOLATED) != 0 ) + { + if( src.rows == 1 ) + ksize.height = 1; + if( src.cols == 1 ) + ksize.width = 1; + } +#ifdef HAVE_TEGRA_OPTIMIZATION + if ( tegra::useTegra() && tegra::box(src, dst, ksize, anchor, normalize, borderType) ) + return; +#endif + +#ifdef HAVE_IPP + int ippBorderType = borderType & ~BORDER_ISOLATED; +#endif + Point ocvAnchor, ippAnchor; + ocvAnchor.x = anchor.x < 0 ? ksize.width / 2 : anchor.x; + ocvAnchor.y = anchor.y < 0 ? ksize.height / 2 : anchor.y; + ippAnchor.x = ksize.width / 2 - (ksize.width % 2 == 0 ? 1 : 0); + ippAnchor.y = ksize.height / 2 - (ksize.height % 2 == 0 ? 1 : 0); + CV_IPP_RUN((normalize && !_src.isSubmatrix() && ddepth == sdepth && + (/*ippBorderType == BORDER_REPLICATE ||*/ /* returns ippStsStepErr: Step value is not valid */ + ippBorderType == BORDER_CONSTANT) && ocvAnchor == ippAnchor && + _dst.cols() != ksize.width && _dst.rows() != ksize.height), + ipp_boxfilter( _src, _dst, ddepth, ksize, anchor, normalize, borderType)); + + Ptr f = createBoxFilter( src.type(), dst.type(), ksize, anchor, normalize, borderType ); f->apply( src, dst ); } + void cv::blur( InputArray src, OutputArray dst, Size ksize, Point anchor, int borderType ) { @@ -1624,6 +1661,103 @@ cv::Ptr cv::createGaussianFilter( int type, Size ksize, return createSeparableLinearFilter( type, type, kx, ky, Point(-1,-1), 0, borderType ); } +#ifdef HAVE_IPP +namespace cv +{ +static bool ipp_GaussianBlur( InputArray _src, OutputArray _dst, Size ksize, + double sigma1, double sigma2, + int borderType ) +{ + int type = _src.type(); + Size size = _src.size(); + + if( borderType != BORDER_CONSTANT && (borderType & BORDER_ISOLATED) != 0 ) + { + if( size.height == 1 ) + ksize.height = 1; + if( size.width == 1 ) + ksize.width = 1; + } + + int depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type); + + if ((depth == CV_8U || depth == CV_16U || depth == CV_16S || depth == CV_32F) && (cn == 1 || cn == 3) && + sigma1 == sigma2 && ksize.width == ksize.height && sigma1 != 0.0 ) + { + IppiBorderType ippBorder = ippiGetBorderType(borderType); + if (ippBorderConst == ippBorder || ippBorderRepl == ippBorder) + { + Mat src = _src.getMat(), dst = _dst.getMat(); + IppiSize roiSize = { src.cols, src.rows }; + IppDataType dataType = ippiGetDataType(depth); + Ipp32s specSize = 0, bufferSize = 0; + + if (ippiFilterGaussianGetBufferSize(roiSize, (Ipp32u)ksize.width, dataType, cn, 
&specSize, &bufferSize) >= 0) + { + IppFilterGaussianSpec * pSpec = (IppFilterGaussianSpec *)ippMalloc(specSize); + Ipp8u * pBuffer = (Ipp8u*)ippMalloc(bufferSize); + + if (ippiFilterGaussianInit(roiSize, (Ipp32u)ksize.width, (Ipp32f)sigma1, ippBorder, dataType, 1, pSpec, pBuffer) >= 0) + { +#define IPP_FILTER_GAUSS_C1(ippfavor) \ + { \ + typedef Ipp##ippfavor ippType; \ + ippType borderValues = 0; \ + status = ippiFilterGaussianBorder_##ippfavor##_C1R(src.ptr(), (int)src.step, \ + dst.ptr(), (int)dst.step, roiSize, borderValues, pSpec, pBuffer); \ + } + +#define IPP_FILTER_GAUSS_CN(ippfavor, ippcn) \ + { \ + typedef Ipp##ippfavor ippType; \ + ippType borderValues[] = { 0, 0, 0 }; \ + status = ippiFilterGaussianBorder_##ippfavor##_C##ippcn##R(src.ptr(), (int)src.step, \ + dst.ptr(), (int)dst.step, roiSize, borderValues, pSpec, pBuffer); \ + } + + IppStatus status = ippStsErr; +#if !HAVE_ICV + if (type == CV_8UC1) + IPP_FILTER_GAUSS_C1(8u) + else if (type == CV_8UC3) + IPP_FILTER_GAUSS_CN(8u, 3) + else if (type == CV_16UC1) + IPP_FILTER_GAUSS_C1(16u) + else if (type == CV_16UC3) + IPP_FILTER_GAUSS_CN(16u, 3) + else if (type == CV_16SC1) + IPP_FILTER_GAUSS_C1(16s) + else if (type == CV_16SC3) + IPP_FILTER_GAUSS_CN(16s, 3) + else if (type == CV_32FC3) + IPP_FILTER_GAUSS_CN(32f, 3) + else +#endif + if (type == CV_32FC1) + IPP_FILTER_GAUSS_C1(32f) + + if (pSpec) + ippFree(pSpec); + if (pBuffer) + ippFree(pBuffer); + + if(status >= 0) + { + CV_IMPL_ADD(CV_IMPL_IPP); + return true; + } + +#undef IPP_FILTER_GAUSS_C1 +#undef IPP_FILTER_GAUSS_CN + } + } + } + } + return false; +} +} +#endif + void cv::GaussianBlur( InputArray _src, OutputArray _dst, Size ksize, double sigma1, double sigma2, @@ -1654,72 +1788,9 @@ void cv::GaussianBlur( InputArray _src, OutputArray _dst, Size ksize, return; #endif -#if IPP_VERSION_X100 >= 801 && 0 // these functions are slower in IPP 8.1 - CV_IPP_CHECK() - { - int depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type); - if ((depth == CV_8U || depth == CV_16U || depth == CV_16S || depth == CV_32F) && (cn == 1 || cn == 3) && - sigma1 == sigma2 && ksize.width == ksize.height && sigma1 != 0.0 ) - { - IppiBorderType ippBorder = ippiGetBorderType(borderType); - if (ippBorderConst == ippBorder || ippBorderRepl == ippBorder) - { - Mat src = _src.getMat(), dst = _dst.getMat(); - IppiSize roiSize = { src.cols, src.rows }; - IppDataType dataType = ippiGetDataType(depth); - Ipp32s specSize = 0, bufferSize = 0; + CV_IPP_RUN(true, ipp_GaussianBlur( _src, _dst, ksize, sigma1, sigma2, borderType)); - if (ippiFilterGaussianGetBufferSize(roiSize, (Ipp32u)ksize.width, dataType, cn, &specSize, &bufferSize) >= 0) - { - IppFilterGaussianSpec * pSpec = (IppFilterGaussianSpec *)ippMalloc(specSize); - Ipp8u * pBuffer = (Ipp8u*)ippMalloc(bufferSize); - - if (ippiFilterGaussianInit(roiSize, (Ipp32u)ksize.width, (Ipp32f)sigma1, ippBorder, dataType, 1, pSpec, pBuffer) >= 0) - { -#define IPP_FILTER_GAUSS(ippfavor, ippcn) \ - do \ - { \ - typedef Ipp##ippfavor ippType; \ - ippType borderValues[] = { 0, 0, 0 }; \ - IppStatus status = ippcn == 1 ? 
\ - ippiFilterGaussianBorder_##ippfavor##_C1R(src.ptr(), (int)src.step, \ - dst.ptr(), (int)dst.step, roiSize, borderValues[0], pSpec, pBuffer) : \ - ippiFilterGaussianBorder_##ippfavor##_C3R(src.ptr(), (int)src.step, \ - dst.ptr(), (int)dst.step, roiSize, borderValues, pSpec, pBuffer); \ - ippFree(pBuffer); \ - ippFree(pSpec); \ - if (status >= 0) \ - { \ - CV_IMPL_ADD(CV_IMPL_IPP); \ - return; \ - } \ - } while ((void)0, 0) - - if (type == CV_8UC1) - IPP_FILTER_GAUSS(8u, 1); - else if (type == CV_8UC3) - IPP_FILTER_GAUSS(8u, 3); - else if (type == CV_16UC1) - IPP_FILTER_GAUSS(16u, 1); - else if (type == CV_16UC3) - IPP_FILTER_GAUSS(16u, 3); - else if (type == CV_16SC1) - IPP_FILTER_GAUSS(16s, 1); - else if (type == CV_16SC3) - IPP_FILTER_GAUSS(16s, 3); - else if (type == CV_32FC1) - IPP_FILTER_GAUSS(32f, 1); - else if (type == CV_32FC3) - IPP_FILTER_GAUSS(32f, 3); -#undef IPP_FILTER_GAUSS - } - } - setIppErrorStatus(); - } - } - } -#endif Mat kx, ky; createGaussianKernels(kx, ky, type, ksize, sigma1, sigma2); @@ -2632,6 +2703,63 @@ static bool ocl_medianFilter(InputArray _src, OutputArray _dst, int m) } +#ifdef HAVE_IPP +namespace cv +{ +static bool ipp_medianFilter( InputArray _src0, OutputArray _dst, int ksize ) +{ +#if IPP_VERSION_X100 >= 801 + Mat src0 = _src0.getMat(); + _dst.create( src0.size(), src0.type() ); + Mat dst = _dst.getMat(); + +#define IPP_FILTER_MEDIAN_BORDER(ippType, ippDataType, flavor) \ + do \ + { \ + if (ippiFilterMedianBorderGetBufferSize(dstRoiSize, maskSize, \ + ippDataType, CV_MAT_CN(type), &bufSize) >= 0) \ + { \ + Ipp8u * buffer = ippsMalloc_8u(bufSize); \ + IppStatus status = ippiFilterMedianBorder_##flavor(src.ptr(), (int)src.step, \ + dst.ptr(), (int)dst.step, dstRoiSize, maskSize, \ + ippBorderRepl, (ippType)0, buffer); \ + ippsFree(buffer); \ + if (status >= 0) \ + { \ + CV_IMPL_ADD(CV_IMPL_IPP); \ + return true; \ + } \ + } \ + } \ + while ((void)0, 0) + + if( ksize <= 5 ) + { + Ipp32s bufSize; + IppiSize dstRoiSize = ippiSize(dst.cols, dst.rows), maskSize = ippiSize(ksize, ksize); + Mat src; + if( dst.data != src0.data ) + src = src0; + else + src0.copyTo(src); + + int type = src0.type(); + if (type == CV_8UC1) + IPP_FILTER_MEDIAN_BORDER(Ipp8u, ipp8u, 8u_C1R); + else if (type == CV_16UC1) + IPP_FILTER_MEDIAN_BORDER(Ipp16u, ipp16u, 16u_C1R); + else if (type == CV_16SC1) + IPP_FILTER_MEDIAN_BORDER(Ipp16s, ipp16s, 16s_C1R); + else if (type == CV_32FC1) + IPP_FILTER_MEDIAN_BORDER(Ipp32f, ipp32f, 32f_C1R); + } +#undef IPP_FILTER_MEDIAN_BORDER +#endif + return false; +} +} +#endif + void cv::medianBlur( InputArray _src0, OutputArray _dst, int ksize ) { CV_Assert( (ksize % 2 == 1) && (_src0.dims() <= 2 )); @@ -2649,53 +2777,7 @@ void cv::medianBlur( InputArray _src0, OutputArray _dst, int ksize ) _dst.create( src0.size(), src0.type() ); Mat dst = _dst.getMat(); -#if IPP_VERSION_X100 >= 801 - CV_IPP_CHECK() - { -#define IPP_FILTER_MEDIAN_BORDER(ippType, ippDataType, flavor) \ - do \ - { \ - if (ippiFilterMedianBorderGetBufferSize(dstRoiSize, maskSize, \ - ippDataType, CV_MAT_CN(type), &bufSize) >= 0) \ - { \ - Ipp8u * buffer = ippsMalloc_8u(bufSize); \ - IppStatus status = ippiFilterMedianBorder_##flavor(src.ptr(), (int)src.step, \ - dst.ptr(), (int)dst.step, dstRoiSize, maskSize, \ - ippBorderRepl, (ippType)0, buffer); \ - ippsFree(buffer); \ - if (status >= 0) \ - { \ - CV_IMPL_ADD(CV_IMPL_IPP); \ - return; \ - } \ - } \ - setIppErrorStatus(); \ - } \ - while ((void)0, 0) - - if( ksize <= 5 ) - { - Ipp32s bufSize; - IppiSize dstRoiSize = ippiSize(dst.cols, 
dst.rows), maskSize = ippiSize(ksize, ksize); - Mat src; - if( dst.data != src0.data ) - src = src0; - else - src0.copyTo(src); - - int type = src0.type(); - if (type == CV_8UC1) - IPP_FILTER_MEDIAN_BORDER(Ipp8u, ipp8u, 8u_C1R); - else if (type == CV_16UC1) - IPP_FILTER_MEDIAN_BORDER(Ipp16u, ipp16u, 16u_C1R); - else if (type == CV_16SC1) - IPP_FILTER_MEDIAN_BORDER(Ipp16s, ipp16s, 16s_C1R); - else if (type == CV_32FC1) - IPP_FILTER_MEDIAN_BORDER(Ipp32f, ipp32f, 32f_C1R); - } -#undef IPP_FILTER_MEDIAN_BORDER - } -#endif + CV_IPP_RUN(IPP_VERSION_X100 >= 801 && ksize <= 5, ipp_medianFilter(_src0,_dst, ksize)); #ifdef HAVE_TEGRA_OPTIMIZATION if (tegra::useTegra() && tegra::medianBlur(src0, dst, ksize)) diff --git a/modules/imgproc/src/spatialgradient.cpp b/modules/imgproc/src/spatialgradient.cpp new file mode 100644 index 0000000000..b4dc032acb --- /dev/null +++ b/modules/imgproc/src/spatialgradient.cpp @@ -0,0 +1,329 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#include "precomp.hpp" +#include "opencv2/hal/intrin.hpp" + +#include +namespace cv +{ + +/* NOTE: + * + * Sobel-x: -1 0 1 + * -2 0 2 + * -1 0 1 + * + * Sobel-y: -1 -2 -1 + * 0 0 0 + * 1 2 1 + */ +template +static inline void spatialGradientKernel( T& vx, T& vy, + const T& v00, const T& v01, const T& v02, + const T& v10, const T& v12, + const T& v20, const T& v21, const T& v22 ) +{ + // vx = (v22 - v00) + (v02 - v20) + 2 * (v12 - v10) + // vy = (v22 - v00) + (v20 - v02) + 2 * (v21 - v01) + + T tmp_add = v22 - v00, + tmp_sub = v02 - v20, + tmp_x = v12 - v10, + tmp_y = v21 - v01; + + vx = tmp_add + tmp_sub + tmp_x + tmp_x; + vy = tmp_add - tmp_sub + tmp_y + tmp_y; +} + +void spatialGradient( InputArray _src, OutputArray _dx, OutputArray _dy, + int ksize, int borderType ) +{ + + // Prepare InputArray src + Mat src = _src.getMat(); + CV_Assert( !src.empty() ); + CV_Assert( src.type() == CV_8UC1 ); + CV_Assert( borderType == BORDER_DEFAULT || borderType == BORDER_REPLICATE ); + + // Prepare OutputArrays dx, dy + _dx.create( src.size(), CV_16SC1 ); + _dy.create( src.size(), CV_16SC1 ); + Mat dx = _dx.getMat(), + dy = _dy.getMat(); + + // TODO: Allow for other kernel sizes + CV_Assert(ksize == 3); + + // Get dimensions + const int H = src.rows, + W = src.cols; + + // Row, column indices + int i = 0, + j = 0; + + // Handle border types + int i_top = 0, // Case for H == 1 && W == 1 && BORDER_REPLICATE + i_bottom = H - 1, + j_offl = 0, // j offset from 0th pixel to reach -1st pixel + j_offr = 0; // j offset from W-1th pixel to reach Wth pixel + + if ( borderType == BORDER_DEFAULT ) // Equiv. to BORDER_REFLECT_101 + { + if ( H > 1 ) + { + i_top = 1; + i_bottom = H - 2; + } + if ( W > 1 ) + { + j_offl = 1; + j_offr = -1; + } + } + + // Pointer to row vectors + uchar *p_src, *c_src, *n_src; // previous, current, next row + short *c_dx, *c_dy; + + int i_start = 0; + int j_start = 0; +#if CV_SIMD128 && CV_SSE2 + uchar *m_src; + short *n_dx, *n_dy; + + // Characters in variable names have the following meanings: + // u: unsigned char + // s: signed int + // + // [row][column] + // m: offset -1 + // n: offset 0 + // p: offset 1 + // Example: umn is offset -1 in row and offset 0 in column + for ( i = 0; i < H - 1; i += 2 ) + { + if ( i == 0 ) p_src = src.ptr(i_top); + else p_src = src.ptr(i-1); + + c_src = src.ptr(i); + n_src = src.ptr(i+1); + + if ( i == H - 2 ) m_src = src.ptr(i_bottom); + else m_src = src.ptr(i+2); + + c_dx = dx.ptr(i); + c_dy = dy.ptr(i); + n_dx = dx.ptr(i+1); + n_dy = dy.ptr(i+1); + + v_uint8x16 v_select_m = v_uint8x16(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0xFF); + + // Process rest of columns 16-column chunks at a time + for ( j = 1; j < W - 16; j += 16 ) + { + // Load top row for 3x3 Sobel filter + v_uint8x16 v_um = v_load(&p_src[j-1]); + v_uint8x16 v_up = v_load(&p_src[j+1]); + // TODO: Replace _mm_slli_si128 with hal method + v_uint8x16 v_un = v_select(v_select_m, v_uint8x16(_mm_slli_si128(v_up.val, 1)), + v_uint8x16(_mm_srli_si128(v_um.val, 1))); + v_uint16x8 v_um1, v_um2, v_un1, v_un2, v_up1, v_up2; + v_expand(v_um, v_um1, v_um2); + v_expand(v_un, v_un1, v_un2); + v_expand(v_up, v_up1, v_up2); + v_int16x8 v_s1m1 = v_reinterpret_as_s16(v_um1); + v_int16x8 v_s1m2 = v_reinterpret_as_s16(v_um2); + v_int16x8 v_s1n1 = v_reinterpret_as_s16(v_un1); + v_int16x8 v_s1n2 = v_reinterpret_as_s16(v_un2); + v_int16x8 v_s1p1 = v_reinterpret_as_s16(v_up1); + v_int16x8 v_s1p2 = v_reinterpret_as_s16(v_up2); + + // Load second row for 3x3 Sobel filter + v_um = 
v_load(&c_src[j-1]); + v_up = v_load(&c_src[j+1]); + // TODO: Replace _mm_slli_si128 with hal method + v_un = v_select(v_select_m, v_uint8x16(_mm_slli_si128(v_up.val, 1)), + v_uint8x16(_mm_srli_si128(v_um.val, 1))); + v_expand(v_um, v_um1, v_um2); + v_expand(v_un, v_un1, v_un2); + v_expand(v_up, v_up1, v_up2); + v_int16x8 v_s2m1 = v_reinterpret_as_s16(v_um1); + v_int16x8 v_s2m2 = v_reinterpret_as_s16(v_um2); + v_int16x8 v_s2n1 = v_reinterpret_as_s16(v_un1); + v_int16x8 v_s2n2 = v_reinterpret_as_s16(v_un2); + v_int16x8 v_s2p1 = v_reinterpret_as_s16(v_up1); + v_int16x8 v_s2p2 = v_reinterpret_as_s16(v_up2); + + // Load third row for 3x3 Sobel filter + v_um = v_load(&n_src[j-1]); + v_up = v_load(&n_src[j+1]); + // TODO: Replace _mm_slli_si128 with hal method + v_un = v_select(v_select_m, v_uint8x16(_mm_slli_si128(v_up.val, 1)), + v_uint8x16(_mm_srli_si128(v_um.val, 1))); + v_expand(v_um, v_um1, v_um2); + v_expand(v_un, v_un1, v_un2); + v_expand(v_up, v_up1, v_up2); + v_int16x8 v_s3m1 = v_reinterpret_as_s16(v_um1); + v_int16x8 v_s3m2 = v_reinterpret_as_s16(v_um2); + v_int16x8 v_s3n1 = v_reinterpret_as_s16(v_un1); + v_int16x8 v_s3n2 = v_reinterpret_as_s16(v_un2); + v_int16x8 v_s3p1 = v_reinterpret_as_s16(v_up1); + v_int16x8 v_s3p2 = v_reinterpret_as_s16(v_up2); + + // dx & dy for rows 1, 2, 3 + v_int16x8 v_sdx1, v_sdy1; + spatialGradientKernel( v_sdx1, v_sdy1, + v_s1m1, v_s1n1, v_s1p1, + v_s2m1, v_s2p1, + v_s3m1, v_s3n1, v_s3p1 ); + + v_int16x8 v_sdx2, v_sdy2; + spatialGradientKernel( v_sdx2, v_sdy2, + v_s1m2, v_s1n2, v_s1p2, + v_s2m2, v_s2p2, + v_s3m2, v_s3n2, v_s3p2 ); + + // Store + v_store(&c_dx[j], v_sdx1); + v_store(&c_dx[j+8], v_sdx2); + v_store(&c_dy[j], v_sdy1); + v_store(&c_dy[j+8], v_sdy2); + + // Load fourth row for 3x3 Sobel filter + v_um = v_load(&m_src[j-1]); + v_up = v_load(&m_src[j+1]); + // TODO: Replace _mm_slli_si128 with hal method + v_un = v_select(v_select_m, v_uint8x16(_mm_slli_si128(v_up.val, 1)), + v_uint8x16(_mm_srli_si128(v_um.val, 1))); + v_expand(v_um, v_um1, v_um2); + v_expand(v_un, v_un1, v_un2); + v_expand(v_up, v_up1, v_up2); + v_int16x8 v_s4m1 = v_reinterpret_as_s16(v_um1); + v_int16x8 v_s4m2 = v_reinterpret_as_s16(v_um2); + v_int16x8 v_s4n1 = v_reinterpret_as_s16(v_un1); + v_int16x8 v_s4n2 = v_reinterpret_as_s16(v_un2); + v_int16x8 v_s4p1 = v_reinterpret_as_s16(v_up1); + v_int16x8 v_s4p2 = v_reinterpret_as_s16(v_up2); + + // dx & dy for rows 2, 3, 4 + spatialGradientKernel( v_sdx1, v_sdy1, + v_s2m1, v_s2n1, v_s2p1, + v_s3m1, v_s3p1, + v_s4m1, v_s4n1, v_s4p1 ); + + spatialGradientKernel( v_sdx2, v_sdy2, + v_s2m2, v_s2n2, v_s2p2, + v_s3m2, v_s3p2, + v_s4m2, v_s4n2, v_s4p2 ); + + // Store + v_store(&n_dx[j], v_sdx1); + v_store(&n_dx[j+8], v_sdx2); + v_store(&n_dy[j], v_sdy1); + v_store(&n_dy[j+8], v_sdy2); + } + } + i_start = i; + j_start = j; +#endif + int j_p, j_n; + uchar v00, v01, v02, v10, v11, v12, v20, v21, v22; + for ( i = 0; i < H; i++ ) + { + if ( i == 0 ) p_src = src.ptr(i_top); + else p_src = src.ptr(i-1); + + c_src = src.ptr(i); + + if ( i == H - 1 ) n_src = src.ptr(i_bottom); + else n_src = src.ptr(i+1); + + c_dx = dx.ptr(i); + c_dy = dy.ptr(i); + + // Process left-most column + j = 0; + j_p = j + j_offl; + j_n = 1; + if ( j_n >= W ) j_n = j + j_offr; + v00 = p_src[j_p]; v01 = p_src[j]; v02 = p_src[j_n]; + v10 = c_src[j_p]; v11 = c_src[j]; v12 = c_src[j_n]; + v20 = n_src[j_p]; v21 = n_src[j]; v22 = n_src[j_n]; + spatialGradientKernel( c_dx[0], c_dy[0], v00, v01, v02, v10, + v12, v20, v21, v22 ); + v00 = v01; v10 = v11; v20 = v21; + v01 = v02; v11 = 
v12; v21 = v22; + + // Process middle columns + j = i >= i_start ? 1 : j_start; + j_p = j - 1; + v00 = p_src[j_p]; v01 = p_src[j]; + v10 = c_src[j_p]; v11 = c_src[j]; + v20 = n_src[j_p]; v21 = n_src[j]; + + for ( ; j < W - 1; j++ ) + { + // Get values for next column + j_n = j + 1; v02 = p_src[j_n]; v12 = c_src[j_n]; v22 = n_src[j_n]; + spatialGradientKernel( c_dx[j], c_dy[j], v00, v01, v02, v10, + v12, v20, v21, v22 ); + + // Move values back one column for next iteration + v00 = v01; v10 = v11; v20 = v21; + v01 = v02; v11 = v12; v21 = v22; + } + + // Process right-most column + if ( j < W ) + { + j_n = j + j_offr; v02 = p_src[j_n]; v12 = c_src[j_n]; v22 = n_src[j_n]; + spatialGradientKernel( c_dx[j], c_dy[j], v00, v01, v02, v10, + v12, v20, v21, v22 ); + } + } + +} + +} diff --git a/modules/imgproc/src/sumpixels.cpp b/modules/imgproc/src/sumpixels.cpp index ce6aa794c6..a6e86f6c04 100755 --- a/modules/imgproc/src/sumpixels.cpp +++ b/modules/imgproc/src/sumpixels.cpp @@ -424,6 +424,69 @@ static bool ocl_integral( InputArray _src, OutputArray _sum, OutputArray _sqsum, } +#if defined(HAVE_IPP) +namespace cv +{ +static bool ipp_integral(InputArray _src, OutputArray _sum, OutputArray _sqsum, OutputArray _tilted, int sdepth, int sqdepth) +{ +#if !defined(HAVE_IPP_ICV_ONLY) // Disabled on ICV due invalid results + int type = _src.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type); + if( sdepth <= 0 ) + sdepth = depth == CV_8U ? CV_32S : CV_64F; + if ( sqdepth <= 0 ) + sqdepth = CV_64F; + sdepth = CV_MAT_DEPTH(sdepth), sqdepth = CV_MAT_DEPTH(sqdepth); + + + Size ssize = _src.size(), isize(ssize.width + 1, ssize.height + 1); + _sum.create( isize, CV_MAKETYPE(sdepth, cn) ); + Mat src = _src.getMat(), sum =_sum.getMat(), sqsum, tilted; + + if( _sqsum.needed() ) + { + _sqsum.create( isize, CV_MAKETYPE(sqdepth, cn) ); + sqsum = _sqsum.getMat(); + }; + + if( ( depth == CV_8U ) && ( sdepth == CV_32F || sdepth == CV_32S ) && ( !_tilted.needed() ) && ( !_sqsum.needed() || sqdepth == CV_64F ) && ( cn == 1 ) ) + { + IppStatus status = ippStsErr; + IppiSize srcRoiSize = ippiSize( src.cols, src.rows ); + if( sdepth == CV_32F ) + { + if( _sqsum.needed() ) + { + status = ippiSqrIntegral_8u32f64f_C1R( (const Ipp8u*)src.data, (int)src.step, (Ipp32f*)sum.data, (int)sum.step, (Ipp64f*)sqsum.data, (int)sqsum.step, srcRoiSize, 0, 0 ); + } + else + { + status = ippiIntegral_8u32f_C1R( (const Ipp8u*)src.data, (int)src.step, (Ipp32f*)sum.data, (int)sum.step, srcRoiSize, 0 ); + } + } + else if( sdepth == CV_32S ) + { + if( _sqsum.needed() ) + { + status = ippiSqrIntegral_8u32s64f_C1R( (const Ipp8u*)src.data, (int)src.step, (Ipp32s*)sum.data, (int)sum.step, (Ipp64f*)sqsum.data, (int)sqsum.step, srcRoiSize, 0, 0 ); + } + else + { + status = ippiIntegral_8u32s_C1R( (const Ipp8u*)src.data, (int)src.step, (Ipp32s*)sum.data, (int)sum.step, srcRoiSize, 0 ); + } + } + if (0 <= status) + { + CV_IMPL_ADD(CV_IMPL_IPP); + return true; + } + } +#else + CV_UNUSED(_src); CV_UNUSED(_sum); CV_UNUSED(_sqsum); CV_UNUSED(_tilted); CV_UNUSED(sdepth); CV_UNUSED(sqdepth); +#endif + return false; +} +} +#endif void cv::integral( InputArray _src, OutputArray _sum, OutputArray _sqsum, OutputArray _tilted, int sdepth, int sqdepth ) { @@ -456,44 +519,9 @@ void cv::integral( InputArray _src, OutputArray _sum, OutputArray _sqsum, Output sqsum = _sqsum.getMat(); }; -#if defined(HAVE_IPP) && !defined(HAVE_IPP_ICV_ONLY) // Disabled on ICV due invalid results - CV_IPP_CHECK() - { - if( ( depth == CV_8U ) && ( sdepth == CV_32F || sdepth == CV_32S ) 
&& ( !_tilted.needed() ) && ( !_sqsum.needed() || sqdepth == CV_64F ) && ( cn == 1 ) ) - { - IppStatus status = ippStsErr; - IppiSize srcRoiSize = ippiSize( src.cols, src.rows ); - if( sdepth == CV_32F ) - { - if( _sqsum.needed() ) - { - status = ippiSqrIntegral_8u32f64f_C1R( (const Ipp8u*)src.data, (int)src.step, (Ipp32f*)sum.data, (int)sum.step, (Ipp64f*)sqsum.data, (int)sqsum.step, srcRoiSize, 0, 0 ); - } - else - { - status = ippiIntegral_8u32f_C1R( (const Ipp8u*)src.data, (int)src.step, (Ipp32f*)sum.data, (int)sum.step, srcRoiSize, 0 ); - } - } - else if( sdepth == CV_32S ) - { - if( _sqsum.needed() ) - { - status = ippiSqrIntegral_8u32s64f_C1R( (const Ipp8u*)src.data, (int)src.step, (Ipp32s*)sum.data, (int)sum.step, (Ipp64f*)sqsum.data, (int)sqsum.step, srcRoiSize, 0, 0 ); - } - else - { - status = ippiIntegral_8u32s_C1R( (const Ipp8u*)src.data, (int)src.step, (Ipp32s*)sum.data, (int)sum.step, srcRoiSize, 0 ); - } - } - if (0 <= status) - { - CV_IMPL_ADD(CV_IMPL_IPP); - return; - } - setIppErrorStatus(); - } - } -#endif + CV_IPP_RUN(( depth == CV_8U ) && ( sdepth == CV_32F || sdepth == CV_32S ) && + ( !_tilted.needed() ) && ( !_sqsum.needed() || sqdepth == CV_64F ) && ( cn == 1 ), + ipp_integral(_src, _sum, _sqsum, _tilted, sdepth, sqdepth)); if( _tilted.needed() ) { diff --git a/modules/imgproc/src/templmatch.cpp b/modules/imgproc/src/templmatch.cpp index c3d583bcca..e5f4986cf9 100644 --- a/modules/imgproc/src/templmatch.cpp +++ b/modules/imgproc/src/templmatch.cpp @@ -895,28 +895,13 @@ static void matchTemplateMask( InputArray _img, InputArray _templ, OutputArray _ } } -//////////////////////////////////////////////////////////////////////////////////////////////////////// -void cv::matchTemplate( InputArray _img, InputArray _templ, OutputArray _result, int method, InputArray _mask ) +namespace cv { - if (!_mask.empty()) - { - cv::matchTemplateMask(_img, _templ, _result, method, _mask); +static void common_matchTemplate( Mat& img, Mat& templ, Mat& result, int method, int cn ) +{ + if( method == CV_TM_CCORR ) return; - } - - int type = _img.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type); - CV_Assert( CV_TM_SQDIFF <= method && method <= CV_TM_CCOEFF_NORMED ); - CV_Assert( (depth == CV_8U || depth == CV_32F) && type == _templ.type() && _img.dims() <= 2 ); - - bool needswap = _img.size().height < _templ.size().height || _img.size().width < _templ.size().width; - if (needswap) - { - CV_Assert(_img.size().height <= _templ.size().height && _img.size().width <= _templ.size().width); - } - - CV_OCL_RUN(_img.dims() <= 2 && _result.isUMat(), - (!needswap ? ocl_matchTemplate(_img, _templ, _result, method) : ocl_matchTemplate(_templ, _img, _result, method))) int numType = method == CV_TM_CCORR || method == CV_TM_CCORR_NORMED ? 0 : method == CV_TM_CCOEFF || method == CV_TM_CCOEFF_NORMED ? 
1 : 2; @@ -924,57 +909,6 @@ void cv::matchTemplate( InputArray _img, InputArray _templ, OutputArray _result, method == CV_TM_SQDIFF_NORMED || method == CV_TM_CCOEFF_NORMED; - Mat img = _img.getMat(), templ = _templ.getMat(); - if (needswap) - std::swap(img, templ); - - Size corrSize(img.cols - templ.cols + 1, img.rows - templ.rows + 1); - _result.create(corrSize, CV_32F); - Mat result = _result.getMat(); - -#ifdef HAVE_TEGRA_OPTIMIZATION - if (tegra::useTegra() && tegra::matchTemplate(img, templ, result, method)) - return; -#endif - -#if defined HAVE_IPP - bool useIppMT = false; - CV_IPP_CHECK() - { - useIppMT = (templ.rows < img.rows/2 && templ.cols < img.cols/2); - - if (method == CV_TM_SQDIFF && cn == 1 && useIppMT) - { - if (ipp_sqrDistance(img, templ, result)) - { - CV_IMPL_ADD(CV_IMPL_IPP); - return; - } - setIppErrorStatus(); - } - } -#endif - -#if defined HAVE_IPP - if (cn == 1 && useIppMT) - { - if (!ipp_crossCorr(img, templ, result)) - { - setIppErrorStatus(); - crossCorr( img, templ, result, result.size(), result.type(), Point(0,0), 0, 0); - } - else - { - CV_IMPL_ADD(CV_IMPL_IPP); - } - } - else -#endif - crossCorr( img, templ, result, result.size(), result.type(), Point(0,0), 0, 0); - - if( method == CV_TM_CCORR ) - return; - double invArea = 1./((double)templ.rows * templ.cols); Mat sum, sqsum; @@ -1081,8 +1015,81 @@ void cv::matchTemplate( InputArray _img, InputArray _templ, OutputArray _result, } } } +} +#if defined HAVE_IPP +namespace cv +{ +static bool ipp_matchTemplate( Mat& img, Mat& templ, Mat& result, int method, int cn ) +{ + bool useIppMT = (templ.rows < img.rows/2 && templ.cols < img.cols/2); + + if(cn == 1 && useIppMT) + { + if(method == CV_TM_SQDIFF) + { + if (ipp_sqrDistance(img, templ, result)) + return true; + } + else + { + if(ipp_crossCorr(img, templ, result)) + { + common_matchTemplate(img, templ, result, method, cn); + return true; + } + } + } + + return false; +} +} +#endif + +//////////////////////////////////////////////////////////////////////////////////////////////////////// + +void cv::matchTemplate( InputArray _img, InputArray _templ, OutputArray _result, int method, InputArray _mask ) +{ + if (!_mask.empty()) + { + cv::matchTemplateMask(_img, _templ, _result, method, _mask); + return; + } + + int type = _img.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type); + CV_Assert( CV_TM_SQDIFF <= method && method <= CV_TM_CCOEFF_NORMED ); + CV_Assert( (depth == CV_8U || depth == CV_32F) && type == _templ.type() && _img.dims() <= 2 ); + + bool needswap = _img.size().height < _templ.size().height || _img.size().width < _templ.size().width; + if (needswap) + { + CV_Assert(_img.size().height <= _templ.size().height && _img.size().width <= _templ.size().width); + } + + CV_OCL_RUN(_img.dims() <= 2 && _result.isUMat(), + (!needswap ? 
ocl_matchTemplate(_img, _templ, _result, method) : ocl_matchTemplate(_templ, _img, _result, method))) + + Mat img = _img.getMat(), templ = _templ.getMat(); + if (needswap) + std::swap(img, templ); + + Size corrSize(img.cols - templ.cols + 1, img.rows - templ.rows + 1); + _result.create(corrSize, CV_32F); + Mat result = _result.getMat(); + +#ifdef HAVE_TEGRA_OPTIMIZATION + if (tegra::useTegra() && tegra::matchTemplate(img, templ, result, method)) + return; +#endif + + CV_IPP_RUN(true, ipp_matchTemplate(img, templ, result, method, cn)) + + crossCorr( img, templ, result, result.size(), result.type(), Point(0,0), 0, 0); + + common_matchTemplate(img, templ, result, method, cn); +} + CV_IMPL void cvMatchTemplate( const CvArr* _img, const CvArr* _templ, CvArr* _result, int method ) { diff --git a/modules/imgproc/src/thresh.cpp b/modules/imgproc/src/thresh.cpp index 490bdff3e5..18d8afe1b7 100644 --- a/modules/imgproc/src/thresh.cpp +++ b/modules/imgproc/src/thresh.cpp @@ -904,6 +904,24 @@ thresh_32f( const Mat& _src, Mat& _dst, float thresh, float maxval, int type ) } } +#ifdef HAVE_IPP +static bool ipp_getThreshVal_Otsu_8u( const unsigned char* _src, int step, Size size, unsigned char &thresh) +{ +#if IPP_VERSION_X100 >= 801 && !HAVE_ICV + int ippStatus = -1; + IppiSize srcSize = { size.width, size.height }; + CV_SUPPRESS_DEPRECATED_START + ippStatus = ippiComputeThreshold_Otsu_8u_C1R(_src, step, srcSize, &thresh); + CV_SUPPRESS_DEPRECATED_END + + if(ippStatus >= 0) + return true; +#else + CV_UNUSED(_src); CV_UNUSED(step); CV_UNUSED(size); CV_UNUSED(thresh); +#endif + return false; +} +#endif static double getThreshVal_Otsu_8u( const Mat& _src ) @@ -917,21 +935,9 @@ getThreshVal_Otsu_8u( const Mat& _src ) step = size.width; } -#if IPP_VERSION_X100 >= 801 && !defined(HAVE_IPP_ICV_ONLY) - CV_IPP_CHECK() - { - IppiSize srcSize = { size.width, size.height }; - Ipp8u thresh; - CV_SUPPRESS_DEPRECATED_START - IppStatus ok = ippiComputeThreshold_Otsu_8u_C1R(_src.ptr(), step, srcSize, &thresh); - CV_SUPPRESS_DEPRECATED_END - if (ok >= 0) - { - CV_IMPL_ADD(CV_IMPL_IPP); - return thresh; - } - setIppErrorStatus(); - } +#ifdef HAVE_IPP + unsigned char thresh; + CV_IPP_RUN(IPP_VERSION_X100 >= 801 && !HAVE_ICV, ipp_getThreshVal_Otsu_8u(_src.ptr(), step, size, thresh), thresh); #endif const int N = 256; diff --git a/modules/imgproc/test/test_contours.cpp b/modules/imgproc/test/test_contours.cpp index b94408d3b8..d8d51f2fe1 100644 --- a/modules/imgproc/test/test_contours.cpp +++ b/modules/imgproc/test/test_contours.cpp @@ -40,7 +40,6 @@ //M*/ #include "test_precomp.hpp" -#include "opencv2/highgui.hpp" using namespace cv; using namespace std; diff --git a/modules/imgproc/test/test_convhull.cpp b/modules/imgproc/test/test_convhull.cpp index e7b2886d37..116a4ae3e9 100644 --- a/modules/imgproc/test/test_convhull.cpp +++ b/modules/imgproc/test/test_convhull.cpp @@ -1426,7 +1426,7 @@ protected: void run_func(void); int validate_test_results( int test_case_idx ); double max_noise; - float line[6], line0[6]; + AutoBuffer line, line0; int dist_type; double reps, aeps; }; @@ -1439,11 +1439,6 @@ CV_FitLineTest::CV_FitLineTest() max_noise = 0.05; } -#if defined(__GNUC__) && (__GNUC__ == 4) && (__GNUC_MINOR__ == 8) -# pragma GCC diagnostic push -# pragma GCC diagnostic ignored "-Warray-bounds" -#endif - void CV_FitLineTest::generate_point_set( void* pointsSet ) { RNG& rng = ts->get_rng(); @@ -1515,14 +1510,12 @@ void CV_FitLineTest::generate_point_set( void* pointsSet ) } } -#if defined(__GNUC__) && (__GNUC__ == 4) && 
(__GNUC_MINOR__ == 8) -# pragma GCC diagnostic pop -#endif - int CV_FitLineTest::prepare_test_case( int test_case_idx ) { RNG& rng = ts->get_rng(); dims = cvtest::randInt(rng) % 2 + 2; + line.allocate(dims * 2); + line0.allocate(dims * 2); min_log_size = MAX(min_log_size,5); max_log_size = MAX(min_log_size,max_log_size); int code = CV_BaseShapeDescrTest::prepare_test_case( test_case_idx ); @@ -1543,11 +1536,6 @@ void CV_FitLineTest::run_func() cv::fitLine(cv::cvarrToMat(points), (cv::Vec6f&)line[0], dist_type, 0, reps, aeps); } -#if defined(__GNUC__) && (__GNUC__ == 4) && (__GNUC_MINOR__ == 8) -# pragma GCC diagnostic push -# pragma GCC diagnostic ignored "-Warray-bounds" -#endif - int CV_FitLineTest::validate_test_results( int test_case_idx ) { int code = CV_BaseShapeDescrTest::validate_test_results( test_case_idx ); @@ -1626,10 +1614,6 @@ _exit_: return code; } -#if defined(__GNUC__) && (__GNUC__ == 4) && (__GNUC_MINOR__ == 8) -# pragma GCC diagnostic pop -#endif - /****************************************************************************************\ * ContourMoments Test * \****************************************************************************************/ diff --git a/modules/imgproc/test/test_distancetransform.cpp b/modules/imgproc/test/test_distancetransform.cpp index dd3c2e8b41..7d28428437 100644 --- a/modules/imgproc/test/test_distancetransform.cpp +++ b/modules/imgproc/test/test_distancetransform.cpp @@ -158,7 +158,7 @@ cvTsDistTransform( const CvMat* _src, CvMat* _dst, int dist_type, const float init_val = 1e6; float mask[3]; CvMat* temp; - int ofs[16]; + int ofs[16] = {0}; float delta[16]; int tstep, count; diff --git a/modules/imgproc/test/test_filter.cpp b/modules/imgproc/test/test_filter.cpp index 9253132186..5994b1b11b 100644 --- a/modules/imgproc/test/test_filter.cpp +++ b/modules/imgproc/test/test_filter.cpp @@ -552,6 +552,68 @@ void CV_SobelTest::prepare_to_validation( int /*test_case_idx*/ ) } +/////////////// spatialGradient /////////////// + +class CV_SpatialGradientTest : public CV_DerivBaseTest +{ +public: + CV_SpatialGradientTest(); + +protected: + void prepare_to_validation( int test_case_idx ); + void run_func(); + void get_test_array_types_and_sizes( int test_case_idx, + vector >& sizes, vector >& types ); + int ksize; +}; + +CV_SpatialGradientTest::CV_SpatialGradientTest() { + test_array[OUTPUT].push_back(NULL); + test_array[REF_OUTPUT].push_back(NULL); + inplace = false; +} + + +void CV_SpatialGradientTest::get_test_array_types_and_sizes( int test_case_idx, + vector >& sizes, + vector >& types ) +{ + CV_DerivBaseTest::get_test_array_types_and_sizes( test_case_idx, sizes, types ); + + sizes[OUTPUT][1] = sizes[REF_OUTPUT][1] = sizes[OUTPUT][0]; + + // Inputs are only CV_8UC1 for now + types[INPUT][0] = CV_8UC1; + + // Outputs are only CV_16SC1 for now + types[OUTPUT][0] = types[OUTPUT][1] = types[REF_OUTPUT][0] + = types[REF_OUTPUT][1] = CV_16SC1; + + ksize = 3; + border = BORDER_DEFAULT; // TODO: Add BORDER_REPLICATE +} + + +void CV_SpatialGradientTest::run_func() +{ + spatialGradient( test_mat[INPUT][0], test_mat[OUTPUT][0], + test_mat[OUTPUT][1], ksize, border ); +} + +void CV_SpatialGradientTest::prepare_to_validation( int /*test_case_idx*/ ) +{ + int dx, dy; + + dx = 1; dy = 0; + Sobel( test_mat[INPUT][0], test_mat[REF_OUTPUT][0], CV_16SC1, dx, dy, ksize, + 1, 0, border ); + + dx = 0; dy = 1; + Sobel( test_mat[INPUT][0], test_mat[REF_OUTPUT][1], CV_16SC1, dx, dy, ksize, + 1, 0, border ); +} + + /////////////// laplace /////////////// class 
CV_LaplaceTest : public CV_DerivBaseTest @@ -1773,6 +1835,7 @@ TEST(Imgproc_Dilate, accuracy) { CV_DilateTest test; test.safe_run(); } TEST(Imgproc_MorphologyEx, accuracy) { CV_MorphExTest test; test.safe_run(); } TEST(Imgproc_Filter2D, accuracy) { CV_FilterTest test; test.safe_run(); } TEST(Imgproc_Sobel, accuracy) { CV_SobelTest test; test.safe_run(); } +TEST(Imgproc_SpatialGradient, accuracy) { CV_SpatialGradientTest test; test.safe_run(); } TEST(Imgproc_Laplace, accuracy) { CV_LaplaceTest test; test.safe_run(); } TEST(Imgproc_Blur, accuracy) { CV_BlurTest test; test.safe_run(); } TEST(Imgproc_GaussianBlur, accuracy) { CV_GaussianBlurTest test; test.safe_run(); } diff --git a/modules/java/CMakeLists.txt b/modules/java/CMakeLists.txt index 0528fa486e..3a113d5cc2 100644 --- a/modules/java/CMakeLists.txt +++ b/modules/java/CMakeLists.txt @@ -282,6 +282,8 @@ else() set(LIB_NAME_SUFIX "${OPENCV_VERSION_MAJOR}${OPENCV_VERSION_MINOR}${OPENCV_VERSION_PATCH}") endif() +file(MAKE_DIRECTORY "${OpenCV_BINARY_DIR}/bin") + # step 4: build jar if(ANDROID) set(JAR_FILE "${OpenCV_BINARY_DIR}/bin/classes.jar") @@ -369,6 +371,7 @@ set_target_properties(${the_module} PROPERTIES if(ANDROID) ocv_target_link_libraries(${the_module} jnigraphics) # for Mat <=> Bitmap converters + ocv_target_link_libraries(${the_module} LINK_INTERFACE_LIBRARIES ${OPENCV_LINKER_LIBS} jnigraphics) # force strip library after the build command # because samples and tests will make a copy of the library before install diff --git a/modules/java/generator/src/java/android+AsyncServiceHelper.java b/modules/java/generator/src/java/android+AsyncServiceHelper.java index e18d5a5001..4d9d115389 100644 --- a/modules/java/generator/src/java/android+AsyncServiceHelper.java +++ b/modules/java/generator/src/java/android+AsyncServiceHelper.java @@ -376,7 +376,7 @@ class AsyncServiceHelper else { // If the dependencies list is not defined or empty. - String AbsLibraryPath = Path + File.separator + "libopencv_java.so"; + String AbsLibraryPath = Path + File.separator + "libopencv_java3.so"; result &= loadLibrary(AbsLibraryPath); } diff --git a/modules/java/generator/src/java/android+StaticHelper.java b/modules/java/generator/src/java/android+StaticHelper.java index 10442c904d..36f9f6f64a 100644 --- a/modules/java/generator/src/java/android+StaticHelper.java +++ b/modules/java/generator/src/java/android+StaticHelper.java @@ -92,7 +92,7 @@ class StaticHelper { else { // If dependencies list is not defined or empty. - result &= loadLibrary("opencv_java"); + result &= loadLibrary("opencv_java3"); } return result; diff --git a/modules/ml/include/opencv2/ml.hpp b/modules/ml/include/opencv2/ml.hpp index d0d2c33613..fd491e4f6e 100644 --- a/modules/ml/include/opencv2/ml.hpp +++ b/modules/ml/include/opencv2/ml.hpp @@ -790,7 +790,7 @@ public: Returns vector of covariation matrices. Number of matrices is the number of gaussian mixtures, each matrix is a square floating-point matrix NxN, where N is the space dimensionality. */ - virtual void getCovs(std::vector& covs) const = 0; + CV_WRAP virtual void getCovs(CV_OUT std::vector& covs) const = 0; /** @brief Returns a likelihood logarithm value and an index of the most probable mixture component for the given sample. @@ -804,7 +804,7 @@ public: the sample. First element is an index of the most probable mixture component for the given sample. 
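    For illustration, a minimal call sketch (the variable names are assumed for the example only):
    @code
        Mat probs;                                // optional output: per-component posterior probabilities
        Vec2d res = em->predict2(sample, probs);  // em: a trained EM model, sample: one input sample
        double logLikelihood = res[0];            // zero element: likelihood logarithm for the sample
        int mostProbable     = (int)res[1];       // first element: index of the most probable mixture component
    @endcode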
*/ - CV_WRAP CV_WRAP virtual Vec2d predict2(InputArray sample, OutputArray probs) const = 0; + CV_WRAP virtual Vec2d predict2(InputArray sample, OutputArray probs) const = 0; /** @brief Estimate the Gaussian mixture parameters from a samples set. diff --git a/modules/ml/src/nbayes.cpp b/modules/ml/src/nbayes.cpp index 5ca74acd91..221db93e24 100644 --- a/modules/ml/src/nbayes.cpp +++ b/modules/ml/src/nbayes.cpp @@ -313,7 +313,7 @@ public: CV_Error( CV_StsBadArg, "The input samples must be 32f matrix with the number of columns = nallvars" ); - if( samples.rows > 1 && _results.needed() ) + if( (samples.rows > 1) && (! _results.needed()) ) CV_Error( CV_StsNullPtr, "When the number of input samples is >1, the output vector of results must be passed" ); diff --git a/modules/ml/src/svm.cpp b/modules/ml/src/svm.cpp index 93180856ec..812e2839bb 100644 --- a/modules/ml/src/svm.cpp +++ b/modules/ml/src/svm.cpp @@ -1675,6 +1675,7 @@ public: Mat samples = data->getTrainSamples(); Mat responses; bool is_classification = false; + Mat class_labels0; int class_count = (int)class_labels.total(); if( svmType == C_SVC || svmType == NU_SVC ) @@ -1688,7 +1689,8 @@ public: setRangeVector(temp_class_labels, class_count); // temporarily replace class labels with 0, 1, ..., NCLASSES-1 - Mat(temp_class_labels).copyTo(class_labels); + class_labels0 = class_labels; + class_labels = Mat(temp_class_labels).clone(); } else responses = data->getTrainResponses(); @@ -1821,6 +1823,7 @@ public: } params = best_params; + class_labels = class_labels0; return do_train( samples, responses ); } diff --git a/modules/objdetect/include/opencv2/objdetect/detection_based_tracker.hpp b/modules/objdetect/include/opencv2/objdetect/detection_based_tracker.hpp index 54117fdb91..1f5f1d3676 100644 --- a/modules/objdetect/include/opencv2/objdetect/detection_based_tracker.hpp +++ b/modules/objdetect/include/opencv2/objdetect/detection_based_tracker.hpp @@ -44,6 +44,7 @@ #ifndef __OPENCV_OBJDETECT_DBT_HPP__ #define __OPENCV_OBJDETECT_DBT_HPP__ +// After this condition removal update blacklist for bindings: modules/python/common.cmake #if defined(__linux__) || defined(LINUX) || defined(__APPLE__) || defined(__ANDROID__) || \ (defined(__cplusplus) && __cplusplus > 201103L) || (defined(_MSC_VER) && _MSC_VER >= 1700) diff --git a/modules/python/common.cmake b/modules/python/common.cmake index 57439809b7..2444b77a98 100644 --- a/modules/python/common.cmake +++ b/modules/python/common.cmake @@ -28,6 +28,7 @@ endforeach(m) ocv_list_filterout(opencv_hdrs ".h$") ocv_list_filterout(opencv_hdrs "cuda") ocv_list_filterout(opencv_hdrs "cudev") +ocv_list_filterout(opencv_hdrs "detection_based_tracker.hpp") # Conditional compilation set(cv2_generated_hdrs "${CMAKE_CURRENT_BINARY_DIR}/pyopencv_generated_include.h" diff --git a/modules/python/src2/cv2.cpp b/modules/python/src2/cv2.cpp index 974545994b..84cbb496fc 100644 --- a/modules/python/src2/cv2.cpp +++ b/modules/python/src2/cv2.cpp @@ -190,9 +190,13 @@ public: void deallocate(UMatData* u) const { - if(u) + if(!u) + return; + PyEnsureGIL gil; + CV_Assert(u->urefcount >= 0); + CV_Assert(u->refcount >= 0); + if(u->refcount == 0) { - PyEnsureGIL gil; PyObject* o = (PyObject*)u->userdata; Py_XDECREF(o); delete u; @@ -315,8 +319,9 @@ static bool pyopencv_to(PyObject* o, Mat& m, const ArgInfo info) // a) multi-dimensional (ndims > 2) arrays, as well as simpler 1- and 2-dimensional cases // b) transposed arrays, where _strides[] elements go in non-descending order // c) flipped arrays, where some of _strides[] 
elements are negative - if( (i == ndims-1 && (size_t)_strides[i] != elemsize) || - (i < ndims-1 && _strides[i] < _strides[i+1]) ) + // the _sizes[i] > 1 is needed to avoid spurious copies when NPY_RELAXED_STRIDES is set + if( (i == ndims-1 && _sizes[i] > 1 && (size_t)_strides[i] != elemsize) || + (i < ndims-1 && _sizes[i] > 1 && _strides[i] < _strides[i+1]) ) needcopy = true; } @@ -343,10 +348,21 @@ static bool pyopencv_to(PyObject* o, Mat& m, const ArgInfo info) _strides = PyArray_STRIDES(oarr); } - for(int i = 0; i < ndims; i++) + // Normalize strides in case NPY_RELAXED_STRIDES is set + size_t default_step = elemsize; + for ( int i = ndims - 1; i >= 0; --i ) { size[i] = (int)_sizes[i]; - step[i] = (size_t)_strides[i]; + if ( size[i] > 1 ) + { + step[i] = (size_t)_strides[i]; + default_step = step[i] * size[i]; + } + else + { + step[i] = default_step; + default_step *= size[i]; + } } // handle degenerate case diff --git a/modules/python/src2/gen2.py b/modules/python/src2/gen2.py index 1648e53abe..20c5c812cc 100755 --- a/modules/python/src2/gen2.py +++ b/modules/python/src2/gen2.py @@ -757,19 +757,6 @@ class PythonWrapperGenerator(object): sys.exit(-1) self.classes[classinfo.name] = classinfo - if classinfo.base: - chunks = classinfo.base.split('_') - base = '_'.join(chunks) - while base not in self.classes and len(chunks)>1: - del chunks[-2] - base = '_'.join(chunks) - if base not in self.classes: - print("Generator error: unable to resolve base %s for %s" - % (classinfo.base, classinfo.name)) - sys.exit(-1) - classinfo.base = base - classinfo.isalgorithm |= self.classes[base].isalgorithm - def split_decl_name(self, name): chunks = name.split('.') namespace = chunks[:-1] @@ -881,6 +868,22 @@ class PythonWrapperGenerator(object): # function self.add_func(decl) + # step 1.5 check if all base classes exist + for name, classinfo in self.classes.items(): + if classinfo.base: + chunks = classinfo.base.split('_') + base = '_'.join(chunks) + while base not in self.classes and len(chunks)>1: + del chunks[-2] + base = '_'.join(chunks) + if base not in self.classes: + print("Generator error: unable to resolve base %s for %s" + % (classinfo.base, classinfo.name)) + sys.exit(-1) + classinfo.base = base + classinfo.isalgorithm |= self.classes[base].isalgorithm + self.classes[name] = classinfo + # step 2: generate code for the classes and their methods classlist = list(self.classes.items()) classlist.sort() diff --git a/modules/stitching/CMakeLists.txt b/modules/stitching/CMakeLists.txt index 9a24f43160..76c7bc8489 100644 --- a/modules/stitching/CMakeLists.txt +++ b/modules/stitching/CMakeLists.txt @@ -4,6 +4,10 @@ if(HAVE_CUDA) ocv_warnings_disable(CMAKE_CXX_FLAGS -Wundef -Wmissing-declarations -Wshadow) endif() +set(STITCHING_CONTRIB_DEPS "opencv_xfeatures2d") +if(BUILD_SHARED_LIBS AND BUILD_opencv_world) + set(STITCHING_CONTRIB_DEPS "") +endif() ocv_define_module(stitching opencv_imgproc opencv_features2d opencv_calib3d opencv_objdetect - OPTIONAL opencv_cudaarithm opencv_cudafilters opencv_cudafeatures2d opencv_cudalegacy opencv_xfeatures2d + OPTIONAL opencv_cudaarithm opencv_cudafilters opencv_cudafeatures2d opencv_cudalegacy ${STITCHING_CONTRIB_DEPS} WRAP python) diff --git a/modules/ts/CMakeLists.txt b/modules/ts/CMakeLists.txt index c0158ba416..2732924b74 100644 --- a/modules/ts/CMakeLists.txt +++ b/modules/ts/CMakeLists.txt @@ -1,12 +1,19 @@ set(the_description "The ts module") -if(IOS OR WINRT) +if(IOS) ocv_module_disable(ts) endif() set(OPENCV_MODULE_TYPE STATIC) 
set(OPENCV_MODULE_IS_PART_OF_WORLD FALSE) +if(WINRT) + # WINRT doesn't have access to environment variables + # so adding corresponding macros during CMake run + add_env_definitions(OPENCV_TEST_DATA_PATH) + add_env_definitions(OPENCV_PERF_VALIDATION_DIR) +endif() + ocv_warnings_disable(CMAKE_CXX_FLAGS -Wundef) ocv_add_module(ts INTERNAL opencv_core opencv_imgproc opencv_imgcodecs opencv_videoio opencv_highgui) diff --git a/modules/ts/include/opencv2/ts.hpp b/modules/ts/include/opencv2/ts.hpp index 4a08182c62..0f3b3149de 100644 --- a/modules/ts/include/opencv2/ts.hpp +++ b/modules/ts/include/opencv2/ts.hpp @@ -588,3 +588,102 @@ int main(int argc, char **argv) \ #endif #include "opencv2/ts/ts_perf.hpp" + +#ifdef WINRT +#ifndef __FSTREAM_EMULATED__ +#define __FSTREAM_EMULATED__ +#include +#include +#include + +#undef ifstream +#undef ofstream +#define ifstream ifstream_emulated +#define ofstream ofstream_emulated + +namespace std { + +class ifstream : public stringstream +{ + FILE* f; +public: + ifstream(const char* filename, ios_base::openmode mode = ios_base::in) + : f(NULL) + { + string modeStr("r"); + printf("Open file (read): %s\n", filename); + if (mode & ios_base::binary) + modeStr += "b"; + f = fopen(filename, modeStr.c_str()); + + if (f == NULL) + { + printf("Can't open file: %s\n", filename); + return; + } + fseek(f, 0, SEEK_END); + size_t sz = ftell(f); + if (sz > 0) + { + char* buf = (char*) malloc(sz); + fseek(f, 0, SEEK_SET); + if (fread(buf, 1, sz, f) == sz) + { + this->str(std::string(buf, sz)); + } + free(buf); + } + } + + ~ifstream() { close(); } + bool is_open() const { return f != NULL; } + void close() + { + if (f) + fclose(f); + f = NULL; + this->str(""); + } +}; + +class ofstream : public stringstream +{ + FILE* f; +public: + ofstream(const char* filename, ios_base::openmode mode = ios_base::out) + : f(NULL) + { + open(filename, mode); + } + ~ofstream() { close(); } + void open(const char* filename, ios_base::openmode mode = ios_base::out) + { + string modeStr("w+"); + if (mode & ios_base::trunc) + modeStr = "w"; + if (mode & ios_base::binary) + modeStr += "b"; + f = fopen(filename, modeStr.c_str()); + printf("Open file (write): %s\n", filename); + if (f == NULL) + { + printf("Can't open file (write): %s\n", filename); + return; + } + } + bool is_open() const { return f != NULL; } + void close() + { + if (f) + { + fwrite(reinterpret_cast(this->str().c_str()), this->str().size(), 1, f); + fclose(f); + } + f = NULL; + this->str(""); + } +}; + +} // namespace std +#endif // __FSTREAM_EMULATED__ +#endif // WINRT diff --git a/modules/ts/include/opencv2/ts/ts_gtest.h b/modules/ts/include/opencv2/ts/ts_gtest.h index 243c63879c..cec926a08f 100644 --- a/modules/ts/include/opencv2/ts/ts_gtest.h +++ b/modules/ts/include/opencv2/ts/ts_gtest.h @@ -2924,7 +2924,7 @@ inline const char* StrNCpy(char* dest, const char* src, size_t n) { // StrError() aren't needed on Windows CE at this time and thus not // defined there. -#if !GTEST_OS_WINDOWS_MOBILE +#if !GTEST_OS_WINDOWS_MOBILE && !defined WINRT inline int ChDir(const char* dir) { return chdir(dir); } #endif inline FILE* FOpen(const char* path, const char* mode) { @@ -2948,7 +2948,7 @@ inline int Close(int fd) { return close(fd); } inline const char* StrError(int errnum) { return strerror(errnum); } #endif inline const char* GetEnv(const char* name) { -#if GTEST_OS_WINDOWS_MOBILE +#if GTEST_OS_WINDOWS_MOBILE || defined WINRT // We are on Windows CE, which has no environment variables. 
return NULL; #elif defined(__BORLANDC__) || defined(__SunOS_5_8) || defined(__SunOS_5_9) diff --git a/modules/ts/src/ts.cpp b/modules/ts/src/ts.cpp index 29fd056fea..232124a96a 100644 --- a/modules/ts/src/ts.cpp +++ b/modules/ts/src/ts.cpp @@ -450,7 +450,11 @@ static int tsErrorCallback( int status, const char* func_name, const char* err_m void TS::init( const string& modulename ) { +#ifndef WINRT char* datapath_dir = getenv("OPENCV_TEST_DATA_PATH"); +#else + char* datapath_dir = OPENCV_TEST_DATA_PATH; +#endif if( datapath_dir ) { @@ -684,7 +688,11 @@ void parseCustomOptions(int argc, char **argv) test_ipp_check = parser.get("test_ipp_check"); if (!test_ipp_check) +#ifndef WINRT test_ipp_check = getenv("OPENCV_IPP_CHECK") != NULL; +#else + test_ipp_check = false; +#endif } /* End of file. */ diff --git a/modules/ts/src/ts_gtest.cpp b/modules/ts/src/ts_gtest.cpp index 50c8808aff..29a3996be8 100644 --- a/modules/ts/src/ts_gtest.cpp +++ b/modules/ts/src/ts_gtest.cpp @@ -4054,7 +4054,7 @@ enum GTestColor { COLOR_YELLOW }; -#if GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MOBILE +#if GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MOBILE && !defined WINRT // Returns the character attribute for the given color. WORD GetColorAttribute(GTestColor color) { @@ -4122,7 +4122,7 @@ static void ColoredPrintf(GTestColor color, const char* fmt, ...) { va_list args; va_start(args, fmt); -#if GTEST_OS_WINDOWS_MOBILE || GTEST_OS_SYMBIAN || GTEST_OS_ZOS || GTEST_OS_IOS +#if GTEST_OS_WINDOWS_MOBILE || WINRT || GTEST_OS_SYMBIAN || GTEST_OS_ZOS || GTEST_OS_IOS const bool use_color = false; #else static const bool in_color_mode = @@ -4137,7 +4137,7 @@ static void ColoredPrintf(GTestColor color, const char* fmt, ...) { return; } -#if GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MOBILE +#if GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MOBILE && !defined WINRT const HANDLE stdout_handle = GetStdHandle(STD_OUTPUT_HANDLE); // Gets the current text color. @@ -5320,7 +5320,7 @@ void UnitTest::AddTestPartResult( // with another testing framework) and specify the former on the // command line for debugging. if (GTEST_FLAG(break_on_failure)) { -#if GTEST_OS_WINDOWS +#if GTEST_OS_WINDOWS && !defined WINRT // Using DebugBreak on Windows allows gtest to still break into a debugger // when a failure happens and both the --gtest_break_on_failure and // the --gtest_catch_exceptions flags are specified. @@ -5398,7 +5398,7 @@ int UnitTest::Run() { // process. In either case the user does not want to see pop-up dialogs // about crashes - they are expected. if (impl()->catch_exceptions() || in_death_test_child_process) { -# if !GTEST_OS_WINDOWS_MOBILE +# if !GTEST_OS_WINDOWS_MOBILE && !defined WINRT // SetErrorMode doesn't exist on CE. SetErrorMode(SEM_FAILCRITICALERRORS | SEM_NOALIGNMENTFAULTEXCEPT | SEM_NOGPFAULTERRORBOX | SEM_NOOPENFILEERRORBOX); @@ -7110,6 +7110,7 @@ bool DeathTestImpl::Passed(bool status_ok) { } # if GTEST_OS_WINDOWS +#ifndef WINRT // WindowsDeathTest implements death tests on Windows. Due to the // specifics of starting new processes on Windows, death tests there are // always threadsafe, and Google Test considers the @@ -7301,6 +7302,7 @@ DeathTest::TestRole WindowsDeathTest::AssumeRole() { set_spawned(true); return OVERSEE_TEST; } +#endif # else // We are not on Windows. 
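// Note: the #ifndef WINRT guards above compile out the WindowsDeathTest implementation for
// WinRT targets; correspondingly, DefaultDeathTestFactory::Create() below reports
// "DeathTest is not supported on winrt!" and returns false, presumably because a WinRT
// application cannot spawn the helper child process that WindowsDeathTest depends on.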
// ForkingDeathTest provides implementations for most of the abstract @@ -7711,10 +7713,14 @@ bool DefaultDeathTestFactory::Create(const char* statement, const RE* regex, } # if GTEST_OS_WINDOWS - if (GTEST_FLAG(death_test_style) == "threadsafe" || GTEST_FLAG(death_test_style) == "fast") { +#ifndef WINRT *test = new WindowsDeathTest(statement, regex, file, line); +#else + printf("DeathTest is not supported on winrt!\n"); + return false; +#endif } # else @@ -7758,6 +7764,7 @@ static void SplitString(const ::std::string& str, char delimiter, } # if GTEST_OS_WINDOWS +#ifndef WINRT // Recreates the pipe and event handles from the provided parameters, // signals the event, and returns a file descriptor wrapped around the pipe // handle. This function is called in the child process only. @@ -7823,6 +7830,7 @@ int GetStatusFileDescriptor(unsigned int parent_process_id, return write_fd; } +#endif # endif // GTEST_OS_WINDOWS // Returns a newly created InternalRunDeathTestFlag object with fields @@ -7840,7 +7848,7 @@ InternalRunDeathTestFlag* ParseInternalRunDeathTestFlag() { int write_fd = -1; # if GTEST_OS_WINDOWS - +#ifndef WINRT unsigned int parent_process_id = 0; size_t write_handle_as_size_t = 0; size_t event_handle_as_size_t = 0; @@ -7857,6 +7865,7 @@ InternalRunDeathTestFlag* ParseInternalRunDeathTestFlag() { write_fd = GetStatusFileDescriptor(parent_process_id, write_handle_as_size_t, event_handle_as_size_t); +#endif # else if (fields.size() != 4 @@ -7974,7 +7983,7 @@ static bool IsPathSeparator(char c) { // Returns the current working directory, or "" if unsuccessful. FilePath FilePath::GetCurrentDir() { -#if GTEST_OS_WINDOWS_MOBILE +#if GTEST_OS_WINDOWS_MOBILE || WINRT // Windows CE doesn't have a current directory, so we just return // something reasonable. return FilePath(kCurrentDirectoryString); @@ -8765,6 +8774,7 @@ class CapturedStream { public: // The ctor redirects the stream to a temporary file. explicit CapturedStream(int fd) : fd_(fd), uncaptured_fd_(dup(fd)) { +#ifndef WINRT # if GTEST_OS_WINDOWS char temp_dir_path[MAX_PATH + 1] = { '\0' }; // NOLINT char temp_file_path[MAX_PATH + 1] = { '\0' }; // NOLINT @@ -8810,6 +8820,7 @@ class CapturedStream { fflush(NULL); dup2(captured_fd, fd_); close(captured_fd); +#endif } ~CapturedStream() { diff --git a/modules/ts/src/ts_perf.cpp b/modules/ts/src/ts_perf.cpp index f5ba1d81e6..ef8f0af9a0 100644 --- a/modules/ts/src/ts_perf.cpp +++ b/modules/ts/src/ts_perf.cpp @@ -185,7 +185,11 @@ void Regression::init(const std::string& testSuitName, const std::string& ext) return; } +#ifndef WINRT const char *data_path_dir = getenv("OPENCV_TEST_DATA_PATH"); +#else + const char *data_path_dir = OPENCV_TEST_DATA_PATH; +#endif const char *path_separator = "/"; if (data_path_dir) @@ -814,7 +818,12 @@ void TestBase::Init(const std::vector & availableImpls, param_force_samples = args.get("perf_force_samples"); param_write_sanity = args.has("perf_write_sanity"); param_verify_sanity = args.has("perf_verify_sanity"); + +#ifndef WINRT test_ipp_check = !args.has("perf_ipp_check") ? 
getenv("OPENCV_IPP_CHECK") != NULL : true; +#else + test_ipp_check = false; +#endif param_threads = args.get("perf_threads"); #ifdef CV_COLLECT_IMPL_DATA param_collect_impl = args.has("perf_collect_impl"); @@ -881,7 +890,11 @@ void TestBase::Init(const std::vector & availableImpls, #endif { +#ifndef WINRT const char* path = getenv("OPENCV_PERF_VALIDATION_DIR"); +#else + const char* path = OPENCV_PERF_VALIDATION_DIR; +#endif if (path) perf_validation_results_directory = path; } @@ -1185,7 +1198,11 @@ bool TestBase::next() printf("Performance is unstable, it may be a result of overheat problems\n"); printf("Idle delay for %d ms... \n", perf_validation_idle_delay_ms); #if defined WIN32 || defined _WIN32 || defined WIN64 || defined _WIN64 +#ifndef WINRT_8_0 Sleep(perf_validation_idle_delay_ms); +#else + WaitForSingleObjectEx(GetCurrentThread(), perf_validation_idle_delay_ms, FALSE); +#endif #else usleep(perf_validation_idle_delay_ms * 1000); #endif @@ -1635,7 +1652,11 @@ std::string TestBase::getDataPath(const std::string& relativePath) throw PerfEarlyExitException(); } +#ifndef WINRT const char *data_path_dir = getenv("OPENCV_TEST_DATA_PATH"); +#else + const char *data_path_dir = OPENCV_TEST_DATA_PATH; +#endif const char *path_separator = "/"; std::string path; diff --git a/modules/video/src/opencl/pyrlk.cl b/modules/video/src/opencl/pyrlk.cl index 84889b4482..44707aa7c7 100644 --- a/modules/video/src/opencl/pyrlk.cl +++ b/modules/video/src/opencl/pyrlk.cl @@ -228,27 +228,24 @@ __constant sampler_t sampler = CLK_NORMALIZED_COORDS_FALSE | CLK_ADDRESS_CLAM // macro to get pixel value from local memory -#define VAL(_y,_x,_yy,_xx) (IPatchLocal[(yid+((_y)*LSy)+1+(_yy))*LM_W+(xid+((_x)*LSx)+1+(_xx))]) +#define VAL(_y,_x,_yy,_xx) (IPatchLocal[mad24(((_y) + (_yy)), LM_W, ((_x) + (_xx)))]) inline void SetPatch(local float* IPatchLocal, int TileY, int TileX, float* Pch, float* Dx, float* Dy, float* A11, float* A12, float* A22, float w) { - unsigned int xid=get_local_id(0); - unsigned int yid=get_local_id(1); - *Pch = VAL(TileY,TileX,0,0); + int xid=get_local_id(0); + int yid=get_local_id(1); + int xBase = mad24(TileX, LSx, (xid + 1)); + int yBase = mad24(TileY, LSy, (yid + 1)); - float dIdx = (3.0f*VAL(TileY,TileX,-1,1)+10.0f*VAL(TileY,TileX,0,1)+3.0f*VAL(TileY,TileX,+1,1))-(3.0f*VAL(TileY,TileX,-1,-1)+10.0f*VAL(TileY,TileX,0,-1)+3.0f*VAL(TileY,TileX,+1,-1)); - float dIdy = (3.0f*VAL(TileY,TileX,1,-1)+10.0f*VAL(TileY,TileX,1,0)+3.0f*VAL(TileY,TileX,1,+1))-(3.0f*VAL(TileY,TileX,-1,-1)+10.0f*VAL(TileY,TileX,-1,0)+3.0f*VAL(TileY,TileX,-1,+1)); + *Pch = VAL(yBase,xBase,0,0); - dIdx *= w; - dIdy *= w; + *Dx = mad((VAL(yBase,xBase,-1,1) + VAL(yBase,xBase,+1,1) - VAL(yBase,xBase,-1,-1) - VAL(yBase,xBase,+1,-1)), 3.0f, (VAL(yBase,xBase,0,1) - VAL(yBase,xBase,0,-1)) * 10.0f) * w; + *Dy = mad((VAL(yBase,xBase,1,-1) + VAL(yBase,xBase,1,+1) - VAL(yBase,xBase,-1,-1) - VAL(yBase,xBase,-1,+1)), 3.0f, (VAL(yBase,xBase,1,0) - VAL(yBase,xBase,-1,0)) * 10.0f) * w; - *Dx = dIdx; - *Dy = dIdy; - - *A11 += dIdx * dIdx; - *A12 += dIdx * dIdy; - *A22 += dIdy * dIdy; + *A11 = mad(*Dx, *Dx, *A11); + *A12 = mad(*Dx, *Dy, *A12); + *A22 = mad(*Dy, *Dy, *A22); } #undef VAL @@ -256,10 +253,9 @@ inline void GetPatch(image2d_t J, float x, float y, float* Pch, float* Dx, float* Dy, float* b1, float* b2) { - float J_val = read_imagef(J, sampler, (float2)(x, y)).x; - float diff = (J_val - *Pch) * 32.0f; - *b1 += diff**Dx; - *b2 += diff**Dy; + float diff = read_imagef(J, sampler, (float2)(x,y)).x-*Pch; + *b1 = mad(diff, *Dx, *b1); + *b2 = 
mad(diff, *Dy, *b2); } inline void GetError(image2d_t J, const float x, const float y, const float* Pch, float* errval) @@ -270,11 +266,11 @@ inline void GetError(image2d_t J, const float x, const float y, const float* Pch //macro to read pixel value into local memory. -#define READI(_y,_x) IPatchLocal[(yid+((_y)*LSy))*LM_W+(xid+((_x)*LSx))] = read_imagef(I, sampler, (float2)(Point.x + xid+(_x)*LSx + 0.5f-1, Point.y + yid+(_y)*LSy+ 0.5f-1)).x; +#define READI(_y,_x) IPatchLocal[mad24(mad24((_y), LSy, yid), LM_W, mad24((_x), LSx, xid))] = read_imagef(I, sampler, (float2)(mad((_x), LSx, Point.x + xid - 0.5f), mad((_y), LSy, Point.y + yid - 0.5f))).x; void ReadPatchIToLocalMem(image2d_t I, float2 Point, local float* IPatchLocal) { - unsigned int xid=get_local_id(0); - unsigned int yid=get_local_id(1); + int xid=get_local_id(0); + int yid=get_local_id(1); //read (3*LSx)*(3*LSy) window. each macro call read LSx*LSy pixels block READI(0,0);READI(0,1);READI(0,2); READI(1,0);READI(1,1);READI(1,2); @@ -308,14 +304,16 @@ __kernel void lkSparse(image2d_t I, image2d_t J, __local float smem2[BUFFER]; __local float smem3[BUFFER]; - unsigned int xid=get_local_id(0); - unsigned int yid=get_local_id(1); - unsigned int gid=get_group_id(0); - unsigned int xsize=get_local_size(0); - unsigned int ysize=get_local_size(1); - int xBase, yBase, k; - float wx = ((xid+2*xsize)>1, (c_winSize_y - 1)>>1); @@ -399,7 +397,7 @@ __kernel void lkSparse(image2d_t I, image2d_t J, A22 = smem3[0]; barrier(CLK_LOCAL_MEM_FENCE); - float D = A11 * A22 - A12 * A12; + float D = mad(A11, A22, - A12 * A12); if (D < 1.192092896e-07f) { @@ -413,7 +411,13 @@ __kernel void lkSparse(image2d_t I, image2d_t J, A12 /= D; A22 /= D; - prevPt = nextPts[gid] * 2.0f - c_halfWin; + prevPt = mad(nextPts[gid], 2.0f, - c_halfWin); + + float2 offset0 = (float2)(xid + 0.5f, yid + 0.5f); + float2 offset1 = (float2)(xsize, ysize); + float2 loc0 = prevPt + offset0; + float2 loc1 = loc0 + offset1; + float2 loc2 = loc1 + offset1; for (k = 0; k < c_iters; ++k) { @@ -426,57 +430,45 @@ __kernel void lkSparse(image2d_t I, image2d_t J, float b1 = 0; float b2 = 0; - yBase=yid; { - xBase=xid; - GetPatch(J, prevPt.x + xBase + 0.5f, prevPt.y + yBase + 0.5f, + GetPatch(J, loc0.x, loc0.y, &I_patch[0][0], &dIdx_patch[0][0], &dIdy_patch[0][0], &b1, &b2); - xBase+=xsize; - GetPatch(J, prevPt.x + xBase + 0.5f, prevPt.y + yBase + 0.5f, + GetPatch(J, loc1.x, loc0.y, &I_patch[0][1], &dIdx_patch[0][1], &dIdy_patch[0][1], &b1, &b2); - xBase+=xsize; - GetPatch(J, prevPt.x + xBase + 0.5f, prevPt.y + yBase + 0.5f, + GetPatch(J, loc2.x, loc0.y, &I_patch[0][2], &dIdx_patch[0][2], &dIdy_patch[0][2], &b1, &b2); } - yBase+=ysize; { - xBase=xid; - GetPatch(J, prevPt.x + xBase + 0.5f, prevPt.y + yBase + 0.5f, + GetPatch(J, loc0.x, loc1.y, &I_patch[1][0], &dIdx_patch[1][0], &dIdy_patch[1][0], &b1, &b2); - xBase+=xsize; - GetPatch(J, prevPt.x + xBase + 0.5f, prevPt.y + yBase + 0.5f, + GetPatch(J, loc1.x, loc1.y, &I_patch[1][1], &dIdx_patch[1][1], &dIdy_patch[1][1], &b1, &b2); - xBase+=xsize; - GetPatch(J, prevPt.x + xBase + 0.5f, prevPt.y + yBase + 0.5f, + GetPatch(J, loc2.x, loc1.y, &I_patch[1][2], &dIdx_patch[1][2], &dIdy_patch[1][2], &b1, &b2); } - yBase+=ysize; { - xBase=xid; - GetPatch(J, prevPt.x + xBase + 0.5f, prevPt.y + yBase + 0.5f, + GetPatch(J, loc0.x, loc2.y, &I_patch[2][0], &dIdx_patch[2][0], &dIdy_patch[2][0], &b1, &b2); - xBase+=xsize; - GetPatch(J, prevPt.x + xBase + 0.5f, prevPt.y + yBase + 0.5f, + GetPatch(J, loc1.x, loc2.y, &I_patch[2][1], &dIdx_patch[2][1], 
&dIdy_patch[2][1], &b1, &b2); - xBase+=xsize; - GetPatch(J, prevPt.x + xBase + 0.5f, prevPt.y + yBase + 0.5f, + GetPatch(J, loc2.x, loc2.y, &I_patch[2][2], &dIdx_patch[2][2], &dIdy_patch[2][2], &b1, &b2); } @@ -488,10 +480,13 @@ __kernel void lkSparse(image2d_t I, image2d_t J, barrier(CLK_LOCAL_MEM_FENCE); float2 delta; - delta.x = A12 * b2 - A22 * b1; - delta.y = A12 * b1 - A11 * b2; + delta.x = mad(A12, b2, - A22 * b1) * 32.0f; + delta.y = mad(A12, b1, - A11 * b2) * 32.0f; prevPt += delta; + loc0 += delta; + loc1 += delta; + loc2 += delta; if (fabs(delta.x) < THRESHOLD && fabs(delta.y) < THRESHOLD) break; @@ -500,54 +495,25 @@ __kernel void lkSparse(image2d_t I, image2d_t J, D = 0.0f; if (calcErr) { - yBase=yid; { - xBase=xid; - GetError(J, prevPt.x + xBase + 0.5f, prevPt.y + yBase + 0.5f, - &I_patch[0][0], &D); - - - xBase+=xsize; - GetError(J, prevPt.x + xBase + 0.5f, prevPt.y + yBase + 0.5f, - &I_patch[0][1], &D); - - xBase+=xsize; - if(xBase cap; Ptr icap; @@ -564,7 +590,9 @@ public: @param fourcc 4-character code of codec used to compress the frames. For example, VideoWriter::fourcc('P','I','M','1') is a MPEG-1 codec, VideoWriter::fourcc('M','J','P','G') is a motion-jpeg codec etc. List of codes can be obtained at [Video Codecs by - FOURCC](http://www.fourcc.org/codecs.php) page. + FOURCC](http://www.fourcc.org/codecs.php) page. FFMPEG backend with MP4 container natively uses + other values as fourcc code: see [ObjectType](http://www.mp4ra.org/codecs.html), + so you may receive a warning message from OpenCV about fourcc code conversion. @param fps Framerate of the created video stream. @param frameSize Size of the video frames. @param isColor If it is not zero, the encoder will expect and encode color frames, otherwise it @@ -608,6 +636,7 @@ public: @param propId Property identifier. It can be one of the following: - **VIDEOWRITER_PROP_QUALITY** Quality (0..100%) of the videostream encoded. Can be adjusted dynamically in some codecs. + - **VIDEOWRITER_PROP_NSTRIPES** Number of stripes for parallel encoding @param value Value of the property. */ CV_WRAP virtual bool set(int propId, double value); @@ -617,6 +646,7 @@ public: @param propId Property identifier. It can be one of the following: - **VIDEOWRITER_PROP_QUALITY** Current quality of the encoded videostream. - **VIDEOWRITER_PROP_FRAMEBYTES** (Read-only) Size of just encoded video frame; note that the encoding order may be different from representation order. + - **VIDEOWRITER_PROP_NSTRIPES** Number of stripes for parallel encoding @note When querying a property that is not supported by the backend used by the VideoWriter class, value 0 is returned. diff --git a/modules/videoio/include/opencv2/videoio/videoio_c.h b/modules/videoio/include/opencv2/videoio/videoio_c.h index b8973850cf..0365b9223a 100644 --- a/modules/videoio/include/opencv2/videoio/videoio_c.h +++ b/modules/videoio/include/opencv2/videoio/videoio_c.h @@ -63,6 +63,9 @@ typedef struct CvCapture CvCapture; /* start capturing frames from video file */ CVAPI(CvCapture*) cvCreateFileCapture( const char* filename ); +/* start capturing frames from video file. 
allows specifying a preferred API to use */ +CVAPI(CvCapture*) cvCreateFileCaptureWithPreference( const char* filename , int apiPreference); + enum { CV_CAP_ANY =0, // autodetect @@ -111,8 +114,10 @@ enum CV_CAP_INTELPERC = 1500, // Intel Perceptual Computing CV_CAP_OPENNI2 = 1600, // OpenNI2 (for Kinect) - - CV_CAP_GPHOTO2 = 1700 + CV_CAP_GPHOTO2 = 1700, + CV_CAP_GSTREAMER = 1800, // GStreamer + CV_CAP_FFMPEG = 1900, // FFMPEG + CV_CAP_IMAGES = 2000 // OpenCV Image Sequence (e.g. img_%02d.jpg) }; /* start capturing frames from camera: index = camera_index + domain_offset (CV_CAP_*) */ diff --git a/modules/videoio/src/cap.cpp b/modules/videoio/src/cap.cpp index f2b5a4a305..d2da6edfe7 100644 --- a/modules/videoio/src/cap.cpp +++ b/modules/videoio/src/cap.cpp @@ -126,289 +126,221 @@ CV_IMPL int cvGetCaptureDomain( CvCapture* capture) */ CV_IMPL CvCapture * cvCreateCameraCapture (int index) { - int domains[] = - { -#ifdef HAVE_MSMF - CV_CAP_MSMF, -#endif -#if 1 - CV_CAP_IEEE1394, // identical to CV_CAP_DC1394 -#endif -#ifdef HAVE_TYZX - CV_CAP_STEREO, -#endif -#ifdef HAVE_PVAPI - CV_CAP_PVAPI, -#endif -#if 1 - CV_CAP_VFW, // identical to CV_CAP_V4L -#endif -#ifdef HAVE_MIL - CV_CAP_MIL, -#endif -#if defined(HAVE_QUICKTIME) || defined(HAVE_QTKIT) - CV_CAP_QT, -#endif -#ifdef HAVE_UNICAP - CV_CAP_UNICAP, -#endif -#ifdef HAVE_OPENNI - CV_CAP_OPENNI, -#endif -#ifdef HAVE_OPENNI2 - CV_CAP_OPENNI2, -#endif -#ifdef HAVE_XIMEA - CV_CAP_XIAPI, -#endif -#ifdef HAVE_AVFOUNDATION - CV_CAP_AVFOUNDATION, -#endif -#ifdef HAVE_GIGE_API - CV_CAP_GIGANETIX, -#endif -#ifdef HAVE_INTELPERC - CV_CAP_INTELPERC, -#endif - -1 - }; - // interpret preferred interface (0 = autodetect) int pref = (index / 100) * 100; - if (pref) - { - domains[0]=pref; - index %= 100; - domains[1]=-1; - } - // try every possibly installed camera API - for (int i = 0; domains[i] >= 0; i++) - { -#if defined(HAVE_MSMF) || \ - defined(HAVE_TYZX) || \ - defined(HAVE_VFW) || \ - defined(HAVE_LIBV4L) || \ - defined(HAVE_CAMV4L) || \ - defined(HAVE_CAMV4L2) || \ - defined(HAVE_VIDEOIO) || \ - defined(HAVE_GSTREAMER) || \ - defined(HAVE_DC1394_2) || \ - defined(HAVE_DC1394) || \ - defined(HAVE_CMU1394) || \ - defined(HAVE_MIL) || \ - defined(HAVE_QUICKTIME) || \ - defined(HAVE_QTKIT) || \ - defined(HAVE_UNICAP) || \ - defined(HAVE_PVAPI) || \ - defined(HAVE_OPENNI) || \ - defined(HAVE_OPENNI2) || \ - defined(HAVE_XIMEA) || \ - defined(HAVE_AVFOUNDATION) || \ - defined(HAVE_GIGE_API) || \ - defined(HAVE_INTELPERC) || \ - (0) - // local variable to memorize the captured device - CvCapture *capture; -#endif + // local variable to memorize the captured device + CvCapture *capture = 0; + + switch (pref) + { + default: + // user specified an API we do not know + // bail out to let the user know that it is not available + if (pref) break; - switch (domains[i]) - { #ifdef HAVE_MSMF - case CV_CAP_MSMF: - capture = cvCreateCameraCapture_MSMF (index); - if (capture) - return capture; - break; + case CV_CAP_MSMF: + if (!capture) + capture = cvCreateCameraCapture_MSMF(index); + if (pref) break; #endif #ifdef HAVE_TYZX - case CV_CAP_STEREO: - capture = cvCreateCameraCapture_TYZX (index); - if (capture) - return capture; - break; + case CV_CAP_STEREO: + if (!capture) + capture = cvCreateCameraCapture_TYZX(index); + if (pref) break; #endif - case CV_CAP_VFW: + case CV_CAP_VFW: #ifdef HAVE_VFW - capture = cvCreateCameraCapture_VFW (index); - if (capture) - return capture; + if (!capture) + capture = cvCreateCameraCapture_VFW(index); #endif #if defined HAVE_LIBV4L 
|| defined HAVE_CAMV4L || defined HAVE_CAMV4L2 || defined HAVE_VIDEOIO - capture = cvCreateCameraCapture_V4L (index); - if (capture) - return capture; + if (!capture) + capture = cvCreateCameraCapture_V4L(index); #endif #ifdef HAVE_GSTREAMER + if (!capture) capture = cvCreateCapture_GStreamer(CV_CAP_GSTREAMER_V4L2, 0); - if (capture) - return capture; - capture = cvCreateCapture_GStreamer(CV_CAP_GSTREAMER_V4L, 0); - if (capture) - return capture; -#endif - break; //CV_CAP_VFW - case CV_CAP_FIREWIRE: + if (!capture) + capture = cvCreateCapture_GStreamer(CV_CAP_GSTREAMER_V4L, 0); +#endif + if (pref) break; // CV_CAP_VFW + + case CV_CAP_FIREWIRE: #ifdef HAVE_DC1394_2 - capture = cvCreateCameraCapture_DC1394_2 (index); - if (capture) - return capture; + if (!capture) + capture = cvCreateCameraCapture_DC1394_2(index); #endif #ifdef HAVE_DC1394 - capture = cvCreateCameraCapture_DC1394 (index); - if (capture) - return capture; + if (!capture) + capture = cvCreateCameraCapture_DC1394(index); #endif #ifdef HAVE_CMU1394 - capture = cvCreateCameraCapture_CMU (index); - if (capture) - return capture; + if (!capture) + capture = cvCreateCameraCapture_CMU(index); #endif #if defined(HAVE_GSTREAMER) && 0 - //Re-enable again when gstreamer 1394 support will land in the backend code + // Re-enable again when gstreamer 1394 support will land in the backend code + if (!capture) capture = cvCreateCapture_GStreamer(CV_CAP_GSTREAMER_1394, 0); - if (capture) - return capture; #endif - break; //CV_CAP_FIREWIRE + if (pref) break; // CV_CAP_FIREWIRE #ifdef HAVE_MIL - case CV_CAP_MIL: - capture = cvCreateCameraCapture_MIL (index); - if (capture) - return capture; - break; + case CV_CAP_MIL: + if (!capture) + capture = cvCreateCameraCapture_MIL(index); + if (pref) break; #endif #if defined(HAVE_QUICKTIME) || defined(HAVE_QTKIT) - case CV_CAP_QT: - capture = cvCreateCameraCapture_QT (index); - if (capture) - return capture; - break; + case CV_CAP_QT: + if (!capture) + capture = cvCreateCameraCapture_QT(index); + if (pref) break; #endif #ifdef HAVE_UNICAP - case CV_CAP_UNICAP: - capture = cvCreateCameraCapture_Unicap (index); - if (capture) - return capture; - break; + case CV_CAP_UNICAP: + if (!capture) + capture = cvCreateCameraCapture_Unicap(index); + if (pref) break; #endif #ifdef HAVE_PVAPI - case CV_CAP_PVAPI: - capture = cvCreateCameraCapture_PvAPI (index); - if (capture) - return capture; - break; + case CV_CAP_PVAPI: + if (!capture) + capture = cvCreateCameraCapture_PvAPI(index); + if (pref) break; #endif #ifdef HAVE_OPENNI - case CV_CAP_OPENNI: - capture = cvCreateCameraCapture_OpenNI (index); - if (capture) - return capture; - break; + case CV_CAP_OPENNI: + if (!capture) + capture = cvCreateCameraCapture_OpenNI(index); + if (pref) break; #endif #ifdef HAVE_OPENNI2 - case CV_CAP_OPENNI2: + case CV_CAP_OPENNI2: + if (!capture) capture = cvCreateCameraCapture_OpenNI(index); - if (capture) - return capture; - break; + if (pref) break; #endif #ifdef HAVE_XIMEA - case CV_CAP_XIAPI: - capture = cvCreateCameraCapture_XIMEA (index); - if (capture) - return capture; - break; + case CV_CAP_XIAPI: + if (!capture) + capture = cvCreateCameraCapture_XIMEA(index); + if (pref) break; #endif #ifdef HAVE_AVFOUNDATION - case CV_CAP_AVFOUNDATION: - capture = cvCreateCameraCapture_AVFoundation (index); - if (capture) - return capture; - break; + case CV_CAP_AVFOUNDATION: + if (!capture) + capture = cvCreateCameraCapture_AVFoundation(index); + if (pref) break; #endif #ifdef HAVE_GIGE_API - case CV_CAP_GIGANETIX: - capture = 
cvCreateCameraCapture_Giganetix (index); - if (capture) - return capture; - break; // CV_CAP_GIGANETIX + case CV_CAP_GIGANETIX: + if (!capture) + capture = cvCreateCameraCapture_Giganetix(index); + if (pref) break; // CV_CAP_GIGANETIX #endif - } } - // failed open a camera - return 0; + return capture; } /** * Videoreader dispatching method: it tries to find the first * API that can access a given filename. */ -CV_IMPL CvCapture * cvCreateFileCapture (const char * filename) +CV_IMPL CvCapture * cvCreateFileCaptureWithPreference (const char * filename, int apiPreference) { CvCapture * result = 0; + switch(apiPreference) { + default: + // user specified an API we do not know + // bail out to let the user know that it is not available + if (apiPreference) break; + #ifdef HAVE_FFMPEG - if (! result) - result = cvCreateFileCapture_FFMPEG_proxy (filename); + case CV_CAP_FFMPEG: + if (! result) + result = cvCreateFileCapture_FFMPEG_proxy (filename); + if (apiPreference) break; #endif #ifdef HAVE_VFW - if (! result) - result = cvCreateFileCapture_VFW (filename); + case CV_CAP_VFW: + if (! result) + result = cvCreateFileCapture_VFW (filename); + if (apiPreference) break; #endif + case CV_CAP_MSMF: #ifdef HAVE_MSMF - if (! result) - result = cvCreateFileCapture_MSMF (filename); + if (! result) + result = cvCreateFileCapture_MSMF (filename); #endif #ifdef HAVE_XINE - if (! result) - result = cvCreateFileCapture_XINE (filename); + if (! result) + result = cvCreateFileCapture_XINE (filename); #endif + if (apiPreference) break; #ifdef HAVE_GSTREAMER - if (! result) - result = cvCreateCapture_GStreamer (CV_CAP_GSTREAMER_FILE, filename); + case CV_CAP_GSTREAMER: + if (! result) + result = cvCreateCapture_GStreamer (CV_CAP_GSTREAMER_FILE, filename); + if (apiPreference) break; #endif #if defined(HAVE_QUICKTIME) || defined(HAVE_QTKIT) - if (! result) - result = cvCreateFileCapture_QT (filename); + case CV_CAP_QT: + if (! result) + result = cvCreateFileCapture_QT (filename); + if (apiPreference) break; #endif #ifdef HAVE_AVFOUNDATION - if (! result) - result = cvCreateFileCapture_AVFoundation (filename); + case CV_CAP_AVFOUNDATION: + if (! result) + result = cvCreateFileCapture_AVFoundation (filename); + if (apiPreference) break; #endif #ifdef HAVE_OPENNI - if (! result) - result = cvCreateFileCapture_OpenNI (filename); + case CV_CAP_OPENNI: + if (! result) + result = cvCreateFileCapture_OpenNI (filename); + if (apiPreference) break; #endif - if (! result) - result = cvCreateFileCapture_Images (filename); + case CV_CAP_IMAGES: + if (! result) + result = cvCreateFileCapture_Images (filename); + } return result; } +CV_IMPL CvCapture * cvCreateFileCapture (const char * filename) +{ + return cvCreateFileCaptureWithPreference(filename, CV_CAP_ANY); +} + /** * Videowriter dispatching method: it tries to find the first * API that can write a given stream. 
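A minimal usage sketch (not part of the patch) of the two user-visible additions in this area: opening a capture with an explicit API preference, and tuning the stripe count of the parallel motion-JPEG writer. It assumes the C++ constants cv::CAP_FFMPEG and cv::VIDEOWRITER_PROP_NSTRIPES mirror the CV_CAP_FFMPEG and VIDEOWRITER_PROP_NSTRIPES values referenced above; the file names and the stripe count are arbitrary placeholders, and whether the built-in motion-JPEG writer actually handles the output depends on the build configuration.

    #include <opencv2/core.hpp>
    #include <opencv2/videoio.hpp>

    int main()
    {
        // Prefer the FFMPEG backend explicitly; fall back to automatic
        // backend selection (CAP_ANY) if that fails.
        cv::VideoCapture cap("input.avi", cv::CAP_FFMPEG);
        if (!cap.isOpened())
            cap.open("input.avi");
        if (!cap.isOpened())
            return 1;

        cv::Size frameSize((int)cap.get(cv::CAP_PROP_FRAME_WIDTH),
                           (int)cap.get(cv::CAP_PROP_FRAME_HEIGHT));
        double fps = cap.get(cv::CAP_PROP_FPS);
        if (fps <= 0) fps = 25.0;

        cv::VideoWriter writer("output.avi",
                               cv::VideoWriter::fourcc('M','J','P','G'),
                               fps, frameSize);
        if (!writer.isOpened())
            return 1;

        // Hint the writer to encode with 4 parallel stripes; backends that do
        // not support the property simply return false from set().
        writer.set(cv::VIDEOWRITER_PROP_NSTRIPES, 4);

        cv::Mat frame;
        while (cap.read(frame))
            writer.write(frame);
        return 0;
    }
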
@@ -615,14 +547,19 @@ static Ptr IVideoWriter_create(const String& filename, int _fourcc VideoCapture::VideoCapture() {} -VideoCapture::VideoCapture(const String& filename) +VideoCapture::VideoCapture(const String& filename, int apiPreference) { - open(filename); + open(filename, apiPreference); } -VideoCapture::VideoCapture(int device) +VideoCapture::VideoCapture(const String& filename) { - open(device); + open(filename, CAP_ANY); +} + +VideoCapture::VideoCapture(int index) +{ + open(index); } VideoCapture::~VideoCapture() @@ -631,24 +568,29 @@ VideoCapture::~VideoCapture() cap.release(); } -bool VideoCapture::open(const String& filename) +bool VideoCapture::open(const String& filename, int apiPreference) { if (isOpened()) release(); icap = IVideoCapture_create(filename); if (!icap.empty()) return true; - cap.reset(cvCreateFileCapture(filename.c_str())); + cap.reset(cvCreateFileCaptureWithPreference(filename.c_str(), apiPreference)); return isOpened(); } -bool VideoCapture::open(int device) +bool VideoCapture::open(const String& filename) +{ + return open(filename, CAP_ANY); +} + +bool VideoCapture::open(int index) { if (isOpened()) release(); - icap = IVideoCapture_create(device); + icap = IVideoCapture_create(index); if (!icap.empty()) return true; - cap.reset(cvCreateCameraCapture(device)); + cap.reset(cvCreateCameraCapture(index)); return isOpened(); } diff --git a/modules/videoio/src/cap_ffmpeg_impl.hpp b/modules/videoio/src/cap_ffmpeg_impl.hpp index 96b8b6890e..93c730a1b2 100644 --- a/modules/videoio/src/cap_ffmpeg_impl.hpp +++ b/modules/videoio/src/cap_ffmpeg_impl.hpp @@ -568,7 +568,7 @@ bool CvCapture_FFMPEG::open( const char* _filename ) #if LIBAVFORMAT_BUILD >= CALC_FFMPEG_VERSION(52, 111, 0) av_dict_set(&dict, "rtsp_transport", "tcp", 0); - int err = avformat_open_input(&ic, _filename, NULL, NULL); + int err = avformat_open_input(&ic, _filename, NULL, &dict); #else int err = av_open_input_file(&ic, _filename, NULL, 0, NULL); #endif @@ -1543,7 +1543,7 @@ void CvVideoWriter_FFMPEG::close() #define CV_PRINTABLE_CHAR(ch) ((ch) < 32 ? '?' : (ch)) #define CV_TAG_TO_PRINTABLE_CHAR4(tag) CV_PRINTABLE_CHAR((tag) & 255), CV_PRINTABLE_CHAR(((tag) >> 8) & 255), CV_PRINTABLE_CHAR(((tag) >> 16) & 255), CV_PRINTABLE_CHAR(((tag) >> 24) & 255) -static inline bool cv_ff_codec_tag_match(const AVCodecTag *tags, enum AVCodecID id, unsigned int tag) +static inline bool cv_ff_codec_tag_match(const AVCodecTag *tags, CV_CODEC_ID id, unsigned int tag) { while (tags->id != AV_CODEC_ID_NONE) { @@ -1553,7 +1553,7 @@ static inline bool cv_ff_codec_tag_match(const AVCodecTag *tags, enum AVCodecID } return false; } -static inline bool cv_ff_codec_tag_list_match(const AVCodecTag *const *tags, enum AVCodecID id, unsigned int tag) +static inline bool cv_ff_codec_tag_list_match(const AVCodecTag *const *tags, CV_CODEC_ID id, unsigned int tag) { int i; for (i = 0; tags && tags[i]; i++) { @@ -1611,21 +1611,24 @@ bool CvVideoWriter_FFMPEG::open( const char * filename, int fourcc, #if LIBAVCODEC_VERSION_INT<((51<<16)+(49<<8)+0) if( (codec_id = codec_get_bmp_id( fourcc )) == CV_CODEC(CODEC_ID_NONE) ) return false; -#elif LIBAVFORMAT_BUILD >= CALC_FFMPEG_VERSION(54, 1, 0) -// APIchnages: -// 2012-01-31 - dd6d3b0 - lavf 54.01.0 -// Add avformat_get_riff_video_tags() and avformat_get_riff_audio_tags(). 
+#else if( (codec_id = av_codec_get_id(fmt->codec_tag, fourcc)) == CV_CODEC(CODEC_ID_NONE) ) { const struct AVCodecTag * fallback_tags[] = { +#if LIBAVFORMAT_BUILD >= CALC_FFMPEG_VERSION(54, 1, 0) +// APIchanges: +// 2012-01-31 - dd6d3b0 - lavf 54.01.0 +// Add avformat_get_riff_video_tags() and avformat_get_riff_audio_tags(). avformat_get_riff_video_tags(), -#if LIBAVFORMAT_BUILD >= CALC_FFMPEG_VERSION(55, 25, 100) +#endif +#if LIBAVFORMAT_BUILD >= CALC_FFMPEG_VERSION(55, 25, 100) && defined LIBAVFORMAT_VERSION_MICRO && LIBAVFORMAT_VERSION_MICRO >= 100 // APIchanges: ffmpeg only // 2014-01-19 - 1a193c4 - lavf 55.25.100 - avformat.h // Add avformat_get_mov_video_tags() and avformat_get_mov_audio_tags(). - // TODO ffmpeg only, need to skip libav: avformat_get_mov_video_tags(), + avformat_get_mov_video_tags(), #endif - codec_bmp_tags, NULL }; + codec_bmp_tags, // fallback for avformat < 54.1 + NULL }; if( (codec_id = av_codec_get_id(fallback_tags, fourcc)) == CV_CODEC(CODEC_ID_NONE) ) { fflush(stdout); @@ -1650,10 +1653,6 @@ bool CvVideoWriter_FFMPEG::open( const char * filename, int fourcc, fourcc = supported_tag; } } -#else - const struct AVCodecTag * tags[] = { codec_bmp_tags, NULL}; - if( (codec_id = av_codec_get_id(tags, fourcc)) == CV_CODEC(CODEC_ID_NONE) ) - return false; #endif // alloc memory for context @@ -1740,7 +1739,7 @@ bool CvVideoWriter_FFMPEG::open( const char * filename, int fourcc, /* find the video encoder */ codec = avcodec_find_encoder(c->codec_id); if (!codec) { - fprintf(stderr, "Could not find encoder for codec id %d: %s", c->codec_id, icvFFMPEGErrStr( + fprintf(stderr, "Could not find encoder for codec id %d: %s\n", c->codec_id, icvFFMPEGErrStr( #if LIBAVFORMAT_BUILD >= CALC_FFMPEG_VERSION(53, 2, 0) AVERROR_ENCODER_NOT_FOUND #else @@ -1764,7 +1763,7 @@ bool CvVideoWriter_FFMPEG::open( const char * filename, int fourcc, avcodec_open(c, codec) #endif ) < 0) { - fprintf(stderr, "Could not open codec '%s': %s", codec->name, icvFFMPEGErrStr(err)); + fprintf(stderr, "Could not open codec '%s': %s\n", codec->name, icvFFMPEGErrStr(err)); return false; } @@ -1968,6 +1967,13 @@ void OutputMediaStream_FFMPEG::close() AVStream* OutputMediaStream_FFMPEG::addVideoStream(AVFormatContext *oc, CV_CODEC_ID codec_id, int w, int h, int bitrate, double fps, PixelFormat pixel_format) { + AVCodec* codec = avcodec_find_encoder(codec_id); + if (!codec) + { + fprintf(stderr, "Could not find encoder for codec id %d\n", codec_id); + return NULL; + } + #if LIBAVFORMAT_BUILD >= CALC_FFMPEG_VERSION(53, 10, 0) AVStream* st = avformat_new_stream(oc, 0); #else @@ -1999,8 +2005,6 @@ AVStream* OutputMediaStream_FFMPEG::addVideoStream(AVFormatContext *oc, CV_CODEC c->width = w; c->height = h; - AVCodec* codec = avcodec_find_encoder(c->codec_id); - // time base: this is the fundamental unit of time (in seconds) in terms // of which frame timestamps are represented. 
for fixed-fps content, // timebase should be 1/framerate and timestamp increments should be diff --git a/modules/videoio/src/cap_mjpeg_encoder.cpp b/modules/videoio/src/cap_mjpeg_encoder.cpp index ee78809eb9..201b284687 100644 --- a/modules/videoio/src/cap_mjpeg_encoder.cpp +++ b/modules/videoio/src/cap_mjpeg_encoder.cpp @@ -41,6 +41,7 @@ #include "precomp.hpp" #include +#include #if CV_NEON #define WITH_NEON @@ -350,14 +351,253 @@ protected: }; +class mjpeg_buffer +{ +public: + mjpeg_buffer() + { + reset(); + } + + void resize(int size) + { + data.resize(size); + } + + void put(unsigned bits, int len) + { + if((m_pos == (data.size() - 1) && len > bits_free) || m_pos == data.size()) + { + resize(int(2*data.size())); + } + + bits_free -= (len); + unsigned int tempval = (bits) & bit_mask[(len)]; + + if( bits_free <= 0 ) + { + data[m_pos] |= ((unsigned)tempval >> -bits_free); + + bits_free += 32; + ++m_pos; + data[m_pos] = bits_free < 32 ? (tempval << bits_free) : 0; + } + else + { + data[m_pos] |= (tempval << bits_free); + } + } + + void finish() + { + if(bits_free == 32) + { + bits_free = 0; + m_data_len = m_pos; + } + else + { + m_data_len = m_pos + 1; + } + } + + void reset() + { + bits_free = 32; + m_pos = 0; + m_data_len = 0; + } + + void clear() + { + //we need to clear only first element, the rest would be overwritten + data[0] = 0; + } + + int get_bits_free() + { + return bits_free; + } + + unsigned* get_data() + { + return &data[0]; + } + + unsigned get_len() + { + return m_data_len; + } + +private: + std::vector data; + int bits_free; + unsigned m_pos; + unsigned m_data_len; +}; + + +class mjpeg_buffer_keeper +{ +public: + mjpeg_buffer_keeper() + { + reset(); + } + + mjpeg_buffer& operator[](int i) + { + return m_buffer_list[i]; + } + + void allocate_buffers(int count, int size) + { + for(int i = (int)m_buffer_list.size(); i < count; ++i) + { + m_buffer_list.push_back(mjpeg_buffer()); + m_buffer_list.back().resize(size); + } + } + + unsigned* get_data() + { + //if there is only one buffer (single thread) there is no need to stack buffers + if(m_buffer_list.size() == 1) + { + m_buffer_list[0].finish(); + + m_data_len = m_buffer_list[0].get_len(); + m_last_bit_len = m_buffer_list[0].get_bits_free() ? 
32 - m_buffer_list[0].get_bits_free() : 0; + + return m_buffer_list[0].get_data(); + } + + allocate_output_buffer(); + + int bits = 0; + unsigned currval = 0; + m_data_len = 0; + + for(unsigned j = 0; j < m_buffer_list.size(); ++j) + { + mjpeg_buffer& buffer = m_buffer_list[j]; + + //if no bit shift required we could use memcpy + if(bits == 0) + { + size_t current_pos = m_data_len; + + if(buffer.get_bits_free() == 0) + { + memcpy(&m_output_buffer[current_pos], buffer.get_data(), sizeof(buffer.get_data()[0])*buffer.get_len()); + m_data_len += buffer.get_len(); + currval = 0; + } + else + { + memcpy(&m_output_buffer[current_pos], buffer.get_data(), sizeof(buffer.get_data()[0])*(buffer.get_len() - 1 )); + m_data_len += buffer.get_len() - 1; + currval = buffer.get_data()[buffer.get_len() - 1]; + } + } + else + { + for(unsigned i = 0; i < buffer.get_len() - 1; ++i) + { + currval |= ( (unsigned)buffer.get_data()[i] >> (31 & (-bits)) ); + + m_output_buffer[m_data_len++] = currval; + + currval = buffer.get_data()[i] << (bits + 32); + } + + currval |= ( (unsigned)buffer.get_data()[buffer.get_len() - 1] >> (31 & (-bits)) ); + + if( buffer.get_bits_free() <= -bits) + { + m_output_buffer[m_data_len++] = currval; + + currval = buffer.get_data()[buffer.get_len() - 1] << (bits + 32); + } + } + + bits += buffer.get_bits_free(); + + if(bits > 0) + { + bits -= 32; + } + } + + //bits == 0 means that last element shouldn't be used. + m_output_buffer[m_data_len++] = currval; + + m_last_bit_len = -bits; + + return &m_output_buffer[0]; + } + + int get_last_bit_len() + { + return m_last_bit_len; + } + + int get_data_size() + { + return m_data_len; + } + + void reset() + { + m_last_bit_len = 0; + for(unsigned i = 0; i < m_buffer_list.size(); ++i) + { + m_buffer_list[i].reset(); + } + + //there is no need to erase output buffer since it would be overwritten + m_data_len = 0; + } + +private: + + void allocate_output_buffer() + { + unsigned total_size = 0; + + for(unsigned i = 0; i < m_buffer_list.size(); ++i) + { + m_buffer_list[i].finish(); + total_size += m_buffer_list[i].get_len(); + } + + if(total_size > m_output_buffer.size()) + { + m_output_buffer.clear(); + m_output_buffer.resize(total_size); + } + } + + std::deque m_buffer_list; + std::vector m_output_buffer; + int m_data_len; + int m_last_bit_len; +}; + class MotionJpegWriter : public IVideoWriter { public: - MotionJpegWriter() { rawstream = false; } + MotionJpegWriter() + { + rawstream = false; + nstripes = -1; + } + MotionJpegWriter(const String& filename, double fps, Size size, bool iscolor) { rawstream = false; open(filename, fps, size, iscolor); + nstripes = -1; } ~MotionJpegWriter() { close(); } @@ -616,6 +856,8 @@ public: return quality; if( propId == VIDEOWRITER_PROP_FRAMEBYTES ) return frameSize.empty() ? 0. 
: (double)frameSize.back(); + if( propId == VIDEOWRITER_PROP_NSTRIPES ) + return nstripes; return 0.; } @@ -626,6 +868,13 @@ public: quality = value; return true; } + + if( propId == VIDEOWRITER_PROP_NSTRIPES) + { + nstripes = value; + return true; + } + return false; } @@ -638,6 +887,8 @@ protected: size_t moviPointer; std::vector frameOffset, frameSize, AVIChunkSizeIndex, frameNumIndexes; bool rawstream; + mjpeg_buffer_keeper buffers_list; + double nstripes; BitStream strm; }; @@ -1107,6 +1358,380 @@ static void aan_fdct8x8( const short *src, short *dst, } #endif + +inline void convertToYUV(int colorspace, int channels, int input_channels, short* UV_data, short* Y_data, const uchar* pix_data, int y_limit, int x_limit, int step, int u_plane_ofs, int v_plane_ofs) +{ + int i, j; + const int UV_step = 16; + int x_scale = channels > 1 ? 2 : 1, y_scale = x_scale; + int Y_step = x_scale*8; + + if( channels > 1 ) + { + if( colorspace == COLORSPACE_YUV444P && y_limit == 16 && x_limit == 16 ) + { + for( i = 0; i < y_limit; i += 2, pix_data += step*2, Y_data += Y_step*2, UV_data += UV_step ) + { +#ifdef WITH_NEON + { + uint16x8_t masklo = vdupq_n_u16(255); + uint16x8_t lane = vld1q_u16((unsigned short*)(pix_data+v_plane_ofs)); + uint16x8_t t1 = vaddq_u16(vshrq_n_u16(lane, 8), vandq_u16(lane, masklo)); + lane = vld1q_u16((unsigned short*)(pix_data + v_plane_ofs + step)); + uint16x8_t t2 = vaddq_u16(vshrq_n_u16(lane, 8), vandq_u16(lane, masklo)); + t1 = vaddq_u16(t1, t2); + vst1q_s16(UV_data, vsubq_s16(vreinterpretq_s16_u16(t1), vdupq_n_s16(128*4))); + + lane = vld1q_u16((unsigned short*)(pix_data+u_plane_ofs)); + t1 = vaddq_u16(vshrq_n_u16(lane, 8), vandq_u16(lane, masklo)); + lane = vld1q_u16((unsigned short*)(pix_data + u_plane_ofs + step)); + t2 = vaddq_u16(vshrq_n_u16(lane, 8), vandq_u16(lane, masklo)); + t1 = vaddq_u16(t1, t2); + vst1q_s16(UV_data + 8, vsubq_s16(vreinterpretq_s16_u16(t1), vdupq_n_s16(128*4))); + } + + { + int16x8_t lane = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(pix_data))); + int16x8_t delta = vdupq_n_s16(128); + lane = vsubq_s16(lane, delta); + vst1q_s16(Y_data, lane); + + lane = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(pix_data+8))); + lane = vsubq_s16(lane, delta); + vst1q_s16(Y_data + 8, lane); + + lane = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(pix_data+step))); + lane = vsubq_s16(lane, delta); + vst1q_s16(Y_data+Y_step, lane); + + lane = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(pix_data + step + 8))); + lane = vsubq_s16(lane, delta); + vst1q_s16(Y_data+Y_step + 8, lane); + } +#else + for( j = 0; j < x_limit; j += 2, pix_data += 2 ) + { + Y_data[j] = pix_data[0] - 128; + Y_data[j+1] = pix_data[1] - 128; + Y_data[j+Y_step] = pix_data[step] - 128; + Y_data[j+Y_step+1] = pix_data[step+1] - 128; + + UV_data[j>>1] = pix_data[v_plane_ofs] + pix_data[v_plane_ofs+1] + + pix_data[v_plane_ofs+step] + pix_data[v_plane_ofs+step+1] - 128*4; + UV_data[(j>>1)+8] = pix_data[u_plane_ofs] + pix_data[u_plane_ofs+1] + + pix_data[u_plane_ofs+step] + pix_data[u_plane_ofs+step+1] - 128*4; + + } + + pix_data -= x_limit*input_channels; +#endif + } + } + else + { + for( i = 0; i < y_limit; i++, pix_data += step, Y_data += Y_step ) + { + for( j = 0; j < x_limit; j++, pix_data += input_channels ) + { + int Y, U, V; + + if( colorspace == COLORSPACE_BGR ) + { + int r = pix_data[2]; + int g = pix_data[1]; + int b = pix_data[0]; + + Y = DCT_DESCALE( r*y_r + g*y_g + b*y_b, fixc) - 128; + U = DCT_DESCALE( r*cb_r + g*cb_g + b*cb_b, fixc ); + V = DCT_DESCALE( r*cr_r + g*cr_g + b*cr_b, fixc ); + } + else if( 
colorspace == COLORSPACE_RGBA ) + { + int r = pix_data[0]; + int g = pix_data[1]; + int b = pix_data[2]; + + Y = DCT_DESCALE( r*y_r + g*y_g + b*y_b, fixc) - 128; + U = DCT_DESCALE( r*cb_r + g*cb_g + b*cb_b, fixc ); + V = DCT_DESCALE( r*cr_r + g*cr_g + b*cr_b, fixc ); + } + else + { + Y = pix_data[0] - 128; + U = pix_data[v_plane_ofs] - 128; + V = pix_data[u_plane_ofs] - 128; + } + + int j2 = j >> (x_scale - 1); + Y_data[j] = (short)Y; + UV_data[j2] = (short)(UV_data[j2] + U); + UV_data[j2 + 8] = (short)(UV_data[j2 + 8] + V); + } + + pix_data -= x_limit*input_channels; + if( ((i+1) & (y_scale - 1)) == 0 ) + { + UV_data += UV_step; + } + } + } + + } + else + { + for( i = 0; i < y_limit; i++, pix_data += step, Y_data += Y_step ) + { + for( j = 0; j < x_limit; j++ ) + Y_data[j] = (short)(pix_data[j]*4 - 128*4); + } + } +} + +class MjpegEncoder : public ParallelLoopBody +{ +public: + MjpegEncoder(int _height, + int _width, + int _step, + const uchar* _data, + int _input_channels, + int _channels, + int _colorspace, + unsigned (&_huff_dc_tab)[2][16], + unsigned (&_huff_ac_tab)[2][256], + short (&_fdct_qtab)[2][64], + uchar* _cat_table, + mjpeg_buffer_keeper& _buffer_list, + double nstripes + ) : + m_buffer_list(_buffer_list), + height(_height), + width(_width), + step(_step), + in_data(_data), + input_channels(_input_channels), + channels(_channels), + colorspace(_colorspace), + huff_dc_tab(_huff_dc_tab), + huff_ac_tab(_huff_ac_tab), + fdct_qtab(_fdct_qtab), + cat_table(_cat_table) + { + //empirically found value. if number of pixels is less than that value there is no sense to parallelize it. + const int min_pixels_count = 96*96; + + stripes_count = 1; + + if(nstripes < 0) + { + if(height*width > min_pixels_count) + { + stripes_count = default_stripes_count; + } + } + else + { + stripes_count = cvCeil(nstripes); + } + + int y_scale = channels > 1 ? 2 : 1; + int y_step = y_scale * 8; + + int max_stripes = (height - 1)/y_step + 1; + + stripes_count = std::min(stripes_count, max_stripes); + + m_buffer_list.allocate_buffers(stripes_count, (height*width*2)/stripes_count); + } + + void operator()( const cv::Range& range ) const + { + const int CAT_TAB_SIZE = 4096; + unsigned code = 0; + +#define JPUT_BITS(val, bits) output_buffer.put(val, bits) + +#define JPUT_HUFF(val, table) \ + code = table[(val) + 2]; \ + JPUT_BITS(code >> 8, (int)(code & 255)) + + int x, y; + int i, j; + + short buffer[4096]; + int x_scale = channels > 1 ? 
2 : 1, y_scale = x_scale; + int dc_pred[] = { 0, 0, 0 }; + int x_step = x_scale * 8; + int y_step = y_scale * 8; + short block[6][64]; + int luma_count = x_scale*y_scale; + int block_count = luma_count + channels - 1; + int u_plane_ofs = step*height; + int v_plane_ofs = u_plane_ofs + step*height; + const uchar* data = in_data; + const uchar* init_data = data; + + int num_steps = (height - 1)/y_step + 1; + + //if this is not first stripe we need to calculate dc_pred from previous step + if(range.start > 0) + { + y = y_step*int(num_steps*range.start/stripes_count - 1); + data = init_data + y*step; + + for( x = 0; x < width; x += x_step ) + { + int x_limit = x_step; + int y_limit = y_step; + const uchar* pix_data = data + x*input_channels; + short* Y_data = block[0]; + short* UV_data = block[luma_count]; + + if( x + x_limit > width ) x_limit = width - x; + if( y + y_limit > height ) y_limit = height - y; + + memset( block, 0, block_count*64*sizeof(block[0][0])); + + convertToYUV(colorspace, channels, input_channels, UV_data, Y_data, pix_data, y_limit, x_limit, step, u_plane_ofs, v_plane_ofs); + + for( i = 0; i < block_count; i++ ) + { + int is_chroma = i >= luma_count; + int src_step = x_scale * 8; + const short* src_ptr = block[i & -2] + (i & 1)*8; + + aan_fdct8x8( src_ptr, buffer, src_step, fdct_qtab[is_chroma] ); + + j = is_chroma + (i > luma_count); + dc_pred[j] = buffer[0]; + } + } + } + + for(int k = range.start; k < range.end; ++k) + { + mjpeg_buffer& output_buffer = m_buffer_list[k]; + output_buffer.clear(); + + int y_min = y_step*int(num_steps*k/stripes_count); + int y_max = y_step*int(num_steps*(k+1)/stripes_count); + + if(k == stripes_count - 1) + { + y_max = height; + } + + + data = init_data + y_min*step; + + for( y = y_min; y < y_max; y += y_step, data += y_step*step ) + { + for( x = 0; x < width; x += x_step ) + { + int x_limit = x_step; + int y_limit = y_step; + const uchar* pix_data = data + x*input_channels; + short* Y_data = block[0]; + short* UV_data = block[luma_count]; + + if( x + x_limit > width ) x_limit = width - x; + if( y + y_limit > height ) y_limit = height - y; + + memset( block, 0, block_count*64*sizeof(block[0][0])); + + convertToYUV(colorspace, channels, input_channels, UV_data, Y_data, pix_data, y_limit, x_limit, step, u_plane_ofs, v_plane_ofs); + + for( i = 0; i < block_count; i++ ) + { + int is_chroma = i >= luma_count; + int src_step = x_scale * 8; + int run = 0, val; + const short* src_ptr = block[i & -2] + (i & 1)*8; + const unsigned* htable = huff_ac_tab[is_chroma]; + + aan_fdct8x8( src_ptr, buffer, src_step, fdct_qtab[is_chroma] ); + + j = is_chroma + (i > luma_count); + val = buffer[0] - dc_pred[j]; + dc_pred[j] = buffer[0]; + + { + int cat = cat_table[val + CAT_TAB_SIZE]; + + //CV_Assert( cat <= 11 ); + JPUT_HUFF( cat, huff_dc_tab[is_chroma] ); + JPUT_BITS( val - (val < 0 ? 1 : 0), cat ); + } + + for( j = 1; j < 64; j++ ) + { + val = buffer[zigzag[j]]; + + if( val == 0 ) + { + run++; + } + else + { + while( run >= 16 ) + { + JPUT_HUFF( 0xF0, htable ); // encode 16 zeros + run -= 16; + } + + { + int cat = cat_table[val + CAT_TAB_SIZE]; + //CV_Assert( cat <= 10 ); + JPUT_HUFF( cat + run*16, htable ); + JPUT_BITS( val - (val < 0 ? 
1 : 0), cat ); + } + + run = 0; + } + } + + if( run ) + { + JPUT_HUFF( 0x00, htable ); // encode EOB + } + } + } + } + } + } + + cv::Range getRange() + { + return cv::Range(0, stripes_count); + } + + double getNStripes() + { + return stripes_count; + } + + mjpeg_buffer_keeper& m_buffer_list; +private: + + MjpegEncoder& operator=( const MjpegEncoder & ) { return *this; } + + const int height; + const int width; + const int step; + const uchar* in_data; + const int input_channels; + const int channels; + const int colorspace; + const unsigned (&huff_dc_tab)[2][16]; + const unsigned (&huff_ac_tab)[2][256]; + const short (&fdct_qtab)[2][64]; + const uchar* cat_table; + int stripes_count; + static const int default_stripes_count; +}; + +const int MjpegEncoder::default_stripes_count = 4; + void MotionJpegWriter::writeFrameData( const uchar* data, int step, int colorspace, int input_channels ) { //double total_cvt = 0, total_dct = 0; @@ -1133,7 +1758,6 @@ void MotionJpegWriter::writeFrameData( const uchar* data, int step, int colorspa // for every block: // calc dct and quantize // encode block. - int x, y; int i, j; const int max_quality = 12; short fdct_qtab[2][64]; @@ -1141,18 +1765,9 @@ void MotionJpegWriter::writeFrameData( const uchar* data, int step, int colorspa unsigned huff_ac_tab[2][256]; int x_scale = channels > 1 ? 2 : 1, y_scale = x_scale; - int dc_pred[] = { 0, 0, 0 }; - int x_step = x_scale * 8; - int y_step = y_scale * 8; - short block[6][64]; short buffer[4096]; int* hbuffer = (int*)buffer; int luma_count = x_scale*y_scale; - int block_count = luma_count + channels - 1; - int Y_step = x_scale*8; - const int UV_step = 16; - int u_plane_ofs = step*height; - int v_plane_ofs = u_plane_ofs + step*height; double _quality = quality*0.01*max_quality; if( _quality < 1. ) _quality = 1.; @@ -1241,229 +1856,27 @@ void MotionJpegWriter::writeFrameData( const uchar* data, int step, int colorspa strm.putByte( 0 ); // successive approximation bit position // high & low - (0,0) for sequential DCT - unsigned currval = 0, code = 0, tempval = 0; - int bit_idx = 32; -#define JPUT_BITS(val, bits) \ - bit_idx -= (bits); \ - tempval = (val) & bit_mask[(bits)]; \ - if( bit_idx <= 0 ) \ - { \ - strm.jput(currval | ((unsigned)tempval >> -bit_idx)); \ - bit_idx += 32; \ - currval = bit_idx < 32 ? 
(tempval << bit_idx) : 0; \ - } \ - else \ - currval |= (tempval << bit_idx) + buffers_list.reset(); -#define JPUT_HUFF(val, table) \ - code = table[(val) + 2]; \ - JPUT_BITS(code >> 8, (int)(code & 255)) + MjpegEncoder parallel_encoder(height, width, step, data, input_channels, channels, colorspace, huff_dc_tab, huff_ac_tab, fdct_qtab, cat_table, buffers_list, nstripes); - // encode data - for( y = 0; y < height; y += y_step, data += y_step*step ) + cv::parallel_for_(parallel_encoder.getRange(), parallel_encoder, parallel_encoder.getNStripes()); + + //std::vector& v = parallel_encoder.m_buffer_list.get_data(); + unsigned* v = buffers_list.get_data(); + unsigned last_data_elem = buffers_list.get_data_size() - 1; + + for(unsigned k = 0; k < last_data_elem; ++k) { - for( x = 0; x < width; x += x_step ) - { - int x_limit = x_step; - int y_limit = y_step; - const uchar* pix_data = data + x*input_channels; - short* Y_data = block[0]; - - if( x + x_limit > width ) x_limit = width - x; - if( y + y_limit > height ) y_limit = height - y; - - memset( block, 0, block_count*64*sizeof(block[0][0])); - - if( channels > 1 ) - { - short* UV_data = block[luma_count]; - // double t = (double)cv::getTickCount(); - - if( colorspace == COLORSPACE_YUV444P && y_limit == 16 && x_limit == 16 ) - { - for( i = 0; i < y_limit; i += 2, pix_data += step*2, Y_data += Y_step*2, UV_data += UV_step ) - { -#ifdef WITH_NEON - { - uint16x8_t masklo = vdupq_n_u16(255); - uint16x8_t lane = vld1q_u16((unsigned short*)(pix_data+v_plane_ofs)); - uint16x8_t t1 = vaddq_u16(vshrq_n_u16(lane, 8), vandq_u16(lane, masklo)); - lane = vld1q_u16((unsigned short*)(pix_data + v_plane_ofs + step)); - uint16x8_t t2 = vaddq_u16(vshrq_n_u16(lane, 8), vandq_u16(lane, masklo)); - t1 = vaddq_u16(t1, t2); - vst1q_s16(UV_data, vsubq_s16(vreinterpretq_s16_u16(t1), vdupq_n_s16(128*4))); - - lane = vld1q_u16((unsigned short*)(pix_data+u_plane_ofs)); - t1 = vaddq_u16(vshrq_n_u16(lane, 8), vandq_u16(lane, masklo)); - lane = vld1q_u16((unsigned short*)(pix_data + u_plane_ofs + step)); - t2 = vaddq_u16(vshrq_n_u16(lane, 8), vandq_u16(lane, masklo)); - t1 = vaddq_u16(t1, t2); - vst1q_s16(UV_data + 8, vsubq_s16(vreinterpretq_s16_u16(t1), vdupq_n_s16(128*4))); - } - - { - int16x8_t lane = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(pix_data))); - int16x8_t delta = vdupq_n_s16(128); - lane = vsubq_s16(lane, delta); - vst1q_s16(Y_data, lane); - - lane = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(pix_data+8))); - lane = vsubq_s16(lane, delta); - vst1q_s16(Y_data + 8, lane); - - lane = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(pix_data+step))); - lane = vsubq_s16(lane, delta); - vst1q_s16(Y_data+Y_step, lane); - - lane = vreinterpretq_s16_u16(vmovl_u8(vld1_u8(pix_data + step + 8))); - lane = vsubq_s16(lane, delta); - vst1q_s16(Y_data+Y_step + 8, lane); - } -#else - for( j = 0; j < x_limit; j += 2, pix_data += 2 ) - { - Y_data[j] = pix_data[0] - 128; - Y_data[j+1] = pix_data[1] - 128; - Y_data[j+Y_step] = pix_data[step] - 128; - Y_data[j+Y_step+1] = pix_data[step+1] - 128; - - UV_data[j>>1] = pix_data[v_plane_ofs] + pix_data[v_plane_ofs+1] + - pix_data[v_plane_ofs+step] + pix_data[v_plane_ofs+step+1] - 128*4; - UV_data[(j>>1)+8] = pix_data[u_plane_ofs] + pix_data[u_plane_ofs+1] + - pix_data[u_plane_ofs+step] + pix_data[u_plane_ofs+step+1] - 128*4; - - } - - pix_data -= x_limit*input_channels; -#endif - } - } - else - { - for( i = 0; i < y_limit; i++, pix_data += step, Y_data += Y_step ) - { - for( j = 0; j < x_limit; j++, pix_data += input_channels ) - { - int Y, U, V; - - if( 
colorspace == COLORSPACE_BGR ) - { - int r = pix_data[2]; - int g = pix_data[1]; - int b = pix_data[0]; - - Y = DCT_DESCALE( r*y_r + g*y_g + b*y_b, fixc) - 128; - U = DCT_DESCALE( r*cb_r + g*cb_g + b*cb_b, fixc ); - V = DCT_DESCALE( r*cr_r + g*cr_g + b*cr_b, fixc ); - } - else if( colorspace == COLORSPACE_RGBA ) - { - int r = pix_data[0]; - int g = pix_data[1]; - int b = pix_data[2]; - - Y = DCT_DESCALE( r*y_r + g*y_g + b*y_b, fixc) - 128; - U = DCT_DESCALE( r*cb_r + g*cb_g + b*cb_b, fixc ); - V = DCT_DESCALE( r*cr_r + g*cr_g + b*cr_b, fixc ); - } - else - { - Y = pix_data[0] - 128; - U = pix_data[v_plane_ofs] - 128; - V = pix_data[u_plane_ofs] - 128; - } - - int j2 = j >> (x_scale - 1); - Y_data[j] = (short)Y; - UV_data[j2] = (short)(UV_data[j2] + U); - UV_data[j2 + 8] = (short)(UV_data[j2 + 8] + V); - } - - pix_data -= x_limit*input_channels; - if( ((i+1) & (y_scale - 1)) == 0 ) - { - UV_data += UV_step; - } - } - } - - // total_cvt += (double)cv::getTickCount() - t; - } - else - { - for( i = 0; i < y_limit; i++, pix_data += step, Y_data += Y_step ) - { - for( j = 0; j < x_limit; j++ ) - Y_data[j] = (short)(pix_data[j]*4 - 128*4); - } - } - - for( i = 0; i < block_count; i++ ) - { - int is_chroma = i >= luma_count; - int src_step = x_scale * 8; - int run = 0, val; - const short* src_ptr = block[i & -2] + (i & 1)*8; - const unsigned* htable = huff_ac_tab[is_chroma]; - - //double t = (double)cv::getTickCount(); - aan_fdct8x8( src_ptr, buffer, src_step, fdct_qtab[is_chroma] ); - //total_dct += (double)cv::getTickCount() - t; - - j = is_chroma + (i > luma_count); - val = buffer[0] - dc_pred[j]; - dc_pred[j] = buffer[0]; - - { - int cat = cat_table[val + CAT_TAB_SIZE]; - - //CV_Assert( cat <= 11 ); - JPUT_HUFF( cat, huff_dc_tab[is_chroma] ); - JPUT_BITS( val - (val < 0 ? 1 : 0), cat ); - } - - for( j = 1; j < 64; j++ ) - { - val = buffer[zigzag[j]]; - - if( val == 0 ) - { - run++; - } - else - { - while( run >= 16 ) - { - JPUT_HUFF( 0xF0, htable ); // encode 16 zeros - run -= 16; - } - - { - int cat = cat_table[val + CAT_TAB_SIZE]; - //CV_Assert( cat <= 10 ); - JPUT_HUFF( cat + run*16, htable ); - JPUT_BITS( val - (val < 0 ? 1 : 0), cat ); - } - - run = 0; - } - } - - if( run ) - { - JPUT_HUFF( 0x00, htable ); // encode EOB - } - } - } + strm.jput(v[k]); } - - // Flush - strm.jflush(currval, bit_idx); + strm.jflush(v[last_data_elem], 32 - buffers_list.get_last_bit_len()); strm.jputShort( 0xFFD9 ); // EOI marker /*printf("total dct = %.1fms, total cvt = %.1fms\n", total_dct*1000./cv::getTickFrequency(), total_cvt*1000./cv::getTickFrequency());*/ + size_t pos = strm.getPos(); size_t pos1 = (pos + 3) & ~3; for( ; pos < pos1; pos++ ) diff --git a/modules/videoio/src/cap_openni2.cpp b/modules/videoio/src/cap_openni2.cpp index ca897ae42d..ff18ed4b56 100644 --- a/modules/videoio/src/cap_openni2.cpp +++ b/modules/videoio/src/cap_openni2.cpp @@ -567,13 +567,13 @@ bool CvCapture_OpenNI2::setDepthGeneratorProperty( int propIdx, double propValue { case CV_CAP_PROP_OPENNI_REGISTRATION: { - if( propValue < 1.0 ) // "on" + if( propValue != 0.0 ) // "on" { // if there isn't image generator (i.e. ASUS XtionPro doesn't have it) // then the property isn't avaliable if ( color.isValid() ) { - openni::ImageRegistrationMode mode = propValue < 1.0 ? openni::IMAGE_REGISTRATION_DEPTH_TO_COLOR : openni::IMAGE_REGISTRATION_OFF; + openni::ImageRegistrationMode mode = propValue != 0.0 ? 
openni::IMAGE_REGISTRATION_DEPTH_TO_COLOR : openni::IMAGE_REGISTRATION_OFF; if( !device.getImageRegistrationMode() == mode ) { if (device.isImageRegistrationModeSupported(mode)) diff --git a/modules/videoio/src/cap_v4l.cpp b/modules/videoio/src/cap_v4l.cpp index 791f0a2347..a879de7d58 100644 --- a/modules/videoio/src/cap_v4l.cpp +++ b/modules/videoio/src/cap_v4l.cpp @@ -2824,21 +2824,8 @@ static void icvCloseCAM_V4L( CvCaptureCAM_V4L* capture ){ { #ifdef HAVE_CAMV4L2 - if (V4L2_SUPPORT == 0) + if (V4L2_SUPPORT == 1) #endif /* HAVE_CAMV4L2 */ -#ifdef HAVE_CAMV4L - { - - if (capture->mmaps) - free(capture->mmaps); - if (capture->memoryMap) - munmap(capture->memoryMap, capture->memoryBuffer.size); - - } -#endif /* HAVE_CAMV4L */ -#if defined(HAVE_CAMV4L) && defined(HAVE_CAMV4L2) - else -#endif /* HAVE_CAMV4L && HAVE_CAMV4L2 */ #ifdef HAVE_CAMV4L2 { capture->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; @@ -2860,6 +2847,19 @@ static void icvCloseCAM_V4L( CvCaptureCAM_V4L* capture ){ } } #endif /* HAVE_CAMV4L2 */ +#if defined(HAVE_CAMV4L) && defined(HAVE_CAMV4L2) + else +#endif /* HAVE_CAMV4L && HAVE_CAMV4L2 */ +#ifdef HAVE_CAMV4L + { + + if (capture->mmaps) + free(capture->mmaps); + if (capture->memoryMap) + munmap(capture->memoryMap, capture->memoryBuffer.size); + + } +#endif /* HAVE_CAMV4L */ if (capture->deviceHandle != -1) close(capture->deviceHandle); diff --git a/modules/videoio/test/test_positioning.cpp b/modules/videoio/test/test_positioning.cpp index 398a160a25..5cead8dcff 100644 --- a/modules/videoio/test/test_positioning.cpp +++ b/modules/videoio/test/test_positioning.cpp @@ -106,7 +106,7 @@ void CV_VideoPositioningTest::generate_idx_seq(CvCapture* cap, int method) { RNG rng(N); idx.clear(); - for( int i = 0; i < N-1; i++ ) + for( int i = 0; i >= 0 && i < N-1; i++ ) idx.push_back(rng.uniform(0, N)); idx.push_back(N-1); std::swap(idx.at(rng.uniform(0, N-1)), idx.at(N-1)); diff --git a/modules/world/CMakeLists.txt b/modules/world/CMakeLists.txt index 8a4170a79b..6377853e2f 100644 --- a/modules/world/CMakeLists.txt +++ b/modules/world/CMakeLists.txt @@ -35,8 +35,10 @@ set(headers_list "HEADERS") set(sources_list "SOURCES") set(link_deps "") foreach(m ${OPENCV_MODULE_${the_module}_DEPS}) - set(headers_list "${headers_list};${OPENCV_MODULE_${m}_HEADERS}") - set(sources_list "${sources_list};${OPENCV_MODULE_${m}_SOURCES}") + if(OPENCV_MODULE_${m}_IS_PART_OF_WORLD) + set(headers_list "${headers_list};${OPENCV_MODULE_${m}_HEADERS}") + set(sources_list "${sources_list};${OPENCV_MODULE_${m}_SOURCES}") + endif() set(link_deps "${link_deps};${OPENCV_MODULE_${m}_LINK_DEPS}") endforeach() diff --git a/platforms/android/android.toolchain.cmake b/platforms/android/android.toolchain.cmake index b540ea47df..ffa26126a7 100644 --- a/platforms/android/android.toolchain.cmake +++ b/platforms/android/android.toolchain.cmake @@ -30,7 +30,7 @@ # ------------------------------------------------------------------------------ # Android CMake toolchain file, for use with the Android NDK r5-r10d -# Requires cmake 2.6.3 or newer (2.8.5 or newer is recommended). +# Requires cmake 2.6.3 or newer (2.8.9 or newer is recommended). # See home page: https://github.com/taka-no-me/android-cmake # # Usage Linux: @@ -39,12 +39,6 @@ # $ cmake -DCMAKE_TOOLCHAIN_FILE=path/to/the/android.toolchain.cmake .. 
# $ make -j8 # -# Usage Linux (using standalone toolchain): -# $ export ANDROID_STANDALONE_TOOLCHAIN=/absolute/path/to/android-toolchain -# $ mkdir build && cd build -# $ cmake -DCMAKE_TOOLCHAIN_FILE=path/to/the/android.toolchain.cmake .. -# $ make -j8 -# # Usage Windows: # You need native port of make to build your project. # Android NDK r7 (and newer) already has make.exe on board. @@ -63,11 +57,6 @@ # ANDROID_NDK=/opt/android-ndk - path to the NDK root. # Can be set as environment variable. Can be set only at first cmake run. # -# ANDROID_STANDALONE_TOOLCHAIN=/opt/android-toolchain - path to the -# standalone toolchain. This option is not used if full NDK is found -# (ignored if ANDROID_NDK is set). -# Can be set as environment variable. Can be set only at first cmake run. -# # ANDROID_ABI=armeabi-v7a - specifies the target Application Binary # Interface (ABI). This option nearly matches to the APP_ABI variable # used by ndk-build tool from Android NDK. @@ -123,8 +112,8 @@ # * x86_64-clang3.5 # # ANDROID_FORCE_ARM_BUILD=OFF - set ON to generate 32-bit ARM instructions -# instead of Thumb. Is not available for "x86" (inapplicable) and -# "armeabi-v6 with VFP" (is forced to be ON) ABIs. +# instead of Thumb. Is not available for "armeabi-v6 with VFP" +# (is forced to be ON) ABI. # # ANDROID_NO_UNDEFINED=ON - set ON to show all undefined symbols as linker # errors even if they are not used. @@ -133,13 +122,6 @@ # libraries. Automatically turned for NDK r5x and r6x due to GLESv2 # problems. # -# LIBRARY_OUTPUT_PATH_ROOT=${CMAKE_SOURCE_DIR} - where to output binary -# files. See additional details below. -# -# ANDROID_SET_OBSOLETE_VARIABLES=ON - if set, then toolchain defines some -# obsolete variables which were used by previous versions of this file for -# backward compatibility. -# # ANDROID_STL=gnustl_static - specify the runtime to use. # # Possible values are: @@ -200,12 +182,6 @@ # will be set true, mutually exclusive. NEON option will be set true # if VFP is set to NEON. # -# LIBRARY_OUTPUT_PATH_ROOT should be set in cache to determine where Android -# libraries will be installed. -# Default is ${CMAKE_SOURCE_DIR}, and the android libs will always be -# under the ${LIBRARY_OUTPUT_PATH_ROOT}/libs/${ANDROID_NDK_ABI_NAME} -# (depending on the target ABI). This is convenient for Android packaging. -# # ------------------------------------------------------------------------------ cmake_minimum_required( VERSION 2.6.3 ) @@ -235,22 +211,22 @@ endif() # this one not so much set( CMAKE_SYSTEM_VERSION 1 ) -# rpath makes low sence for Android +# rpath makes low sense for Android set( CMAKE_SHARED_LIBRARY_RUNTIME_C_FLAG "" ) set( CMAKE_SKIP_RPATH TRUE CACHE BOOL "If set, runtime paths are not added when using shared libraries." 
) # NDK search paths set( ANDROID_SUPPORTED_NDK_VERSIONS ${ANDROID_EXTRA_NDK_VERSIONS} -r10d -r10c -r10b -r10 -r9d -r9c -r9b -r9 -r8e -r8d -r8c -r8b -r8 -r7c -r7b -r7 -r6b -r6 -r5c -r5b -r5 "" ) -if(NOT DEFINED ANDROID_NDK_SEARCH_PATHS) +if( NOT DEFINED ANDROID_NDK_SEARCH_PATHS ) if( CMAKE_HOST_WIN32 ) file( TO_CMAKE_PATH "$ENV{PROGRAMFILES}" ANDROID_NDK_SEARCH_PATHS ) - set( ANDROID_NDK_SEARCH_PATHS "${ANDROID_NDK_SEARCH_PATHS}/android-ndk" "$ENV{SystemDrive}/NVPACK/android-ndk" ) + set( ANDROID_NDK_SEARCH_PATHS "${ANDROID_NDK_SEARCH_PATHS}" "$ENV{SystemDrive}/NVPACK" ) else() file( TO_CMAKE_PATH "$ENV{HOME}" ANDROID_NDK_SEARCH_PATHS ) - set( ANDROID_NDK_SEARCH_PATHS /opt/android-ndk "${ANDROID_NDK_SEARCH_PATHS}/NVPACK/android-ndk" ) + set( ANDROID_NDK_SEARCH_PATHS /opt "${ANDROID_NDK_SEARCH_PATHS}/NVPACK" ) endif() endif() -if(NOT DEFINED ANDROID_STANDALONE_TOOLCHAIN_SEARCH_PATH) +if( NOT DEFINED ANDROID_STANDALONE_TOOLCHAIN_SEARCH_PATH ) set( ANDROID_STANDALONE_TOOLCHAIN_SEARCH_PATH /opt/android-toolchain ) endif() @@ -272,106 +248,90 @@ set( ANDROID_DEFAULT_NDK_API_LEVEL_mips64 21 ) macro( __LIST_FILTER listvar regex ) - if( ${listvar} ) - foreach( __val ${${listvar}} ) - if( __val MATCHES "${regex}" ) - list( REMOVE_ITEM ${listvar} "${__val}" ) - endif() - endforeach() - endif() + if( ${listvar} ) + foreach( __val ${${listvar}} ) + if( __val MATCHES "${regex}" ) + list( REMOVE_ITEM ${listvar} "${__val}" ) + endif() + endforeach() + endif() endmacro() macro( __INIT_VARIABLE var_name ) - set( __test_path 0 ) - foreach( __var ${ARGN} ) - if( __var STREQUAL "PATH" ) - set( __test_path 1 ) - break() - endif() - endforeach() - if( __test_path AND NOT EXISTS "${${var_name}}" ) - unset( ${var_name} CACHE ) - endif() - if( "${${var_name}}" STREQUAL "" ) - set( __values 0 ) + set( __test_path 0 ) foreach( __var ${ARGN} ) - if( __var STREQUAL "VALUES" ) - set( __values 1 ) - elseif( NOT __var STREQUAL "PATH" ) - set( __obsolete 0 ) - if( __var MATCHES "^OBSOLETE_.*$" ) - string( REPLACE "OBSOLETE_" "" __var "${__var}" ) - set( __obsolete 1 ) - endif() - if( __var MATCHES "^ENV_.*$" ) - string( REPLACE "ENV_" "" __var "${__var}" ) - set( __value "$ENV{${__var}}" ) - elseif( DEFINED ${__var} ) - set( __value "${${__var}}" ) - else() - if( __values ) - set( __value "${__var}" ) - else() - set( __value "" ) - endif() - endif() - if( NOT "${__value}" STREQUAL "" ) - if( __test_path ) - if( EXISTS "${__value}" ) - file( TO_CMAKE_PATH "${__value}" ${var_name} ) - if( __obsolete AND NOT _CMAKE_IN_TRY_COMPILE ) - message( WARNING "Using value of obsolete variable ${__var} as initial value for ${var_name}. Please note, that ${__var} can be completely removed in future versions of the toolchain." ) - endif() - break() - endif() - else() - set( ${var_name} "${__value}" ) - if( __obsolete AND NOT _CMAKE_IN_TRY_COMPILE ) - message( WARNING "Using value of obsolete variable ${__var} as initial value for ${var_name}. Please note, that ${__var} can be completely removed in future versions of the toolchain." 
) - endif() + if( __var STREQUAL "PATH" ) + set( __test_path 1 ) break() - endif() endif() - endif() endforeach() - unset( __value ) - unset( __values ) - unset( __obsolete ) - elseif( __test_path ) - file( TO_CMAKE_PATH "${${var_name}}" ${var_name} ) - endif() - unset( __test_path ) + + if( __test_path AND NOT EXISTS "${${var_name}}" ) + unset( ${var_name} CACHE ) + endif() + + if( " ${${var_name}}" STREQUAL " " ) + set( __values 0 ) + foreach( __var ${ARGN} ) + if( __var STREQUAL "VALUES" ) + set( __values 1 ) + elseif( NOT __var STREQUAL "PATH" ) + if( __var MATCHES "^ENV_.*$" ) + string( REPLACE "ENV_" "" __var "${__var}" ) + set( __value "$ENV{${__var}}" ) + elseif( DEFINED ${__var} ) + set( __value "${${__var}}" ) + elseif( __values ) + set( __value "${__var}" ) + else() + set( __value "" ) + endif() + + if( NOT " ${__value}" STREQUAL " " AND (NOT __test_path OR EXISTS "${__value}") ) + set( ${var_name} "${__value}" ) + break() + endif() + endif() + endforeach() + unset( __value ) + unset( __values ) + endif() + + if( __test_path ) + file( TO_CMAKE_PATH "${${var_name}}" ${var_name} ) + endif() + unset( __test_path ) endmacro() macro( __DETECT_NATIVE_API_LEVEL _var _path ) - SET( __ndkApiLevelRegex "^[\t ]*#define[\t ]+__ANDROID_API__[\t ]+([0-9]+)[\t ]*.*$" ) - FILE( STRINGS ${_path} __apiFileContent REGEX "${__ndkApiLevelRegex}" ) - if( NOT __apiFileContent ) - message( SEND_ERROR "Could not get Android native API level. Probably you have specified invalid level value, or your copy of NDK/toolchain is broken." ) - endif() - string( REGEX REPLACE "${__ndkApiLevelRegex}" "\\1" ${_var} "${__apiFileContent}" ) - unset( __apiFileContent ) - unset( __ndkApiLevelRegex ) + set( __ndkApiLevelRegex "^[\t ]*#define[\t ]+__ANDROID_API__[\t ]+([0-9]+)[\t ]*.*$" ) + file( STRINGS ${_path} __apiFileContent REGEX "${__ndkApiLevelRegex}" ) + if( NOT __apiFileContent ) + message( SEND_ERROR "Could not get Android native API level. Probably you have specified invalid level value, or your copy of NDK/toolchain is broken." 
) + endif() + string( REGEX REPLACE "${__ndkApiLevelRegex}" "\\1" ${_var} "${__apiFileContent}" ) + unset( __apiFileContent ) + unset( __ndkApiLevelRegex ) endmacro() macro( __DETECT_TOOLCHAIN_MACHINE_NAME _var _root ) if( EXISTS "${_root}" ) - file( GLOB __gccExePath RELATIVE "${_root}/bin/" "${_root}/bin/*-gcc${TOOL_OS_SUFFIX}" ) - __LIST_FILTER( __gccExePath "^[.].*" ) - list( LENGTH __gccExePath __gccExePathsCount ) - if( NOT __gccExePathsCount EQUAL 1 AND NOT _CMAKE_IN_TRY_COMPILE ) - message( WARNING "Could not determine machine name for compiler from ${_root}" ) - set( ${_var} "" ) + file( GLOB __gccExePath RELATIVE "${_root}/bin/" "${_root}/bin/*-gcc${TOOL_OS_SUFFIX}" ) + __LIST_FILTER( __gccExePath "^[.].*" ) + list( LENGTH __gccExePath __gccExePathsCount ) + if( NOT __gccExePathsCount EQUAL 1 AND NOT _CMAKE_IN_TRY_COMPILE ) + message( WARNING "Could not determine machine name for compiler from ${_root}" ) + set( ${_var} "" ) + else() + get_filename_component( __gccExeName "${__gccExePath}" NAME_WE ) + string( REPLACE "-gcc" "" ${_var} "${__gccExeName}" ) + endif() + unset( __gccExePath ) + unset( __gccExePathsCount ) + unset( __gccExeName ) else() - get_filename_component( __gccExeName "${__gccExePath}" NAME_WE ) - string( REPLACE "-gcc" "" ${_var} "${__gccExeName}" ) + set( ${_var} "" ) endif() - unset( __gccExePath ) - unset( __gccExePathsCount ) - unset( __gccExeName ) - else() - set( ${_var} "" ) - endif() endmacro() @@ -419,17 +379,19 @@ if( NOT ANDROID_NDK_HOST_X64 ) endif() # see if we have path to Android NDK -__INIT_VARIABLE( ANDROID_NDK PATH ENV_ANDROID_NDK ) +if( NOT ANDROID_NDK AND NOT ANDROID_STANDALONE_TOOLCHAIN ) + __INIT_VARIABLE( ANDROID_NDK PATH ENV_ANDROID_NDK ) +endif() if( NOT ANDROID_NDK ) # see if we have path to Android standalone toolchain - __INIT_VARIABLE( ANDROID_STANDALONE_TOOLCHAIN PATH ENV_ANDROID_STANDALONE_TOOLCHAIN OBSOLETE_ANDROID_NDK_TOOLCHAIN_ROOT OBSOLETE_ENV_ANDROID_NDK_TOOLCHAIN_ROOT ) + __INIT_VARIABLE( ANDROID_STANDALONE_TOOLCHAIN PATH ENV_ANDROID_STANDALONE_TOOLCHAIN ) if( NOT ANDROID_STANDALONE_TOOLCHAIN ) #try to find Android NDK in one of the the default locations set( __ndkSearchPaths ) foreach( __ndkSearchPath ${ANDROID_NDK_SEARCH_PATHS} ) foreach( suffix ${ANDROID_SUPPORTED_NDK_VERSIONS} ) - list( APPEND __ndkSearchPaths "${__ndkSearchPath}${suffix}" ) + list( APPEND __ndkSearchPaths "${__ndkSearchPath}/android-ndk${suffix}" ) endforeach() endforeach() __INIT_VARIABLE( ANDROID_NDK PATH VALUES ${__ndkSearchPaths} ) @@ -487,7 +449,7 @@ else() or export ANDROID_STANDALONE_TOOLCHAIN=~/my-android-toolchain or put the toolchain or NDK in the default path: - sudo ln -s ~/my-android-ndk ${ANDROID_NDK_SEARCH_PATH} + sudo ln -s ~/my-android-ndk ${ANDROID_NDK_SEARCH_PATH}/android-ndk sudo ln -s ~/my-android-toolchain ${ANDROID_STANDALONE_TOOLCHAIN_SEARCH_PATH}" ) endif() @@ -636,7 +598,7 @@ if( BUILD_WITH_ANDROID_NDK ) endif() if( NOT __availableToolchains ) file( GLOB __availableToolchainsLst RELATIVE "${ANDROID_NDK_TOOLCHAINS_PATH}" "${ANDROID_NDK_TOOLCHAINS_PATH}/*" ) - if( __availableToolchains ) + if( __availableToolchainsLst ) list(SORT __availableToolchainsLst) # we need clang to go after gcc endif() __LIST_FILTER( __availableToolchainsLst "^[.]" ) @@ -669,7 +631,7 @@ if( NOT ANDROID_SUPPORTED_ABIS ) endif() # choose target ABI -__INIT_VARIABLE( ANDROID_ABI OBSOLETE_ARM_TARGET OBSOLETE_ARM_TARGETS VALUES ${ANDROID_SUPPORTED_ABIS} ) +__INIT_VARIABLE( ANDROID_ABI VALUES ${ANDROID_SUPPORTED_ABIS} ) # verify that target ABI is supported 
list( FIND ANDROID_SUPPORTED_ABIS "${ANDROID_ABI}" __androidAbiIdx ) if( __androidAbiIdx EQUAL -1 ) @@ -760,7 +722,7 @@ if( CMAKE_BINARY_DIR AND EXISTS "${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMa endif() if( ANDROID_ARCH_NAME STREQUAL "arm" AND NOT ARMEABI_V6 ) - __INIT_VARIABLE( ANDROID_FORCE_ARM_BUILD OBSOLETE_FORCE_ARM VALUES OFF ) + __INIT_VARIABLE( ANDROID_FORCE_ARM_BUILD VALUES OFF ) set( ANDROID_FORCE_ARM_BUILD ${ANDROID_FORCE_ARM_BUILD} CACHE BOOL "Use 32-bit ARM instructions instead of Thumb-1" FORCE ) mark_as_advanced( ANDROID_FORCE_ARM_BUILD ) else() @@ -845,6 +807,7 @@ else() unset( __realApiLevel ) endif() set( ANDROID_NATIVE_API_LEVEL "${ANDROID_NATIVE_API_LEVEL}" CACHE STRING "Android API level for native code" FORCE ) + set( CMAKE_ANDROID_API ${ANDROID_NATIVE_API_LEVEL} ) if( CMAKE_VERSION VERSION_GREATER "2.8" ) list( SORT ANDROID_SUPPORTED_NATIVE_API_LEVELS ) set_property( CACHE ANDROID_NATIVE_API_LEVEL PROPERTY STRINGS ${ANDROID_SUPPORTED_NATIVE_API_LEVELS} ) @@ -863,16 +826,7 @@ endif() # runtime choice (STL, rtti, exceptions) if( NOT ANDROID_STL ) - # honor legacy ANDROID_USE_STLPORT - if( DEFINED ANDROID_USE_STLPORT ) - if( ANDROID_USE_STLPORT ) - set( ANDROID_STL stlport_static ) - endif() - message( WARNING "You are using an obsolete variable ANDROID_USE_STLPORT to select the STL variant. Use -DANDROID_STL=stlport_static instead." ) - endif() - if( NOT ANDROID_STL ) set( ANDROID_STL gnustl_static ) - endif() endif() set( ANDROID_STL "${ANDROID_STL}" CACHE STRING "C++ runtime" ) set( ANDROID_STL_FORCE_FEATURES ON CACHE BOOL "automatically configure rtti and exceptions support based on C++ runtime" ) @@ -1033,7 +987,7 @@ if( BUILD_WITH_ANDROID_NDK ) set( ANDROID_STL_INCLUDE_DIRS "${ANDROID_NDK}/sources/cxx-stl/system/include" ) elseif( ANDROID_STL MATCHES "gabi" ) if( ANDROID_NDK_RELEASE_NUM LESS 7000 ) # before r7 - message( FATAL_ERROR "gabi++ is not awailable in your NDK. You have to upgrade to NDK r7 or newer to use gabi++.") + message( FATAL_ERROR "gabi++ is not available in your NDK. You have to upgrade to NDK r7 or newer to use gabi++.") endif() set( ANDROID_RTTI ON ) set( ANDROID_EXCEPTIONS OFF ) @@ -1144,7 +1098,12 @@ if( NOT CMAKE_C_COMPILER ) endif() set( CMAKE_ASM_COMPILER "${ANDROID_TOOLCHAIN_ROOT}/bin/${ANDROID_TOOLCHAIN_MACHINE_NAME}-gcc${TOOL_OS_SUFFIX}" CACHE PATH "assembler" ) set( CMAKE_STRIP "${ANDROID_TOOLCHAIN_ROOT}/bin/${ANDROID_TOOLCHAIN_MACHINE_NAME}-strip${TOOL_OS_SUFFIX}" CACHE PATH "strip" ) - set( CMAKE_AR "${ANDROID_TOOLCHAIN_ROOT}/bin/${ANDROID_TOOLCHAIN_MACHINE_NAME}-ar${TOOL_OS_SUFFIX}" CACHE PATH "archive" ) + if( EXISTS "${ANDROID_TOOLCHAIN_ROOT}/bin/${ANDROID_TOOLCHAIN_MACHINE_NAME}-gcc-ar${TOOL_OS_SUFFIX}" ) + # Use gcc-ar if we have it for better LTO support. 
+ set( CMAKE_AR "${ANDROID_TOOLCHAIN_ROOT}/bin/${ANDROID_TOOLCHAIN_MACHINE_NAME}-gcc-ar${TOOL_OS_SUFFIX}" CACHE PATH "archive" ) + else() + set( CMAKE_AR "${ANDROID_TOOLCHAIN_ROOT}/bin/${ANDROID_TOOLCHAIN_MACHINE_NAME}-ar${TOOL_OS_SUFFIX}" CACHE PATH "archive" ) + endif() set( CMAKE_LINKER "${ANDROID_TOOLCHAIN_ROOT}/bin/${ANDROID_TOOLCHAIN_MACHINE_NAME}-ld${TOOL_OS_SUFFIX}" CACHE PATH "linker" ) set( CMAKE_NM "${ANDROID_TOOLCHAIN_ROOT}/bin/${ANDROID_TOOLCHAIN_MACHINE_NAME}-nm${TOOL_OS_SUFFIX}" CACHE PATH "nm" ) set( CMAKE_OBJCOPY "${ANDROID_TOOLCHAIN_ROOT}/bin/${ANDROID_TOOLCHAIN_MACHINE_NAME}-objcopy${TOOL_OS_SUFFIX}" CACHE PATH "objcopy" ) @@ -1233,7 +1192,7 @@ endif() # NDK flags if (ARM64_V8A ) - set( ANDROID_CXX_FLAGS "${ANDROID_CXX_FLAGS} -ffunction-sections -funwind-tables" ) + set( ANDROID_CXX_FLAGS "${ANDROID_CXX_FLAGS} -funwind-tables" ) set( ANDROID_CXX_FLAGS_RELEASE "-fomit-frame-pointer -fstrict-aliasing" ) set( ANDROID_CXX_FLAGS_DEBUG "-fno-omit-frame-pointer -fno-strict-aliasing" ) if( NOT ANDROID_COMPILER_IS_CLANG ) @@ -1263,7 +1222,7 @@ elseif( X86 OR X86_64 ) set( ANDROID_CXX_FLAGS_RELEASE "-fomit-frame-pointer -fstrict-aliasing" ) set( ANDROID_CXX_FLAGS_DEBUG "-fno-omit-frame-pointer -fno-strict-aliasing" ) elseif( MIPS OR MIPS64 ) - set( ANDROID_CXX_FLAGS "${ANDROID_CXX_FLAGS} -fno-strict-aliasing -finline-functions -ffunction-sections -funwind-tables -fmessage-length=0" ) + set( ANDROID_CXX_FLAGS "${ANDROID_CXX_FLAGS} -fno-strict-aliasing -finline-functions -funwind-tables -fmessage-length=0" ) set( ANDROID_CXX_FLAGS_RELEASE "-fomit-frame-pointer" ) set( ANDROID_CXX_FLAGS_DEBUG "-fno-omit-frame-pointer" ) if( NOT ANDROID_COMPILER_IS_CLANG ) @@ -1348,7 +1307,7 @@ if( ANDROID_NDK_RELEASE_NUM LESS 7000 ) # before r7 else() __INIT_VARIABLE( ANDROID_SO_UNDEFINED VALUES OFF ) endif() -__INIT_VARIABLE( ANDROID_NO_UNDEFINED OBSOLETE_NO_UNDEFINED VALUES ON ) +__INIT_VARIABLE( ANDROID_NO_UNDEFINED VALUES ON ) __INIT_VARIABLE( ANDROID_FUNCTION_LEVEL_LINKING VALUES ON ) __INIT_VARIABLE( ANDROID_GOLD_LINKER VALUES ON ) __INIT_VARIABLE( ANDROID_NOEXECSTACK VALUES ON ) @@ -1356,7 +1315,7 @@ __INIT_VARIABLE( ANDROID_RELRO VALUES ON ) set( ANDROID_NO_UNDEFINED ${ANDROID_NO_UNDEFINED} CACHE BOOL "Show all undefined symbols as linker errors" ) set( ANDROID_SO_UNDEFINED ${ANDROID_SO_UNDEFINED} CACHE BOOL "Allows or disallows undefined symbols in shared libraries" ) -set( ANDROID_FUNCTION_LEVEL_LINKING ${ANDROID_FUNCTION_LEVEL_LINKING} CACHE BOOL "Allows or disallows undefined symbols in shared libraries" ) +set( ANDROID_FUNCTION_LEVEL_LINKING ${ANDROID_FUNCTION_LEVEL_LINKING} CACHE BOOL "Put each function in separate section and enable garbage collection of unused input sections at link time" ) set( ANDROID_GOLD_LINKER ${ANDROID_GOLD_LINKER} CACHE BOOL "Enables gold linker" ) set( ANDROID_NOEXECSTACK ${ANDROID_NOEXECSTACK} CACHE BOOL "Allows or disallows undefined symbols in shared libraries" ) set( ANDROID_RELRO ${ANDROID_RELRO} CACHE BOOL "Enables RELRO - a memory corruption mitigation technique" ) @@ -1531,27 +1490,31 @@ if( ANDROID_EXPLICIT_CRT_LINK ) endif() # setup output directories -set( LIBRARY_OUTPUT_PATH_ROOT ${CMAKE_SOURCE_DIR} CACHE PATH "root for library output, set this to change where android libs are installed to" ) set( CMAKE_INSTALL_PREFIX "${ANDROID_TOOLCHAIN_ROOT}/user" CACHE STRING "path for installing" ) -if(NOT _CMAKE_IN_TRY_COMPILE) - if( EXISTS "${CMAKE_SOURCE_DIR}/jni/CMakeLists.txt" ) - set( EXECUTABLE_OUTPUT_PATH 
"${LIBRARY_OUTPUT_PATH_ROOT}/bin/${ANDROID_NDK_ABI_NAME}" CACHE PATH "Output directory for applications" ) - else() - set( EXECUTABLE_OUTPUT_PATH "${LIBRARY_OUTPUT_PATH_ROOT}/bin" CACHE PATH "Output directory for applications" ) - endif() - set( LIBRARY_OUTPUT_PATH "${LIBRARY_OUTPUT_PATH_ROOT}/libs/${ANDROID_NDK_ABI_NAME}" CACHE PATH "path for android libs" ) +if( DEFINED LIBRARY_OUTPUT_PATH_ROOT + OR EXISTS "${CMAKE_SOURCE_DIR}/AndroidManifest.xml" + OR (EXISTS "${CMAKE_SOURCE_DIR}/../AndroidManifest.xml" AND EXISTS "${CMAKE_SOURCE_DIR}/../jni/") ) + set( LIBRARY_OUTPUT_PATH_ROOT ${CMAKE_SOURCE_DIR} CACHE PATH "Root for binaries output, set this to change where Android libs are installed to" ) + if( NOT _CMAKE_IN_TRY_COMPILE ) + if( EXISTS "${CMAKE_SOURCE_DIR}/jni/CMakeLists.txt" ) + set( EXECUTABLE_OUTPUT_PATH "${LIBRARY_OUTPUT_PATH_ROOT}/bin/${ANDROID_NDK_ABI_NAME}" CACHE PATH "Output directory for applications" ) + else() + set( EXECUTABLE_OUTPUT_PATH "${LIBRARY_OUTPUT_PATH_ROOT}/bin" CACHE PATH "Output directory for applications" ) + endif() + set( LIBRARY_OUTPUT_PATH "${LIBRARY_OUTPUT_PATH_ROOT}/libs/${ANDROID_NDK_ABI_NAME}" CACHE PATH "Output directory for Android libs" ) + endif() endif() # copy shaed stl library to build directory -if( NOT _CMAKE_IN_TRY_COMPILE AND __libstl MATCHES "[.]so$" ) - get_filename_component( __libstlname "${__libstl}" NAME ) - execute_process( COMMAND "${CMAKE_COMMAND}" -E copy_if_different "${__libstl}" "${LIBRARY_OUTPUT_PATH}/${__libstlname}" RESULT_VARIABLE __fileCopyProcess ) - if( NOT __fileCopyProcess EQUAL 0 OR NOT EXISTS "${LIBRARY_OUTPUT_PATH}/${__libstlname}") - message( SEND_ERROR "Failed copying of ${__libstl} to the ${LIBRARY_OUTPUT_PATH}/${__libstlname}" ) - endif() - unset( __fileCopyProcess ) - unset( __libstlname ) +if( NOT _CMAKE_IN_TRY_COMPILE AND __libstl MATCHES "[.]so$" AND DEFINED LIBRARY_OUTPUT_PATH ) + get_filename_component( __libstlname "${__libstl}" NAME ) + execute_process( COMMAND "${CMAKE_COMMAND}" -E copy_if_different "${__libstl}" "${LIBRARY_OUTPUT_PATH}/${__libstlname}" RESULT_VARIABLE __fileCopyProcess ) + if( NOT __fileCopyProcess EQUAL 0 OR NOT EXISTS "${LIBRARY_OUTPUT_PATH}/${__libstlname}") + message( SEND_ERROR "Failed copying of ${__libstl} to the ${LIBRARY_OUTPUT_PATH}/${__libstlname}" ) + endif() + unset( __fileCopyProcess ) + unset( __libstlname ) endif() @@ -1612,25 +1575,10 @@ macro( find_host_program ) endmacro() -macro( ANDROID_GET_ABI_RAWNAME TOOLCHAIN_FLAG VAR ) - if( "${TOOLCHAIN_FLAG}" STREQUAL "ARMEABI" ) - set( ${VAR} "armeabi" ) - elseif( "${TOOLCHAIN_FLAG}" STREQUAL "ARMEABI_V7A" ) - set( ${VAR} "armeabi-v7a" ) - elseif( "${TOOLCHAIN_FLAG}" STREQUAL "X86" ) - set( ${VAR} "x86" ) - elseif( "${TOOLCHAIN_FLAG}" STREQUAL "MIPS" ) - set( ${VAR} "mips" ) - else() - set( ${VAR} "unknown" ) - endif() -endmacro() - - # export toolchain settings for the try_compile() command -if( NOT PROJECT_NAME STREQUAL "CMAKE_TRY_COMPILE" ) +if( NOT _CMAKE_IN_TRY_COMPILE ) set( __toolchain_config "") - foreach( __var NDK_CCACHE LIBRARY_OUTPUT_PATH_ROOT ANDROID_FORBID_SYGWIN ANDROID_SET_OBSOLETE_VARIABLES + foreach( __var NDK_CCACHE LIBRARY_OUTPUT_PATH_ROOT ANDROID_FORBID_SYGWIN ANDROID_NDK_HOST_X64 ANDROID_NDK ANDROID_NDK_LAYOUT @@ -1652,7 +1600,7 @@ if( NOT PROJECT_NAME STREQUAL "CMAKE_TRY_COMPILE" ) ANDROID_APP_PIE ) if( DEFINED ${__var} ) - if( "${__var}" MATCHES " ") + if( ${__var} MATCHES " ") set( __toolchain_config "${__toolchain_config}set( ${__var} \"${${__var}}\" CACHE INTERNAL \"\" )\n" ) else() set( 
__toolchain_config "${__toolchain_config}set( ${__var} ${${__var}} CACHE INTERNAL \"\" )\n" ) @@ -1677,16 +1625,6 @@ if( CMAKE_GENERATOR MATCHES "Ninja" AND CMAKE_HOST_WIN32 ) endif() -# set some obsolete variables for backward compatibility -set( ANDROID_SET_OBSOLETE_VARIABLES ON CACHE BOOL "Define obsolete Andrid-specific cmake variables" ) -mark_as_advanced( ANDROID_SET_OBSOLETE_VARIABLES ) -if( ANDROID_SET_OBSOLETE_VARIABLES ) - set( ANDROID_API_LEVEL ${ANDROID_NATIVE_API_LEVEL} ) - set( ARM_TARGET "${ANDROID_ABI}" ) - set( ARMEABI_NDK_NAME "${ANDROID_NDK_ABI_NAME}" ) -endif() - - # Variables controlling behavior or set by cmake toolchain: # ANDROID_ABI : "armeabi-v7a" (default), "armeabi", "armeabi-v7a with NEON", "armeabi-v7a with VFPV3", "armeabi-v6 with VFP", "x86", "mips", "arm64-v8a", "x86_64", "mips64" # ANDROID_NATIVE_API_LEVEL : 3,4,5,8,9,14,15,16,17,18,19,21 (depends on NDK version) @@ -1700,22 +1638,15 @@ endif() # ANDROID_RELRO : ON/OFF # ANDROID_FORCE_ARM_BUILD : ON/OFF # ANDROID_STL_FORCE_FEATURES : ON/OFF -# ANDROID_SET_OBSOLETE_VARIABLES : ON/OFF +# ANDROID_LIBM_PATH : path to libm.so (set to something like $(TOP)/out/target/product//obj/lib/libm.so) to workaround unresolved `sincos` # Can be set only at the first run: -# ANDROID_NDK -# ANDROID_STANDALONE_TOOLCHAIN +# ANDROID_NDK : path to your NDK install +# NDK_CCACHE : path to your ccache executable # ANDROID_TOOLCHAIN_NAME : the NDK name of compiler toolchain # ANDROID_NDK_HOST_X64 : try to use x86_64 toolchain (default for x64 host systems) # ANDROID_NDK_LAYOUT : the inner NDK structure (RELEASE, LINARO, ANDROID) # LIBRARY_OUTPUT_PATH_ROOT : -# NDK_CCACHE : -# Obsolete: -# ANDROID_API_LEVEL : superseded by ANDROID_NATIVE_API_LEVEL -# ARM_TARGET : superseded by ANDROID_ABI -# ARM_TARGETS : superseded by ANDROID_ABI (can be set only) -# ANDROID_NDK_TOOLCHAIN_ROOT : superseded by ANDROID_STANDALONE_TOOLCHAIN (can be set only) -# ANDROID_USE_STLPORT : superseded by ANDROID_STL=stlport_static -# ANDROID_LEVEL : superseded by ANDROID_NATIVE_API_LEVEL (completely removed) +# ANDROID_STANDALONE_TOOLCHAIN # # Primary read-only variables: # ANDROID : always TRUE @@ -1729,7 +1660,6 @@ endif() # X86_64 : TRUE if configured for x86_64 # MIPS : TRUE if configured for mips # MIPS64 : TRUE if configured for mips64 -# BUILD_ANDROID : always TRUE # BUILD_WITH_ANDROID_NDK : TRUE if NDK is used # BUILD_WITH_STANDALONE_TOOLCHAIN : TRUE if standalone toolchain is used # ANDROID_NDK_HOST_SYSTEM_NAME : "windows", "linux-x86" or "darwin-x86" depending on host platform @@ -1740,8 +1670,6 @@ endif() # ANDROID_SYSROOT : path to the compiler sysroot # TOOL_OS_SUFFIX : "" or ".exe" depending on host platform # ANDROID_COMPILER_IS_CLANG : TRUE if clang compiler is used -# Obsolete: -# ARMEABI_NDK_NAME : superseded by ANDROID_NDK_ABI_NAME # # Secondary (less stable) read-only variables: # ANDROID_COMPILER_VERSION : GCC version used (not Clang version) @@ -1756,12 +1684,10 @@ endif() # ANDROID_RTTI : if rtti is enabled by the runtime # ANDROID_EXCEPTIONS : if exceptions are enabled by the runtime # ANDROID_GCC_TOOLCHAIN_NAME : read-only, differs from ANDROID_TOOLCHAIN_NAME only if clang is used -# ANDROID_LIBM_PATH : path to libm.so (set to something like $(TOP)/out/target/product//obj/lib/libm.so) to workaround unresolved `sincos` # # Defaults: # ANDROID_DEFAULT_NDK_API_LEVEL # ANDROID_DEFAULT_NDK_API_LEVEL_${ARCH} # ANDROID_NDK_SEARCH_PATHS -# ANDROID_STANDALONE_TOOLCHAIN_SEARCH_PATH # ANDROID_SUPPORTED_ABIS_${ARCH} # 
ANDROID_SUPPORTED_NDK_VERSIONS diff --git a/platforms/android/service/readme.txt b/platforms/android/service/readme.txt index 51853c24e5..5bd773b0e6 100644 --- a/platforms/android/service/readme.txt +++ b/platforms/android/service/readme.txt @@ -2,32 +2,22 @@ How to select the proper version of OpenCV Manager -------------------------------------------------- Since version 1.7 several packages of OpenCV Manager are built. Every package is targeted for some -specific hardware platform and includes corresponding OpenCV binaries. So, in most cases OpenCV -Manager uses built-in version of OpenCV. Separate package with OpenCV binaries is currently used in -a single rare case, when an ARMv7-A processor without NEON support is detected. In this case an -additional binary package is used. The new package selection logic in most cases simplifies OpenCV -installation on end user devices. In most cases OpenCV Manager may be installed automatically from -Google Play. +specific hardware platform and includes corresponding OpenCV binaries. So, in all cases OpenCV +Manager uses built-in version of OpenCV. The new package selection logic in most cases simplifies +OpenCV installation on end user devices. In most cases OpenCV Manager may be installed automatically +from Google Play. If Google Play is not available (i.e. on emulator, developer board, etc), you can install it manually using adb tool: -.. code-block:: sh + adb install /apk/OpenCV_3.0.0_Manager_3.00_.apk - adb install OpenCV-2.4.9-android-sdk/apk/OpenCV_2.4.9_Manager_2.18_.apk +Use the list below to determine proper OpenCV Manager package for your device: -Use the table below to determine proper OpenCV Manager package for your device: - -+------------------------------+--------------+----------------------------------------------------+ -| Hardware Platform | Android ver. 
| Package name | -+==============================+==============+====================================================+ -| armeabi-v7a (ARMv7-A + NEON) | >= 2.3 | OpenCV_2.4.9_Manager_2.18_armv7a-neon.apk | -+------------------------------+--------------+----------------------------------------------------+ -| armeabi-v7a (ARMv7-A + NEON) | = 2.2 | OpenCV_2.4.9_Manager_2.18_armv7a-neon-android8.apk | -+------------------------------+--------------+----------------------------------------------------+ -| armeabi (ARMv5, ARMv6) | >= 2.3 | OpenCV_2.4.9_Manager_2.18_armeabi.apk | -+------------------------------+--------------+----------------------------------------------------+ -| Intel x86 | >= 2.3 | OpenCV_2.4.9_Manager_2.18_x86.apk | -+------------------------------+--------------+----------------------------------------------------+ -| MIPS | >= 2.3 | OpenCV_2.4.9_Manager_2.18_mips.apk | -+------------------------------+--------------+----------------------------------------------------+ +- OpenCV_3.0.0-dev_Manager_3.00_armeabi.apk - armeabi (ARMv5, ARMv6) +- OpenCV_3.0.0-dev_Manager_3.00_armeabi-v7a.apk - armeabi-v7a (ARMv7-A + NEON) +- OpenCV_3.0.0-dev_Manager_3.00_arm64-v8a.apk - arm64-v8a (ARM64-v8a) +- OpenCV_3.0.0-dev_Manager_3.00_mips.apk - mips (MIPS) +- OpenCV_3.0.0-dev_Manager_3.00_mips64.apk - mips64 (MIPS64) +- OpenCV_3.0.0-dev_Manager_3.00_x86.apk - x86 +- OpenCV_3.0.0-dev_Manager_3.00_x86_64.apk - x86_64 diff --git a/platforms/winrt/readme.txt b/platforms/winrt/readme.txt index 011e449161..c35d18d21f 100644 --- a/platforms/winrt/readme.txt +++ b/platforms/winrt/readme.txt @@ -119,3 +119,41 @@ To generate Windows Phone 8.1 x86 project files in the opencv/bin dir mkdir bin cd bin cmake -G "Visual Studio 12 2013" -DCMAKE_SYSTEM_NAME=WindowsPhone -DCMAKE_SYSTEM_VERSION=8.1 ../ + +Running tests for Windows Store +=============================== +1. You might need to install this if you haven't already: http://www.microsoft.com/en-US/download/details.aspx?id=40784 + +2. Set OPENCV_TEST_DATA_PATH environment variable to location of opencv_extra/testdata (cloning of https://github.com/Itseez/opencv_extra repo required) to get tests work correctly. Also, set OPENCV_PERF_VALIDATION_DIR environment variable in case you are planning to have place where to store performance test results and compare them with the future test runs. + +3. In case you'd like to adjust some flags that are defaulted by setup_winrt script, go to "Manual build" section. Otherwise go to platforms/winrt and execute + +setup_winrt.bat "WS" "8.1" "x64" + +This will generate all files needed to build open_cv projects for selected platform in opencv\bin\. Open the opencv\bin\ directory and open the OpenCV.sln. + +4. Set OCV solution to Release mode and build it. They should build without errors and generate executables in "bin\WS\8.1\x64\bin\Release\" (or similar path depending on the configuration) + +5. Running tests: + - **Accuracy:** Run opencv_test_{module}.exe via console or as usual by double clicking it. You should see output in the console window + - **Performance:** Run opencv_perf_{module}.exe via console or as usual by double clicking it. You should see output in the console window. In case you'd like to write test results to file use --perf_write_validation_results= parameter; To compare current results to previous use --perf_read_validation_results=. This should read/write files from/to OPENCV_PERF_VALIDATION_DIR + +Manual build +============ + + CMake interface: +----------------- + 1. 
Set CMAKE_SYSTEM_NAME to WindowsStore or WindowsPhone and CMAKE_SYSTEM_VERSION to 8.0 or 8.1 + 2. Set CMAKE_INSTALL_PREFIX using format "\WS\8.1\x64" (this structure is required by samples) + 3. Click "Configure" and choose required generator + 4. Click "Generate" + + Command line: +-------------- + 1. md bin + 2. cd bin + 3. Add any required parameters to this command and execute it: + + cmake -G "Visual Studio 12 2013 Win64" -DCMAKE_SYSTEM_NAME:String=WindowsStore -DCMAKE_SYSTEM_VERSION:String=8.1 -DCMAKE_VS_EFFECTIVE_PLATFORMS:String=x64 -DCMAKE_INSTALL_PREFIX:PATH=.\install\WS\8.1\x64\ .. + +Return to "Running tests for Windows Store", list item 4. \ No newline at end of file diff --git a/platforms/winrt/setup_winrt.ps1 b/platforms/winrt/setup_winrt.ps1 index ddd82864df..47a9946ace 100644 --- a/platforms/winrt/setup_winrt.ps1 +++ b/platforms/winrt/setup_winrt.ps1 @@ -51,6 +51,10 @@ Param( [ValidateNotNull()] $ARCHITECTURES_IN = "x86", + [parameter(Mandatory=$False)] + [String] + $TESTS = "None", + [parameter(Mandatory=$False)] [String] [ValidateNotNull()] @@ -129,6 +133,16 @@ function Call-MSBuild($path, $config) return $true } +function RunAccuracyTests($path) { + md "$path\bin\Release\accuracy" + python "$PSScriptRoot\..\..\modules\ts\misc\run.py" -w "$path\bin\Release\accuracy" -a "$path\bin\Release" +} + +function RunPerfTests($path) { + md "$path\bin\Release\perf" + python "$PSScriptRoot\..\..\modules\ts\misc\run.py" -w "$path\bin\Release\perf" "$path\bin\Release" +} + Function Execute() { If ($HELP.IsPresent) { ShowHelp @@ -174,6 +188,7 @@ Function Execute() { Throw "$($_) is not valid! Please use x86, x64, ARM" } } + D "Processed Architectures: $architectures" # Assuming we are in '/platforms/winrt' we should move up to sources root directory @@ -263,6 +278,25 @@ Function Execute() { Call-MSBuild "OpenCV.sln" "Release" Call-MSBuild "INSTALL.vcxproj" "Release" + + Try { + # Running tests for release versions: + If ($TESTS -eq "ALL") { + RunAccuracyTests "$path" + RunPerfTests "$path" + } else { + If($TESTS -eq "ACC") { + RunAccuracyTests "$path" + } + If($TESTS -eq "PERF") { + RunPerfTests "$path" + } + } + } Catch { + $ErrorMessage = $_.Exception.Message + L "Error: $ErrorMessage" + exit + } } } Catch { $ErrorMessage = $_.Exception.Message @@ -305,8 +339,10 @@ Function ShowHelp() { Write-Host " cmd> setup_winrt.bat [params]" Write-Host " cmd> PowerShell.exe -ExecutionPolicy Unrestricted -File setup_winrt.ps1 [params]" Write-Host " Parameters:" - Write-Host " setup_winrt [options] [platform] [version] [architecture] [generator] [install-path]" + Write-Host " setup_winrt [options] [platform] [version] [architecture] [tests] [generator] [install-path]" Write-Host " setup_winrt -b 'WP' 'x86,ARM' " + Write-Host " setup_winrt -b 'WP' 'x86,ARM' ALL" + Write-Host " setup_winrt -b 'WP' 'x86,ARM' -test PERF " Write-Host " setup_winrt -architecture x86 -platform WP " Write-Host " setup_winrt -arc x86 -plat 'WP,WS' " Write-Host " setup_winrt -a x86 -g 'Visual Studio 11 2012' -pl WP " @@ -329,6 +365,10 @@ Function ShowHelp() { Write-Host " Example: 'ARM,x64' " Write-Host " Options: x86, ARM, x64. Available options may be limited depending on your local setup. " Write-Host " Note that you'll need to use quotes to specify more than one architecture. " + Write-Host " tests - Test sets to run. Requires -b option otherwise ignored. " + Write-Host " Default: None. " + Write-Host " Example: 'ALL' " + Write-Host " Options: ACC, PERF, ALL. 
" Write-Host " generator - Visual Studio instance used to generate the projects. " Write-Host " Default: Visual Studio 12 2013 " Write-Host " Example: 'Visual Studio 11 2012' " diff --git a/samples/CMakeLists.txt b/samples/CMakeLists.txt index 467ca162a7..31d7d8021a 100644 --- a/samples/CMakeLists.txt +++ b/samples/CMakeLists.txt @@ -18,6 +18,10 @@ if(WIN32 AND HAVE_DIRECTX) add_subdirectory(directx) endif() +if((NOT ANDROID) AND HAVE_OPENGL) + add_subdirectory(opengl) +endif() + if(ANDROID AND BUILD_ANDROID_EXAMPLES) add_subdirectory(android) endif() @@ -66,6 +70,8 @@ endif() add_subdirectory(cpp) # FIXIT: can't use cvconfig.h in samples: add_subdirectory(gpu) +add_subdirectory(opencl) + if(WIN32) add_subdirectory(directx) endif() diff --git a/samples/android/15-puzzle/AndroidManifest.xml b/samples/android/15-puzzle/AndroidManifest.xml index 356382bf14..c783076a84 100644 --- a/samples/android/15-puzzle/AndroidManifest.xml +++ b/samples/android/15-puzzle/AndroidManifest.xml @@ -1,7 +1,8 @@ + + android:versionCode="301" + android:versionName="3.01" > @@ -31,4 +32,4 @@ - \ No newline at end of file + diff --git a/samples/android/camera-calibration/AndroidManifest.xml b/samples/android/camera-calibration/AndroidManifest.xml index 619c919eec..7c03ba0bee 100644 --- a/samples/android/camera-calibration/AndroidManifest.xml +++ b/samples/android/camera-calibration/AndroidManifest.xml @@ -1,8 +1,8 @@ + android:versionCode="301" + android:versionName="3.01" > + android:versionCode="301" + android:versionName="3.01"> + android:versionCode="301" + android:versionName="3.01"> + android:versionCode="301" + android:versionName="3.01"> + android:versionCode="301" + android:versionName="3.01"> + android:versionCode="301" + android:versionName="3.01"> + android:versionCode="301" + android:versionName="3.01"> [--algorithm=bm|sgbm|hh] [--blocksize=]\n" + printf("\nUsage: stereo_match [--algorithm=bm|sgbm|hh|sgbm3way] [--blocksize=]\n" "[--max-disparity=] [--scale=scale_factor>] [-i ] [-e ]\n" "[--no-display] [-o ] [-p ]\n"); } @@ -61,7 +61,7 @@ int main(int argc, char** argv) const char* disparity_filename = 0; const char* point_cloud_filename = 0; - enum { STEREO_BM=0, STEREO_SGBM=1, STEREO_HH=2, STEREO_VAR=3 }; + enum { STEREO_BM=0, STEREO_SGBM=1, STEREO_HH=2, STEREO_VAR=3, STEREO_3WAY=4 }; int alg = STEREO_SGBM; int SADWindowSize = 0, numberOfDisparities = 0; bool no_display = false; @@ -85,7 +85,8 @@ int main(int argc, char** argv) alg = strcmp(_alg, "bm") == 0 ? STEREO_BM : strcmp(_alg, "sgbm") == 0 ? STEREO_SGBM : strcmp(_alg, "hh") == 0 ? STEREO_HH : - strcmp(_alg, "var") == 0 ? STEREO_VAR : -1; + strcmp(_alg, "var") == 0 ? STEREO_VAR : + strcmp(_alg, "sgbm3way") == 0 ? STEREO_3WAY : -1; if( alg < 0 ) { printf("Command-line parameter error: Unknown stereo algorithm\n\n"); @@ -257,7 +258,12 @@ int main(int argc, char** argv) sgbm->setSpeckleWindowSize(100); sgbm->setSpeckleRange(32); sgbm->setDisp12MaxDiff(1); - sgbm->setMode(alg == STEREO_HH ? 
StereoSGBM::MODE_HH : StereoSGBM::MODE_SGBM); + if(alg==STEREO_HH) + sgbm->setMode(StereoSGBM::MODE_HH); + else if(alg==STEREO_SGBM) + sgbm->setMode(StereoSGBM::MODE_SGBM); + else if(alg==STEREO_3WAY) + sgbm->setMode(StereoSGBM::MODE_SGBM_3WAY); Mat disp, disp8; //Mat img1p, img2p, dispp; @@ -267,7 +273,7 @@ int main(int argc, char** argv) int64 t = getTickCount(); if( alg == STEREO_BM ) bm->compute(img1, img2, disp); - else if( alg == STEREO_SGBM || alg == STEREO_HH ) + else if( alg == STEREO_SGBM || alg == STEREO_HH || alg == STEREO_3WAY ) sgbm->compute(img1, img2, disp); t = getTickCount() - t; printf("Time elapsed: %fms\n", t*1000/getTickFrequency()); diff --git a/samples/data/aloeGT.png b/samples/data/aloeGT.png new file mode 100644 index 0000000000..43e62ebfe9 Binary files /dev/null and b/samples/data/aloeGT.png differ diff --git a/data/detect_blob.png b/samples/data/detect_blob.png similarity index 100% rename from data/detect_blob.png rename to samples/data/detect_blob.png diff --git a/samples/directx/d3d10_interop.cpp b/samples/directx/d3d10_interop.cpp index d308797c01..2869e6b86e 100644 --- a/samples/directx/d3d10_interop.cpp +++ b/samples/directx/d3d10_interop.cpp @@ -175,19 +175,17 @@ public: return -1; } + m_timer.start(); + switch (m_mode) { - case MODE_NOP: - // no processing - break; - case MODE_CPU: { // process video frame on CPU UINT subResource = ::D3D10CalcSubresource(0, 0, 1); D3D10_MAPPED_TEXTURE2D mappedTex; - r = m_pSurface->Map(subResource, D3D10_MAP_WRITE_DISCARD, 0, &mappedTex); + r = pSurface->Map(subResource, D3D10_MAP_WRITE_DISCARD, 0, &mappedTex); if (FAILED(r)) { return r; @@ -195,13 +193,23 @@ public: cv::Mat m(m_height, m_width, CV_8UC4, mappedTex.pData, (int)mappedTex.RowPitch); - if (!m_disableProcessing) + if (m_demo_processing) { // blur D3D10 surface with OpenCV on CPU cv::blur(m, m, cv::Size(15, 15), cv::Point(-7, -7)); } - m_pSurface->Unmap(subResource); + cv::String strMode = cv::format("mode: %s", m_modeStr[MODE_CPU].c_str()); + cv::String strProcessing = m_demo_processing ? "blur frame" : "copy frame"; + cv::String strTime = cv::format("time: %4.1f msec", m_timer.time(Timer::UNITS::MSEC)); + cv::String strDevName = cv::format("OpenCL device: %s", m_oclDevName.c_str()); + + cv::putText(m, strMode, cv::Point(0, 16), 1, 0.8, cv::Scalar(0, 0, 0)); + cv::putText(m, strProcessing, cv::Point(0, 32), 1, 0.8, cv::Scalar(0, 0, 0)); + cv::putText(m, strTime, cv::Point(0, 48), 1, 0.8, cv::Scalar(0, 0, 0)); + cv::putText(m, strDevName, cv::Point(0, 64), 1, 0.8, cv::Scalar(0, 0, 0)); + + pSurface->Unmap(subResource); break; } @@ -213,12 +221,22 @@ public: cv::directx::convertFromD3D10Texture2D(pSurface, u); - if (!m_disableProcessing) + if (m_demo_processing) { - // blur D3D9 surface with OpenCV on GPU with OpenCL + // blur D3D10 surface with OpenCV on GPU with OpenCL cv::blur(u, u, cv::Size(15, 15), cv::Point(-7, -7)); } + cv::String strMode = cv::format("mode: %s", m_modeStr[MODE_GPU].c_str()); + cv::String strProcessing = m_demo_processing ? 
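The stereo_match.cpp hunk above maps the new sgbm3way command-line value onto StereoSGBM::MODE_SGBM_3WAY and routes it through the same sgbm->compute() call as the other SGBM modes. A minimal standalone sketch of that API usage, assuming OpenCV 3.x; the image file names and matcher parameters are illustrative, not taken from the sample:

    // Minimal sketch: run semi-global matching in the 3-way mode added above.
    // left.png / right.png are placeholder names for a rectified stereo pair.
    #include <opencv2/imgcodecs.hpp>
    #include <opencv2/calib3d.hpp>
    #include <opencv2/highgui.hpp>

    int main()
    {
        cv::Mat left  = cv::imread("left.png",  cv::IMREAD_GRAYSCALE);
        cv::Mat right = cv::imread("right.png", cv::IMREAD_GRAYSCALE);
        if (left.empty() || right.empty())
            return 1;

        int numDisparities = 64;  // must be divisible by 16
        cv::Ptr<cv::StereoSGBM> sgbm = cv::StereoSGBM::create(0, numDisparities, 5);
        sgbm->setMode(cv::StereoSGBM::MODE_SGBM_3WAY);

        cv::Mat disp, disp8;
        sgbm->compute(left, right, disp);  // fixed-point disparities, scaled by 16
        disp.convertTo(disp8, CV_8U, 255.0 / (numDisparities * 16.0));

        cv::imshow("disparity (SGBM 3-way)", disp8);
        cv::waitKey();
        return 0;
    }

As in the sample, the raw disparity map is a 16-bit fixed-point image scaled by 16, hence the 255 / (numDisparities * 16) factor used for display.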
"blur frame" : "copy frame"; + cv::String strTime = cv::format("time: %4.1f msec", m_timer.time(Timer::UNITS::MSEC)); + cv::String strDevName = cv::format("OpenCL device: %s", m_oclDevName.c_str()); + + cv::putText(u, strMode, cv::Point(0, 16), 1, 0.8, cv::Scalar(0, 0, 0)); + cv::putText(u, strProcessing, cv::Point(0, 32), 1, 0.8, cv::Scalar(0, 0, 0)); + cv::putText(u, strTime, cv::Point(0, 48), 1, 0.8, cv::Scalar(0, 0, 0)); + cv::putText(u, strDevName, cv::Point(0, 64), 1, 0.8, cv::Scalar(0, 0, 0)); + cv::directx::convertToD3D10Texture2D(u, pSurface); break; @@ -226,7 +244,7 @@ public: } // switch - print_info(pSurface, m_mode, getFps(), m_oclDevName); + m_timer.stop(); // traditional DX render pipeline: // BitBlt surface to backBuffer and flip backBuffer to frontBuffer @@ -251,35 +269,6 @@ public: } // render() - void print_info(ID3D10Texture2D* pSurface, int mode, float fps, cv::String oclDevName) - { - HRESULT r; - - UINT subResource = ::D3D10CalcSubresource(0, 0, 1); - - D3D10_MAPPED_TEXTURE2D mappedTex; - r = pSurface->Map(subResource, D3D10_MAP_WRITE_DISCARD, 0, &mappedTex); - if (FAILED(r)) - { - return; - } - - cv::Mat m(m_height, m_width, CV_8UC4, mappedTex.pData, (int)mappedTex.RowPitch); - - cv::String strMode = cv::format("%s", m_modeStr[mode].c_str()); - cv::String strFPS = cv::format("%2.1f", fps); - cv::String strDevName = cv::format("%s", oclDevName.c_str()); - - cv::putText(m, strMode, cv::Point(0, 16), 1, 0.8, cv::Scalar(0, 0, 0)); - cv::putText(m, strFPS, cv::Point(0, 32), 1, 0.8, cv::Scalar(0, 0, 0)); - cv::putText(m, strDevName, cv::Point(0, 48), 1, 0.8, cv::Scalar(0, 0, 0)); - - m_pSurface->Unmap(subResource); - - return; - } // print_info() - - int cleanup(void) { SAFE_RELEASE(m_pSurface); diff --git a/samples/directx/d3d11_interop.cpp b/samples/directx/d3d11_interop.cpp index 5e8bc90905..3ac2b063f0 100644 --- a/samples/directx/d3d11_interop.cpp +++ b/samples/directx/d3d11_interop.cpp @@ -71,19 +71,19 @@ public: &m_pD3D11Ctx); if (FAILED(r)) { - return -1; + throw std::runtime_error("D3D11CreateDeviceAndSwapChain() failed!"); } r = m_pD3D11SwapChain->GetBuffer(0, __uuidof(ID3D11Texture2D), (LPVOID*)&m_pBackBuffer); if (FAILED(r)) { - return -1; + throw std::runtime_error("GetBufer() failed!"); } r = m_pD3D11Dev->CreateRenderTargetView(m_pBackBuffer, NULL, &m_pRenderTarget); if (FAILED(r)) { - return -1; + throw std::runtime_error("CreateRenderTargetView() failed!"); } m_pD3D11Ctx->OMSetRenderTargets(1, &m_pRenderTarget, NULL); @@ -98,23 +98,24 @@ public: m_pD3D11Ctx->RSSetViewports(1, &viewport); - D3D11_TEXTURE2D_DESC desc = { 0 }; + D3D11_TEXTURE2D_DESC desc; - desc.Width = m_width; - desc.Height = m_height; - desc.MipLevels = 1; - desc.ArraySize = 1; - desc.Format = DXGI_FORMAT_R8G8B8A8_UNORM; - desc.SampleDesc.Count = 1; - desc.BindFlags = D3D11_BIND_SHADER_RESOURCE; - desc.Usage = D3D11_USAGE_DYNAMIC; - desc.CPUAccessFlags = D3D11_CPU_ACCESS_WRITE; + desc.Width = m_width; + desc.Height = m_height; + desc.MipLevels = 1; + desc.ArraySize = 1; + desc.Format = DXGI_FORMAT_R8G8B8A8_UNORM; + desc.SampleDesc.Count = 1; + desc.SampleDesc.Quality = 0; + desc.BindFlags = D3D11_BIND_SHADER_RESOURCE; + desc.Usage = D3D11_USAGE_DYNAMIC; + desc.CPUAccessFlags = D3D11_CPU_ACCESS_WRITE; + desc.MiscFlags = 0; r = m_pD3D11Dev->CreateTexture2D(&desc, NULL, &m_pSurface); if (FAILED(r)) { - std::cerr << "Can't create texture with input image" << std::endl; - return -1; + throw std::runtime_error("Can't create texture with input image"); } // initialize OpenCL context of OpenCV lib 
from DirectX @@ -137,7 +138,7 @@ public: HRESULT r; if (!m_cap.read(m_frame_bgr)) - return -1; + throw std::runtime_error("Can't get frame"); cv::cvtColor(m_frame_bgr, m_frame_rgba, CV_RGB2BGRA); @@ -147,7 +148,7 @@ public: r = m_pD3D11Ctx->Map(m_pSurface, subResource, D3D11_MAP_WRITE_DISCARD, 0, &mappedTex); if (FAILED(r)) { - return r; + throw std::runtime_error("surface mapping failed!"); } cv::Mat m(m_height, m_width, CV_8UC4, mappedTex.pData, (int)mappedTex.RowPitch); @@ -171,41 +172,49 @@ public: return 0; HRESULT r; - ID3D11Texture2D* pSurface; + ID3D11Texture2D* pSurface = 0; r = get_surface(&pSurface); if (FAILED(r)) { - return -1; + throw std::runtime_error("get_surface() failed!"); } + m_timer.start(); + switch (m_mode) { - case MODE_NOP: - // no processing - break; - case MODE_CPU: { // process video frame on CPU UINT subResource = ::D3D11CalcSubresource(0, 0, 1); D3D11_MAPPED_SUBRESOURCE mappedTex; - r = m_pD3D11Ctx->Map(m_pSurface, subResource, D3D11_MAP_WRITE_DISCARD, 0, &mappedTex); + r = m_pD3D11Ctx->Map(pSurface, subResource, D3D11_MAP_WRITE_DISCARD, 0, &mappedTex); if (FAILED(r)) { - return r; + throw std::runtime_error("surface mapping failed!"); } cv::Mat m(m_height, m_width, CV_8UC4, mappedTex.pData, (int)mappedTex.RowPitch); - if (!m_disableProcessing) + if (m_demo_processing) { - // blur D3D10 surface with OpenCV on CPU + // blur data from D3D11 surface with OpenCV on CPU cv::blur(m, m, cv::Size(15, 15), cv::Point(-7, -7)); } - m_pD3D11Ctx->Unmap(m_pSurface, subResource); + cv::String strMode = cv::format("mode: %s", m_modeStr[MODE_CPU].c_str()); + cv::String strProcessing = m_demo_processing ? "blur frame" : "copy frame"; + cv::String strTime = cv::format("time: %4.1f msec", m_timer.time(Timer::UNITS::MSEC)); + cv::String strDevName = cv::format("OpenCL device: %s", m_oclDevName.c_str()); + + cv::putText(m, strMode, cv::Point(0, 16), 1, 0.8, cv::Scalar(0, 0, 0)); + cv::putText(m, strProcessing, cv::Point(0, 32), 1, 0.8, cv::Scalar(0, 0, 0)); + cv::putText(m, strTime, cv::Point(0, 48), 1, 0.8, cv::Scalar(0, 0, 0)); + cv::putText(m, strDevName, cv::Point(0, 64), 1, 0.8, cv::Scalar(0, 0, 0)); + + m_pD3D11Ctx->Unmap(pSurface, subResource); break; } @@ -217,12 +226,22 @@ public: cv::directx::convertFromD3D11Texture2D(pSurface, u); - if (!m_disableProcessing) + if (m_demo_processing) { - // blur D3D9 surface with OpenCV on GPU with OpenCL + // blur data from D3D11 surface with OpenCV on GPU with OpenCL cv::blur(u, u, cv::Size(15, 15), cv::Point(-7, -7)); } + cv::String strMode = cv::format("mode: %s", m_modeStr[MODE_GPU].c_str()); + cv::String strProcessing = m_demo_processing ? 
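In the GPU branches above the DirectX surface is converted into a cv::UMat, after which ordinary OpenCV calls such as cv::blur and cv::putText run through OpenCL (the T-API) whenever a device is available. A minimal sketch of the T-API side alone, with no DirectX conversion; the synthetic frame stands in for the mapped surface:

    // T-API sketch: once data sits in a cv::UMat, the same cv::blur call is
    // dispatched to OpenCL when possible and falls back to the CPU otherwise.
    #include <iostream>
    #include <opencv2/core.hpp>
    #include <opencv2/core/ocl.hpp>
    #include <opencv2/imgproc.hpp>

    int main()
    {
        std::cout << "OpenCL available: " << cv::ocl::haveOpenCL() << std::endl;

        cv::Mat frame(480, 640, CV_8UC4, cv::Scalar::all(128));  // synthetic frame
        cv::UMat u;
        frame.copyTo(u);  // upload to the OpenCL device, if one is in use

        cv::blur(u, u, cv::Size(15, 15), cv::Point(-7, -7));  // same call as the samples

        cv::Mat result = u.getMat(cv::ACCESS_READ);  // map back for host access
        std::cout << "processed " << result.cols << "x" << result.rows << " frame" << std::endl;
        return 0;
    }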
"blur frame" : "copy frame"; + cv::String strTime = cv::format("time: %4.1f msec", m_timer.time(Timer::UNITS::MSEC)); + cv::String strDevName = cv::format("OpenCL device: %s", m_oclDevName.c_str()); + + cv::putText(u, strMode, cv::Point(0, 16), 1, 0.8, cv::Scalar(0, 0, 0)); + cv::putText(u, strProcessing, cv::Point(0, 32), 1, 0.8, cv::Scalar(0, 0, 0)); + cv::putText(u, strTime, cv::Point(0, 48), 1, 0.8, cv::Scalar(0, 0, 0)); + cv::putText(u, strDevName, cv::Point(0, 64), 1, 0.8, cv::Scalar(0, 0, 0)); + cv::directx::convertToD3D11Texture2D(u, pSurface); break; @@ -230,7 +249,7 @@ public: } // switch - print_info(pSurface, m_mode, getFps(), m_oclDevName); + m_timer.stop(); // traditional DX render pipeline: // BitBlt surface to backBuffer and flip backBuffer to frontBuffer @@ -241,7 +260,7 @@ public: r = m_pD3D11SwapChain->Present(0, 0); if (FAILED(r)) { - return -1; + throw std::runtime_error("switch betweem fronat and back buffers failed!"); } } // try @@ -251,37 +270,14 @@ public: return 10; } - return 0; - } // render() - - - void print_info(ID3D11Texture2D* pSurface, int mode, float fps, cv::String oclDevName) - { - HRESULT r; - - UINT subResource = ::D3D11CalcSubresource(0, 0, 1); - - D3D11_MAPPED_SUBRESOURCE mappedTex; - r = m_pD3D11Ctx->Map(pSurface, subResource, D3D11_MAP_WRITE_DISCARD, 0, &mappedTex); - if (FAILED(r)) + catch (const std::exception& e) { - return; + std::cerr << "Exception: " << e.what() << std::endl; + return 11; } - cv::Mat m(m_height, m_width, CV_8UC4, mappedTex.pData, (int)mappedTex.RowPitch); - - cv::String strMode = cv::format("%s", m_modeStr[mode].c_str()); - cv::String strFPS = cv::format("%2.1f", fps); - cv::String strDevName = cv::format("%s", oclDevName.c_str()); - - cv::putText(m, strMode, cv::Point(0, 16), 1, 0.8, cv::Scalar(0, 0, 0)); - cv::putText(m, strFPS, cv::Point(0, 32), 1, 0.8, cv::Scalar(0, 0, 0)); - cv::putText(m, strDevName, cv::Point(0, 48), 1, 0.8, cv::Scalar(0, 0, 0)); - - m_pD3D11Ctx->Unmap(pSurface, subResource); - - return; - } // printf_info() + return 0; + } // render() int cleanup(void) diff --git a/samples/directx/d3d9_interop.cpp b/samples/directx/d3d9_interop.cpp index 851bd7d520..afe12b3de1 100644 --- a/samples/directx/d3d9_interop.cpp +++ b/samples/directx/d3d9_interop.cpp @@ -152,12 +152,10 @@ public: return -1; } + m_timer.start(); + switch (m_mode) { - case MODE_NOP: - // no processing - break; - case MODE_CPU: { // process video frame on CPU @@ -172,7 +170,7 @@ public: cv::Mat m(m_height, m_width, CV_8UC4, memDesc.pBits, memDesc.Pitch); - if (!m_disableProcessing) + if (m_demo_processing) { // blur D3D9 surface with OpenCV on CPU cv::blur(m, m, cv::Size(15, 15), cv::Point(-7, -7)); @@ -194,7 +192,7 @@ public: cv::directx::convertFromDirect3DSurface9(pSurface, u); - if (!m_disableProcessing) + if (m_demo_processing) { // blur D3D9 surface with OpenCV on GPU with OpenCL cv::blur(u, u, cv::Size(15, 15), cv::Point(-7, -7)); @@ -207,7 +205,9 @@ public: } // switch - print_info(pSurface, m_mode, getFps(), m_oclDevName); + m_timer.stop(); + + print_info(pSurface, m_mode, m_timer.time(Timer::UNITS::MSEC), m_oclDevName); // traditional DX render pipeline: // BitBlt surface to backBuffer and flip backBuffer to frontBuffer @@ -235,7 +235,7 @@ public: } // render() - void print_info(LPDIRECT3DSURFACE9 pSurface, int mode, float fps, cv::String oclDevName) + void print_info(LPDIRECT3DSURFACE9 pSurface, int mode, float time, cv::String oclDevName) { HDC hDC; @@ -258,12 +258,17 @@ public: int y = 0; buf[0] = 0; - sprintf(buf, "Mode: %s", 
m_modeStr[mode].c_str()); + sprintf(buf, "mode: %s", m_modeStr[mode].c_str()); ::TextOut(hDC, 0, y, buf, (int)strlen(buf)); y += tm.tmHeight; buf[0] = 0; - sprintf(buf, "FPS: %2.1f", fps); + sprintf(buf, m_demo_processing ? "blur frame" : "copy frame"); + ::TextOut(hDC, 0, y, buf, (int)strlen(buf)); + + y += tm.tmHeight; + buf[0] = 0; + sprintf(buf, "time: %4.1f msec", time); ::TextOut(hDC, 0, y, buf, (int)strlen(buf)); y += tm.tmHeight; diff --git a/samples/directx/d3d9ex_interop.cpp b/samples/directx/d3d9ex_interop.cpp index d59416bf0a..187177061c 100644 --- a/samples/directx/d3d9ex_interop.cpp +++ b/samples/directx/d3d9ex_interop.cpp @@ -152,12 +152,10 @@ public: return -1; } + m_timer.start(); + switch (m_mode) { - case MODE_NOP: - // no processing - break; - case MODE_CPU: { // process video frame on CPU @@ -172,7 +170,7 @@ public: cv::Mat m(m_height, m_width, CV_8UC4, memDesc.pBits, memDesc.Pitch); - if (!m_disableProcessing) + if (m_demo_processing) { // blur D3D9 surface with OpenCV on CPU cv::blur(m, m, cv::Size(15, 15), cv::Point(-7, -7)); @@ -194,7 +192,7 @@ public: cv::directx::convertFromDirect3DSurface9(pSurface, u); - if (!m_disableProcessing) + if (m_demo_processing) { // blur D3D9 surface with OpenCV on GPU with OpenCL cv::blur(u, u, cv::Size(15, 15), cv::Point(-7, -7)); @@ -207,7 +205,9 @@ public: } // switch - print_info(pSurface, m_mode, getFps(), m_oclDevName); + m_timer.stop(); + + print_info(pSurface, m_mode, m_timer.time(Timer::UNITS::MSEC), m_oclDevName); // traditional DX render pipeline: // BitBlt surface to backBuffer and flip backBuffer to frontBuffer @@ -236,7 +236,7 @@ public: } // render() - void print_info(LPDIRECT3DSURFACE9 pSurface, int mode, float fps, cv::String oclDevName) + void print_info(LPDIRECT3DSURFACE9 pSurface, int mode, float time, cv::String oclDevName) { HDC hDC; @@ -259,12 +259,17 @@ public: int y = 0; buf[0] = 0; - sprintf(buf, "Mode: %s", m_modeStr[mode].c_str()); + sprintf(buf, "mode: %s", m_modeStr[mode].c_str()); ::TextOut(hDC, 0, y, buf, (int)strlen(buf)); y += tm.tmHeight; buf[0] = 0; - sprintf(buf, "FPS: %2.1f", fps); + sprintf(buf, m_demo_processing ? 
"blur frame" : "copy frame"); + ::TextOut(hDC, 0, y, buf, (int)strlen(buf)); + + y += tm.tmHeight; + buf[0] = 0; + sprintf(buf, "time: %4.1f msec", time); ::TextOut(hDC, 0, y, buf, (int)strlen(buf)); y += tm.tmHeight; diff --git a/samples/directx/d3dsample.hpp b/samples/directx/d3dsample.hpp index 8fc429a1e9..4b7545fc69 100644 --- a/samples/directx/d3dsample.hpp +++ b/samples/directx/d3dsample.hpp @@ -17,12 +17,55 @@ #define SAFE_RELEASE(p) if (p) { p->Release(); p = NULL; } +class Timer +{ +public: + enum UNITS + { + USEC = 0, + MSEC, + SEC + }; + + Timer() : m_t0(0), m_diff(0) + { + m_tick_frequency = (float)cv::getTickFrequency(); + + m_unit_mul[USEC] = 1000000; + m_unit_mul[MSEC] = 1000; + m_unit_mul[SEC] = 1; + } + + void start() + { + m_t0 = cv::getTickCount(); + } + + void stop() + { + m_diff = cv::getTickCount() - m_t0; + } + + float time(UNITS u = UNITS::MSEC) + { + float sec = m_diff / m_tick_frequency; + + return sec * m_unit_mul[u]; + } + +public: + float m_tick_frequency; + int64 m_t0; + int64 m_diff; + int m_unit_mul[3]; +}; + + class D3DSample : public WinApp { public: enum MODE { - MODE_NOP, MODE_CPU, MODE_GPU }; @@ -31,11 +74,10 @@ public: WinApp(width, height, window_name) { m_shutdown = false; - m_mode = MODE_NOP; - m_modeStr[0] = cv::String("No processing"); - m_modeStr[1] = cv::String("Processing on CPU"); - m_modeStr[2] = cv::String("Processing on GPU"); - m_disableProcessing = false; + m_mode = MODE_CPU; + m_modeStr[0] = cv::String("Processing on CPU"); + m_modeStr[1] = cv::String("Processing on GPU"); + m_demo_processing = false; m_cap = cap; } @@ -49,41 +91,25 @@ public: return WinApp::cleanup(); } - static float getFps() - { - static std::queue time_queue; - - int64 now = cv::getTickCount(); - int64 then = 0; - time_queue.push(now); - - if (time_queue.size() >= 2) - then = time_queue.front(); - - if (time_queue.size() >= 25) - time_queue.pop(); - - size_t sz = time_queue.size(); - - float fps = sz * (float)cv::getTickFrequency() / (now - then); - - return fps; - } - protected: virtual LRESULT CALLBACK WndProc(HWND hWnd, UINT message, WPARAM wParam, LPARAM lParam) { switch (message) { case WM_CHAR: - if (wParam >= '0' && wParam <= '2') + if (wParam == '1') { - m_mode = static_cast((char)wParam - '0'); + m_mode = MODE_CPU; + return 0; + } + if (wParam == '2') + { + m_mode = MODE_GPU; return 0; } else if (wParam == VK_SPACE) { - m_disableProcessing = !m_disableProcessing; + m_demo_processing = !m_demo_processing; return 0; } else if (wParam == VK_ESCAPE) @@ -108,12 +134,13 @@ protected: protected: bool m_shutdown; - bool m_disableProcessing; + bool m_demo_processing; MODE m_mode; - cv::String m_modeStr[3]; + cv::String m_modeStr[2]; cv::VideoCapture m_cap; cv::Mat m_frame_bgr; cv::Mat m_frame_rgba; + Timer m_timer; }; @@ -122,10 +149,10 @@ static void help() printf( "\nSample demonstrating interoperability of DirectX and OpenCL with OpenCV.\n" "Hot keys: \n" - " 0 - no processing\n" - " 1 - blur DX surface on CPU through OpenCV\n" - " 2 - blur DX surface on GPU through OpenCV using OpenCL\n" - " ESC - exit\n\n"); + " SPACE - turn processing on/off\n" + " 1 - process DX surface through OpenCV on CPU\n" + " 2 - process DX surface through OpenCV on GPU (via OpenCL)\n" + " ESC - exit\n\n"); } diff --git a/samples/opencl/CMakeLists.txt b/samples/opencl/CMakeLists.txt new file mode 100644 index 0000000000..a4525650e1 --- /dev/null +++ b/samples/opencl/CMakeLists.txt @@ -0,0 +1,68 @@ +# cmake 3.1 needed for find_package(OpenCL) + +if(CMAKE_VERSION VERSION_LESS "3.1") + 
message(STATUS "OpenCL samples require CMakes 3.1+") + return() +endif() + +set( + OPENCV_OPENCL_SAMPLES_REQUIRED_DEPS + opencv_core + opencv_imgproc + opencv_video + opencv_imgcodecs + opencv_videoio + opencv_highgui) + +ocv_check_dependencies(${OPENCV_OPENCL_SAMPLES_REQUIRED_DEPS}) + +if(BUILD_EXAMPLES AND OCV_DEPENDENCIES_FOUND) + + find_package(OpenCL 1.2 REQUIRED) + + set(project "opencl") + string(TOUPPER "${project}" project_upper) + + project("${project}_samples") + + ocv_include_modules_recurse(${OPENCV_OPENCL_SAMPLES_REQUIRED_DEPS}) + + include_directories(${OpenCL_INCLUDE_DIR}) + + # --------------------------------------------- + # Define executable targets + # --------------------------------------------- + MACRO(OPENCV_DEFINE_OPENCL_EXAMPLE name srcs) + set(the_target "example_${project}_${name}") + add_executable(${the_target} ${srcs}) + + ocv_target_link_libraries( + ${the_target} + ${OPENCV_LINKER_LIBS} + ${OPENCV_OPENCL_SAMPLES_REQUIRED_DEPS} + ${OpenCL_LIBRARY}) + + set_target_properties(${the_target} PROPERTIES + OUTPUT_NAME "${project}-example-${name}" + PROJECT_LABEL "(EXAMPLE_${project_upper}) ${name}") + + if(ENABLE_SOLUTION_FOLDERS) + set_target_properties(${the_target} PROPERTIES FOLDER "samples//${project}") + endif() + + if(WIN32) + if(MSVC AND NOT BUILD_SHARED_LIBS) + set_target_properties(${the_target} PROPERTIES LINK_FLAGS "/NODEFAULTLIB:atlthunk.lib /NODEFAULTLIB:atlsd.lib /DEBUG") + endif() + install(TARGETS ${the_target} RUNTIME DESTINATION "${OPENCV_SAMPLES_BIN_INSTALL_PATH}/${project}" COMPONENT main) + endif() + ENDMACRO() + + file(GLOB all_samples RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} *.cpp) + + foreach(sample_filename ${all_samples}) + get_filename_component(sample ${sample_filename} NAME_WE) + file(GLOB sample_srcs RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} ${sample}.*) + OPENCV_DEFINE_OPENCL_EXAMPLE(${sample} ${sample_srcs}) + endforeach() +endif() diff --git a/samples/opencl/opencl-opencv-interop.cpp b/samples/opencl/opencl-opencv-interop.cpp new file mode 100644 index 0000000000..8deb799eea --- /dev/null +++ b/samples/opencl/opencl-opencv-interop.cpp @@ -0,0 +1,1011 @@ +/* +// The example of interoperability between OpenCL and OpenCV. +// This will loop through frames of video either from input media file +// or camera device and do processing of these data in OpenCL and then +// in OpenCV. In OpenCL it does inversion of pixels in left half of frame and +// in OpenCV it does bluring in the right half of frame. 
+*/ +#include +#include +#include +#include +#include +#include +#include +#include + +#if __APPLE__ +#include +#else +#include +#endif + +#include +#include +#include +#include +#include + + +using namespace std; +using namespace cv; + +namespace opencl { + +class PlatformInfo +{ +public: + PlatformInfo() + {} + + ~PlatformInfo() + {} + + cl_int QueryInfo(cl_platform_id id) + { + query_param(id, CL_PLATFORM_PROFILE, m_profile); + query_param(id, CL_PLATFORM_VERSION, m_version); + query_param(id, CL_PLATFORM_NAME, m_name); + query_param(id, CL_PLATFORM_VENDOR, m_vendor); + query_param(id, CL_PLATFORM_EXTENSIONS, m_extensions); + return CL_SUCCESS; + } + + std::string Profile() { return m_profile; } + std::string Version() { return m_version; } + std::string Name() { return m_name; } + std::string Vendor() { return m_vendor; } + std::string Extensions() { return m_extensions; } + +private: + cl_int query_param(cl_platform_id id, cl_platform_info param, std::string& paramStr) + { + cl_int res; + + size_t psize; + cv::AutoBuffer buf; + + res = clGetPlatformInfo(id, param, 0, 0, &psize); + if (CL_SUCCESS != res) + throw std::runtime_error(std::string("clGetPlatformInfo failed")); + + buf.resize(psize); + res = clGetPlatformInfo(id, param, psize, buf, 0); + if (CL_SUCCESS != res) + throw std::runtime_error(std::string("clGetPlatformInfo failed")); + + // just in case, ensure trailing zero for ASCIIZ string + buf[psize] = 0; + + paramStr = buf; + + return CL_SUCCESS; + } + +private: + std::string m_profile; + std::string m_version; + std::string m_name; + std::string m_vendor; + std::string m_extensions; +}; + + +class DeviceInfo +{ +public: + DeviceInfo() + {} + + ~DeviceInfo() + {} + + cl_int QueryInfo(cl_device_id id) + { + query_param(id, CL_DEVICE_TYPE, m_type); + query_param(id, CL_DEVICE_VENDOR_ID, m_vendor_id); + query_param(id, CL_DEVICE_MAX_COMPUTE_UNITS, m_max_compute_units); + query_param(id, CL_DEVICE_MAX_WORK_ITEM_DIMENSIONS, m_max_work_item_dimensions); + query_param(id, CL_DEVICE_MAX_WORK_ITEM_SIZES, m_max_work_item_sizes); + query_param(id, CL_DEVICE_MAX_WORK_GROUP_SIZE, m_max_work_group_size); + query_param(id, CL_DEVICE_PREFERRED_VECTOR_WIDTH_CHAR, m_preferred_vector_width_char); + query_param(id, CL_DEVICE_PREFERRED_VECTOR_WIDTH_SHORT, m_preferred_vector_width_short); + query_param(id, CL_DEVICE_PREFERRED_VECTOR_WIDTH_INT, m_preferred_vector_width_int); + query_param(id, CL_DEVICE_PREFERRED_VECTOR_WIDTH_LONG, m_preferred_vector_width_long); + query_param(id, CL_DEVICE_PREFERRED_VECTOR_WIDTH_FLOAT, m_preferred_vector_width_float); + query_param(id, CL_DEVICE_PREFERRED_VECTOR_WIDTH_DOUBLE, m_preferred_vector_width_double); +#if defined(CL_VERSION_1_1) + query_param(id, CL_DEVICE_PREFERRED_VECTOR_WIDTH_HALF, m_preferred_vector_width_half); + query_param(id, CL_DEVICE_NATIVE_VECTOR_WIDTH_CHAR, m_native_vector_width_char); + query_param(id, CL_DEVICE_NATIVE_VECTOR_WIDTH_SHORT, m_native_vector_width_short); + query_param(id, CL_DEVICE_NATIVE_VECTOR_WIDTH_INT, m_native_vector_width_int); + query_param(id, CL_DEVICE_NATIVE_VECTOR_WIDTH_LONG, m_native_vector_width_long); + query_param(id, CL_DEVICE_NATIVE_VECTOR_WIDTH_FLOAT, m_native_vector_width_float); + query_param(id, CL_DEVICE_NATIVE_VECTOR_WIDTH_DOUBLE, m_native_vector_width_double); + query_param(id, CL_DEVICE_NATIVE_VECTOR_WIDTH_HALF, m_native_vector_width_half); +#endif + query_param(id, CL_DEVICE_MAX_CLOCK_FREQUENCY, m_max_clock_frequency); + query_param(id, CL_DEVICE_ADDRESS_BITS, m_address_bits); + query_param(id, 
CL_DEVICE_MAX_MEM_ALLOC_SIZE, m_max_mem_alloc_size); + query_param(id, CL_DEVICE_IMAGE_SUPPORT, m_image_support); + query_param(id, CL_DEVICE_MAX_READ_IMAGE_ARGS, m_max_read_image_args); + query_param(id, CL_DEVICE_MAX_WRITE_IMAGE_ARGS, m_max_write_image_args); +#if defined(CL_VERSION_2_0) + query_param(id, CL_DEVICE_MAX_READ_WRITE_IMAGE_ARGS, m_max_read_write_image_args); +#endif + query_param(id, CL_DEVICE_IMAGE2D_MAX_WIDTH, m_image2d_max_width); + query_param(id, CL_DEVICE_IMAGE2D_MAX_HEIGHT, m_image2d_max_height); + query_param(id, CL_DEVICE_IMAGE3D_MAX_WIDTH, m_image3d_max_width); + query_param(id, CL_DEVICE_IMAGE3D_MAX_HEIGHT, m_image3d_max_height); + query_param(id, CL_DEVICE_IMAGE3D_MAX_DEPTH, m_image3d_max_depth); +#if defined(CL_VERSION_1_2) + query_param(id, CL_DEVICE_IMAGE_MAX_BUFFER_SIZE, m_image_max_buffer_size); + query_param(id, CL_DEVICE_IMAGE_MAX_ARRAY_SIZE, m_image_max_array_size); +#endif + query_param(id, CL_DEVICE_MAX_SAMPLERS, m_max_samplers); +#if defined(CL_VERSION_1_2) + query_param(id, CL_DEVICE_IMAGE_PITCH_ALIGNMENT, m_image_pitch_alignment); + query_param(id, CL_DEVICE_IMAGE_BASE_ADDRESS_ALIGNMENT, m_image_base_address_alignment); +#endif +#if defined(CL_VERSION_2_0) + query_param(id, CL_DEVICE_MAX_PIPE_ARGS, m_max_pipe_args); + query_param(id, CL_DEVICE_PIPE_MAX_ACTIVE_RESERVATIONS, m_pipe_max_active_reservations); + query_param(id, CL_DEVICE_PIPE_MAX_PACKET_SIZE, m_pipe_max_packet_size); +#endif + query_param(id, CL_DEVICE_MAX_PARAMETER_SIZE, m_max_parameter_size); + query_param(id, CL_DEVICE_MEM_BASE_ADDR_ALIGN, m_mem_base_addr_align); + query_param(id, CL_DEVICE_SINGLE_FP_CONFIG, m_single_fp_config); +#if defined(CL_VERSION_1_2) + query_param(id, CL_DEVICE_DOUBLE_FP_CONFIG, m_double_fp_config); +#endif + query_param(id, CL_DEVICE_GLOBAL_MEM_CACHE_TYPE, m_global_mem_cache_type); + query_param(id, CL_DEVICE_GLOBAL_MEM_CACHELINE_SIZE, m_global_mem_cacheline_size); + query_param(id, CL_DEVICE_GLOBAL_MEM_CACHE_SIZE, m_global_mem_cache_size); + query_param(id, CL_DEVICE_GLOBAL_MEM_SIZE, m_global_mem_size); + query_param(id, CL_DEVICE_MAX_CONSTANT_BUFFER_SIZE, m_max_constant_buffer_size); + query_param(id, CL_DEVICE_MAX_CONSTANT_ARGS, m_max_constant_args); +#if defined(CL_VERSION_2_0) + query_param(id, CL_DEVICE_MAX_GLOBAL_VARIABLE_SIZE, m_max_global_variable_size); + query_param(id, CL_DEVICE_GLOBAL_VARIABLE_PREFERRED_TOTAL_SIZE, m_global_variable_preferred_total_size); +#endif + query_param(id, CL_DEVICE_LOCAL_MEM_TYPE, m_local_mem_type); + query_param(id, CL_DEVICE_LOCAL_MEM_SIZE, m_local_mem_size); + query_param(id, CL_DEVICE_ERROR_CORRECTION_SUPPORT, m_error_correction_support); +#if defined(CL_VERSION_1_1) + query_param(id, CL_DEVICE_HOST_UNIFIED_MEMORY, m_host_unified_memory); +#endif + query_param(id, CL_DEVICE_PROFILING_TIMER_RESOLUTION, m_profiling_timer_resolution); + query_param(id, CL_DEVICE_ENDIAN_LITTLE, m_endian_little); + query_param(id, CL_DEVICE_AVAILABLE, m_available); + query_param(id, CL_DEVICE_COMPILER_AVAILABLE, m_compiler_available); +#if defined(CL_VERSION_1_2) + query_param(id, CL_DEVICE_LINKER_AVAILABLE, m_linker_available); +#endif + query_param(id, CL_DEVICE_EXECUTION_CAPABILITIES, m_execution_capabilities); + query_param(id, CL_DEVICE_QUEUE_PROPERTIES, m_queue_properties); +#if defined(CL_VERSION_2_0) + query_param(id, CL_DEVICE_QUEUE_ON_HOST_PROPERTIES, m_queue_on_host_properties); + query_param(id, CL_DEVICE_QUEUE_ON_DEVICE_PROPERTIES, m_queue_on_device_properties); + query_param(id, CL_DEVICE_QUEUE_ON_DEVICE_PREFERRED_SIZE, 
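PlatformInfo and DeviceInfo above wrap clGetPlatformInfo / clGetDeviceInfo behind query_param helpers that first ask for the value size and then fetch the value itself. A compact standalone sketch of that two-step pattern, listing only platform and device names; error handling is trimmed for brevity:

    // Two-step OpenCL info queries, as used by the query_param helpers above:
    // the first call obtains the size, the second call obtains the value.
    #include <iostream>
    #include <string>
    #include <vector>
    #if __APPLE__
    #include <OpenCL/cl.h>
    #else
    #include <CL/cl.h>
    #endif

    static std::string platformName(cl_platform_id p)
    {
        size_t size = 0;
        clGetPlatformInfo(p, CL_PLATFORM_NAME, 0, 0, &size);     // size only
        std::string s(size, 0);
        clGetPlatformInfo(p, CL_PLATFORM_NAME, size, &s[0], 0);  // the value
        return s;
    }

    static std::string deviceName(cl_device_id d)
    {
        size_t size = 0;
        clGetDeviceInfo(d, CL_DEVICE_NAME, 0, 0, &size);
        std::string s(size, 0);
        clGetDeviceInfo(d, CL_DEVICE_NAME, size, &s[0], 0);
        return s;
    }

    int main()
    {
        cl_uint numPlatforms = 0;
        clGetPlatformIDs(0, 0, &numPlatforms);
        if (numPlatforms == 0)
            return 1;
        std::vector<cl_platform_id> platforms(numPlatforms);
        clGetPlatformIDs(numPlatforms, &platforms[0], 0);

        for (cl_uint i = 0; i < numPlatforms; i++)
        {
            std::cout << "platform: " << platformName(platforms[i]).c_str() << std::endl;

            cl_uint numDevices = 0;
            clGetDeviceIDs(platforms[i], CL_DEVICE_TYPE_ALL, 0, 0, &numDevices);
            if (numDevices == 0)
                continue;
            std::vector<cl_device_id> devices(numDevices);
            clGetDeviceIDs(platforms[i], CL_DEVICE_TYPE_ALL, numDevices, &devices[0], 0);
            for (cl_uint j = 0; j < numDevices; j++)
                std::cout << "  device: " << deviceName(devices[j]).c_str() << std::endl;
        }
        return 0;
    }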
m_queue_on_device_preferred_size); + query_param(id, CL_DEVICE_QUEUE_ON_DEVICE_MAX_SIZE, m_queue_on_device_max_size); + query_param(id, CL_DEVICE_MAX_ON_DEVICE_QUEUES, m_max_on_device_queues); + query_param(id, CL_DEVICE_MAX_ON_DEVICE_EVENTS, m_max_on_device_events); +#endif +#if defined(CL_VERSION_1_2) + query_param(id, CL_DEVICE_BUILT_IN_KERNELS, m_built_in_kernels); +#endif + query_param(id, CL_DEVICE_PLATFORM, m_platform); + query_param(id, CL_DEVICE_NAME, m_name); + query_param(id, CL_DEVICE_VENDOR, m_vendor); + query_param(id, CL_DRIVER_VERSION, m_driver_version); + query_param(id, CL_DEVICE_PROFILE, m_profile); + query_param(id, CL_DEVICE_VERSION, m_version); +#if defined(CL_VERSION_1_1) + query_param(id, CL_DEVICE_OPENCL_C_VERSION, m_opencl_c_version); +#endif + query_param(id, CL_DEVICE_EXTENSIONS, m_extensions); +#if defined(CL_VERSION_1_2) + query_param(id, CL_DEVICE_PRINTF_BUFFER_SIZE, m_printf_buffer_size); + query_param(id, CL_DEVICE_PREFERRED_INTEROP_USER_SYNC, m_preferred_interop_user_sync); + query_param(id, CL_DEVICE_PARENT_DEVICE, m_parent_device); + query_param(id, CL_DEVICE_PARTITION_MAX_SUB_DEVICES, m_partition_max_sub_devices); + query_param(id, CL_DEVICE_PARTITION_PROPERTIES, m_partition_properties); + query_param(id, CL_DEVICE_PARTITION_AFFINITY_DOMAIN, m_partition_affinity_domain); + query_param(id, CL_DEVICE_PARTITION_TYPE, m_partition_type); + query_param(id, CL_DEVICE_REFERENCE_COUNT, m_reference_count); +#endif + return CL_SUCCESS; + } + + std::string Name() { return m_name; } + +private: + template + cl_int query_param(cl_device_id id, cl_device_info param, T& value) + { + cl_int res; + size_t size = 0; + + res = clGetDeviceInfo(id, param, 0, 0, &size); + if (CL_SUCCESS != res && size != 0) + throw std::runtime_error(std::string("clGetDeviceInfo failed")); + + if (0 == size) + return CL_SUCCESS; + + if (sizeof(T) != size) + throw std::runtime_error(std::string("clGetDeviceInfo: param size mismatch")); + + res = clGetDeviceInfo(id, param, size, &value, 0); + if (CL_SUCCESS != res) + throw std::runtime_error(std::string("clGetDeviceInfo failed")); + + return CL_SUCCESS; + } + + template + cl_int query_param(cl_device_id id, cl_device_info param, std::vector& value) + { + cl_int res; + size_t size; + + res = clGetDeviceInfo(id, param, 0, 0, &size); + if (CL_SUCCESS != res) + throw std::runtime_error(std::string("clGetDeviceInfo failed")); + + if (0 == size) + return CL_SUCCESS; + + value.resize(size / sizeof(T)); + + res = clGetDeviceInfo(id, param, size, &value[0], 0); + if (CL_SUCCESS != res) + throw std::runtime_error(std::string("clGetDeviceInfo failed")); + + return CL_SUCCESS; + } + + cl_int query_param(cl_device_id id, cl_device_info param, std::string& value) + { + cl_int res; + size_t size; + + res = clGetDeviceInfo(id, param, 0, 0, &size); + if (CL_SUCCESS != res) + throw std::runtime_error(std::string("clGetDeviceInfo failed")); + + value.resize(size + 1); + + res = clGetDeviceInfo(id, param, size, &value[0], 0); + if (CL_SUCCESS != res) + throw std::runtime_error(std::string("clGetDeviceInfo failed")); + + // just in case, ensure trailing zero for ASCIIZ string + value[size] = 0; + + return CL_SUCCESS; + } + +private: + cl_device_type m_type; + cl_uint m_vendor_id; + cl_uint m_max_compute_units; + cl_uint m_max_work_item_dimensions; + std::vector m_max_work_item_sizes; + size_t m_max_work_group_size; + cl_uint m_preferred_vector_width_char; + cl_uint m_preferred_vector_width_short; + cl_uint m_preferred_vector_width_int; + cl_uint 
m_preferred_vector_width_long; + cl_uint m_preferred_vector_width_float; + cl_uint m_preferred_vector_width_double; +#if defined(CL_VERSION_1_1) + cl_uint m_preferred_vector_width_half; + cl_uint m_native_vector_width_char; + cl_uint m_native_vector_width_short; + cl_uint m_native_vector_width_int; + cl_uint m_native_vector_width_long; + cl_uint m_native_vector_width_float; + cl_uint m_native_vector_width_double; + cl_uint m_native_vector_width_half; +#endif + cl_uint m_max_clock_frequency; + cl_uint m_address_bits; + cl_ulong m_max_mem_alloc_size; + cl_bool m_image_support; + cl_uint m_max_read_image_args; + cl_uint m_max_write_image_args; +#if defined(CL_VERSION_2_0) + cl_uint m_max_read_write_image_args; +#endif + size_t m_image2d_max_width; + size_t m_image2d_max_height; + size_t m_image3d_max_width; + size_t m_image3d_max_height; + size_t m_image3d_max_depth; +#if defined(CL_VERSION_1_2) + size_t m_image_max_buffer_size; + size_t m_image_max_array_size; +#endif + cl_uint m_max_samplers; +#if defined(CL_VERSION_1_2) + cl_uint m_image_pitch_alignment; + cl_uint m_image_base_address_alignment; +#endif +#if defined(CL_VERSION_2_0) + cl_uint m_max_pipe_args; + cl_uint m_pipe_max_active_reservations; + cl_uint m_pipe_max_packet_size; +#endif + size_t m_max_parameter_size; + cl_uint m_mem_base_addr_align; + cl_device_fp_config m_single_fp_config; +#if defined(CL_VERSION_1_2) + cl_device_fp_config m_double_fp_config; +#endif + cl_device_mem_cache_type m_global_mem_cache_type; + cl_uint m_global_mem_cacheline_size; + cl_ulong m_global_mem_cache_size; + cl_ulong m_global_mem_size; + cl_ulong m_max_constant_buffer_size; + cl_uint m_max_constant_args; +#if defined(CL_VERSION_2_0) + size_t m_max_global_variable_size; + size_t m_global_variable_preferred_total_size; +#endif + cl_device_local_mem_type m_local_mem_type; + cl_ulong m_local_mem_size; + cl_bool m_error_correction_support; +#if defined(CL_VERSION_1_1) + cl_bool m_host_unified_memory; +#endif + size_t m_profiling_timer_resolution; + cl_bool m_endian_little; + cl_bool m_available; + cl_bool m_compiler_available; +#if defined(CL_VERSION_1_2) + cl_bool m_linker_available; +#endif + cl_device_exec_capabilities m_execution_capabilities; + cl_command_queue_properties m_queue_properties; +#if defined(CL_VERSION_2_0) + cl_command_queue_properties m_queue_on_host_properties; + cl_command_queue_properties m_queue_on_device_properties; + cl_uint m_queue_on_device_preferred_size; + cl_uint m_queue_on_device_max_size; + cl_uint m_max_on_device_queues; + cl_uint m_max_on_device_events; +#endif +#if defined(CL_VERSION_1_2) + std::string m_built_in_kernels; +#endif + cl_platform_id m_platform; + std::string m_name; + std::string m_vendor; + std::string m_driver_version; + std::string m_profile; + std::string m_version; +#if defined(CL_VERSION_1_1) + std::string m_opencl_c_version; +#endif + std::string m_extensions; +#if defined(CL_VERSION_1_2) + size_t m_printf_buffer_size; + cl_bool m_preferred_interop_user_sync; + cl_device_id m_parent_device; + cl_uint m_partition_max_sub_devices; + std::vector m_partition_properties; + cl_device_affinity_domain m_partition_affinity_domain; + std::vector m_partition_type; + cl_uint m_reference_count; +#endif +}; + +} // namespace opencl + + +class App +{ +public: + App(CommandLineParser& cmd); + ~App(); + + int initOpenCL(); + int initVideoSource(); + + int process_frame_with_open_cl(cv::Mat& frame, bool use_buffer, cl_mem* cl_buffer); + int process_cl_buffer_with_opencv(cl_mem buffer, size_t step, int rows, int 
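The App class declared above owns raw OpenCL handles (context, device, command queue, program, kernels) that its initOpenCL() method creates in the hunk that follows: enumerate platforms, take the first one that yields a GPU context, then create a command queue on that context's device. A minimal sketch of just that bootstrap, independent of the rest of the sample and with most error handling omitted:

    // OpenCL bootstrap sketch mirroring the flow of App::initOpenCL():
    // first platform with a GPU context wins; queue created OpenCL 1.x style.
    #include <iostream>
    #include <vector>
    #if __APPLE__
    #include <OpenCL/cl.h>
    #else
    #include <CL/cl.h>
    #endif

    int main()
    {
        cl_uint num = 0;
        if (clGetPlatformIDs(0, 0, &num) != CL_SUCCESS || num == 0)
            return 1;
        std::vector<cl_platform_id> platforms(num);
        clGetPlatformIDs(num, &platforms[0], 0);

        cl_context context = 0;
        cl_device_id device = 0;
        cl_int res = CL_SUCCESS;

        for (cl_uint i = 0; i < num && !context; i++)
        {
            cl_context_properties props[] =
                { CL_CONTEXT_PLATFORM, (cl_context_properties)platforms[i], 0 };
            context = clCreateContextFromType(props, CL_DEVICE_TYPE_GPU, 0, 0, &res);
        }
        if (!context)
            return 1;

        clGetContextInfo(context, CL_CONTEXT_DEVICES, sizeof(device), &device, 0);
        cl_command_queue queue = clCreateCommandQueue(context, device, 0, &res);
        std::cout << "OpenCL GPU context and queue created" << std::endl;

        if (queue)
            clReleaseCommandQueue(queue);
        clReleaseContext(context);
        return 0;
    }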
cols, int type, cv::UMat& u); + int process_cl_image_with_opencv(cl_mem image, cv::UMat& u); + + int run(); + + bool isRunning() { return m_running; } + bool doProcess() { return m_process; } + bool useBuffer() { return m_use_buffer; } + + void setRunning(bool running) { m_running = running; } + void setDoProcess(bool process) { m_process = process; } + void setUseBuffer(bool use_buffer) { m_use_buffer = use_buffer; } + +protected: + bool nextFrame(cv::Mat& frame) { return m_cap.read(frame); } + void handleKey(char key); + void timerStart(); + void timerEnd(); + std::string timeStr() const; + std::string message() const; + +private: + bool m_running; + bool m_process; + bool m_use_buffer; + + int64 m_t0; + int64 m_t1; + float m_time; + float m_frequency; + + string m_file_name; + int m_camera_id; + cv::VideoCapture m_cap; + cv::Mat m_frame; + cv::Mat m_frameGray; + + opencl::PlatformInfo m_platformInfo; + opencl::DeviceInfo m_deviceInfo; + std::vector m_platform_ids; + cl_context m_context; + cl_device_id m_device_id; + cl_command_queue m_queue; + cl_program m_program; + cl_kernel m_kernelBuf; + cl_kernel m_kernelImg; + cl_mem m_img_src; // used as src in case processing of cl image + cl_mem m_mem_obj; + cl_event m_event; +}; + + +App::App(CommandLineParser& cmd) +{ + cout << "\nPress ESC to exit\n" << endl; + cout << "\n 'p' to toggle ON/OFF processing\n" << endl; + cout << "\n SPACE to switch between OpenCL buffer/image\n" << endl; + + m_camera_id = cmd.get("camera"); + m_file_name = cmd.get("video"); + + m_running = false; + m_process = false; + m_use_buffer = false; + + m_t0 = 0; + m_t1 = 0; + m_time = 0.0; + m_frequency = (float)cv::getTickFrequency(); + + m_context = 0; + m_device_id = 0; + m_queue = 0; + m_program = 0; + m_kernelBuf = 0; + m_kernelImg = 0; + m_img_src = 0; + m_mem_obj = 0; + m_event = 0; +} // ctor + + +App::~App() +{ + if (m_queue) + { + clFinish(m_queue); + clReleaseCommandQueue(m_queue); + m_queue = 0; + } + + if (m_program) + { + clReleaseProgram(m_program); + m_program = 0; + } + + if (m_img_src) + { + clReleaseMemObject(m_img_src); + m_img_src = 0; + } + + if (m_mem_obj) + { + clReleaseMemObject(m_mem_obj); + m_mem_obj = 0; + } + + if (m_event) + { + clReleaseEvent(m_event); + } + + if (m_kernelBuf) + { + clReleaseKernel(m_kernelBuf); + m_kernelBuf = 0; + } + + if (m_kernelImg) + { + clReleaseKernel(m_kernelImg); + m_kernelImg = 0; + } + + if (m_device_id) + { + clReleaseDevice(m_device_id); + m_device_id = 0; + } + + if (m_context) + { + clReleaseContext(m_context); + m_context = 0; + } +} // dtor + + +int App::initOpenCL() +{ + cl_int res = CL_SUCCESS; + cl_uint num_entries = 0; + + res = clGetPlatformIDs(0, 0, &num_entries); + if (CL_SUCCESS != res) + return -1; + + m_platform_ids.resize(num_entries); + + res = clGetPlatformIDs(num_entries, &m_platform_ids[0], 0); + if (CL_SUCCESS != res) + return -1; + + unsigned int i; + + // create context from first platform with GPU device + for (i = 0; i < m_platform_ids.size(); i++) + { + cl_context_properties props[] = + { + CL_CONTEXT_PLATFORM, + (cl_context_properties)(m_platform_ids[i]), + 0 + }; + + m_context = clCreateContextFromType(props, CL_DEVICE_TYPE_GPU, 0, 0, &res); + if (0 == m_context || CL_SUCCESS != res) + continue; + + res = clGetContextInfo(m_context, CL_CONTEXT_DEVICES, sizeof(cl_device_id), &m_device_id, 0); + if (CL_SUCCESS != res) + return -1; + + m_queue = clCreateCommandQueue(m_context, m_device_id, 0, &res); + if (0 == m_queue || CL_SUCCESS != res) + return -1; + + const char* kernelSrc = + 
"__kernel " + "void bitwise_inv_buf_8uC1(" + " __global unsigned char* pSrcDst," + " int srcDstStep," + " int rows," + " int cols)" + "{" + " int x = get_global_id(0);" + " int y = get_global_id(1);" + " int idx = mad24(y, srcDstStep, x);" + " pSrcDst[idx] = ~pSrcDst[idx];" + "}" + "__kernel " + "void bitwise_inv_img_8uC1(" + " read_only image2d_t srcImg," + " write_only image2d_t dstImg)" + "{" + " int x = get_global_id(0);" + " int y = get_global_id(1);" + " int2 coord = (int2)(x, y);" + " uint4 val = read_imageui(srcImg, coord);" + " val.x = (~val.x) & 0x000000FF;" + " write_imageui(dstImg, coord, val);" + "}"; + size_t len = strlen(kernelSrc); + m_program = clCreateProgramWithSource(m_context, 1, &kernelSrc, &len, &res); + if (0 == m_program || CL_SUCCESS != res) + return -1; + + res = clBuildProgram(m_program, 1, &m_device_id, 0, 0, 0); + if (CL_SUCCESS != res) + return -1; + + m_kernelBuf = clCreateKernel(m_program, "bitwise_inv_buf_8uC1", &res); + if (0 == m_kernelBuf || CL_SUCCESS != res) + return -1; + + m_kernelImg = clCreateKernel(m_program, "bitwise_inv_img_8uC1", &res); + if (0 == m_kernelImg || CL_SUCCESS != res) + return -1; + + m_platformInfo.QueryInfo(m_platform_ids[i]); + m_deviceInfo.QueryInfo(m_device_id); + + // attach OpenCL context to OpenCV + cv::ocl::attachContext(m_platformInfo.Name(), m_platform_ids[i], m_context, m_device_id); + + break; + } + + return m_context != 0 ? CL_SUCCESS : -1; +} // initOpenCL() + + +int App::initVideoSource() +{ + try + { + if (!m_file_name.empty() && m_camera_id == -1) + { + m_cap.open(m_file_name.c_str()); + if (!m_cap.isOpened()) + throw std::runtime_error(std::string("can't open video file: " + m_file_name)); + } + else if (m_camera_id != -1) + { + m_cap.open(m_camera_id); + if (!m_cap.isOpened()) + { + std::stringstream msg; + msg << "can't open camera: " << m_camera_id; + throw std::runtime_error(msg.str()); + } + } + else + throw std::runtime_error(std::string("specify video source")); + } + + catch (std::exception e) + { + cerr << "ERROR: " << e.what() << std::endl; + return -1; + } + + return 0; +} // initVideoSource() + + +// this function is an example of "typical" OpenCL processing pipeline +// It creates OpenCL buffer or image, depending on use_buffer flag, +// from input media frame and process these data +// (inverts each pixel value in half of frame) with OpenCL kernel +int App::process_frame_with_open_cl(cv::Mat& frame, bool use_buffer, cl_mem* mem_obj) +{ + cl_int res = CL_SUCCESS; + + CV_Assert(mem_obj); + + cl_kernel kernel = 0; + cl_mem mem = mem_obj[0]; + + if (0 == mem || 0 == m_img_src) + { + // allocate/delete cl memory objects every frame for the simplicity. + // in real applicaton more efficient pipeline can be built. 
+ + if (use_buffer) + { + cl_mem_flags flags = CL_MEM_READ_WRITE | CL_MEM_USE_HOST_PTR; + + mem = clCreateBuffer(m_context, flags, frame.total(), frame.ptr(), &res); + if (0 == mem || CL_SUCCESS != res) + return -1; + + res = clSetKernelArg(m_kernelBuf, 0, sizeof(cl_mem), &mem); + if (CL_SUCCESS != res) + return -1; + + res = clSetKernelArg(m_kernelBuf, 1, sizeof(int), &frame.step[0]); + if (CL_SUCCESS != res) + return -1; + + res = clSetKernelArg(m_kernelBuf, 2, sizeof(int), &frame.rows); + if (CL_SUCCESS != res) + return -1; + + int cols2 = frame.cols / 2; + res = clSetKernelArg(m_kernelBuf, 3, sizeof(int), &cols2); + if (CL_SUCCESS != res) + return -1; + + kernel = m_kernelBuf; + } + else + { + cl_mem_flags flags_src = CL_MEM_READ_ONLY | CL_MEM_USE_HOST_PTR; + + cl_image_format fmt; + fmt.image_channel_order = CL_R; + fmt.image_channel_data_type = CL_UNSIGNED_INT8; + + cl_image_desc desc_src; + desc_src.image_type = CL_MEM_OBJECT_IMAGE2D; + desc_src.image_width = frame.cols; + desc_src.image_height = frame.rows; + desc_src.image_depth = 0; + desc_src.image_array_size = 0; + desc_src.image_row_pitch = frame.step[0]; + desc_src.image_slice_pitch = 0; + desc_src.num_mip_levels = 0; + desc_src.num_samples = 0; + desc_src.buffer = 0; + m_img_src = clCreateImage(m_context, flags_src, &fmt, &desc_src, frame.ptr(), &res); + if (0 == m_img_src || CL_SUCCESS != res) + return -1; + + cl_mem_flags flags_dst = CL_MEM_READ_WRITE | CL_MEM_ALLOC_HOST_PTR; + + cl_image_desc desc_dst; + desc_dst.image_type = CL_MEM_OBJECT_IMAGE2D; + desc_dst.image_width = frame.cols; + desc_dst.image_height = frame.rows; + desc_dst.image_depth = 0; + desc_dst.image_array_size = 0; + desc_dst.image_row_pitch = 0; + desc_dst.image_slice_pitch = 0; + desc_dst.num_mip_levels = 0; + desc_dst.num_samples = 0; + desc_dst.buffer = 0; + mem = clCreateImage(m_context, flags_dst, &fmt, &desc_dst, 0, &res); + if (0 == mem || CL_SUCCESS != res) + return -1; + + size_t origin[] = { 0, 0, 0 }; + size_t region[] = { frame.cols, frame.rows, 1 }; + res = clEnqueueCopyImage(m_queue, m_img_src, mem, origin, origin, region, 0, 0, &m_event); + if (CL_SUCCESS != res) + return -1; + + res = clWaitForEvents(1, &m_event); + if (CL_SUCCESS != res) + return -1; + + res = clSetKernelArg(m_kernelImg, 0, sizeof(cl_mem), &m_img_src); + if (CL_SUCCESS != res) + return -1; + + res = clSetKernelArg(m_kernelImg, 1, sizeof(cl_mem), &mem); + if (CL_SUCCESS != res) + return -1; + + kernel = m_kernelImg; + } + } + + m_event = clCreateUserEvent(m_context, &res); + if (0 == m_event || CL_SUCCESS != res) + return -1; + + // process left half of frame in OpenCL + size_t size[] = { frame.cols / 2, frame.rows }; + res = clEnqueueNDRangeKernel(m_queue, kernel, 2, 0, size, 0, 0, 0, &m_event); + if (CL_SUCCESS != res) + return -1; + + res = clWaitForEvents(1, &m_event); + if (CL_SUCCESS != res) + return - 1; + + mem_obj[0] = mem; + + return 0; +} + + +// this function is an example of interoperability between OpenCL buffer +// and OpenCV UMat objects. 
It converts (without copying data) OpenCL buffer +// to OpenCV UMat and then do blur on these data +int App::process_cl_buffer_with_opencv(cl_mem buffer, size_t step, int rows, int cols, int type, cv::UMat& u) +{ + cv::ocl::convertFromBuffer(buffer, step, rows, cols, type, u); + + // process right half of frame in OpenCV + cv::Point pt(u.cols / 2, 0); + cv::Size sz(u.cols / 2, u.rows); + cv::Rect roi(pt, sz); + cv::UMat uroi(u, roi); + cv::blur(uroi, uroi, cv::Size(7, 7), cv::Point(-3, -3)); + + if (buffer) + clReleaseMemObject(buffer); + m_mem_obj = 0; + + return 0; +} + + +// this function is an example of interoperability between OpenCL image +// and OpenCV UMat objects. It converts OpenCL image +// to OpenCV UMat and then do blur on these data +int App::process_cl_image_with_opencv(cl_mem image, cv::UMat& u) +{ + cv::ocl::convertFromImage(image, u); + + // process right half of frame in OpenCV + cv::Point pt(u.cols / 2, 0); + cv::Size sz(u.cols / 2, u.rows); + cv::Rect roi(pt, sz); + cv::UMat uroi(u, roi); + cv::blur(uroi, uroi, cv::Size(7, 7), cv::Point(-3, -3)); + + if (image) + clReleaseMemObject(image); + m_mem_obj = 0; + + if (m_img_src) + clReleaseMemObject(m_img_src); + m_img_src = 0; + + return 0; +} + + +int App::run() +{ + if (0 != initOpenCL()) + return -1; + + if (0 != initVideoSource()) + return -1; + + Mat img_to_show; + + // set running state until ESC pressed + setRunning(true); + // set process flag to show some data processing + // can be toggled on/off by 'p' button + setDoProcess(true); + // set use buffer flag, + // when it is set to true, will demo interop opencl buffer and cv::Umat, + // otherwise demo interop opencl image and cv::UMat + // can be switched on/of by SPACE button + setUseBuffer(true); + + // Iterate over all frames + while (isRunning() && nextFrame(m_frame)) + { + cv::cvtColor(m_frame, m_frameGray, COLOR_BGR2GRAY); + + UMat uframe; + + // work + timerStart(); + + if (doProcess()) + { + process_frame_with_open_cl(m_frameGray, useBuffer(), &m_mem_obj); + + if (useBuffer()) + process_cl_buffer_with_opencv( + m_mem_obj, m_frameGray.step[0], m_frameGray.rows, m_frameGray.cols, m_frameGray.type(), uframe); + else + process_cl_image_with_opencv(m_mem_obj, uframe); + } + else + { + m_frameGray.copyTo(uframe); + } + + timerEnd(); + + uframe.copyTo(img_to_show); + + putText(img_to_show, "Version : " + m_platformInfo.Version(), Point(5, 30), FONT_HERSHEY_SIMPLEX, 1., Scalar(255, 100, 0), 2); + putText(img_to_show, "Name : " + m_platformInfo.Name(), Point(5, 60), FONT_HERSHEY_SIMPLEX, 1., Scalar(255, 100, 0), 2); + putText(img_to_show, "Device : " + m_deviceInfo.Name(), Point(5, 90), FONT_HERSHEY_SIMPLEX, 1., Scalar(255, 100, 0), 2); + cv::String memtype = useBuffer() ? 
"buffer" : "image"; + putText(img_to_show, "interop with OpenCL " + memtype, Point(5, 120), FONT_HERSHEY_SIMPLEX, 1., Scalar(255, 100, 0), 2); + putText(img_to_show, "Time : " + timeStr() + " msec", Point(5, 150), FONT_HERSHEY_SIMPLEX, 1., Scalar(255, 100, 0), 2); + + imshow("opencl_interop", img_to_show); + + handleKey((char)waitKey(3)); + } + + return 0; +} + + +void App::handleKey(char key) +{ + switch (key) + { + case 27: + setRunning(false); + break; + + case ' ': + setUseBuffer(!useBuffer()); + break; + + case 'p': + case 'P': + setDoProcess( !doProcess() ); + break; + + default: + break; + } +} + + +inline void App::timerStart() +{ + m_t0 = getTickCount(); +} + + +inline void App::timerEnd() +{ + m_t1 = getTickCount(); + int64 delta = m_t1 - m_t0; + m_time = (delta / m_frequency) * 1000; // units msec +} + + +inline string App::timeStr() const +{ + stringstream ss; + ss << std::fixed << std::setprecision(1) << m_time; + return ss.str(); +} + + +int main(int argc, char** argv) +{ + const char* keys = + "{ help h ? | | print help message }" + "{ camera c | -1 | use camera as input }" + "{ video v | | use video as input }"; + + CommandLineParser cmd(argc, argv, keys); + if (cmd.has("help")) + { + cmd.printMessage(); + return EXIT_SUCCESS; + } + + App app(cmd); + + try + { + app.run(); + } + + catch (const cv::Exception& e) + { + cout << "error: " << e.what() << endl; + return 1; + } + + catch (const std::exception& e) + { + cout << "error: " << e.what() << endl; + return 1; + } + + catch (...) + { + cout << "unknown exception" << endl; + return 1; + } + + return EXIT_SUCCESS; +} // main() diff --git a/samples/opengl/CMakeLists.txt b/samples/opengl/CMakeLists.txt new file mode 100644 index 0000000000..4bf48925e3 --- /dev/null +++ b/samples/opengl/CMakeLists.txt @@ -0,0 +1,45 @@ +SET(OPENCV_OPENGL_SAMPLES_REQUIRED_DEPS opencv_core opencv_imgproc opencv_imgcodecs opencv_videoio opencv_highgui) + +ocv_check_dependencies(${OPENCV_OPENGL_SAMPLES_REQUIRED_DEPS}) + +if(BUILD_EXAMPLES AND OCV_DEPENDENCIES_FOUND) + set(project "opengl") + string(TOUPPER "${project}" project_upper) + + project("${project}_samples") + + ocv_include_modules_recurse(${OPENCV_OPENGL_SAMPLES_REQUIRED_DEPS}) + + # --------------------------------------------- + # Define executable targets + # --------------------------------------------- + MACRO(OPENCV_DEFINE_OPENGL_EXAMPLE name srcs) + set(the_target "example_${project}_${name}") + add_executable(${the_target} ${srcs}) + + ocv_target_link_libraries(${the_target} ${OPENCV_LINKER_LIBS} ${OPENCV_OPENGL_SAMPLES_REQUIRED_DEPS}) + + set_target_properties(${the_target} PROPERTIES + OUTPUT_NAME "${project}-example-${name}" + PROJECT_LABEL "(EXAMPLE_${project_upper}) ${name}") + + if(ENABLE_SOLUTION_FOLDERS) + set_target_properties(${the_target} PROPERTIES FOLDER "samples//${project}") + endif() + + if(WIN32) + if(MSVC AND NOT BUILD_SHARED_LIBS) + set_target_properties(${the_target} PROPERTIES LINK_FLAGS "/NODEFAULTLIB:atlthunk.lib /NODEFAULTLIB:atlsd.lib /DEBUG") + endif() + install(TARGETS ${the_target} RUNTIME DESTINATION "${OPENCV_SAMPLES_BIN_INSTALL_PATH}/${project}" COMPONENT samples) + endif() + ENDMACRO() + + file(GLOB all_samples RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} *.cpp) + + foreach(sample_filename ${all_samples}) + get_filename_component(sample ${sample_filename} NAME_WE) + file(GLOB sample_srcs RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} ${sample}.*) + OPENCV_DEFINE_OPENGL_EXAMPLE(${sample} ${sample_srcs}) + endforeach() +endif() diff --git 
a/samples/opengl/opengl_interop.cpp b/samples/opengl/opengl_interop.cpp new file mode 100644 index 0000000000..4900d1c5dd --- /dev/null +++ b/samples/opengl/opengl_interop.cpp @@ -0,0 +1,526 @@ +/* +// Sample demonstrating interoperability of OpenCV UMat with OpenGL texture. +// At first, the data obtained from video file or camera and placed onto +// OpenGL texture, following mapping of this OpenGL texture to OpenCV UMat +// and call cv::Blur function. The result is mapped back to OpenGL texture +// and rendered through OpenGL API. +*/ +#if defined(WIN32) || defined(_WIN32) +# define WIN32_LEAN_AND_MEAN +# include <windows.h> +#elif defined(__linux__) +# include <X11/Xlib.h> +# include <X11/keysym.h> +#endif + +#include <iostream> +#include <queue> +#include <string> + +#include <stdio.h> + +#include "opencv2/core.hpp" +#include "opencv2/core/opengl.hpp" +#include "opencv2/core/ocl.hpp" +#include "opencv2/imgproc.hpp" +#include "opencv2/videoio.hpp" + +#include "winapp.hpp" + +#if defined(WIN32) || defined(_WIN32) +# pragma comment(lib, "opengl32.lib") +# pragma comment(lib, "glu32.lib") +#endif + +/* +// Press key to +// 0 no processing +// 1 processing on CPU +// 2 processing on GPU +// 9 toggle texture/buffer +// space toggle processing on/off, preserve mode +// esc quit +*/ + +class GLWinApp : public WinApp +{ +public: + GLWinApp(int width, int height, std::string& window_name, cv::VideoCapture& cap) : + WinApp(width, height, window_name) + { + m_shutdown = false; + m_mode = 0; + m_modeStr[0] = cv::String("Texture/No processing"); + m_modeStr[1] = cv::String("Texture/Processing on CPU"); + m_modeStr[2] = cv::String("Texture/Processing on GPU"); + m_modeStr[3] = cv::String("Buffer/No processing"); + m_modeStr[4] = cv::String("Buffer/Processing on CPU"); + m_modeStr[5] = cv::String("Buffer/Processing on GPU"); + m_disableProcessing = false; + m_cap = cap; + } + + ~GLWinApp() {} + + virtual void cleanup() + { + m_shutdown = true; +#if defined(__linux__) + glXMakeCurrent(m_display, None, NULL); + glXDestroyContext(m_display, m_glctx); +#endif + WinApp::cleanup(); + } + +#if defined(WIN32) || defined(_WIN32) + virtual LRESULT CALLBACK WndProc(HWND hWnd, UINT message, WPARAM wParam, LPARAM lParam) + { + switch (message) + { + case WM_CHAR: + if (wParam >= '0' && wParam <= '2') + { + set_mode((char)wParam - '0'); + return 0; + } + else if (wParam == '9') + { + toggle_buffer(); + return 0; + } + else if (wParam == VK_SPACE) + { + m_disableProcessing = !m_disableProcessing; + return 0; + } + else if (wParam == VK_ESCAPE) + { + cleanup(); + return 0; + } + break; + + case WM_CLOSE: + cleanup(); + return 0; + + case WM_DESTROY: + ::PostQuitMessage(0); + return 0; + } + + return ::DefWindowProc(hWnd, message, wParam, lParam); + } +#endif + + static float getFps() + { + static std::queue<int64> time_queue; + + int64 now = cv::getTickCount(); + int64 then = 0; + time_queue.push(now); + + if (time_queue.size() >= 2) + then = time_queue.front(); + + if (time_queue.size() >= 25) + time_queue.pop(); + + return time_queue.size() * (float)cv::getTickFrequency() / (now - then); + } + +#if defined(__linux__) + int handle_event(XEvent& e) + { + switch(e.type) + { + case ClientMessage: + if ((Atom)e.xclient.data.l[0] == m_WM_DELETE_WINDOW) + { + m_end_loop = true; + cleanup(); + } + else + { + return 0; + } + break; + case Expose: + render(); + break; + case KeyPress: + switch(keycode_to_keysym(e.xkey.keycode)) + { + case XK_space: + m_disableProcessing = !m_disableProcessing; + break; + case XK_0: + set_mode(0); + break; + case XK_1: + set_mode(1); + break; + case XK_2: + set_mode(2);
break; + case XK_9: + toggle_buffer(); + break; + case XK_Escape: + m_end_loop = true; + cleanup(); + break; + } + break; + default: + return 0; + } + return 1; + } +#endif + + int init() + { +#if defined(WIN32) || defined(_WIN32) + m_hDC = GetDC(m_hWnd); + + if (setup_pixel_format() != 0) + { + std::cerr << "Can't setup pixel format" << std::endl; + return -1; + } + + m_hRC = wglCreateContext(m_hDC); + wglMakeCurrent(m_hDC, m_hRC); +#elif defined(__linux__) + m_glctx = glXCreateContext(m_display, m_visual_info, NULL, GL_TRUE); + glXMakeCurrent(m_display, m_window, m_glctx); +#endif + + glEnable(GL_TEXTURE_2D); + glEnable(GL_DEPTH_TEST); + + glViewport(0, 0, m_width, m_height); + + if (cv::ocl::haveOpenCL()) + { + (void) cv::ogl::ocl::initializeContextFromGL(); + } + + m_oclDevName = cv::ocl::useOpenCL() ? + cv::ocl::Context::getDefault().device(0).name() : + (char*) "No OpenCL device"; + + return 0; + } // init() + + int get_frame(cv::ogl::Texture2D& texture, cv::ogl::Buffer& buffer) + { + if (!m_cap.read(m_frame_bgr)) + return -1; + + cv::cvtColor(m_frame_bgr, m_frame_rgba, CV_RGB2RGBA); + + if (use_buffer()) + buffer.copyFrom(m_frame_rgba); + else + texture.copyFrom(m_frame_rgba); + + return 0; + } + + void print_info(int mode, float fps, cv::String oclDevName) + { +#if defined(WIN32) || defined(_WIN32) + HDC hDC = m_hDC; + + HFONT hFont = (HFONT)::GetStockObject(SYSTEM_FONT); + + HFONT hOldFont = (HFONT)::SelectObject(hDC, hFont); + + if (hOldFont) + { + TEXTMETRIC tm; + ::GetTextMetrics(hDC, &tm); + + char buf[256+1]; + int y = 0; + + buf[0] = 0; + sprintf_s(buf, sizeof(buf)-1, "Mode: %s", m_modeStr[mode].c_str()); + ::TextOut(hDC, 0, y, buf, (int)strlen(buf)); + + y += tm.tmHeight; + buf[0] = 0; + sprintf_s(buf, sizeof(buf)-1, "FPS: %2.1f", fps); + ::TextOut(hDC, 0, y, buf, (int)strlen(buf)); + + y += tm.tmHeight; + buf[0] = 0; + sprintf_s(buf, sizeof(buf)-1, "OpenCL device: %s", oclDevName.c_str()); + ::TextOut(hDC, 0, y, buf, (int)strlen(buf)); + + ::SelectObject(hDC, hOldFont); + } +#elif defined(__linux__) + + char buf[256+1]; + snprintf(buf, sizeof(buf)-1, "FPS: %2.1f Mode: %s Device: %s", fps, m_modeStr[mode].c_str(), oclDevName.c_str()); + XStoreName(m_display, m_window, buf); +#endif + } + + void idle() + { + render(); + } + + int render() + { + try + { + if (m_shutdown) + return 0; + + int r; + cv::ogl::Texture2D texture; + cv::ogl::Buffer buffer; + + r = get_frame(texture, buffer); + if (r != 0) + { + return -1; + } + + bool do_buffer = use_buffer(); + switch (get_mode()) + { + case 0: + // no processing + break; + + case 1: + { + // process video frame on CPU + cv::Mat m(m_height, m_width, CV_8UC4); + + if (do_buffer) + buffer.copyTo(m); + else + texture.copyTo(m); + + if (!m_disableProcessing) + { + // blur texture image with OpenCV on CPU + cv::blur(m, m, cv::Size(15, 15), cv::Point(-7, -7)); + } + + if (do_buffer) + buffer.copyFrom(m); + else + texture.copyFrom(m); + + break; + } + + case 2: + { + // process video frame on GPU + cv::UMat u; + + if (do_buffer) + u = cv::ogl::mapGLBuffer(buffer); + else + cv::ogl::convertFromGLTexture2D(texture, u); + + if (!m_disableProcessing) + { + // blur texture image with OpenCV on GPU with OpenCL + cv::blur(u, u, cv::Size(15, 15), cv::Point(-7, -7)); + } + + if (do_buffer) + cv::ogl::unmapGLBuffer(u); + else + cv::ogl::convertToGLTexture2D(u, texture); + + break; + } + + } // switch + + if (do_buffer) // buffer -> texture + { + cv::Mat m(m_height, m_width, CV_8UC4); + buffer.copyTo(m); + texture.copyFrom(m); + } + +#if 
defined(__linux__) + XWindowAttributes window_attributes; + XGetWindowAttributes(m_display, m_window, &window_attributes); + glViewport(0, 0, window_attributes.width, window_attributes.height); +#endif + + glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); + glLoadIdentity(); + glEnable(GL_TEXTURE_2D); + + texture.bind(); + + glBegin(GL_QUADS); + glTexCoord2f(0.0f, 0.0f); glVertex3f(-1.0f, 1.0f, 0.1f); + glTexCoord2f(0.0f, 1.0f); glVertex3f(-1.0f, -1.0f, 0.1f); + glTexCoord2f(1.0f, 1.0f); glVertex3f(1.0f, -1.0f, 0.1f); + glTexCoord2f(1.0f, 0.0f); glVertex3f(1.0f, 1.0f, 0.1f); + glEnd(); + +#if defined(WIN32) || defined(_WIN32) + SwapBuffers(m_hDC); +#elif defined(__linux__) + glXSwapBuffers(m_display, m_window); +#endif + + print_info(m_mode, getFps(), m_oclDevName); + } + + + catch (cv::Exception& e) + { + std::cerr << "Exception: " << e.what() << std::endl; + return 10; + } + + return 0; + } + +protected: + +#if defined(WIN32) || defined(_WIN32) + int setup_pixel_format() + { + PIXELFORMATDESCRIPTOR pfd; + + pfd.nSize = sizeof(PIXELFORMATDESCRIPTOR); + pfd.nVersion = 1; + pfd.dwFlags = PFD_DRAW_TO_WINDOW | PFD_SUPPORT_OPENGL | PFD_DOUBLEBUFFER; + pfd.iPixelType = PFD_TYPE_RGBA; + pfd.cColorBits = 24; + pfd.cRedBits = 8; + pfd.cRedShift = 0; + pfd.cGreenBits = 8; + pfd.cGreenShift = 0; + pfd.cBlueBits = 8; + pfd.cBlueShift = 0; + pfd.cAlphaBits = 8; + pfd.cAlphaShift = 0; + pfd.cAccumBits = 0; + pfd.cAccumRedBits = 0; + pfd.cAccumGreenBits = 0; + pfd.cAccumBlueBits = 0; + pfd.cAccumAlphaBits = 0; + pfd.cDepthBits = 24; + pfd.cStencilBits = 8; + pfd.cAuxBuffers = 0; + pfd.iLayerType = PFD_MAIN_PLANE; + pfd.bReserved = 0; + pfd.dwLayerMask = 0; + pfd.dwVisibleMask = 0; + pfd.dwDamageMask = 0; + + int pfmt = ChoosePixelFormat(m_hDC, &pfd); + if (pfmt == 0) + return -1; + if (SetPixelFormat(m_hDC, pfmt, &pfd) == 0) + return -2; + return 0; + } +#endif + +#if defined(__linux__) + KeySym keycode_to_keysym(unsigned keycode) + { // note that XKeycodeToKeysym() is considered deprecated + int keysyms_per_keycode_return = 0; + KeySym *keysyms = XGetKeyboardMapping(m_display, keycode, 1, &keysyms_per_keycode_return); + KeySym keysym = keysyms[0]; + XFree(keysyms); + return keysym; + } +#endif + + // modes: 0,1,2 - use texture + // 3,4,5 - use buffer + bool use_buffer() + { + return bool(m_mode >= 3); + } + void toggle_buffer() + { + if (m_mode < 3) + m_mode += 3; + else + m_mode -= 3; + } + int get_mode() + { + return (m_mode % 3); + } + void set_mode(int mode) + { + bool do_buffer = bool(m_mode >= 3); + m_mode = (mode % 3); + if (do_buffer) + m_mode += 3; + } + +private: + bool m_shutdown; + int m_mode; + cv::String m_modeStr[3*2]; + int m_disableProcessing; +#if defined(WIN32) || defined(_WIN32) + HDC m_hDC; + HGLRC m_hRC; +#elif defined(__linux__) + GLXContext m_glctx; +#endif + cv::VideoCapture m_cap; + cv::Mat m_frame_bgr; + cv::Mat m_frame_rgba; + cv::String m_oclDevName; +}; + +using namespace cv; + +int main(int argc, char** argv) +{ + cv::VideoCapture cap; + + if (argc > 1) + cap.open(argv[1]); + else + cap.open(0); + + int width = (int)cap.get(CAP_PROP_FRAME_WIDTH); + int height = (int)cap.get(CAP_PROP_FRAME_HEIGHT); + std::string wndname = "WGL Window"; + + GLWinApp app(width, height, wndname, cap); + + try + { + app.create(); + return app.run(); + } + catch (cv::Exception& e) + { + std::cerr << "Exception: " << e.what() << std::endl; + return 10; + } + catch (...) 
+ { + std::cerr << "FATAL ERROR: Unknown exception" << std::endl; + return 11; + } +} diff --git a/samples/opengl/winapp.hpp b/samples/opengl/winapp.hpp new file mode 100644 index 0000000000..b70dfa0fe9 --- /dev/null +++ b/samples/opengl/winapp.hpp @@ -0,0 +1,221 @@ +#if defined(WIN32) || defined(_WIN32) +# define WIN32_LEAN_AND_MEAN +# include +#elif defined(__linux__) +# include +# include +# include +#endif + +#include + +#include +#if defined(WIN32) || defined(_WIN32) +# include +#elif defined(__linux__) +# include +#endif + +#if defined(WIN32) || defined(_WIN32) +# define WINCLASS "WinAppWnd" +#endif + +#define SAFE_RELEASE(p) if (p) { p->Release(); p = NULL; } + +class WinApp +{ +public: + WinApp(int width, int height, std::string& window_name) + { + m_width = width; + m_height = height; + m_window_name = window_name; +#if defined(WIN32) || defined(_WIN32) + m_hInstance = ::GetModuleHandle(NULL); +#endif + } + + virtual ~WinApp() + { +#if defined(WIN32) || defined(_WIN32) + ::UnregisterClass(WINCLASS, m_hInstance); +#endif + } + + int create() + { +#if defined(WIN32) || defined(_WIN32) + WNDCLASSEX wcex; + + wcex.cbSize = sizeof(WNDCLASSEX); + wcex.style = CS_HREDRAW | CS_VREDRAW; + wcex.lpfnWndProc = &WinApp::StaticWndProc; + wcex.cbClsExtra = 0; + wcex.cbWndExtra = 0; + wcex.hInstance = m_hInstance; + wcex.hIcon = LoadIcon(0, IDI_APPLICATION); + wcex.hCursor = LoadCursor(0, IDC_ARROW); + wcex.hbrBackground = 0; + wcex.lpszMenuName = 0L; + wcex.lpszClassName = WINCLASS; + wcex.hIconSm = 0; + + ATOM wc = ::RegisterClassEx(&wcex); + + RECT rc = { 0, 0, m_width, m_height }; + ::AdjustWindowRect(&rc, WS_OVERLAPPEDWINDOW, false); + + m_hWnd = ::CreateWindow( + (LPCTSTR)wc, m_window_name.c_str(), + WS_OVERLAPPEDWINDOW, CW_USEDEFAULT, CW_USEDEFAULT, + rc.right - rc.left, rc.bottom - rc.top, + NULL, NULL, m_hInstance, (void*)this); + + if (!m_hWnd) + return -1; + + ::ShowWindow(m_hWnd, SW_SHOW); + ::UpdateWindow(m_hWnd); + ::SetFocus(m_hWnd); +#elif defined(__linux__) + m_display = XOpenDisplay(NULL); + + if (m_display == NULL) + { + return -1; + } + + m_WM_DELETE_WINDOW = XInternAtom(m_display, "WM_DELETE_WINDOW", False); + + static GLint visual_attributes[] = { GLX_RGBA, GLX_DEPTH_SIZE, 24, GLX_DOUBLEBUFFER, None }; + m_visual_info = glXChooseVisual(m_display, 0, visual_attributes); + + if (m_visual_info == NULL) + { + XCloseDisplay(m_display); + return -2; + } + + Window root = DefaultRootWindow(m_display); + + m_event_mask = ExposureMask | KeyPressMask; + + XSetWindowAttributes window_attributes; + window_attributes.colormap = XCreateColormap(m_display, root, m_visual_info->visual, AllocNone); + window_attributes.event_mask = m_event_mask; + + m_window = XCreateWindow( + m_display, root, 0, 0, m_width, m_height, 0, m_visual_info->depth, + InputOutput, m_visual_info->visual, CWColormap | CWEventMask, &window_attributes); + + XMapWindow(m_display, m_window); + XSetWMProtocols(m_display, m_window, &m_WM_DELETE_WINDOW, 1); + XStoreName(m_display, m_window, m_window_name.c_str()); +#endif + + return init(); + } + + virtual void cleanup() + { +#if defined(WIN32) || defined(_WIN32) + ::DestroyWindow(m_hWnd); +#elif defined(__linux__) + XDestroyWindow(m_display, m_window); + XCloseDisplay(m_display); +#endif + } + +#if defined(WIN32) || defined(_WIN32) + virtual LRESULT CALLBACK WndProc(HWND hWnd, UINT message, WPARAM wParam, LPARAM lParam) = 0; +#endif + + int run() + { +#if defined(WIN32) || defined(_WIN32) + MSG msg; + + ::ZeroMemory(&msg, sizeof(msg)); + + while (msg.message != WM_QUIT) + { 
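// Non-blocking Win32 message pump: PeekMessage(PM_REMOVE) drains any pending window messages,
// and whenever the queue is empty the loop falls through to idle(), which the derived GLWinApp
// uses to render the next video frame, so drawing continues even when no input arrives.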
+ if (::PeekMessage(&msg, NULL, 0U, 0U, PM_REMOVE)) + { + ::TranslateMessage(&msg); + ::DispatchMessage(&msg); + } + else + { + idle(); + } + } + + return static_cast(msg.wParam); +#elif defined(__linux__) + m_end_loop = false; + + do { + XEvent e; + + if (!XCheckWindowEvent(m_display, m_window, m_event_mask, &e) || !handle_event(e)) + { + idle(); + } + } while (!m_end_loop); +#endif + + return 0; + } + +protected: + +#if defined(WIN32) || defined(_WIN32) + static LRESULT CALLBACK StaticWndProc(HWND hWnd, UINT message, WPARAM wParam, LPARAM lParam) + { + WinApp* pWnd; + + if (message == WM_NCCREATE) + { + LPCREATESTRUCT pCreateStruct = ((LPCREATESTRUCT)lParam); + pWnd = (WinApp*)(pCreateStruct->lpCreateParams); + ::SetWindowLongPtr(hWnd, GWLP_USERDATA, (LONG_PTR)pWnd); + } + + pWnd = GetObjectFromWindow(hWnd); + + if (pWnd) + return pWnd->WndProc(hWnd, message, wParam, lParam); + else + return ::DefWindowProc(hWnd, message, wParam, lParam); + } + + inline static WinApp* GetObjectFromWindow(HWND hWnd) + { + return (WinApp*)::GetWindowLongPtr(hWnd, GWLP_USERDATA); + } +#endif + +#if defined(__linux__) + virtual int handle_event(XEvent& e) = 0; +#endif + + virtual int init() = 0; + virtual int render() = 0; + + virtual void idle() = 0; + +#if defined(WIN32) || defined(_WIN32) + HINSTANCE m_hInstance; + HWND m_hWnd; +#elif defined(__linux__) + Display* m_display; + XVisualInfo* m_visual_info; + Window m_window; + long m_event_mask; + Atom m_WM_DELETE_WINDOW; + bool m_end_loop; +#endif + int m_width; + int m_height; + std::string m_window_name; +}; diff --git a/samples/python2/camshift.py b/samples/python2/camshift.py index 6e9402095d..72c790803f 100755 --- a/samples/python2/camshift.py +++ b/samples/python2/camshift.py @@ -83,7 +83,7 @@ class App(object): hsv_roi = hsv[y0:y1, x0:x1] mask_roi = mask[y0:y1, x0:x1] hist = cv2.calcHist( [hsv_roi], [0], mask_roi, [16], [0, 180] ) - cv2.normalize(hist, hist, 0, 255, cv2.NORM_MINMAX); + cv2.normalize(hist, hist, 0, 255, cv2.NORM_MINMAX) self.hist = hist.reshape(-1) self.show_hist() diff --git a/samples/winrt/FaceDetection/FaceDetection/MainPage.xaml b/samples/winrt/FaceDetection/FaceDetection/MainPage.xaml index c9ebdd2017..31c6ba8ac6 100644 --- a/samples/winrt/FaceDetection/FaceDetection/MainPage.xaml +++ b/samples/winrt/FaceDetection/FaceDetection/MainPage.xaml @@ -1,16 +1,34 @@ - + -
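Editor's note: for quick reference, the OpenCL-to-UMat interop path exercised by the OpenCL sample above reduces to the short sequence below. This is an illustrative sketch only (the function and parameter names here are placeholders, error handling is omitted), not part of the patch; it uses only the cv::ocl calls that the sample itself demonstrates.

    // Illustrative sketch: wrap an existing cl_mem in a cv::UMat and run an OpenCV
    // T-API call on it without copying, mirroring App::initOpenCL() and
    // App::process_cl_buffer_with_opencv() above. All names are placeholders.
    #include <CL/cl.h>
    #include "opencv2/core.hpp"
    #include "opencv2/core/ocl.hpp"
    #include "opencv2/imgproc.hpp"

    void blur_shared_buffer(const char* platformName, cl_platform_id platform,
                            cl_context ctx, cl_device_id dev,
                            cl_mem buf, size_t step, int rows, int cols)
    {
        // attach the application's OpenCL context to OpenCV (do this once per process)
        cv::ocl::attachContext(platformName, platform, ctx, dev);

        // zero-copy wrap of the cl_mem as an 8UC1 UMat
        cv::UMat u;
        cv::ocl::convertFromBuffer(buf, step, rows, cols, CV_8UC1, u);

        // any T-API call now executes on the attached device
        cv::blur(u, u, cv::Size(7, 7));
    }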