Merge remote-tracking branch 'upstream/3.4' into merge-3.4

This commit is contained in:
Alexander Alekhin 2019-11-17 22:51:16 +00:00 committed by Alexander Alekhin
commit fc41c18c6f
37 changed files with 1265 additions and 359 deletions

1
.gitignore vendored
View File

@ -22,3 +22,4 @@ bin/
*.log
*.tlog
build
node_modules

View File

@ -22,6 +22,7 @@ else()
-Wenum-compare-switch
-Wsuggest-override -Winconsistent-missing-override
-Wimplicit-fallthrough
-Warray-bounds # GCC 9+
)
endif()
if(CV_ICC)

View File

@ -385,6 +385,19 @@ if(MSVC)
add_definitions(-D_VARIADIC_MAX=10)
endif()
if(CMAKE_SYSTEM_NAME STREQUAL "Windows")
get_directory_property(__DIRECTORY_COMPILE_DEFINITIONS COMPILE_DEFINITIONS)
if((NOT " ${CMAKE_CXX_FLAGS} ${CMAKE_CXX_FLAGS_RELEASE} ${OPENCV_EXTRA_CXX_FLAGS} ${OPENCV_EXTRA_FLAGS_RELEASE} ${__DIRECTORY_COMPILE_DEFINITIONS}" MATCHES "_WIN32_WINNT"
AND NOT OPENCV_CMAKE_SKIP_MACRO_WIN32_WINNT)
OR OPENCV_CMAKE_FORCE_MACRO_WIN32_WINNT
)
# https://docs.microsoft.com/en-us/cpp/porting/modifying-winver-and-win32-winnt
# Target Windows 7 API
set(OPENCV_CMAKE_MACRO_WIN32_WINNT "0x0601" CACHE STRING "Value of _WIN32_WINNT macro")
add_definitions(-D_WIN32_WINNT=${OPENCV_CMAKE_MACRO_WIN32_WINNT})
endif()
endif()
# Enable compiler options for OpenCV modules/apps/samples only (ignore 3rdparty)
macro(ocv_add_modules_compiler_options)
if(MSVC AND NOT OPENCV_SKIP_MSVC_W4_OPTION)

View File

@ -377,6 +377,7 @@ macro(ocv_clear_vars)
endmacro()
set(OCV_COMPILER_FAIL_REGEX
"argument '.*' is not valid" # GCC 9+
"command line option .* is valid for .* but not for C\\+\\+" # GNU
"command line option .* is valid for .* but not for C" # GNU
"unrecognized .*option" # GNU

View File

@ -0,0 +1,345 @@
Using OpenCV.js In Node.js {#tutorial_js_nodejs}
==========================
Goals
-----
In this tutorial, you will learn:
- Use OpenCV.js in a [Node.js](https://nodejs.org) application.
- Load images with [jimp](https://www.npmjs.com/package/jimp) in order to use them with OpenCV.js.
- Using [jsdom](https://www.npmjs.com/package/jsdom) and [node-canvas](https://www.npmjs.com/package/canvas) to support `cv.imread()`, `cv.imshow()`
- The basics of [emscripten](https://emscripten.org/) APIs, like [Module](https://emscripten.org/docs/api_reference/module.html) and [File System](https://emscripten.org/docs/api_reference/Filesystem-API.html) on which OpenCV.js is based.
- Learn Node.js basics. Although this tutorial assumes the user knows JavaScript, experience with Node.js is not required.
@note Besides giving instructions to run OpenCV.js in Node.js, another objective of this tutorial is to introduce users to the basics of [emscripten](https://emscripten.org/) APIs, like [Module](https://emscripten.org/docs/api_reference/module.html) and [File System](https://emscripten.org/docs/api_reference/Filesystem-API.html) and also Node.js.
Minimal example
-----
Create a file `example1.js` with the following content:
@code{.js}
// Define a global variable 'Module' with a method 'onRuntimeInitialized':
Module = {
onRuntimeInitialized() {
// this is our application:
console.log(cv.getBuildInformation())
}
}
// Load 'opencv.js' assigning the value to the global variable 'cv'
cv = require('./opencv.js')
@endcode
### Execute it ###
- Save the file as `example1.js`.
- Make sure the file `opencv.js` is in the same folder.
- Make sure [Node.js](https://nodejs.org) is installed on your system.
The following command should print OpenCV build information:
@code{.bash}
node example1.js
@endcode
### What just happened? ###
* **In the first statement**, by defining a global variable named 'Module', emscripten will call `Module.onRuntimeInitialized()` when the library is ready to use. Our program is in that method and uses the global variable `cv` just like in the browser.
* The statement **"cv = require('./opencv.js')"** requires the file `opencv.js` and assigns the return value to the global variable `cv`.
`require()`, which is a Node.js API, is used to load modules and files.
In this case we load the file `opencv.js` from the current folder, and, as said previously, emscripten will call `Module.onRuntimeInitialized()` when it's ready.
* See [emscripten Module API](https://emscripten.org/docs/api_reference/module.html) for more details.
Working with images
-----
OpenCV.js doesn't support image formats so we can't load png or jpeg images directly. In the browser it uses the HTML DOM (like HTMLCanvasElement and HTMLImageElement) to encode and decode images. In Node.js we will need to use a library for this.
In this example we use [jimp](https://www.npmjs.com/package/jimp), which supports common image formats and is pretty easy to use.
### Example setup ###
Execute the following commands to create a new node.js package and install [jimp](https://www.npmjs.com/package/jimp) dependency:
@code{.bash}
mkdir project1
cd project1
npm init -y
npm install jimp
@endcode
### The example ###
@code{.js}
const Jimp = require('jimp');
async function onRuntimeInitialized(){
// load local image file with jimp. It supports jpg, png, bmp, tiff and gif:
var jimpSrc = await Jimp.read('./lena.jpg');
// `jimpImage.bitmap` property has the decoded ImageData that we can use to create a cv:Mat
var src = cv.matFromImageData(jimpSrc.bitmap);
// following lines is copy&paste of opencv.js dilate tutorial:
let dst = new cv.Mat();
let M = cv.Mat.ones(5, 5, cv.CV_8U);
let anchor = new cv.Point(-1, -1);
cv.dilate(src, dst, M, anchor, 1, cv.BORDER_CONSTANT, cv.morphologyDefaultBorderValue());
// Now that we are finished, we want to write `dst` to file `output.png`. For this we create a `Jimp`
// image which accepts the image data as a [`Buffer`](https://nodejs.org/docs/latest-v10.x/api/buffer.html).
// `write('output.png')` will write it to disk and Jimp infers the output format from given file name:
new Jimp({
width: dst.cols,
height: dst.rows,
data: Buffer.from(dst.data)
})
.write('output.png');
src.delete();
dst.delete();
}
// Finally, load opencv.js as before. The function `onRuntimeInitialized` contains our program.
Module = {
onRuntimeInitialized
};
cv = require('./opencv.js');
@endcode
### Execute it ###
- Save the file as `exampleNodeJimp.js`.
- Make sure a sample image `lena.jpg` exists in the current directory.
The following command should generate the file `output.png`:
@code{.bash}
node exampleNodeJimp.js
@endcode
Emulating HTML DOM and canvas
-----
As you might have already seen, the rest of the examples use functions like `cv.imread()`, `cv.imshow()` to read and write images. Unfortunately, as mentioned, they won't work on Node.js since there is no HTML DOM.
In this section, you will learn how to use [jsdom](https://www.npmjs.com/package/jsdom) and [node-canvas](https://www.npmjs.com/package/canvas) to emulate the HTML DOM on Node.js so those functions work.
### Example setup ###
As before, we create a Node.js project and install the dependencies we need:
@code{.bash}
mkdir project2
cd project2
npm init -y
npm install canvas jsdom
@endcode
### The example ###
@code{.js}
const { Canvas, createCanvas, Image, ImageData, loadImage } = require('canvas');
const { JSDOM } = require('jsdom');
const { writeFileSync } = require('fs');
// This is our program. This time we use JavaScript async / await and promises to handle asynchronicity.
(async () => {
// before loading opencv.js we emulate a minimal HTML DOM. See the function declaration below.
installDOM();
await loadOpenCV();
// using node-canvas, we load an image file into an object compatible with HTML DOM Image and therefore with cv.imread()
const image = await loadImage('./lena.jpg');
const src = cv.imread(image);
const dst = new cv.Mat();
const M = cv.Mat.ones(5, 5, cv.CV_8U);
const anchor = new cv.Point(-1, -1);
cv.dilate(src, dst, M, anchor, 1, cv.BORDER_CONSTANT, cv.morphologyDefaultBorderValue());
// we create an object compatible HTMLCanvasElement
const canvas = createCanvas(300, 300);
cv.imshow(canvas, dst);
writeFileSync('output.jpg', canvas.toBuffer('image/jpeg'));
src.delete();
dst.delete();
})();
// Load opencv.js just like before but using Promise instead of callbacks:
function loadOpenCV() {
return new Promise(resolve => {
global.Module = {
onRuntimeInitialized: resolve
};
global.cv = require('./opencv.js');
});
}
// Using jsdom and node-canvas we define some global variables to emulate HTML DOM.
// Although a complete emulation can be achieved, here we only define those globals used
// by cv.imread() and cv.imshow().
function installDOM() {
const dom = new JSDOM();
global.document = dom.window.document;
// The rest enables DOM image and canvas and is provided by node-canvas
global.Image = Image;
global.HTMLCanvasElement = Canvas;
global.ImageData = ImageData;
global.HTMLImageElement = Image;
}
@endcode
### Execute it ###
- Save the file as `exampleNodeCanvas.js`.
- Make sure a sample image `lena.jpg` exists in the current directory.
The following command should generate the file `output.jpg`:
@code{.bash}
node exampleNodeCanvas.js
@endcode
Dealing with files
-----
In this tutorial you will learn how to configure emscripten so it uses the local filesystem for file operations instead of using memory. Also it tries to describe how [files are supported by emscripten applications](https://emscripten.org/docs/api_reference/Filesystem-API.html)
Accessing the emscripten filesystem is often needed in OpenCV applications for example to load machine learning models such as the ones used in @ref tutorial_dnn_googlenet and @ref tutorial_dnn_javascript.
### Example setup ###
Before the example, it is worth considering first how files are handled in emscripten applications such as OpenCV.js. Remember that the OpenCV library is written in C++ and the file opencv.js is just that C++ code being translated to JavaScript or WebAssembly by the emscripten C++ compiler.
These C++ sources use standard APIs to access the filesystem and the implementation often ends up in system calls that read a file in the hard drive. Since JavaScript applications in the browser don't have access to the local filesystem, [emscripten emulates a standard filesystem](https://emscripten.org/docs/api_reference/Filesystem-API.html) so compiled C++ code works out of the box.
In the browser, this filesystem is emulated in memory while in Node.js there's also the possibility of using the local filesystem directly. This is often preferable since there's no need to copy the file's content into memory. This section explains how to do just that, that is, configuring emscripten so files are accessed directly from our local filesystem and relative paths match files relative to the current local directory as expected.
### The example ###
The following is an adaptation of @ref tutorial_js_face_detection.
@code{.js}
const { Canvas, createCanvas, Image, ImageData, loadImage } = require('canvas');
const { JSDOM } = require('jsdom');
const { writeFileSync, readFileSync } = require('fs');
(async () => {
await loadOpenCV();
const image = await loadImage('lena.jpg');
const src = cv.imread(image);
let gray = new cv.Mat();
cv.cvtColor(src, gray, cv.COLOR_RGBA2GRAY, 0);
let faces = new cv.RectVector();
let eyes = new cv.RectVector();
let faceCascade = new cv.CascadeClassifier();
let eyeCascade = new cv.CascadeClassifier();
// Load pre-trained classifier files. Notice how we reference local files using relative paths just
// like we normally would do
faceCascade.load('./haarcascade_frontalface_default.xml');
eyeCascade.load('./haarcascade_eye.xml');
let mSize = new cv.Size(0, 0);
faceCascade.detectMultiScale(gray, faces, 1.1, 3, 0, mSize, mSize);
for (let i = 0; i < faces.size(); ++i) {
let roiGray = gray.roi(faces.get(i));
let roiSrc = src.roi(faces.get(i));
let point1 = new cv.Point(faces.get(i).x, faces.get(i).y);
let point2 = new cv.Point(faces.get(i).x + faces.get(i).width, faces.get(i).y + faces.get(i).height);
cv.rectangle(src, point1, point2, [255, 0, 0, 255]);
eyeCascade.detectMultiScale(roiGray, eyes);
for (let j = 0; j < eyes.size(); ++j) {
let point1 = new cv.Point(eyes.get(j).x, eyes.get(j).y);
let point2 = new cv.Point(eyes.get(j).x + eyes.get(j).width, eyes.get(j).y + eyes.get(j).height);
cv.rectangle(roiSrc, point1, point2, [0, 0, 255, 255]);
}
roiGray.delete();
roiSrc.delete();
}
const canvas = createCanvas(image.width, image.height);
cv.imshow(canvas, src);
writeFileSync('output3.jpg', canvas.toBuffer('image/jpeg'));
src.delete(); gray.delete(); faceCascade.delete(); eyeCascade.delete(); faces.delete(); eyes.delete()
})();
/**
* Loads opencv.js.
*
* Installs HTML Canvas emulation to support `cv.imread()` and `cv.imshow`
*
* Mounts given local folder `localRootDir` in emscripten filesystem folder `rootDir`. By default it will mount the local current directory in emscripten `/work` directory. This means that `/work/foo.txt` will be resolved to the local file `./foo.txt`
* @param {string} rootDir The directory in emscripten filesystem in which the local filesystem will be mount.
* @param {string} localRootDir The local directory to mount in emscripten filesystem.
* @returns {Promise} resolved when the library is ready to use.
*/
function loadOpenCV(rootDir = '/work', localRootDir = process.cwd()) {
  // BUGFIX: existsSync() and mkdirSync() are called in preRun() below, but the
  // example's top-level require of 'fs' only pulls in writeFileSync/readFileSync.
  // Require them locally so this function is self-contained and doesn't throw
  // ReferenceError at runtime.
  const { existsSync, mkdirSync } = require('fs');
  if(global.Module && global.Module.onRuntimeInitialized && global.cv && global.cv.imread) {
    // opencv.js was already loaded and the DOM emulation installed: nothing to do.
    return Promise.resolve()
  }
  return new Promise(resolve => {
    installDOM()
    global.Module = {
      onRuntimeInitialized() {
        // We change emscripten current work directory to 'rootDir' so relative paths are resolved
        // relative to the current local folder, as expected
        cv.FS.chdir(rootDir)
        resolve()
      },
      preRun() {
        // preRun() is another callback like onRuntimeInitialized() but is called just before the
        // library code runs. Here we mount a local folder in emscripten filesystem and we want to
        // do this before the library is executed so the filesystem is accessible from the start
        const FS = global.Module.FS
        // create rootDir if it doesn't exist
        if(!FS.analyzePath(rootDir).exists) {
          FS.mkdir(rootDir);
        }
        // create localRootDir if it doesn't exist
        if(!existsSync(localRootDir)) {
          mkdirSync(localRootDir, { recursive: true});
        }
        // FS.mount() is similar to Linux/POSIX mount operation. It basically mounts an external
        // filesystem with given format, in given current filesystem directory.
        FS.mount(FS.filesystems.NODEFS, { root: localRootDir}, rootDir);
      }
    };
    global.cv = require('./opencv.js')
  });
}
function installDOM(){
const dom = new JSDOM();
global.document = dom.window.document;
global.Image = Image;
global.HTMLCanvasElement = Canvas;
global.ImageData = ImageData;
global.HTMLImageElement = Image;
}
@endcode
### Execute it ###
- Save the file as `exampleNodeCanvasData.js`.
- Make sure the files `haarcascade_frontalface_default.xml` and `haarcascade_eye.xml` are present in project's directory. They can be obtained from [OpenCV sources](https://github.com/opencv/opencv/tree/master/data/haarcascades).
- Make sure a sample image file `lena.jpg` exists in project's directory. It should display people's faces for this example to make sense. The following image is known to work:
![image](lena.jpg)
The following command should generate the file `output3.jpg`:
@code{.bash}
node exampleNodeCanvasData.js
@endcode

View File

@ -91,21 +91,60 @@ Building OpenCV.js from Source
python ./platforms/js/build_js.py build_js --build_test
@endcode
To run tests, launch a local web server in \<build_dir\>/bin folder. For example, node http-server which serves on `localhost:8080`.
Running OpenCV.js Tests
---------------------------------------
Navigate the web browser to `http://localhost:8080/tests.html`, which runs the unit tests automatically.
Remember to launch the build command passing `--build_test` as mentioned previously. This will generate test source code ready to run together with `opencv.js` file in `build_js/bin`
You can also run tests using Node.js.
### Manually in your browser
For example:
@code{.sh}
cd bin
npm install
node tests.js
@endcode
To run tests, launch a local web server in `\<build_dir\>/bin` folder. For example, node http-server which serves on `localhost:8080`.
Navigate the web browser to `http://localhost:8080/tests.html`, which runs the unit tests automatically. Command example:
@code{.sh}
npx http-server build_js/bin
firefox http://localhost:8080/tests.html
@endcode
@note
This snippet and the following require [Node.js](https://nodejs.org) to be installed.
### Headless with Puppeteer
Alternatively tests can run with [GoogleChrome/puppeteer](https://github.com/GoogleChrome/puppeteer#readme), which is a version of Google Chrome that runs in the terminal (useful for continuous integration, like Travis CI, etc.)
@code{.sh}
cd build_js/bin
npm install
npm install --no-save puppeteer # automatically downloads Chromium package
node run_puppeteer.js
@endcode
@note
Checkout `node run_puppeteer --help` for more options to debug and reporting.
@note
The command `npm install` only needs to be executed once, since it installs the tools' dependencies; after that they are ready to use.
@note
Use `PUPPETEER_SKIP_CHROMIUM_DOWNLOAD=1 npm install --no-save puppeteer` to skip automatic downloading of Chromium.
You may specify own Chromium/Chrome binary through `PUPPETEER_EXECUTABLE_PATH=$(which google-chrome)` environment variable.
**BEWARE**: Puppeteer is only guaranteed to work with the bundled Chromium, use at your own risk.
### Using Node.js.
For example:
@code{.sh}
cd build_js/bin
npm install
node tests.js
@endcode
@note If all tests fail, then consider using Node.js version 8.x (`lts/carbon` from `nvm`).
@note
It requires `node` installed in your development environment.
-# [optional] To build `opencv.js` with threads optimization, append `--threads` option.

View File

@ -12,3 +12,7 @@ Introduction to OpenCV.js {#tutorial_js_table_of_contents_setup}
- @subpage tutorial_js_setup
Build OpenCV.js from source
- @subpage tutorial_js_nodejs
Using OpenCV.js In Node.js

View File

@ -700,15 +700,6 @@ Ellipse::Ellipse(const cv::Point2f &_center, const cv::Size2f &_axes, float _ang
{
}
Ellipse::Ellipse(const Ellipse &other)
{
center = other.center;
axes= other.axes;
angle= other.angle;
cosf = other.cosf;
sinf = other.sinf;
}
const cv::Size2f &Ellipse::getAxes()const
{
return axes;

View File

@ -111,8 +111,6 @@ class Ellipse
public:
Ellipse();
Ellipse(const cv::Point2f &center, const cv::Size2f &axes, float angle);
Ellipse(const Ellipse &other);
void draw(cv::InputOutputArray img,const cv::Scalar &color = cv::Scalar::all(120))const;
bool contains(const cv::Point2f &pt)const;

View File

@ -41,7 +41,7 @@
//M*/
#include "precomp.hpp"
#include "upnp.h"
//#include "upnp.h"
#include "dls.h"
#include "epnp.h"
#include "p3p.h"

View File

@ -49,6 +49,8 @@
#include "upnp.h"
#include <limits>
#if 0 // fix buffer overflow first (FIXIT mark in .cpp file)
using namespace std;
using namespace cv;
@ -313,7 +315,9 @@ void upnp::compute_ccs(const double * betas, const double * ut)
const double * v = ut + 12 * (9 + i);
for(int j = 0; j < 4; ++j)
for(int k = 0; k < 3; ++k)
ccs[j][k] += betas[i] * v[3 * j + k];
ccs[j][k] += betas[i] * v[3 * j + k]; // FIXIT: array subscript 144 is outside array bounds of 'double [144]' [-Warray-bounds]
// line 109: double ut[12 * 12]
// line 359: double u[12*12]
}
for (int i = 0; i < 4; ++i) ccs[i][2] *= fu;
@ -821,3 +825,5 @@ void upnp::qr_solve(Mat * A, Mat * b, Mat * X)
pX[i] = (pb[i] - sum) / A2[i];
}
}
#endif

View File

@ -52,6 +52,8 @@
#include "opencv2/core/core_c.h"
#include <iostream>
#if 0 // fix buffer overflow first (FIXIT mark in .cpp file)
class upnp
{
public:
@ -133,4 +135,6 @@ private:
double * A1, * A2;
};
#endif
#endif // OPENCV_CALIB3D_UPNP_H_

View File

@ -395,7 +395,7 @@ bool CV_ChessboardDetectorTest::checkByGenerator()
Mat bg(Size(800, 600), CV_8UC3, Scalar::all(255));
randu(bg, Scalar::all(0), Scalar::all(255));
GaussianBlur(bg, bg, Size(7,7), 3.0);
GaussianBlur(bg, bg, Size(5, 5), 0.0);
Mat_<float> camMat(3, 3);
camMat << 300.f, 0.f, bg.cols/2.f, 0, 300.f, bg.rows/2.f, 0.f, 0.f, 1.f;

View File

@ -47,6 +47,23 @@ static std::string bytesToStringRepr(size_t value)
s = s.substr(0, s.size() - 1);
return s;
}
// Returns a short human-readable label for an OpenCL device:
//   "CPU"  - CPU device,
//   "iGPU" - GPU with host-unified memory (integrated),
//   "dGPU" - GPU without host-unified memory (discrete),
//   "unknown" - any other device type.
static String getDeviceTypeString(const cv::ocl::Device& device)
{
    if (device.type() == cv::ocl::Device::TYPE_CPU) {
        return "CPU";
    }
    if (device.type() == cv::ocl::Device::TYPE_GPU) {
        if (device.hostUnifiedMemory()) {
            return "iGPU";
        } else {
            return "dGPU";
        }
    }
    // Fixed typo: was "unkown"; the inline code this helper replaces
    // printed "unknown" for unrecognized device types.
    return "unknown";
}
} // namespace
static void dumpOpenCLInformation()
@ -80,12 +97,11 @@ static void dumpOpenCLInformation()
for (int j = 0; j < platform->deviceNumber(); j++)
{
platform->getDevice(current_device, j);
const char* deviceTypeStr = (current_device.type() == Device::TYPE_CPU) ? "CPU" :
(current_device.type() == Device::TYPE_GPU ? current_device.hostUnifiedMemory() ? "iGPU" : "dGPU" : "unknown");
String deviceTypeStr = getDeviceTypeString(current_device);
DUMP_MESSAGE_STDOUT( " " << deviceTypeStr << ": " << current_device.name() << " (" << current_device.version() << ")");
DUMP_CONFIG_PROPERTY( cv::format("cv_ocl_platform_%d_device_%d", (int)i, j ),
cv::format("(Platform=%s)(Type=%s)(Name=%s)(Version=%s)",
platform->name().c_str(), deviceTypeStr, current_device.name().c_str(), current_device.version().c_str()) );
platform->name().c_str(), deviceTypeStr.c_str(), current_device.name().c_str(), current_device.version().c_str()) );
}
}
const Device& device = Device::getDefault();
@ -94,13 +110,7 @@ static void dumpOpenCLInformation()
DUMP_MESSAGE_STDOUT("Current OpenCL device: ");
#if 0
DUMP_MESSAGE_STDOUT(" Platform = " << device.getPlatform().name());
DUMP_CONFIG_PROPERTY("cv_ocl_current_platformName", device.getPlatform().name());
#endif
const char* deviceTypeStr = (device.type() == Device::TYPE_CPU) ? "CPU" :
(device.type() == Device::TYPE_GPU ? device.hostUnifiedMemory() ? "iGPU" : "dGPU" : "unknown");
String deviceTypeStr = getDeviceTypeString(device);
DUMP_MESSAGE_STDOUT(" Type = " << deviceTypeStr);
DUMP_CONFIG_PROPERTY("cv_ocl_current_deviceType", deviceTypeStr);

View File

@ -510,6 +510,8 @@ public:
*/
FileNode(const FileNode& node);
FileNode& operator=(const FileNode& node);
/** @brief Returns element of a mapping node or a sequence node.
@param nodename Name of an element in the mapping node.
@returns Returns the element with the given identifier.
@ -640,6 +642,8 @@ public:
*/
FileNodeIterator(const FileNodeIterator& it);
FileNodeIterator& operator=(const FileNodeIterator& it);
//! returns the currently observed element
FileNode operator *() const;

View File

@ -2061,6 +2061,14 @@ FileNode::FileNode(const FileNode& node)
ofs = node.ofs;
}
// Copy assignment: member-wise copy of the storage handle and the node's
// position (block index + offset). Plain field assignments, so it is safe
// under self-assignment; no resources are owned or released here.
FileNode& FileNode::operator=(const FileNode& node)
{
fs = node.fs;
blockIdx = node.blockIdx;
ofs = node.ofs;
return *this;
}
FileNode FileNode::operator[](const std::string& nodename) const
{
if(!fs)
@ -2403,6 +2411,17 @@ FileNodeIterator::FileNodeIterator(const FileNodeIterator& it)
idx = it.idx;
}
// Copy assignment: member-wise copy of the iterator state (storage handle,
// current block/offset, block size, element count and current index).
// Plain field assignments, so it is safe under self-assignment.
FileNodeIterator& FileNodeIterator::operator=(const FileNodeIterator& it)
{
fs = it.fs;
blockIdx = it.blockIdx;
ofs = it.ofs;
blockSize = it.blockSize;
nodeNElems = it.nodeNElems;
idx = it.idx;
return *this;
}
FileNode FileNodeIterator::operator *() const
{
return FileNode(idx < nodeNElems ? fs : 0, blockIdx, ofs);

View File

@ -784,6 +784,8 @@ void ONNXImporter::populateNet(Net dstNet)
CV_Assert_N(node_proto.input_size() == 1, layerParams.has("axes"));
CV_Assert(graph_proto.node_size() > li + 1 && graph_proto.node(li + 1).op_type() == "Div");
++li;
node_proto = graph_proto.node(li);
layerParams.name = node_proto.output(0);
layerParams.type = "Normalize";
DictValue axes_dict = layerParams.get("axes");

View File

@ -85,11 +85,11 @@ public class BruteForceDescriptorMatcherTest extends OpenCVTestCase {
matSize = 100;
truth = new DMatch[] {
new DMatch(0, 0, 0, 0.6211397f),
new DMatch(0, 0, 0, 0.6159003f),
new DMatch(1, 1, 0, 0.9177120f),
new DMatch(2, 1, 0, 0.3112163f),
new DMatch(3, 1, 0, 0.2925074f),
new DMatch(4, 1, 0, 0.9309178f)
new DMatch(4, 1, 0, 0.26520672f)
};
}

View File

@ -85,11 +85,11 @@ public class BruteForceL1DescriptorMatcherTest extends OpenCVTestCase {
matSize = 100;
truth = new DMatch[] {
new DMatch(0, 0, 0, 3.0975165f),
new DMatch(1, 1, 0, 3.5680308f),
new DMatch(2, 1, 0, 1.3722466f),
new DMatch(3, 1, 0, 1.3041023f),
new DMatch(4, 1, 0, 3.5970376f)
new DMatch(0, 0, 0, 3.0710702f),
new DMatch(1, 1, 0, 3.562016f),
new DMatch(2, 1, 0, 1.3682679f),
new DMatch(3, 1, 0, 1.3012862f),
new DMatch(4, 1, 0, 1.1852086f)
};
}

View File

@ -90,11 +90,11 @@ public class BruteForceSL2DescriptorMatcherTest extends OpenCVTestCase {
matSize = 100;
truth = new DMatch[] {
new DMatch(0, 0, 0, 0.3858146f),
new DMatch(0, 0, 0, 0.37933317f),
new DMatch(1, 1, 0, 0.8421953f),
new DMatch(2, 1, 0, 0.0968556f),
new DMatch(3, 1, 0, 0.0855606f),
new DMatch(4, 1, 0, 0.8666080f)
new DMatch(4, 1, 0, 0.07033461f)
};
}

View File

@ -160,11 +160,11 @@ public class FlannBasedDescriptorMatcherTest extends OpenCVTestCase {
matcher = DescriptorMatcher.create(DescriptorMatcher.FLANNBASED);
matSize = 100;
truth = new DMatch[] {
new DMatch(0, 0, 0, 0.6211397f),
new DMatch(0, 0, 0, 0.6159003f),
new DMatch(1, 1, 0, 0.9177120f),
new DMatch(2, 1, 0, 0.3112163f),
new DMatch(3, 1, 0, 0.2925075f),
new DMatch(4, 1, 0, 0.9309179f)
new DMatch(4, 1, 0, 0.26520672f)
};
}

View File

@ -52,7 +52,7 @@ public class ORBDescriptorExtractorTest extends OpenCVTestCase {
Mat truth = new Mat(1, 32, CvType.CV_8UC1) {
{
put(0, 0,
6, 74, 6, 129, 2, 130, 56, 0, 36, 132, 66, 165, 172, 6, 3, 72, 102, 61, 163, 214, 0, 144, 65, 232, 4, 32, 138, 129, 4, 21, 37, 88);
6, 74, 6, 129, 2, 130, 56, 0, 44, 132, 66, 165, 172, 6, 3, 72, 102, 61, 171, 214, 0, 144, 65, 232, 4, 32, 138, 131, 4, 21, 37, 217);
}
};
assertDescriptorsClose(truth, descriptors, 1);
@ -91,7 +91,7 @@ public class ORBDescriptorExtractorTest extends OpenCVTestCase {
Mat truth = new Mat(1, 32, CvType.CV_8UC1) {
{
put(0, 0,
6, 10, 22, 5, 2, 130, 56, 0, 44, 164, 66, 165, 140, 6, 1, 72, 38, 61, 163, 210, 0, 208, 1, 104, 4, 32, 10, 131, 0, 37, 37, 67);
6, 10, 22, 5, 2, 130, 56, 0, 44, 164, 66, 165, 140, 6, 1, 72, 38, 61, 163, 210, 0, 208, 1, 104, 4, 32, 74, 131, 0, 37, 37, 67);
}
};
assertDescriptorsClose(truth, descriptors, 1);

View File

@ -634,10 +634,13 @@ CV_IMPL int cvStartWindowThread(){
cvInitSystem(0,NULL);
if (!thread_started)
{
if (!g_thread_supported ()) {
#if !GLIB_CHECK_VERSION(2, 32, 0) // https://github.com/GNOME/glib/blame/b4d58a7105bb9d75907233968bb534b38f9a6e43/glib/deprecated/gthread.h#L274
if (!g_thread_supported ())
{
/* the GThread system wasn't inited, so init it */
g_thread_init(NULL);
}
#endif
(void)getWindowMutex(); // force mutex initialization

View File

@ -1027,11 +1027,6 @@ static void Bayer2RGB_VNG_8u( const Mat& srcmat, Mat& dstmat, int code )
bayer += bstep*2;
#if CV_SSE2
bool haveSSE = cv::checkHardwareSupport(CV_CPU_SSE2);
#define _mm_absdiff_epu16(a,b) _mm_adds_epu16(_mm_subs_epu16(a, b), _mm_subs_epu16(b, a))
#endif
for( int y = 2; y < size.height - 4; y++ )
{
uchar* dstrow = dst + dststep*y + 6;
@ -1047,52 +1042,41 @@ static void Bayer2RGB_VNG_8u( const Mat& srcmat, Mat& dstmat, int code )
i = 1;
#if CV_SSE2
if( haveSSE )
{
__m128i z = _mm_setzero_si128();
#if CV_SIMD128
for( ; i <= N-9; i += 8, srow += 8, brow += 8 )
{
__m128i s1, s2, s3, s4, s6, s7, s8, s9;
v_uint16x8 s1, s2, s3, s4, s6, s7, s8, s9;
s1 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i*)(srow-1-bstep)),z);
s2 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i*)(srow-bstep)),z);
s3 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i*)(srow+1-bstep)),z);
s1 = v_load_expand(srow-1-bstep);
s2 = v_load_expand(srow-bstep);
s3 = v_load_expand(srow+1-bstep);
s4 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i*)(srow-1)),z);
s6 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i*)(srow+1)),z);
s4 = v_load_expand(srow-1);
s6 = v_load_expand(srow+1);
s7 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i*)(srow-1+bstep)),z);
s8 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i*)(srow+bstep)),z);
s9 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i*)(srow+1+bstep)),z);
s7 = v_load_expand(srow-1+bstep);
s8 = v_load_expand(srow+bstep);
s9 = v_load_expand(srow+1+bstep);
__m128i b0, b1, b2, b3, b4, b5, b6;
v_uint16x8 b0, b1, b2, b3, b4, b5, b6;
b0 = _mm_adds_epu16(_mm_slli_epi16(_mm_absdiff_epu16(s2,s8),1),
_mm_adds_epu16(_mm_absdiff_epu16(s1, s7),
_mm_absdiff_epu16(s3, s9)));
b1 = _mm_adds_epu16(_mm_slli_epi16(_mm_absdiff_epu16(s4,s6),1),
_mm_adds_epu16(_mm_absdiff_epu16(s1, s3),
_mm_absdiff_epu16(s7, s9)));
b2 = _mm_slli_epi16(_mm_absdiff_epu16(s3,s7),1);
b3 = _mm_slli_epi16(_mm_absdiff_epu16(s1,s9),1);
b0 = (v_absdiff(s2, s8)<<1) + v_absdiff(s1, s7) + v_absdiff(s3, s9);
b1 = (v_absdiff(s4, s6)<<1) + v_absdiff(s1, s3) + v_absdiff(s7, s9);
b2 = v_absdiff(s3, s7)<<1;
b3 = v_absdiff(s1, s9)<<1;
_mm_storeu_si128((__m128i*)brow, b0);
_mm_storeu_si128((__m128i*)(brow + N), b1);
_mm_storeu_si128((__m128i*)(brow + N2), b2);
_mm_storeu_si128((__m128i*)(brow + N3), b3);
v_store(brow, b0);
v_store(brow + N, b1);
v_store(brow + N2, b2);
v_store(brow + N3, b3);
b4 = _mm_adds_epu16(b2,_mm_adds_epu16(_mm_absdiff_epu16(s2, s4),
_mm_absdiff_epu16(s6, s8)));
b5 = _mm_adds_epu16(b3,_mm_adds_epu16(_mm_absdiff_epu16(s2, s6),
_mm_absdiff_epu16(s4, s8)));
b6 = _mm_adds_epu16(_mm_adds_epu16(s2, s4), _mm_adds_epu16(s6, s8));
b6 = _mm_srli_epi16(b6, 1);
b4 = b2 + v_absdiff(s2, s4) + v_absdiff(s6, s8);
b5 = b3 + v_absdiff(s2, s6) + v_absdiff(s4, s8);
b6 = (s2 + s4 + s6 + s8)>>1;
_mm_storeu_si128((__m128i*)(brow + N4), b4);
_mm_storeu_si128((__m128i*)(brow + N5), b5);
_mm_storeu_si128((__m128i*)(brow + N6), b6);
}
v_store(brow + N4, b4);
v_store(brow + N5, b5);
v_store(brow + N6, b6);
}
#endif
@ -1122,8 +1106,8 @@ static void Bayer2RGB_VNG_8u( const Mat& srcmat, Mat& dstmat, int code )
bool greenCell = greenCell0;
i = 2;
#if CV_SSE2
int limit = !haveSSE ? N-2 : greenCell ? std::min(3, N-2) : 2;
#if CV_SIMD128
int limit = greenCell ? std::min(3, N-2) : 2;
#else
int limit = N - 2;
#endif
@ -1290,237 +1274,229 @@ static void Bayer2RGB_VNG_8u( const Mat& srcmat, Mat& dstmat, int code )
greenCell = !greenCell;
}
#if CV_SSE2
if( !haveSSE )
break;
#if CV_SIMD128
v_uint32x4 emask = v_setall_u32(0x0000ffff), omask = v_setall_u32(0xffff0000);
v_uint16x8 one = v_setall_u16(1), z = v_setzero_u16();
v_float32x4 _0_5 = v_setall_f32(0.5f);
__m128i emask = _mm_set1_epi32(0x0000ffff),
omask = _mm_set1_epi32(0xffff0000),
z = _mm_setzero_si128(),
one = _mm_set1_epi16(1);
__m128 _0_5 = _mm_set1_ps(0.5f);
#define _mm_merge_epi16(a, b) _mm_or_si128(_mm_and_si128(a, emask), _mm_and_si128(b, omask)) //(aA_aA_aA_aA) * (bB_bB_bB_bB) => (bA_bA_bA_bA)
#define _mm_cvtloepi16_ps(a) _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(a,a), 16)) //(1,2,3,4,5,6,7,8) => (1f,2f,3f,4f)
#define _mm_cvthiepi16_ps(a) _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(a,a), 16)) //(1,2,3,4,5,6,7,8) => (5f,6f,7f,8f)
#define _mm_loadl_u8_s16(ptr, offset) _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i*)((ptr) + (offset))), z) //load 8 uchars to 8 shorts
#define v_merge_u16(a, b) (((a) & v_reinterpret_as_u16(emask)) | ((b) & v_reinterpret_as_u16(omask))) //(aA_aA_aA_aA) * (bB_bB_bB_bB) => (bA_bA_bA_bA)
#define v_cvt_s16f32_lo(a) v_cvt_f32(v_expand_low(v_reinterpret_as_s16(a))) //(1,2,3,4,5,6,7,8) => (1f,2f,3f,4f)
#define v_cvt_s16f32_hi(a) v_cvt_f32(v_expand_high(v_reinterpret_as_s16(a))) //(1,2,3,4,5,6,7,8) => (5f,6f,7f,8f)
// process 8 pixels at once
for( ; i <= N - 10; i += 8, srow += 8, brow0 += 8, brow1 += 8, brow2 += 8 )
{
//int gradN = brow0[0] + brow1[0];
__m128i gradN = _mm_adds_epi16(_mm_loadu_si128((__m128i*)brow0), _mm_loadu_si128((__m128i*)brow1));
v_uint16x8 gradN = v_load(brow0) + v_load(brow1);
//int gradS = brow1[0] + brow2[0];
__m128i gradS = _mm_adds_epi16(_mm_loadu_si128((__m128i*)brow1), _mm_loadu_si128((__m128i*)brow2));
v_uint16x8 gradS = v_load(brow1) + v_load(brow2);
//int gradW = brow1[N-1] + brow1[N];
__m128i gradW = _mm_adds_epi16(_mm_loadu_si128((__m128i*)(brow1+N-1)), _mm_loadu_si128((__m128i*)(brow1+N)));
v_uint16x8 gradW = v_load(brow1+N-1) + v_load(brow1+N);
//int gradE = brow1[N+1] + brow1[N];
__m128i gradE = _mm_adds_epi16(_mm_loadu_si128((__m128i*)(brow1+N+1)), _mm_loadu_si128((__m128i*)(brow1+N)));
v_uint16x8 gradE = v_load(brow1+N+1) + v_load(brow1+N);
//int minGrad = std::min(std::min(std::min(gradN, gradS), gradW), gradE);
//int maxGrad = std::max(std::max(std::max(gradN, gradS), gradW), gradE);
__m128i minGrad = _mm_min_epi16(_mm_min_epi16(gradN, gradS), _mm_min_epi16(gradW, gradE));
__m128i maxGrad = _mm_max_epi16(_mm_max_epi16(gradN, gradS), _mm_max_epi16(gradW, gradE));
v_uint16x8 minGrad = v_min(v_min(gradN, gradS), v_min(gradW, gradE));
v_uint16x8 maxGrad = v_max(v_max(gradN, gradS), v_max(gradW, gradE));
__m128i grad0, grad1;
v_uint16x8 grad0, grad1;
//int gradNE = brow0[N4+1] + brow1[N4];
//int gradNE = brow0[N2] + brow0[N2+1] + brow1[N2] + brow1[N2+1];
grad0 = _mm_adds_epi16(_mm_loadu_si128((__m128i*)(brow0+N4+1)), _mm_loadu_si128((__m128i*)(brow1+N4)));
grad1 = _mm_adds_epi16( _mm_adds_epi16(_mm_loadu_si128((__m128i*)(brow0+N2)), _mm_loadu_si128((__m128i*)(brow0+N2+1))),
_mm_adds_epi16(_mm_loadu_si128((__m128i*)(brow1+N2)), _mm_loadu_si128((__m128i*)(brow1+N2+1))));
__m128i gradNE = _mm_merge_epi16(grad0, grad1);
grad0 = v_load(brow0+N4+1) + v_load(brow1+N4);
grad1 = v_load(brow0+N2) + v_load(brow0+N2+1) + v_load(brow1+N2) + v_load(brow1+N2+1);
v_uint16x8 gradNE = v_merge_u16(grad0, grad1);
//int gradSW = brow1[N4] + brow2[N4-1];
//int gradSW = brow1[N2] + brow1[N2-1] + brow2[N2] + brow2[N2-1];
grad0 = _mm_adds_epi16(_mm_loadu_si128((__m128i*)(brow2+N4-1)), _mm_loadu_si128((__m128i*)(brow1+N4)));
grad1 = _mm_adds_epi16(_mm_adds_epi16(_mm_loadu_si128((__m128i*)(brow2+N2)), _mm_loadu_si128((__m128i*)(brow2+N2-1))),
_mm_adds_epi16(_mm_loadu_si128((__m128i*)(brow1+N2)), _mm_loadu_si128((__m128i*)(brow1+N2-1))));
__m128i gradSW = _mm_merge_epi16(grad0, grad1);
grad0 = v_load(brow2+N4-1) + v_load(brow1+N4);
grad1 = v_load(brow2+N2) + v_load(brow2+N2-1) + v_load(brow1+N2) + v_load(brow1+N2-1);
v_uint16x8 gradSW = v_merge_u16(grad0, grad1);
minGrad = _mm_min_epi16(_mm_min_epi16(minGrad, gradNE), gradSW);
maxGrad = _mm_max_epi16(_mm_max_epi16(maxGrad, gradNE), gradSW);
minGrad = v_min(v_min(minGrad, gradNE), gradSW);
maxGrad = v_max(v_max(maxGrad, gradNE), gradSW);
//int gradNW = brow0[N5-1] + brow1[N5];
//int gradNW = brow0[N3] + brow0[N3-1] + brow1[N3] + brow1[N3-1];
grad0 = _mm_adds_epi16(_mm_loadu_si128((__m128i*)(brow0+N5-1)), _mm_loadu_si128((__m128i*)(brow1+N5)));
grad1 = _mm_adds_epi16(_mm_adds_epi16(_mm_loadu_si128((__m128i*)(brow0+N3)), _mm_loadu_si128((__m128i*)(brow0+N3-1))),
_mm_adds_epi16(_mm_loadu_si128((__m128i*)(brow1+N3)), _mm_loadu_si128((__m128i*)(brow1+N3-1))));
__m128i gradNW = _mm_merge_epi16(grad0, grad1);
grad0 = v_load(brow0+N5-1) + v_load(brow1+N5);
grad1 = v_load(brow0+N3) + v_load(brow0+N3-1) + v_load(brow1+N3) + v_load(brow1+N3-1);
v_uint16x8 gradNW = v_merge_u16(grad0, grad1);
//int gradSE = brow1[N5] + brow2[N5+1];
//int gradSE = brow1[N3] + brow1[N3+1] + brow2[N3] + brow2[N3+1];
grad0 = _mm_adds_epi16(_mm_loadu_si128((__m128i*)(brow2+N5+1)), _mm_loadu_si128((__m128i*)(brow1+N5)));
grad1 = _mm_adds_epi16(_mm_adds_epi16(_mm_loadu_si128((__m128i*)(brow2+N3)), _mm_loadu_si128((__m128i*)(brow2+N3+1))),
_mm_adds_epi16(_mm_loadu_si128((__m128i*)(brow1+N3)), _mm_loadu_si128((__m128i*)(brow1+N3+1))));
__m128i gradSE = _mm_merge_epi16(grad0, grad1);
grad0 = v_load(brow2+N5+1) + v_load(brow1+N5);
grad1 = v_load(brow2+N3) + v_load(brow2+N3+1) + v_load(brow1+N3) + v_load(brow1+N3+1);
v_uint16x8 gradSE = v_merge_u16(grad0, grad1);
minGrad = _mm_min_epi16(_mm_min_epi16(minGrad, gradNW), gradSE);
maxGrad = _mm_max_epi16(_mm_max_epi16(maxGrad, gradNW), gradSE);
minGrad = v_min(v_min(minGrad, gradNW), gradSE);
maxGrad = v_max(v_max(maxGrad, gradNW), gradSE);
//int T = minGrad + maxGrad/2;
__m128i T = _mm_adds_epi16(_mm_max_epi16(_mm_srli_epi16(maxGrad, 1), one), minGrad);
v_uint16x8 T = v_max((maxGrad >> 1), one) + minGrad;
__m128i RGs = z, GRs = z, Bs = z, ng = z;
v_uint16x8 RGs = z, GRs = z, Bs = z, ng = z;
__m128i x0 = _mm_loadl_u8_s16(srow, +0 );
__m128i x1 = _mm_loadl_u8_s16(srow, -1 - bstep );
__m128i x2 = _mm_loadl_u8_s16(srow, -1 - bstep*2);
__m128i x3 = _mm_loadl_u8_s16(srow, - bstep );
__m128i x4 = _mm_loadl_u8_s16(srow, +1 - bstep*2);
__m128i x5 = _mm_loadl_u8_s16(srow, +1 - bstep );
__m128i x6 = _mm_loadl_u8_s16(srow, +2 - bstep );
__m128i x7 = _mm_loadl_u8_s16(srow, +1 );
__m128i x8 = _mm_loadl_u8_s16(srow, +2 + bstep );
__m128i x9 = _mm_loadl_u8_s16(srow, +1 + bstep );
__m128i x10 = _mm_loadl_u8_s16(srow, +1 + bstep*2);
__m128i x11 = _mm_loadl_u8_s16(srow, + bstep );
__m128i x12 = _mm_loadl_u8_s16(srow, -1 + bstep*2);
__m128i x13 = _mm_loadl_u8_s16(srow, -1 + bstep );
__m128i x14 = _mm_loadl_u8_s16(srow, -2 + bstep );
__m128i x15 = _mm_loadl_u8_s16(srow, -1 );
__m128i x16 = _mm_loadl_u8_s16(srow, -2 - bstep );
v_uint16x8 x0 = v_load_expand(srow +0);
v_uint16x8 x1 = v_load_expand(srow -1 - bstep);
v_uint16x8 x2 = v_load_expand(srow -1 - bstep*2);
v_uint16x8 x3 = v_load_expand(srow - bstep);
v_uint16x8 x4 = v_load_expand(srow +1 - bstep*2);
v_uint16x8 x5 = v_load_expand(srow +1 - bstep);
v_uint16x8 x6 = v_load_expand(srow +2 - bstep);
v_uint16x8 x7 = v_load_expand(srow +1);
v_uint16x8 x8 = v_load_expand(srow +2 + bstep);
v_uint16x8 x9 = v_load_expand(srow +1 + bstep);
v_uint16x8 x10 = v_load_expand(srow +1 + bstep*2);
v_uint16x8 x11 = v_load_expand(srow + bstep);
v_uint16x8 x12 = v_load_expand(srow -1 + bstep*2);
v_uint16x8 x13 = v_load_expand(srow -1 + bstep);
v_uint16x8 x14 = v_load_expand(srow -2 + bstep);
v_uint16x8 x15 = v_load_expand(srow -1);
v_uint16x8 x16 = v_load_expand(srow -2 - bstep);
__m128i t0, t1, mask;
v_uint16x8 t0, t1, mask;
// gradN ***********************************************
mask = _mm_cmpgt_epi16(T, gradN); // mask = T>gradN
ng = _mm_sub_epi16(ng, mask); // ng += (T>gradN)
mask = (T > gradN); // mask = T>gradN
ng = v_reinterpret_as_u16(v_reinterpret_as_s16(ng) - v_reinterpret_as_s16(mask)); // ng += (T>gradN)
t0 = _mm_slli_epi16(x3, 1); // srow[-bstep]*2
t1 = _mm_adds_epi16(_mm_loadl_u8_s16(srow, -bstep*2), x0); // srow[-bstep*2] + srow[0]
t0 = (x3 << 1); // srow[-bstep]*2
t1 = v_load_expand(srow - bstep*2) + x0; // srow[-bstep*2] + srow[0]
// RGs += (srow[-bstep*2] + srow[0]) * (T>gradN)
RGs = _mm_adds_epi16(RGs, _mm_and_si128(t1, mask));
RGs += (t1 & mask);
// GRs += {srow[-bstep]*2; (srow[-bstep*2-1] + srow[-bstep*2+1])} * (T>gradN)
GRs = _mm_adds_epi16(GRs, _mm_and_si128(_mm_merge_epi16(t0, _mm_adds_epi16(x2,x4)), mask));
GRs += (v_merge_u16(t0, x2 + x4) & mask);
// Bs += {(srow[-bstep-1]+srow[-bstep+1]); srow[-bstep]*2 } * (T>gradN)
Bs = _mm_adds_epi16(Bs, _mm_and_si128(_mm_merge_epi16(_mm_adds_epi16(x1,x5), t0), mask));
Bs += (v_merge_u16(x1 + x5, t0) & mask);
// gradNE **********************************************
mask = _mm_cmpgt_epi16(T, gradNE); // mask = T>gradNE
ng = _mm_sub_epi16(ng, mask); // ng += (T>gradNE)
mask = (T > gradNE); // mask = T>gradNE
ng = v_reinterpret_as_u16(v_reinterpret_as_s16(ng) - v_reinterpret_as_s16(mask)); // ng += (T>gradNE)
t0 = _mm_slli_epi16(x5, 1); // srow[-bstep+1]*2
t1 = _mm_adds_epi16(_mm_loadl_u8_s16(srow, -bstep*2+2), x0); // srow[-bstep*2+2] + srow[0]
t0 = (x5 << 1); // srow[-bstep+1]*2
t1 = v_load_expand(srow - bstep*2+2) + x0; // srow[-bstep*2+2] + srow[0]
// RGs += {(srow[-bstep*2+2] + srow[0]); srow[-bstep+1]*2} * (T>gradNE)
RGs = _mm_adds_epi16(RGs, _mm_and_si128(_mm_merge_epi16(t1, t0), mask));
RGs += (v_merge_u16(t1, t0) & mask);
// GRs += {brow0[N6+1]; (srow[-bstep*2+1] + srow[1])} * (T>gradNE)
GRs = _mm_adds_epi16(GRs, _mm_and_si128(_mm_merge_epi16(_mm_loadu_si128((__m128i*)(brow0+N6+1)), _mm_adds_epi16(x4,x7)), mask));
GRs += (v_merge_u16(v_load(brow0+N6+1), x4 + x7) & mask);
// Bs += {srow[-bstep+1]*2; (srow[-bstep] + srow[-bstep+2])} * (T>gradNE)
Bs = _mm_adds_epi16(Bs, _mm_and_si128(_mm_merge_epi16(t0,_mm_adds_epi16(x3,x6)), mask));
Bs += (v_merge_u16(t0, x3 + x6) & mask);
// gradE ***********************************************
mask = _mm_cmpgt_epi16(T, gradE); // mask = T>gradE
ng = _mm_sub_epi16(ng, mask); // ng += (T>gradE)
mask = (T > gradE); // mask = T>gradE
ng = v_reinterpret_as_u16(v_reinterpret_as_s16(ng) - v_reinterpret_as_s16(mask)); // ng += (T>gradE)
t0 = _mm_slli_epi16(x7, 1); // srow[1]*2
t1 = _mm_adds_epi16(_mm_loadl_u8_s16(srow, 2), x0); // srow[2] + srow[0]
t0 = (x7 << 1); // srow[1]*2
t1 = v_load_expand(srow +2) + x0; // srow[2] + srow[0]
// RGs += (srow[2] + srow[0]) * (T>gradE)
RGs = _mm_adds_epi16(RGs, _mm_and_si128(t1, mask));
RGs += (t1 & mask);
// GRs += (srow[1]*2) * (T>gradE)
GRs = _mm_adds_epi16(GRs, _mm_and_si128(t0, mask));
GRs += (t0 & mask);
// Bs += {(srow[-bstep+1]+srow[bstep+1]); (srow[-bstep+2]+srow[bstep+2])} * (T>gradE)
Bs = _mm_adds_epi16(Bs, _mm_and_si128(_mm_merge_epi16(_mm_adds_epi16(x5,x9), _mm_adds_epi16(x6,x8)), mask));
Bs += (v_merge_u16(x5 + x9, x6 + x8) & mask);
// gradSE **********************************************
mask = _mm_cmpgt_epi16(T, gradSE); // mask = T>gradSE
ng = _mm_sub_epi16(ng, mask); // ng += (T>gradSE)
mask = (T > gradSE); // mask = T>gradSE
ng = v_reinterpret_as_u16(v_reinterpret_as_s16(ng) - v_reinterpret_as_s16(mask)); // ng += (T>gradSE)
t0 = _mm_slli_epi16(x9, 1); // srow[bstep+1]*2
t1 = _mm_adds_epi16(_mm_loadl_u8_s16(srow, bstep*2+2), x0); // srow[bstep*2+2] + srow[0]
t0 = (x9 << 1); // srow[bstep+1]*2
t1 = v_load_expand(srow + bstep*2+2) + x0; // srow[bstep*2+2] + srow[0]
// RGs += {(srow[bstep*2+2] + srow[0]); srow[bstep+1]*2} * (T>gradSE)
RGs = _mm_adds_epi16(RGs, _mm_and_si128(_mm_merge_epi16(t1, t0), mask));
RGs += (v_merge_u16(t1, t0) & mask);
// GRs += {brow2[N6+1]; (srow[1]+srow[bstep*2+1])} * (T>gradSE)
GRs = _mm_adds_epi16(GRs, _mm_and_si128(_mm_merge_epi16(_mm_loadu_si128((__m128i*)(brow2+N6+1)), _mm_adds_epi16(x7,x10)), mask));
GRs += (v_merge_u16(v_load(brow2+N6+1), x7 + x10) & mask);
// Bs += {srow[bstep+1]*2; (srow[bstep+2]+srow[bstep])} * (T>gradSE)
Bs = _mm_adds_epi16(Bs, _mm_and_si128(_mm_merge_epi16(_mm_slli_epi16(x9, 1), _mm_adds_epi16(x8,x11)), mask));
Bs += (v_merge_u16((x9 << 1), x8 + x11) & mask);
// gradS ***********************************************
mask = _mm_cmpgt_epi16(T, gradS); // mask = T>gradS
ng = _mm_sub_epi16(ng, mask); // ng += (T>gradS)
mask = (T > gradS); // mask = T>gradS
ng = v_reinterpret_as_u16(v_reinterpret_as_s16(ng) - v_reinterpret_as_s16(mask)); // ng += (T>gradS)
t0 = _mm_slli_epi16(x11, 1); // srow[bstep]*2
t1 = _mm_adds_epi16(_mm_loadl_u8_s16(srow,bstep*2), x0); // srow[bstep*2]+srow[0]
t0 = (x11 << 1); // srow[bstep]*2
t1 = v_load_expand(srow + bstep*2) + x0; // srow[bstep*2]+srow[0]
// RGs += (srow[bstep*2]+srow[0]) * (T>gradS)
RGs = _mm_adds_epi16(RGs, _mm_and_si128(t1, mask));
RGs += (t1 & mask);
// GRs += {srow[bstep]*2; (srow[bstep*2+1]+srow[bstep*2-1])} * (T>gradS)
GRs = _mm_adds_epi16(GRs, _mm_and_si128(_mm_merge_epi16(t0, _mm_adds_epi16(x10,x12)), mask));
GRs += (v_merge_u16(t0, x10 + x12) & mask);
// Bs += {(srow[bstep+1]+srow[bstep-1]); srow[bstep]*2} * (T>gradS)
Bs = _mm_adds_epi16(Bs, _mm_and_si128(_mm_merge_epi16(_mm_adds_epi16(x9,x13), t0), mask));
Bs += (v_merge_u16(x9 + x13, t0) & mask);
// gradSW **********************************************
mask = _mm_cmpgt_epi16(T, gradSW); // mask = T>gradSW
ng = _mm_sub_epi16(ng, mask); // ng += (T>gradSW)
mask = (T > gradSW); // mask = T>gradSW
ng = v_reinterpret_as_u16(v_reinterpret_as_s16(ng) - v_reinterpret_as_s16(mask)); // ng += (T>gradSW)
t0 = _mm_slli_epi16(x13, 1); // srow[bstep-1]*2
t1 = _mm_adds_epi16(_mm_loadl_u8_s16(srow, bstep*2-2), x0); // srow[bstep*2-2]+srow[0]
t0 = (x13 << 1); // srow[bstep-1]*2
t1 = v_load_expand(srow + bstep*2-2) + x0; // srow[bstep*2-2]+srow[0]
// RGs += {(srow[bstep*2-2]+srow[0]); srow[bstep-1]*2} * (T>gradSW)
RGs = _mm_adds_epi16(RGs, _mm_and_si128(_mm_merge_epi16(t1, t0), mask));
RGs += (v_merge_u16(t1, t0) & mask);
// GRs += {brow2[N6-1]; (srow[bstep*2-1]+srow[-1])} * (T>gradSW)
GRs = _mm_adds_epi16(GRs, _mm_and_si128(_mm_merge_epi16(_mm_loadu_si128((__m128i*)(brow2+N6-1)), _mm_adds_epi16(x12,x15)), mask));
GRs += (v_merge_u16(v_load(brow2+N6-1), x12 + x15) & mask);
// Bs += {srow[bstep-1]*2; (srow[bstep]+srow[bstep-2])} * (T>gradSW)
Bs = _mm_adds_epi16(Bs, _mm_and_si128(_mm_merge_epi16(t0,_mm_adds_epi16(x11,x14)), mask));
Bs += (v_merge_u16(t0, x11 + x14) & mask);
// gradW ***********************************************
mask = _mm_cmpgt_epi16(T, gradW); // mask = T>gradW
ng = _mm_sub_epi16(ng, mask); // ng += (T>gradW)
mask = (T > gradW); // mask = T>gradW
ng = v_reinterpret_as_u16(v_reinterpret_as_s16(ng) - v_reinterpret_as_s16(mask)); // ng += (T>gradW)
t0 = _mm_slli_epi16(x15, 1); // srow[-1]*2
t1 = _mm_adds_epi16(_mm_loadl_u8_s16(srow, -2), x0); // srow[-2]+srow[0]
t0 = (x15 << 1); // srow[-1]*2
t1 = v_load_expand(srow -2) + x0; // srow[-2]+srow[0]
// RGs += (srow[-2]+srow[0]) * (T>gradW)
RGs = _mm_adds_epi16(RGs, _mm_and_si128(t1, mask));
RGs += (t1 & mask);
// GRs += (srow[-1]*2) * (T>gradW)
GRs = _mm_adds_epi16(GRs, _mm_and_si128(t0, mask));
GRs += (t0 & mask);
// Bs += {(srow[-bstep-1]+srow[bstep-1]); (srow[bstep-2]+srow[-bstep-2])} * (T>gradW)
Bs = _mm_adds_epi16(Bs, _mm_and_si128(_mm_merge_epi16(_mm_adds_epi16(x1,x13), _mm_adds_epi16(x14,x16)), mask));
Bs += (v_merge_u16(x1 + x13, x14 + x16) & mask);
// gradNW **********************************************
mask = _mm_cmpgt_epi16(T, gradNW); // mask = T>gradNW
ng = _mm_sub_epi16(ng, mask); // ng += (T>gradNW)
mask = (T > gradNW); // mask = T>gradNW
ng = v_reinterpret_as_u16(v_reinterpret_as_s16(ng) - v_reinterpret_as_s16(mask)); // ng += (T>gradNW)
t0 = _mm_slli_epi16(x1, 1); // srow[-bstep-1]*2
t1 = _mm_adds_epi16(_mm_loadl_u8_s16(srow,-bstep*2-2), x0); // srow[-bstep*2-2]+srow[0]
t0 = (x1 << 1); // srow[-bstep-1]*2
t1 = v_load_expand(srow -bstep*2-2) + x0; // srow[-bstep*2-2]+srow[0]
// RGs += {(srow[-bstep*2-2]+srow[0]); srow[-bstep-1]*2} * (T>gradNW)
RGs = _mm_adds_epi16(RGs, _mm_and_si128(_mm_merge_epi16(t1, t0), mask));
RGs += (v_merge_u16(t1, t0) & mask);
// GRs += {brow0[N6-1]; (srow[-bstep*2-1]+srow[-1])} * (T>gradNW)
GRs = _mm_adds_epi16(GRs, _mm_and_si128(_mm_merge_epi16(_mm_loadu_si128((__m128i*)(brow0+N6-1)), _mm_adds_epi16(x2,x15)), mask));
GRs += (v_merge_u16(v_load(brow0+N6-1), x2 + x15) & mask);
// Bs += {srow[-bstep-1]*2; (srow[-bstep]+srow[-bstep-2])} * (T>gradNW)
Bs = _mm_adds_epi16(Bs, _mm_and_si128(_mm_merge_epi16(_mm_slli_epi16(x1, 1),_mm_adds_epi16(x3,x16)), mask));
Bs += (v_merge_u16((x1 << 1), x3 + x16) & mask);
__m128 ngf0 = _mm_div_ps(_0_5, _mm_cvtloepi16_ps(ng));
__m128 ngf1 = _mm_div_ps(_0_5, _mm_cvthiepi16_ps(ng));
v_float32x4 ngf0 = _0_5 / v_cvt_s16f32_lo(ng);
v_float32x4 ngf1 = _0_5 / v_cvt_s16f32_hi(ng);
// now interpolate r, g & b
t0 = _mm_subs_epi16(GRs, RGs);
t1 = _mm_subs_epi16(Bs, RGs);
t0 = v_reinterpret_as_u16(v_reinterpret_as_s16(GRs) - v_reinterpret_as_s16(RGs));
t1 = v_reinterpret_as_u16(v_reinterpret_as_s16(Bs) - v_reinterpret_as_s16(RGs));
t0 = _mm_add_epi16(x0, _mm_packs_epi32(
_mm_cvtps_epi32(_mm_mul_ps(_mm_cvtloepi16_ps(t0), ngf0)),
_mm_cvtps_epi32(_mm_mul_ps(_mm_cvthiepi16_ps(t0), ngf1))));
t0 = v_reinterpret_as_u16(v_reinterpret_as_s16(x0) +
v_pack(
v_round(v_cvt_s16f32_lo(t0) * ngf0),
v_round(v_cvt_s16f32_hi(t0) * ngf1)));
t1 = _mm_add_epi16(x0, _mm_packs_epi32(
_mm_cvtps_epi32(_mm_mul_ps(_mm_cvtloepi16_ps(t1), ngf0)),
_mm_cvtps_epi32(_mm_mul_ps(_mm_cvthiepi16_ps(t1), ngf1))));
t1 = v_reinterpret_as_u16(v_reinterpret_as_s16(x0) +
v_pack(
v_round(v_cvt_s16f32_lo(t1) * ngf0),
v_round(v_cvt_s16f32_hi(t1) * ngf1)));
x1 = _mm_merge_epi16(x0, t0);
x2 = _mm_merge_epi16(t0, x0);
x1 = v_merge_u16(x0, t0);
x2 = v_merge_u16(t0, x0);
uchar R[8], G[8], B[8];
_mm_storel_epi64(blueIdx ? (__m128i*)B : (__m128i*)R, _mm_packus_epi16(x1, z));
_mm_storel_epi64((__m128i*)G, _mm_packus_epi16(x2, z));
_mm_storel_epi64(blueIdx ? (__m128i*)R : (__m128i*)B, _mm_packus_epi16(t1, z));
v_store_low(blueIdx ? B : R, v_pack_u(v_reinterpret_as_s16(x1), v_reinterpret_as_s16(z)));
v_store_low(G, v_pack_u(v_reinterpret_as_s16(x2), v_reinterpret_as_s16(z)));
v_store_low(blueIdx ? R : B, v_pack_u(v_reinterpret_as_s16(t1), v_reinterpret_as_s16(z)));
for( int j = 0; j < 8; j++, dstrow += 3 )
{

View File

@ -795,7 +795,7 @@ void cv::distanceTransform( InputArray _src, OutputArray _dst, OutputArray _labe
{
if( maskSize == CV_DIST_MASK_3 )
{
#if defined (HAVE_IPP) && (IPP_VERSION_X100 >= 700)
#if defined (HAVE_IPP) && (IPP_VERSION_X100 >= 700) && 0 // disabled: https://github.com/opencv/opencv/issues/15904
CV_IPP_CHECK()
{
IppiSize roi = { src.cols, src.rows };

View File

@ -41,6 +41,12 @@
//M*/
#include "precomp.hpp"
#include <opencv2/core/utils/logger.defines.hpp>
#undef CV_LOG_STRIP_LEVEL
#define CV_LOG_STRIP_LEVEL CV_LOG_LEVEL_DEBUG + 1
#include <opencv2/core/utils/logger.hpp>
#include "opencv2/core/opencl/ocl_defs.hpp"
#include "opencl_kernels_imgproc.hpp"
#include "hal_replacement.hpp"
@ -273,6 +279,22 @@ Ptr<BaseColumnFilter> getLinearColumnFilter(
CV_CPU_DISPATCH_MODES_ALL);
}
// Quantizes a floating-point filter kernel into a CV_32S fixed-point kernel
// (scaled by 2^bits) and reports whether the quantization is faithful enough
// for bit-exact filtering: every rounded coefficient must stay within a small
// epsilon of the exactly-scaled double-precision value.
static bool createBitExactKernel_32S(const Mat& kernel, Mat& kernel_dst, int bits)
{
    const double scale = (double)(1 << bits);
    kernel.convertTo(kernel_dst, CV_32S, scale);        // rounded fixed-point coefficients
    Mat_<double> kernel_64f;
    kernel.convertTo(kernel_64f, CV_64F, scale);        // exact scaled reference values
    // tolerance scales with the fixed-point multiplier
    const double eps = 10 * FLT_EPSILON * (1 << bits);
    const int coeffCount = (int)kernel.total();
    for (int idx = 0; idx < coeffCount; idx++)
    {
        const double reference = kernel_64f.at<double>(idx);
        const int quantized = kernel_dst.at<int>(idx);
        // reject the kernel as soon as one coefficient drifts past tolerance
        if (fabs(reference - quantized) > eps)
            return false;
    }
    return true;
}
Ptr<FilterEngine> createSeparableLinearFilter(
int _srcType, int _dstType,
@ -299,6 +321,7 @@ Ptr<FilterEngine> createSeparableLinearFilter(
_columnKernel.rows == 1 ? Point(_anchor.y, 0) : Point(0, _anchor.y));
Mat rowKernel, columnKernel;
bool isBitExactMode = false;
int bdepth = std::max(CV_32F,std::max(sdepth, ddepth));
int bits = 0;
@ -311,14 +334,27 @@ Ptr<FilterEngine> createSeparableLinearFilter(
(rtype & ctype & KERNEL_INTEGER) &&
ddepth == CV_16S)) )
{
bdepth = CV_32S;
bits = ddepth == CV_8U ? 8 : 0;
_rowKernel.convertTo( rowKernel, CV_32S, 1 << bits );
_columnKernel.convertTo( columnKernel, CV_32S, 1 << bits );
bits *= 2;
_delta *= (1 << bits);
int bits_ = ddepth == CV_8U ? 8 : 0;
bool isValidBitExactRowKernel = createBitExactKernel_32S(_rowKernel, rowKernel, bits_);
bool isValidBitExactColumnKernel = createBitExactKernel_32S(_columnKernel, columnKernel, bits_);
if (!isValidBitExactRowKernel)
{
CV_LOG_DEBUG(NULL, "createSeparableLinearFilter: bit-exact row-kernel can't be applied: ksize=" << _rowKernel.total());
}
else if (!isValidBitExactColumnKernel)
{
CV_LOG_DEBUG(NULL, "createSeparableLinearFilter: bit-exact column-kernel can't be applied: ksize=" << _columnKernel.total());
}
else
{
bdepth = CV_32S;
bits = bits_;
bits *= 2;
_delta *= (1 << bits);
isBitExactMode = true;
}
}
if (!isBitExactMode)
{
if( _rowKernel.type() != bdepth )
_rowKernel.convertTo( rowKernel, bdepth );

View File

@ -22,6 +22,7 @@ private:
public:
typedef fixedpoint64 WT;
CV_ALWAYS_INLINE fixedpoint64() { val = 0; }
CV_ALWAYS_INLINE fixedpoint64(const fixedpoint64& v) { val = v.val; }
CV_ALWAYS_INLINE fixedpoint64(const int8_t& _val) { val = ((int64_t)_val) << fixedShift; }
CV_ALWAYS_INLINE fixedpoint64(const uint8_t& _val) { val = ((int64_t)_val) << fixedShift; }
CV_ALWAYS_INLINE fixedpoint64(const int16_t& _val) { val = ((int64_t)_val) << fixedShift; }
@ -104,6 +105,7 @@ private:
public:
typedef ufixedpoint64 WT;
CV_ALWAYS_INLINE ufixedpoint64() { val = 0; }
CV_ALWAYS_INLINE ufixedpoint64(const ufixedpoint64& v) { val = v.val; }
CV_ALWAYS_INLINE ufixedpoint64(const uint8_t& _val) { val = ((uint64_t)_val) << fixedShift; }
CV_ALWAYS_INLINE ufixedpoint64(const uint16_t& _val) { val = ((uint64_t)_val) << fixedShift; }
CV_ALWAYS_INLINE ufixedpoint64(const uint32_t& _val) { val = ((uint64_t)_val) << fixedShift; }
@ -169,6 +171,7 @@ private:
public:
typedef fixedpoint64 WT;
CV_ALWAYS_INLINE fixedpoint32() { val = 0; }
CV_ALWAYS_INLINE fixedpoint32(const fixedpoint32& v) { val = v.val; }
CV_ALWAYS_INLINE fixedpoint32(const int8_t& _val) { val = ((int32_t)_val) << fixedShift; }
CV_ALWAYS_INLINE fixedpoint32(const uint8_t& _val) { val = ((int32_t)_val) << fixedShift; }
CV_ALWAYS_INLINE fixedpoint32(const int16_t& _val) { val = ((int32_t)_val) << fixedShift; }
@ -222,6 +225,7 @@ private:
public:
typedef ufixedpoint64 WT;
CV_ALWAYS_INLINE ufixedpoint32() { val = 0; }
CV_ALWAYS_INLINE ufixedpoint32(const ufixedpoint32& v) { val = v.val; }
CV_ALWAYS_INLINE ufixedpoint32(const uint8_t& _val) { val = ((uint32_t)_val) << fixedShift; }
CV_ALWAYS_INLINE ufixedpoint32(const uint16_t& _val) { val = ((uint32_t)_val) << fixedShift; }
CV_ALWAYS_INLINE ufixedpoint32(const cv::softdouble& _val) { val = _val.getSign() ? 0 : (uint32_t)cvRound(_val * cv::softdouble((1 << fixedShift))); }
@ -271,6 +275,7 @@ private:
public:
typedef fixedpoint32 WT;
CV_ALWAYS_INLINE fixedpoint16() { val = 0; }
CV_ALWAYS_INLINE fixedpoint16(const fixedpoint16& v) { val = v.val; }
CV_ALWAYS_INLINE fixedpoint16(const int8_t& _val) { val = ((int16_t)_val) << fixedShift; }
CV_ALWAYS_INLINE fixedpoint16(const cv::softdouble& _val) { val = (int16_t)cvRound(_val * cv::softdouble((1 << fixedShift))); }
CV_ALWAYS_INLINE fixedpoint16& operator = (const int8_t& _val) { val = ((int16_t)_val) << fixedShift; return *this; }
@ -317,6 +322,7 @@ private:
public:
typedef ufixedpoint32 WT;
CV_ALWAYS_INLINE ufixedpoint16() { val = 0; }
CV_ALWAYS_INLINE ufixedpoint16(const ufixedpoint16& v) { val = v.val; }
CV_ALWAYS_INLINE ufixedpoint16(const uint8_t& _val) { val = ((uint16_t)_val) << fixedShift; }
CV_ALWAYS_INLINE ufixedpoint16(const cv::softdouble& _val) { val = _val.getSign() ? 0 : (uint16_t)cvRound(_val * cv::softdouble((int32_t)(1 << fixedShift))); }
CV_ALWAYS_INLINE ufixedpoint16& operator = (const uint8_t& _val) { val = ((uint16_t)_val) << fixedShift; return *this; }
@ -349,6 +355,9 @@ public:
CV_ALWAYS_INLINE bool isZero() { return val == 0; }
static CV_ALWAYS_INLINE ufixedpoint16 zero() { return ufixedpoint16(); }
static CV_ALWAYS_INLINE ufixedpoint16 one() { return ufixedpoint16((uint16_t)(1 << fixedShift)); }
static CV_ALWAYS_INLINE ufixedpoint16 fromRaw(uint16_t v) { return ufixedpoint16(v); }
CV_ALWAYS_INLINE ufixedpoint16 raw() { return val; }
};
}

View File

@ -43,6 +43,10 @@
#include "precomp.hpp"
#include <opencv2/core/utils/logger.hpp>
#include <opencv2/core/utils/configuration.private.hpp>
#include <vector>
#include "opencv2/core/hal/intrin.hpp"
@ -67,109 +71,212 @@ namespace cv {
Gaussian Blur
\****************************************************************************************/
Mat getGaussianKernel(int n, double sigma, int ktype)
/**
* Bit-exact in terms of softfloat computations
*
* returns sum of kernel values. Should be equal to 1.0
*/
static
softdouble getGaussianKernelBitExact(std::vector<softdouble>& result, int n, double sigma)
{
CV_Assert(n > 0);
const int SMALL_GAUSSIAN_SIZE = 7;
static const float small_gaussian_tab[][SMALL_GAUSSIAN_SIZE] =
//TODO: incorrect SURF implementation requests kernel with n = 20 (PATCH_SZ): https://github.com/opencv/opencv/issues/15856
//CV_Assert((n & 1) == 1); // odd
if (sigma <= 0)
{
{1.f},
{0.25f, 0.5f, 0.25f},
{0.0625f, 0.25f, 0.375f, 0.25f, 0.0625f},
{0.03125f, 0.109375f, 0.21875f, 0.28125f, 0.21875f, 0.109375f, 0.03125f}
if (n == 1)
{
result = std::vector<softdouble>(1, softdouble::one());
return softdouble::one();
}
else if (n == 3)
{
softdouble v3[] = {
softdouble::fromRaw(0x3fd0000000000000), // 0.25
softdouble::fromRaw(0x3fe0000000000000), // 0.5
softdouble::fromRaw(0x3fd0000000000000) // 0.25
};
result.assign(v3, v3 + 3);
return softdouble::one();
}
else if (n == 5)
{
softdouble v5[] = {
softdouble::fromRaw(0x3fb0000000000000), // 0.0625
softdouble::fromRaw(0x3fd0000000000000), // 0.25
softdouble::fromRaw(0x3fd8000000000000), // 0.375
softdouble::fromRaw(0x3fd0000000000000), // 0.25
softdouble::fromRaw(0x3fb0000000000000) // 0.0625
};
result.assign(v5, v5 + 5);
return softdouble::one();
}
else if (n == 7)
{
softdouble v7[] = {
softdouble::fromRaw(0x3fa0000000000000), // 0.03125
softdouble::fromRaw(0x3fbc000000000000), // 0.109375
softdouble::fromRaw(0x3fcc000000000000), // 0.21875
softdouble::fromRaw(0x3fd2000000000000), // 0.28125
softdouble::fromRaw(0x3fcc000000000000), // 0.21875
softdouble::fromRaw(0x3fbc000000000000), // 0.109375
softdouble::fromRaw(0x3fa0000000000000) // 0.03125
};
result.assign(v7, v7 + 7);
return softdouble::one();
}
else if (n == 9)
{
softdouble v9[] = {
softdouble::fromRaw(0x3f90000000000000), // 4 / 256
softdouble::fromRaw(0x3faa000000000000), // 13 / 256
softdouble::fromRaw(0x3fbe000000000000), // 30 / 256
softdouble::fromRaw(0x3fc9800000000000), // 51 / 256
softdouble::fromRaw(0x3fce000000000000), // 60 / 256
softdouble::fromRaw(0x3fc9800000000000), // 51 / 256
softdouble::fromRaw(0x3fbe000000000000), // 30 / 256
softdouble::fromRaw(0x3faa000000000000), // 13 / 256
softdouble::fromRaw(0x3f90000000000000) // 4 / 256
};
result.assign(v9, v9 + 9);
return softdouble::one();
}
}
const float* fixed_kernel = n % 2 == 1 && n <= SMALL_GAUSSIAN_SIZE && sigma <= 0 ?
small_gaussian_tab[n>>1] : 0;
softdouble sd_0_15 = softdouble::fromRaw(0x3fc3333333333333); // 0.15
softdouble sd_0_35 = softdouble::fromRaw(0x3fd6666666666666); // 0.35
softdouble sd_minus_0_125 = softdouble::fromRaw(0xbfc0000000000000); // -0.5*0.25
CV_Assert( ktype == CV_32F || ktype == CV_64F );
softdouble sigmaX = sigma > 0 ? softdouble(sigma) : mulAdd(softdouble(n), sd_0_15, sd_0_35);// softdouble(((n-1)*0.5 - 1)*0.3 + 0.8)
softdouble scale2X = sd_minus_0_125/(sigmaX*sigmaX);
int n2_ = (n - 1) / 2;
cv::AutoBuffer<softdouble> values(n2_ + 1);
softdouble sum = softdouble::zero();
for (int i = 0, x = 1 - n; i < n2_; i++, x+=2)
{
// x = i - (n - 1)*0.5
// t = std::exp(scale2X*x*x)
softdouble t = exp(softdouble(x*x)*scale2X);
values[i] = t;
sum += t;
}
sum *= softdouble(2);
//values[n2_] = softdouble::one(); // x=0 in exp(softdouble(x*x)*scale2X);
sum += softdouble::one();
if ((n & 1) == 0)
{
//values[n2_ + 1] = softdouble::one();
sum += softdouble::one();
}
// normalize: sum(k[i]) = 1
softdouble mul1 = softdouble::one()/sum;
result.resize(n);
softdouble sum2 = softdouble::zero();
for (int i = 0; i < n2_; i++ )
{
softdouble t = values[i] * mul1;
result[i] = t;
result[n - 1 - i] = t;
sum2 += t;
}
sum2 *= softdouble(2);
result[n2_] = /*values[n2_]*/ softdouble::one() * mul1;
sum2 += result[n2_];
if ((n & 1) == 0)
{
result[n2_ + 1] = result[n2_];
sum2 += result[n2_];
}
return sum2;
}
Mat getGaussianKernel(int n, double sigma, int ktype)
{
CV_CheckDepth(ktype, ktype == CV_32F || ktype == CV_64F, "");
Mat kernel(n, 1, ktype);
float* cf = kernel.ptr<float>();
double* cd = kernel.ptr<double>();
double sigmaX = sigma > 0 ? sigma : ((n-1)*0.5 - 1)*0.3 + 0.8;
double scale2X = -0.5/(sigmaX*sigmaX);
double sum = 0;
std::vector<softdouble> kernel_bitexact;
getGaussianKernelBitExact(kernel_bitexact, n, sigma);
int i;
for( i = 0; i < n; i++ )
if (ktype == CV_32F)
{
double x = i - (n-1)*0.5;
double t = fixed_kernel ? (double)fixed_kernel[i] : std::exp(scale2X*x*x);
if( ktype == CV_32F )
{
cf[i] = (float)t;
sum += cf[i];
for (int i = 0; i < n; i++)
kernel.at<float>(i) = (float)kernel_bitexact[i];
}
else
{
cd[i] = t;
sum += cd[i];
}
}
CV_DbgAssert(fabs(sum) > 0);
sum = 1./sum;
for( i = 0; i < n; i++ )
{
if( ktype == CV_32F )
cf[i] = (float)(cf[i]*sum);
else
cd[i] *= sum;
CV_DbgAssert(ktype == CV_64F);
for (int i = 0; i < n; i++)
kernel.at<double>(i) = kernel_bitexact[i];
}
return kernel;
}
template <typename T>
static std::vector<T> getFixedpointGaussianKernel( int n, double sigma )
static
softdouble getGaussianKernelFixedPoint_ED(CV_OUT std::vector<int64_t>& result, const std::vector<softdouble> kernel_bitexact, int fractionBits)
{
if (sigma <= 0)
{
if(n == 1)
return std::vector<T>(1, softdouble(1.0));
else if(n == 3)
{
T v3[] = { softdouble(0.25), softdouble(0.5), softdouble(0.25) };
return std::vector<T>(v3, v3 + 3);
}
else if(n == 5)
{
T v5[] = { softdouble(0.0625), softdouble(0.25), softdouble(0.375), softdouble(0.25), softdouble(0.0625) };
return std::vector<T>(v5, v5 + 5);
}
else if(n == 7)
{
T v7[] = { softdouble(0.03125), softdouble(0.109375), softdouble(0.21875), softdouble(0.28125), softdouble(0.21875), softdouble(0.109375), softdouble(0.03125) };
return std::vector<T>(v7, v7 + 7);
}
}
const int n = (int)kernel_bitexact.size();
CV_Assert((n & 1) == 1); // odd
CV_CheckGT(fractionBits, 0, "");
CV_CheckLE(fractionBits, 32, "");
softdouble sigmaX = sigma > 0 ? softdouble(sigma) : mulAdd(softdouble(n),softdouble(0.15),softdouble(0.35));// softdouble(((n-1)*0.5 - 1)*0.3 + 0.8)
softdouble scale2X = softdouble(-0.5*0.25)/(sigmaX*sigmaX);
std::vector<softdouble> values(n);
softdouble sum(0.);
for(int i = 0, x = 1 - n; i < n; i++, x+=2 )
int64_t fractionMultiplier = CV_BIG_INT(1) << fractionBits;
softdouble fractionMultiplier_sd(fractionMultiplier);
result.resize(n);
int n2_ = n / 2; // n is odd
softdouble err = softdouble::zero();
int64_t sum = 0;
for (int i = 0; i < n2_; i++)
{
// x = i - (n - 1)*0.5
// t = std::exp(scale2X*x*x)
values[i] = exp(softdouble(x*x)*scale2X);
sum += values[i];
}
sum = softdouble::one()/sum;
//softdouble err0 = err;
softdouble adj_v = kernel_bitexact[i] * fractionMultiplier_sd + err;
int64_t v0 = cvRound(adj_v); // cvFloor() provides bad results
err = adj_v - softdouble(v0);
//printf("%3d: adj_v=%8.3f(%8.3f+%8.3f) v0=%d ed_err=%8.3f\n", i, (double)adj_v, (double)(kernel_bitexact[i] * fractionMultiplier_sd), (double)err0, (int)v0, (double)err);
std::vector<T> kernel(n);
for(int i = 0; i < n; i++ )
{
kernel[i] = values[i] * sum;
result[i] = v0;
result[n - 1 - i] = v0;
sum += v0;
}
return kernel;
};
sum *= 2;
softdouble adj_v_center = kernel_bitexact[n2_] * fractionMultiplier_sd + err;
int64_t v_center = fractionMultiplier - sum;
result[n2_] = v_center;
//printf("center = %g ===> %g ===> %g\n", (double)(kernel_bitexact[n2_] * fractionMultiplier), (double)adj_v_center, (double)v_center);
return (adj_v_center - softdouble(v_center));
}
static void getGaussianKernel(int n, double sigma, int ktype, Mat& res) { res = getGaussianKernel(n, sigma, ktype); }
template <typename T> static void getGaussianKernel(int n, double sigma, int, std::vector<T>& res) { res = getFixedpointGaussianKernel<T>(n, sigma); }
template <typename T> static void getGaussianKernel(int n, double sigma, int, std::vector<T>& res);
//{ res = getFixedpointGaussianKernel<T>(n, sigma); }
// Specialization producing a bit-exact fixed-point (Q8.8, ufixedpoint16)
// Gaussian kernel: computes the softdouble reference kernel, then quantizes it
// to 8 fractional bits with error diffusion so the weights sum exactly to 1.
template<> void getGaussianKernel<ufixedpoint16>(int n, double sigma, int, std::vector<ufixedpoint16>& res)
{
    std::vector<softdouble> kernel_sd;
    softdouble kernel_sum = getGaussianKernelBitExact(kernel_sd, n, sigma);
    CV_UNUSED(kernel_sum);
    std::vector<int64_t> raw_q8;  // coefficients scaled by 2^8
    softdouble quant_err = getGaussianKernelFixedPoint_ED(raw_q8, kernel_sd, 8);
    CV_UNUSED(quant_err);
    res.resize(n);
    for (int idx = 0; idx < n; ++idx)
        res[idx] = ufixedpoint16::fromRaw((uint16_t)raw_q8[idx]);
}
template <typename T>
static void createGaussianKernels( T & kx, T & ky, int type, Size &ksize,
@ -477,6 +584,19 @@ static bool ipp_GaussianBlur(InputArray _src, OutputArray _dst, Size ksize,
}
#endif
template<typename T>
static bool validateGaussianBlurKernel(std::vector<T>& kernel)
{
softdouble validation_sum = softdouble::zero();
for (size_t i = 0; i < kernel.size(); i++)
{
validation_sum += softdouble((double)kernel[i]);
}
bool isValid = validation_sum == softdouble::one();
return isValid;
}
void GaussianBlur(InputArray _src, OutputArray _dst, Size ksize,
double sigma1, double sigma2,
int borderType)
@ -539,12 +659,25 @@ void GaussianBlur(InputArray _src, OutputArray _dst, Size ksize,
{
std::vector<ufixedpoint16> fkx, fky;
createGaussianKernels(fkx, fky, type, ksize, sigma1, sigma2);
static bool param_check_gaussian_blur_bitexact_kernels = utils::getConfigurationParameterBool("OPENCV_GAUSSIANBLUR_CHECK_BITEXACT_KERNELS", false);
if (param_check_gaussian_blur_bitexact_kernels && !validateGaussianBlurKernel(fkx))
{
CV_LOG_INFO(NULL, "GaussianBlur: bit-exact fx kernel can't be applied: ksize=" << ksize << " sigma=" << Size2d(sigma1, sigma2));
}
else if (param_check_gaussian_blur_bitexact_kernels && !validateGaussianBlurKernel(fky))
{
CV_LOG_INFO(NULL, "GaussianBlur: bit-exact fy kernel can't be applied: ksize=" << ksize << " sigma=" << Size2d(sigma1, sigma2));
}
else
{
if (src.data == dst.data)
src = src.clone();
CV_CPU_DISPATCH(GaussianBlurFixedPoint, (src, dst, (const uint16_t*)&fkx[0], (int)fkx.size(), (const uint16_t*)&fky[0], (int)fky.size(), borderType),
CV_CPU_DISPATCH_MODES_ALL);
return;
}
}
sepFilter2D(src, dst, sdepth, kx, ky, Point(-1, -1), 0, borderType);
}

View File

@ -59,6 +59,13 @@ protected:
bool fp_kernel;
bool inplace;
int border;
// Appends this test's border mode to the failure log (on top of the base
// ArrayTest parameters) so a failing case can be reproduced.
void dump_test_case(int test_case_idx, std::ostream* out) CV_OVERRIDE
{
    ArrayTest::dump_test_case(test_case_idx, out);
    *out << "border=" << border << std::endl;
}
};
@ -685,6 +692,12 @@ protected:
void get_test_array_types_and_sizes( int test_case_idx, vector<vector<Size> >& sizes, vector<vector<int> >& types );
double get_success_error_level( int test_case_idx, int i, int j );
const char* smooth_type;
// Appends the smoothing-kernel name to the failure log (on top of the base
// filter-test parameters) so a failing case can be reproduced.
void dump_test_case(int test_case_idx, std::ostream* out) CV_OVERRIDE
{
    CV_FilterBaseTest::dump_test_case(test_case_idx, out);
    *out << "smooth_type=" << smooth_type << std::endl;
}
};
@ -795,6 +808,12 @@ protected:
double get_success_error_level( int /*test_case_idx*/, int /*i*/, int /*j*/ );
double sigma;
int param1, param2;
// Appends the Gaussian kernel size (param1 x param2) and sigma to the failure
// log so a failing case can be reproduced.
void dump_test_case(int test_case_idx, std::ostream* out) CV_OVERRIDE
{
    CV_SmoothBaseTest::dump_test_case(test_case_idx, out);
    *out << "kernel=(" << param1 << ", " << param2 << ") sigma=" << sigma << std::endl;
}
};
@ -838,7 +857,7 @@ void CV_GaussianBlurTest::run_func()
// !!! Copied from cvSmooth, if the code is changed in cvSmooth,
// make sure to update this one too.
#define SMALL_GAUSSIAN_SIZE 7
#define SMALL_GAUSSIAN_SIZE 9
static void
calcGaussianKernel( int n, double sigma, vector<float>& kernel )
{
@ -847,14 +866,15 @@ calcGaussianKernel( int n, double sigma, vector<float>& kernel )
{1.f},
{0.25f, 0.5f, 0.25f},
{0.0625f, 0.25f, 0.375f, 0.25f, 0.0625f},
{0.03125, 0.109375, 0.21875, 0.28125, 0.21875, 0.109375, 0.03125}
{0.03125, 0.109375, 0.21875, 0.28125, 0.21875, 0.109375, 0.03125},
{4.0 / 256, 13.0 / 256, 30.0 / 256, 51.0 / 256, 60.0 / 256, 51.0 / 256, 30.0 / 256, 13.0 / 256, 4.0 / 256}
};
kernel.resize(n);
if( n <= SMALL_GAUSSIAN_SIZE && sigma <= 0 )
{
assert( n%2 == 1 );
memcpy( &kernel[0], small_gaussian_tab[n>>1], n*sizeof(kernel[0]));
CV_Assert(n%2 == 1);
memcpy(&kernel[0], small_gaussian_tab[n / 2], n*sizeof(kernel[0]));
}
else
{

View File

@ -14,11 +14,14 @@ namespace opencv_test { namespace {
{ fixedOne >> 2, fixedOne >> 1, fixedOne >> 2 }, // size 3, sigma 0
{ fixedOne >> 4, fixedOne >> 2, 6 * (fixedOne >> 4), fixedOne >> 2, fixedOne >> 4 }, // size 5, sigma 0
{ fixedOne >> 5, 7 * (fixedOne >> 6), 7 * (fixedOne >> 5), 9 * (fixedOne >> 5), 7 * (fixedOne >> 5), 7 * (fixedOne >> 6), fixedOne >> 5 }, // size 7, sigma 0
{ 4, 13, 30, 51, 61, 51, 30, 13, 4 }, // size 9, sigma 0
{ 81, 95, 81 }, // size 3, sigma 1.75
{ 65, 125, 65 }, // size 3, sigma 0.875
{ 4, 13, 30, 51, 60, 51, 30, 13, 4 }, // size 9, sigma 0
#if 1
#define CV_TEST_INACCURATE_GAUSSIAN_BLUR
{ 81, 94, 81 }, // size 3, sigma 1.75
{ 65, 126, 65 }, // size 3, sigma 0.875
{ 0, 7, 242, 7, 0 }, // size 5, sigma 0.375
{ 4, 56, 136, 56, 4 } // size 5, sigma 0.75
#endif
};
template <typename T, int fixedShift>
@ -68,11 +71,13 @@ TEST(GaussianBlur_Bitexact, Linear8U)
{ CV_8UC1, Size( 256, 128), Size(5, 5), 0, 0, vector<int64_t>(v[2], v[2]+5), vector<int64_t>(v[2], v[2]+5) },
{ CV_8UC1, Size( 256, 128), Size(7, 7), 0, 0, vector<int64_t>(v[3], v[3]+7), vector<int64_t>(v[3], v[3]+7) },
{ CV_8UC1, Size( 256, 128), Size(9, 9), 0, 0, vector<int64_t>(v[4], v[4]+9), vector<int64_t>(v[4], v[4]+9) },
#ifdef CV_TEST_INACCURATE_GAUSSIAN_BLUR
{ CV_8UC1, Size( 256, 128), Size(3, 3), 1.75, 0.875, vector<int64_t>(v[5], v[5]+3), vector<int64_t>(v[6], v[6]+3) },
{ CV_8UC2, Size( 256, 128), Size(3, 3), 1.75, 0.875, vector<int64_t>(v[5], v[5]+3), vector<int64_t>(v[6], v[6]+3) },
{ CV_8UC3, Size( 256, 128), Size(3, 3), 1.75, 0.875, vector<int64_t>(v[5], v[5]+3), vector<int64_t>(v[6], v[6]+3) },
{ CV_8UC4, Size( 256, 128), Size(3, 3), 1.75, 0.875, vector<int64_t>(v[5], v[5]+3), vector<int64_t>(v[6], v[6]+3) },
{ CV_8UC1, Size( 256, 128), Size(5, 5), 0.375, 0.75, vector<int64_t>(v[7], v[7]+5), vector<int64_t>(v[8], v[8]+5) }
#endif
};
int bordermodes[] = {
@ -162,8 +167,28 @@ TEST(GaussianBlur_Bitexact, regression_15015)
{
Mat src(100,100,CV_8UC3,Scalar(255,255,255));
Mat dst;
GaussianBlur(src, dst, Size(5, 5), 9);
GaussianBlur(src, dst, Size(5, 5), 0);
ASSERT_EQ(0.0, cvtest::norm(dst, src, NORM_INF));
}
static void checkGaussianBlur_8Uvs32F(const Mat& src8u, const Mat& src32f, int N, double sigma)
{
Mat dst8u; GaussianBlur(src8u, dst8u, Size(N, N), sigma); // through bit-exact path
Mat dst8u_32f; dst8u.convertTo(dst8u_32f, CV_32F);
Mat dst32f; GaussianBlur(src32f, dst32f, Size(N, N), sigma); // without bit-exact computations
double normINF_32f = cv::norm(dst8u_32f, dst32f, NORM_INF);
EXPECT_LE(normINF_32f, 1.0);
}
// Regression test for issue 9863: the bit-exact 8U blur must stay close to
// the 32F reference even for a large kernel (151x151, sigma=30) on a real image.
TEST(GaussianBlur_Bitexact, regression_9863)
{
    const Mat image8u = imread(cvtest::findDataFile("shared/lena.png"));
    Mat image32f;
    image8u.convertTo(image32f, CV_32F);
    checkGaussianBlur_8Uvs32F(image8u, image32f, 151, 30);
}
}} // namespace

View File

@ -1,13 +1,15 @@
{
"name": "opencv_js_tests",
"description": "Tests for opencv js bindings",
"version": "1.0.0",
"dependencies" : {
"node-qunit" : "latest"
"version": "1.0.1",
"dependencies": {
"ansi-colors": "^4.1.1",
"minimist": "^1.2.0",
"node-qunit": "latest"
},
"devDependencies": {
"eslint" : "latest",
"eslint-config-google" : "latest"
"eslint": "latest",
"eslint-config-google": "latest"
},
"scripts": {
"test": "node tests.js"

View File

@ -0,0 +1,214 @@
try {
require('puppeteer')
} catch (e) {
console.error(
"\nFATAL ERROR:" +
"\n Package 'puppeteer' is not available." +
"\n Run 'npm install --no-save puppeteer' before running this script" +
"\n * You may use PUPPETEER_SKIP_CHROMIUM_DOWNLOAD=1 environment variable to avoid automatic Chromium downloading" +
"\n (specify own Chromium/Chrome version through PUPPETEER_EXECUTABLE_PATH=`which google-chrome` environment variable)" +
"\n");
process.exit(1);
}
const puppeteer = require('puppeteer')
const colors = require("ansi-colors")
const path = require("path");
const fs = require("fs");
const http = require("http");
run_main(require('minimist')(process.argv.slice(2)));
// Entry wrapper: runs main() and guarantees a non-zero exit code on any
// unexpected return or exception. main() is expected to terminate the
// process itself, so falling through here is always an error.
async function run_main(o = {}) {
    try {
        await main(o);
        // BUGFIX: console.magenta is not a function — colorize via ansi-colors
        // (previously this line itself threw a TypeError).
        console.error(colors.magenta("FATAL: Unexpected exit!"));
        process.exit(1);
    } catch (e) {
        console.error(colors.magenta("FATAL: Unexpected exception!"));
        console.error(e);
        process.exit(1);
    }
}
// Serves the opencv.js build folder, opens tests.html in a (headless by
// default) Chromium via puppeteer, waits for QUnit to finish, prints a
// summary/failure report, optionally takes a screenshot, and exits with a
// status reflecting the test outcome.
//
// Options (all optional, see printHelpAndExit for the user-facing list):
//   buildFolder, port, debug, noHeadless, serverPrefix, noExit, screenshot,
//   help, noTryCatch, maxBlockDuration.
async function main(o = {}) {
    // Merge caller options (minimist CLI flags) over the defaults.
    o = Object.assign({}, {
        buildFolder: __dirname,
        port: 8080,
        debug: false,
        noHeadless: false,
        serverPrefix: `http://localhost`,
        noExit: false,
        screenshot: undefined,
        help: false,
        noTryCatch: false,
        maxBlockDuration: 30000
    }, o)
    if (typeof o.screenshot == 'string' && o.screenshot == 'false') {
        console.log(colors.red('ERROR: misused screenshot option, use --no-screenshot instead'));
    }
    if (o.noExit) {
        // Keep the QUnit block-duration effectively unlimited while the
        // server is left running for interactive debugging.
        o.maxBlockDuration = 999999999
    }
    o.debug && console.log('Current Options', o);
    if (o.help) {
        printHelpAndExit();
    }
    const serverAddress = `${o.serverPrefix}:${o.port}`
    const url = `${serverAddress}/tests.html${o.noTryCatch ? '?notrycatch=1' : ''}`;
    if (!fs.existsSync(o.buildFolder)) {
        console.error(`Expected folder "${o.buildFolder}" to exist. Aborting`);
        // BUGFIX: previously execution continued after announcing the abort.
        process.exit(1);
    }
    o.debug && debug('Server Listening at ' + url);
    // BUGFIX: the logger callbacks used to be `m => debug` / `m => error`,
    // which returned the function instead of invoking it (a silent no-op).
    const server = await staticServer(o.buildFolder, o.port, m => o.debug && debug(m), m => o.debug && error(m));
    o.debug && debug(`Browser launching ${!o.noHeadless ? 'headless' : 'not headless'}`);
    const browser = await puppeteer.launch({ headless: !o.noHeadless });
    const page = await browser.newPage();
    // Mirror the page's console into this terminal (errors always, the rest
    // only in debug mode).
    page.on('console', e => {
        // BUGFIX: locationMsg was an implicit global; declare it locally.
        const locationMsg = formatMessage(`${e.location().url}:${e.location().lineNumber}:${e.location().columnNumber}`);
        if (e.type() === 'error') {
            console.log(colors.red(formatMessage('' + e.text(), `-- ERROR:${locationMsg}: `)));
        }
        else if (o.debug) {
            console.log(colors.grey(formatMessage('' + e.text(), `-- ${locationMsg}: `)));
        }
    });
    o.debug && debug(`Opening page address ${url}`);
    await page.goto(url);
    // Block until the QUnit result banner reports "Tests completed ...".
    await page.waitForFunction(() => (document.querySelector(`#qunit-testresult`) && document.querySelector(`#qunit-testresult`).textContent || '').trim().toLowerCase().startsWith('tests completed'));
    const text = await getText(`#qunit-testresult`);
    if (!text) {
        return await fail(`An error occurred extracting test results. Check the build folder ${o.buildFolder} is correct and has build with tests enabled.`);
    }
    o.debug && debug(colors.blackBright("* UserAgent: " + await getText('#qunit-userAgent')));
    const testFailed = !text.includes(' 0 failed');
    if (testFailed && !o.debug) {
        process.stdout.write(colors.grey("* Use '--debug' parameter to see details of failed tests.\n"));
    }
    // Screenshot when explicitly requested, or by default on failure.
    if (o.screenshot || (o.screenshot === undefined && testFailed)) {
        // BUGFIX: fullPage expects a boolean, not the string 'true'.
        await page.screenshot({ path: 'screenshot.png', fullPage: true });
        process.stdout.write(colors.grey(`* Screenshot taken: ${o.buildFolder}/screenshot.png\n`));
    }
    if (testFailed) {
        const report = await failReport();
        process.stdout.write(`
${colors.red.bold.underline('Failed tests ! :(')}

${colors.redBright(colors.symbols.cross + ' ' + report.join(`\n${colors.symbols.cross} `))}

${colors.redBright(`=== Summary ===\n${text}`)}
`);
    }
    else {
        process.stdout.write(colors.green(`
${colors.symbols.check} No Errors :)
=== Summary ===\n${text}
`));
    }
    if (o.noExit) {
        // Keep the server and browser alive indefinitely for manual debugging.
        while (true) {
            await new Promise(r => setTimeout(r, 5000));
        }
    }
    // BUGFIX: `await server && server.close()` awaited the server object
    // itself and left close() dangling; close it explicitly instead.
    if (server) {
        server.close();
    }
    await browser.close();
    process.exit(testFailed ? 1 : 0);

    // Returns the trimmed innerText of the first element matching selector s,
    // or '' when the element is absent.
    async function getText(s) {
        // BUGFIX: the original trimmed the empty-string literal
        // (`|| ''.trim()`) instead of the extracted text.
        return await page.evaluate((s) => ((document.querySelector(s) && document.querySelector(s).innerText) || '').trim(), s);
    }
    // Scrapes the QUnit DOM for failed assertions and formats one line per
    // failure: "module: test (source location)".
    async function failReport() {
        const failures = await page.evaluate(() => Array.from(document.querySelectorAll('#qunit-tests .fail')).filter(e => e.querySelector('.module-name')).map(e => ({
            moduleName: e.querySelector('.module-name') && e.querySelector('.module-name').textContent,
            testName: e.querySelector('.test-name') && e.querySelector('.test-name').textContent,
            expected: e.querySelector('.test-expected pre') && e.querySelector('.test-expected pre').textContent,
            actual: e.querySelector('.test-actual pre') && e.querySelector('.test-actual pre').textContent,
            code: e.querySelector('.test-source') && e.querySelector('.test-source').textContent.replace("Source: at ", ""),
        })));
        return failures.map(f => `${f.moduleName}: ${f.testName} (${formatMessage(f.code)})`);
    }
    // Prints the failure message (plus an optional screenshot) and exits 1.
    async function fail(s) {
        await failReport();
        process.stdout.write(colors.red(s) + '\n');
        if (o.screenshot || o.screenshot === undefined) {
            // BUGFIX: fullPage expects a boolean, not the string 'true'.
            await page.screenshot({ path: 'screenshot.png', fullPage: true });
            process.stdout.write(colors.grey(`* Screenshot taken: ${o.buildFolder}/screenshot.png\n`));
        }
        process.exit(1);
    }
    async function debug(s) {
        process.stdout.write(s + '\n');
    }
    async function error(s) {
        process.stdout.write(s + '\n');
    }
    // Prefixes each line of `message` and rewrites server URLs back to local
    // build-folder paths for readable output.
    function formatMessage(message, prefix) {
        prefix = prefix || '';
        return prefix + ('' + message).split('\n').map(l => l.replace(serverAddress, o.buildFolder)).join('\n' + prefix);
    }
}
// Prints usage information for the puppeteer test runner and exits 0.
// BUGFIX: repaired the garbled sentence "But it could used to debug ...
// global tool or targeting external servers" in the help text.
function printHelpAndExit() {
    console.log(`
Usage:

  # First, remember to build opencv.js with tests enabled:
  ${colors.blueBright(`python ./platforms/js/build_js.py build_js --build_test`)}

  # Install the tool locally (needed only once) and run it
  ${colors.blueBright(`cd build_js/bin`)}
  ${colors.blueBright(`npm install`)}
  ${colors.blueBright(`node run_puppeteer`)}

By default it will run a headless browser silently printing a small report in the terminal.
But it can also be used to debug the tests in the browser, take screenshots, run as a global tool
or target external servers exposing the tests.

TIP: you could install the tool globally (npm install --global build_js/bin) to execute it from any local folder.

# Options

 * port?: number. Default 8080
 * buildFolder?: string. Default __dirname (this folder)
 * debug?: boolean. Default false
 * noHeadless?: boolean. Default false
 * serverPrefix?: string . Default http://localhost
 * help?: boolean
 * screenshot?: boolean . Make screenshot on failure by default. Use --no-screenshot to disable screenshots completely.
 * noExit?: boolean default false. If true it will keep running the server - together with noHeadless you can debug in the browser.
 * noTryCatch?: boolean will disable Qunit tryCatch - so exceptions are dump to stdout rather than in the browser.
 * maxBlockDuration: QUnit timeout. If noExit is given then is infinity.
`);
    process.exit(0);
}
// Minimal static file server rooted at `basePath`, listening on `port`.
// onFound/onNotFound are optional callbacks invoked with each resolved URL.
// Resolves with the http.Server once it is actually listening.
async function staticServer(basePath, port, onFound, onNotFound) {
    // BUGFIX: the Promise executor was declared `async`, which silently
    // swallows synchronous exceptions; a plain executor keeps them observable.
    return new Promise((resolve) => {
        const server = http.createServer((req, res) => {
            const url = resolveUrl(req.url);
            onFound && onFound(url);
            // Stream the file; a read error (e.g. missing file) yields a 404.
            const stream = fs.createReadStream(path.join(basePath, url || ''));
            stream.on('error', function () {
                onNotFound && onNotFound(url);
                res.writeHead(404);
                res.end();
            });
            stream.pipe(res);
        }).listen(port);
        server.on('listening', () => {
            resolve(server);
        });
    });

    // Strips the query string and fragment from a request URL.
    // Uses slice() instead of the deprecated String.prototype.substr.
    function resolveUrl(url = '') {
        const queryPos = url.indexOf('?');
        if (queryPos !== -1) {
            url = url.slice(0, queryPos);
        }
        const hashPos = url.indexOf('#');
        if (hashPos !== -1) {
            url = url.slice(0, hashPos);
        }
        return url;
    }
}

View File

@ -15,32 +15,41 @@
color: #0040ff;
}
</style>
</head>
<body>
<div id="qunit"></div>
<div id="qunit-fixture"></div>
<script src="http://code.jquery.com/qunit/qunit-2.0.1.js"></script>
<script type="application/javascript" async src="opencv.js"></script>
<script type="application/javascript" src="test_mat.js"></script>
<script type="application/javascript" src="test_utils.js"></script>
<script type="application/javascript" src="test_imgproc.js"></script>
<script type="application/javascript" src="test_objdetect.js"></script>
<script type="application/javascript" src="test_video.js"></script>
<script type="application/javascript" src="test_photo.js"></script>
<script type="application/javascript" src="test_features2d.js"></script>
<script type="application/javascript" src="test_calib3d.js"></script>
<script type='text/javascript'>
<script type="text/javascript">
QUnit.config.autostart = false;
QUnit.log(function(details) {
if (details.result) {
return;
}
var loc = details.module + ": " + details.name + ": ",
output = "FAILED: " + loc + ( details.message ? details.message : "" )
prefix = details.message ? ", " : "";
if (details.actual) {
output += prefix + "expected: " + details.expected + ", actual: " + details.actual;
prefix = ', ';
}
if (details.source) {
output += prefix + details.source;
}
console.warn(output);
});
QUnit.done(function(details) {
console.log("Total: " + details.total + " Failed: " + details.failed + " Passed: " + details.passed);
console.log("Time(ms): " + details.runtime);
});
// Helper for opencv.js (see below)
var Module = {
preRun: [function() {
Module.FS_createPreloadedFile('/', 'haarcascade_frontalface_default.xml', 'haarcascade_frontalface_default.xml', true, false);
}],
postRun: [] ,
onRuntimeInitialized: function() {
console.log("Runtime is ready...");
console.log("Emscripten runtime is ready, launching QUnit tests...");
//console.log(cv.getBuildInformation());
QUnit.start();
},
print: (function() {
@ -55,7 +64,7 @@
};
})(),
printErr: function(text) {
console.log(text);
console.error(text);
},
setStatus: function(text) {
console.log(text);
@ -70,6 +79,31 @@
if (text) Module.printErr('[post-exception status] ' + text);
};
};
function opencvjs_LoadError() {
Module.printErr('Failed to load/initialize opencv.js');
QUnit.module('LoaderFatalError', {});
QUnit.config.module = 'LoaderFatalError';
QUnit.only("Failed to load OpenCV.js", function(assert) {
assert.ok(false, "Can't load/initialize opencv.js");
});
QUnit.start();
}
</script>
</head>
<body>
<div id="qunit"></div>
<div id="qunit-fixture"></div>
<script type="application/javascript" async src="opencv.js" onerror="opencvjs_LoadError()"></script>
<script type="application/javascript" src="test_mat.js"></script>
<script type="application/javascript" src="test_utils.js"></script>
<script type="application/javascript" src="test_imgproc.js"></script>
<script type="application/javascript" src="test_objdetect.js"></script>
<script type="application/javascript" src="test_video.js"></script>
<script type="application/javascript" src="test_photo.js"></script>
<script type="application/javascript" src="test_features2d.js"></script>
<script type="application/javascript" src="test_calib3d.js"></script>
</body>
</html>

View File

@ -50,7 +50,7 @@ class facedetect_test(NewOpenCVTests):
img = self.get_sample( sample)
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
gray = cv.GaussianBlur(gray, (5, 5), 5.1)
gray = cv.GaussianBlur(gray, (5, 5), 0)
rects = detect(gray, cascade)
faces.append(rects)

View File

@ -428,6 +428,9 @@ protected:
// updates progress bar
virtual int update_progress( int progress, int test_case_idx, int count, double dt );
// dump test case input parameters
virtual void dump_test_case(int test_case_idx, std::ostream* out);
// finds test parameter
cv::FileNode find_param( const cv::FileStorage& fs, const char* param_name );

View File

@ -350,8 +350,14 @@ void BaseTest::run( int start_from )
return;
if( validate_test_results( test_case_idx ) < 0 || ts->get_err_code() < 0 )
{
std::stringstream ss;
dump_test_case(test_case_idx, &ss);
std::string s = ss.str();
ts->printf( TS::LOG, "%s", s.c_str());
return;
}
}
}
@ -401,6 +407,12 @@ int BaseTest::update_progress( int progress, int test_case_idx, int count, doubl
}
// Default failure-log dump: records only the index of the failing test case.
// Subclasses override this to append their own parameters (see the
// dump_test_case overrides in the filter/smooth tests).
void BaseTest::dump_test_case(int test_case_idx, std::ostream* out)
{
    *out << "test_case_idx = " << test_case_idx << std::endl;
}
BadArgTest::BadArgTest()
{
test_case_idx = -1;

View File

@ -40,6 +40,7 @@ if __name__ == "__main__":
parser.add_argument('--opencv', metavar='DIR', default=folder, help='folder with opencv repository (default is "../.." relative to script location)')
parser.add_argument('--contrib', metavar='DIR', default=None, help='folder with opencv_contrib repository (default is "None" - build only main framework)')
parser.add_argument('--without', metavar='MODULE', default=[], action='append', help='OpenCV modules to exclude from the framework')
parser.add_argument('--disable', metavar='FEATURE', default=[], action='append', help='OpenCV features to disable (add WITH_*=OFF)')
parser.add_argument('--enable_nonfree', default=False, dest='enablenonfree', action='store_true', help='enable non-free modules (disabled by default)')
parser.add_argument('--macosx_deployment_target', default=os.environ.get('MACOSX_DEPLOYMENT_TARGET', MACOSX_DEPLOYMENT_TARGET), help='specify MACOSX_DEPLOYMENT_TARGET')
parser.add_argument('--debug', action='store_true', help='Build "Debug" binaries (CMAKE_BUILD_TYPE=Debug)')
@ -50,7 +51,7 @@ if __name__ == "__main__":
os.environ['MACOSX_DEPLOYMENT_TARGET'] = args.macosx_deployment_target
print('Using MACOSX_DEPLOYMENT_TARGET=' + os.environ['MACOSX_DEPLOYMENT_TARGET'])
b = OSXBuilder(args.opencv, args.contrib, False, False, args.without, args.enablenonfree,
b = OSXBuilder(args.opencv, args.contrib, False, False, args.without, args.disable, args.enablenonfree,
[
(["x86_64"], "MacOSX")
], args.debug, args.debug_info)