mirror of
https://github.com/opencv/opencv.git
synced 2025-08-06 06:26:29 +08:00
Merge pull request #25324 from kaingwade:clean_haarcascades_jsbindings
Fix broken js build after moving HaarCascades to contrib #25324
The HaarCascades-related code was not completely cleaned up in #25311 after #25198, which broke the JavaScript build. This PR fixes the issue. Related PR: opencv/opencv_contrib#3712
This commit is contained in:
parent 72ad06bcf3
commit 94f4678d3a
@@ -251,12 +251,6 @@ if(DOXYGEN_FOUND)
     list(APPEND js_assets "${OPENCV_JS_LOCATION}")
   endif()
 
-  # copy haar cascade files
-  # set(haar_cascade_files "")
-  # set(data_harrcascades_path "${OpenCV_SOURCE_DIR}/data/haarcascades/")
-  # list(APPEND js_tutorials_assets_deps "${data_harrcascades_path}/haarcascade_frontalface_default.xml" "${data_harrcascades_path}/haarcascade_eye.xml")
-  # list(APPEND js_assets "${data_harrcascades_path}/haarcascade_frontalface_default.xml" "${data_harrcascades_path}/haarcascade_eye.xml")
-
   foreach(f ${js_assets})
     get_filename_component(fname "${f}" NAME)
     add_custom_command(OUTPUT "${opencv_tutorial_html_dir}/${fname}"
@@ -1,100 +0,0 @@ (entire file deleted)
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Face Detection Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Face Detection Example</h2>
<p>
    &lt;canvas&gt; elements named <b>canvasInput</b> and <b>canvasOutput</b> have been prepared.<br>
    Click <b>Try it</b> button to see the result. You can choose another image.<br>
    You can change the code in the &lt;textarea&gt; to investigate more.
</p>
<div>
<div class="control"><button id="tryIt" disabled>Try it</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false">
</textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
    <td>
        <canvas id="canvasInput"></canvas>
    </td>
    <td>
        <canvas id="canvasOutput"></canvas>
    </td>
</tr>
<tr>
    <td>
        <div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
    </td>
    <td>
        <div class="caption">canvasOutput</div>
    </td>
</tr>
</table>
</div>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let gray = new cv.Mat();
cv.cvtColor(src, gray, cv.COLOR_RGBA2GRAY, 0);
let faces = new cv.RectVector();
let eyes = new cv.RectVector();
let faceCascade = new cv.CascadeClassifier();
let eyeCascade = new cv.CascadeClassifier();
// load pre-trained classifiers
faceCascade.load('haarcascade_frontalface_default.xml');
eyeCascade.load('haarcascade_eye.xml');
// detect faces
let msize = new cv.Size(0, 0);
faceCascade.detectMultiScale(gray, faces, 1.1, 3, 0, msize, msize);
for (let i = 0; i < faces.size(); ++i) {
    let roiGray = gray.roi(faces.get(i));
    let roiSrc = src.roi(faces.get(i));
    let point1 = new cv.Point(faces.get(i).x, faces.get(i).y);
    let point2 = new cv.Point(faces.get(i).x + faces.get(i).width,
                              faces.get(i).y + faces.get(i).height);
    cv.rectangle(src, point1, point2, [255, 0, 0, 255]);
    // detect eyes in face ROI
    eyeCascade.detectMultiScale(roiGray, eyes);
    for (let j = 0; j < eyes.size(); ++j) {
        let point1 = new cv.Point(eyes.get(j).x, eyes.get(j).y);
        let point2 = new cv.Point(eyes.get(j).x + eyes.get(j).width,
                                  eyes.get(j).y + eyes.get(j).height);
        cv.rectangle(roiSrc, point1, point2, [0, 0, 255, 255]);
    }
    roiGray.delete(); roiSrc.delete();
}
cv.imshow('canvasOutput', src);
src.delete(); gray.delete(); faceCascade.delete();
eyeCascade.delete(); faces.delete(); eyes.delete();
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');

utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('lena.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');

let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
    utils.executeCode('codeEditor');
});

utils.loadOpenCv(() => {
    let eyeCascadeFile = 'haarcascade_eye.xml';
    utils.createFileFromUrl(eyeCascadeFile, eyeCascadeFile, () => {
        let faceCascadeFile = 'haarcascade_frontalface_default.xml';
        utils.createFileFromUrl(faceCascadeFile, faceCascadeFile, () => {
            tryIt.removeAttribute('disabled');
        });
    });
});
</script>
</body>
</html>
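The cascade-based browser demo above is deleted outright by this commit (the HaarCascades functionality itself moved to opencv_contrib; see the related PR). For orientation only, here is a minimal hedged sketch of the same canvas-to-canvas flow using cv.FaceDetectorYN, the YuNet detector that the Node.js tutorial later in this diff switches to. The constructor arguments and the 15-float output layout are copied from that tutorial; the surrounding glue, and the assumption that the model file was already written into the Emscripten FS (e.g. with utils.createFileFromUrl), are ours:

@code{.js}
// Sketch only, not part of this commit. Assumes 'face_detection_yunet_2023mar.onnx'
// already exists in the Emscripten FS (e.g. created via utils.createFileFromUrl).
let src = cv.imread('canvasInput');
let srcBGR = new cv.Mat();
cv.cvtColor(src, srcBGR, cv.COLOR_RGBA2BGR);          // YuNet expects BGR input
let detector = new cv.FaceDetectorYN('face_detection_yunet_2023mar.onnx', '',
                                     new cv.Size(320, 320), 0.9, 0.3, 5000);
detector.setInputSize(new cv.Size(src.cols, src.rows));
let out = new cv.Mat();
detector.detect(srcBGR, out);
// Each detection is one row of 15 floats: x, y, w, h, five landmark pairs, confidence.
for (let i = 0; i < out.data32F.length; i += 15) {
    let [x, y, w, h] = out.data32F.slice(i, i + 4);
    cv.rectangle(src, {x: x, y: y}, {x: x + w, y: y + h}, [255, 0, 0, 255]);
}
cv.imshow('canvasOutput', src);
src.delete(); srcBGR.delete(); out.delete(); detector.delete();
@endcode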
@@ -1,142 +0,0 @@ (entire file deleted)
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Face Detection Camera Example</title>
<link href="js_example_style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>Face Detection Camera Example</h2>
<p>
    Click <b>Start/Stop</b> button to start or stop the camera capture.<br>
    The <b>videoInput</b> is a &lt;video&gt; element used as face detector input.
    The <b>canvasOutput</b> is a &lt;canvas&gt; element used as face detector output.<br>
    The code of &lt;textarea&gt; will be executed when video is started.
    You can modify the code to investigate more.
</p>
<div>
<div class="control"><button id="startAndStop" disabled>Start</button></div>
<textarea class="code" rows="29" cols="80" id="codeEditor" spellcheck="false">
</textarea>
</div>
<p class="err" id="errorMessage"></p>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
    <td>
        <video id="videoInput" width=320 height=240></video>
    </td>
    <td>
        <canvas id="canvasOutput" width=320 height=240></canvas>
    </td>
    <td></td>
    <td></td>
</tr>
<tr>
    <td>
        <div class="caption">videoInput</div>
    </td>
    <td>
        <div class="caption">canvasOutput</div>
    </td>
    <td></td>
    <td></td>
</tr>
</table>
</div>
<script src="https://webrtc.github.io/adapter/adapter-5.0.4.js" type="text/javascript"></script>
<script src="utils.js" type="text/javascript"></script>
<script id="codeSnippet" type="text/code-snippet">
let video = document.getElementById('videoInput');
let src = new cv.Mat(video.height, video.width, cv.CV_8UC4);
let dst = new cv.Mat(video.height, video.width, cv.CV_8UC4);
let gray = new cv.Mat();
let cap = new cv.VideoCapture(video);
let faces = new cv.RectVector();
let classifier = new cv.CascadeClassifier();

// load pre-trained classifiers
classifier.load('haarcascade_frontalface_default.xml');

const FPS = 30;
function processVideo() {
    try {
        if (!streaming) {
            // clean and stop.
            src.delete();
            dst.delete();
            gray.delete();
            faces.delete();
            classifier.delete();
            return;
        }
        let begin = Date.now();
        // start processing.
        cap.read(src);
        src.copyTo(dst);
        cv.cvtColor(dst, gray, cv.COLOR_RGBA2GRAY, 0);
        // detect faces.
        classifier.detectMultiScale(gray, faces, 1.1, 3, 0);
        // draw faces.
        for (let i = 0; i < faces.size(); ++i) {
            let face = faces.get(i);
            let point1 = new cv.Point(face.x, face.y);
            let point2 = new cv.Point(face.x + face.width, face.y + face.height);
            cv.rectangle(dst, point1, point2, [255, 0, 0, 255]);
        }
        cv.imshow('canvasOutput', dst);
        // schedule the next one.
        let delay = 1000/FPS - (Date.now() - begin);
        setTimeout(processVideo, delay);
    } catch (err) {
        utils.printError(err);
    }
};

// schedule the first one.
setTimeout(processVideo, 0);
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');

utils.loadCode('codeSnippet', 'codeEditor');

let streaming = false;
let videoInput = document.getElementById('videoInput');
let startAndStop = document.getElementById('startAndStop');
let canvasOutput = document.getElementById('canvasOutput');
let canvasContext = canvasOutput.getContext('2d');

startAndStop.addEventListener('click', () => {
    if (!streaming) {
        utils.clearError();
        utils.startCamera('qvga', onVideoStarted, 'videoInput');
    } else {
        utils.stopCamera();
        onVideoStopped();
    }
});

function onVideoStarted() {
    streaming = true;
    startAndStop.innerText = 'Stop';
    videoInput.width = videoInput.videoWidth;
    videoInput.height = videoInput.videoHeight;
    utils.executeCode('codeEditor');
}

function onVideoStopped() {
    streaming = false;
    canvasContext.clearRect(0, 0, canvasOutput.width, canvasOutput.height);
    startAndStop.innerText = 'Start';
}

utils.loadOpenCv(() => {
    let faceCascadeFile = 'haarcascade_frontalface_default.xml';
    utils.createFileFromUrl(faceCascadeFile, faceCascadeFile, () => {
        startAndStop.removeAttribute('disabled');
    });
});
</script>
</body>
</html>
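The camera demo is removed the same way. A similarly hedged sketch of what the per-frame detection inside processVideo could look like with YuNet instead of the cascade — it reuses video, cap, src and dst from the deleted snippet above, and nothing in it is this commit's code:

@code{.js}
// Sketch only: the detector construction mirrors the Node.js tutorial below.
let detector = new cv.FaceDetectorYN('face_detection_yunet_2023mar.onnx', '',
                                     new cv.Size(320, 320), 0.9, 0.3, 5000);
detector.setInputSize(new cv.Size(video.width, video.height)); // once, not per frame
let bgr = new cv.Mat();
let out = new cv.Mat();
function detectFrame() {
    cap.read(src);
    src.copyTo(dst);
    cv.cvtColor(dst, bgr, cv.COLOR_RGBA2BGR);  // YuNet expects BGR input
    detector.detect(bgr, out);
    for (let i = 0; i < out.data32F.length; i += 15) {
        let [x, y, w, h] = out.data32F.slice(i, i + 4);
        cv.rectangle(dst, {x: x, y: y}, {x: x + w, y: y + h}, [255, 0, 0, 255]);
    }
    cv.imshow('canvasOutput', dst);
}
@endcode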
BIN  doc/js_tutorials/js_assets/lena_yunet.jpg  (new binary file, 29 KiB; not shown)
@@ -227,52 +227,128 @@ In the browser, this filesystem is emulated in memory while in Node.js there's a
 
 ### The example ###
 
 The following is an adaptation of @ref tutorial_js_face_detection.
 
 @code{.js}
 const { Canvas, createCanvas, Image, ImageData, loadImage } = require('canvas');
 const { JSDOM } = require('jsdom');
 const { writeFileSync, existsSync, mkdirSync } = require('fs');
+const https = require('https');
 
-(async () => {
-  await loadOpenCV();
-
-  const image = await loadImage('lena.jpg');
-  const src = cv.imread(image);
-  let gray = new cv.Mat();
-  cv.cvtColor(src, gray, cv.COLOR_RGBA2GRAY, 0);
-  let faces = new cv.RectVector();
-  let eyes = new cv.RectVector();
-  let faceCascade = new cv.CascadeClassifier();
-  let eyeCascade = new cv.CascadeClassifier();
-
-  // Load pre-trained classifier files. Notice how we reference local files using relative paths just
-  // like we normally would do
-  faceCascade.load('./haarcascade_frontalface_default.xml');
-  eyeCascade.load('./haarcascade_eye.xml');
-
-  let mSize = new cv.Size(0, 0);
-  faceCascade.detectMultiScale(gray, faces, 1.1, 3, 0, mSize, mSize);
-  for (let i = 0; i < faces.size(); ++i) {
-    let roiGray = gray.roi(faces.get(i));
-    let roiSrc = src.roi(faces.get(i));
-    let point1 = new cv.Point(faces.get(i).x, faces.get(i).y);
-    let point2 = new cv.Point(faces.get(i).x + faces.get(i).width, faces.get(i).y + faces.get(i).height);
-    cv.rectangle(src, point1, point2, [255, 0, 0, 255]);
-    eyeCascade.detectMultiScale(roiGray, eyes);
-    for (let j = 0; j < eyes.size(); ++j) {
-      let point1 = new cv.Point(eyes.get(j).x, eyes.get(j).y);
-      let point2 = new cv.Point(eyes.get(j).x + eyes.get(j).width, eyes.get(j).y + eyes.get(j).height);
-      cv.rectangle(roiSrc, point1, point2, [0, 0, 255, 255]);
-    }
-    roiGray.delete();
-    roiSrc.delete();
-  }
-
-  const canvas = createCanvas(image.width, image.height);
-  cv.imshow(canvas, src);
-  writeFileSync('output3.jpg', canvas.toBuffer('image/jpeg'));
-  src.delete(); gray.delete(); faceCascade.delete(); eyeCascade.delete(); faces.delete(); eyes.delete()
-})();
+const createFileFromUrl = function (path, url, maxRedirects = 10) {
+  console.log('Downloading ' + url + '...');
+  return new Promise((resolve, reject) => {
+    const download = (url, redirectCount) => {
+      if (redirectCount > maxRedirects) {
+        reject(new Error('Too many redirects'));
+      } else {
+        let connection = https.get(url, (response) => {
+          if (response.statusCode === 200) {
+            let data = [];
+            response.on('data', (chunk) => {
+              data.push(chunk);
+            });
+            response.on('end', () => {
+              try {
+                writeFileSync(path, Buffer.concat(data));
+                resolve();
+              } catch (err) {
+                reject(new Error('Failed to write file ' + path));
+              }
+            });
+          } else if (response.statusCode === 302 || response.statusCode === 301) {
+            connection.abort();
+            download(response.headers.location, redirectCount + 1);
+          } else {
+            reject(new Error('Failed to load ' + url + ' status: ' + response.statusCode));
+          }
+        }).on('error', (err) => {
+          reject(new Error('Network Error: ' + err.message));
+        });
+      }
+    };
+    download(url, 0);
+  });
+};
+
+(async () => {
+  if (!existsSync('./face_detection_yunet_2023mar.onnx')) {
+    await createFileFromUrl('./face_detection_yunet_2023mar.onnx', 'https://media.githubusercontent.com/media/opencv/opencv_zoo/main/models/face_detection_yunet/face_detection_yunet_2023mar.onnx')
+  }
+
+  if (!existsSync('./opencv.js')) {
+    await createFileFromUrl('./opencv.js', 'https://docs.opencv.org/5.x/opencv.js')
+  }
+
+  if (!existsSync('./lena.jpg')) {
+    await createFileFromUrl('./lena.jpg', 'https://docs.opencv.org/5.x/lena.jpg')
+  }
+
+  await loadOpenCV();
+
+  const image = await loadImage('./lena.jpg');
+  const src = cv.imread(image);
+  let srcBGR = new cv.Mat();
+  cv.cvtColor(src, srcBGR, cv.COLOR_RGBA2BGR);
+
+  // Load the deep learning model file. Notice how we reference local files using relative paths just
+  // like we normally would do
+  let netDet = new cv.FaceDetectorYN("./face_detection_yunet_2023mar.onnx", "", new cv.Size(320, 320), 0.9, 0.3, 5000);
+  netDet.setInputSize(new cv.Size(src.cols, src.rows));
+  let out = new cv.Mat();
+  netDet.detect(srcBGR, out);
+
+  let faces = [];
+  for (let i = 0, n = out.data32F.length; i < n; i += 15) {
+    let left = out.data32F[i];
+    let top = out.data32F[i + 1];
+    let right = (out.data32F[i] + out.data32F[i + 2]);
+    let bottom = (out.data32F[i + 1] + out.data32F[i + 3]);
+    left = Math.min(Math.max(0, left), src.cols - 1);
+    top = Math.min(Math.max(0, top), src.rows - 1);
+    right = Math.min(Math.max(0, right), src.cols - 1);
+    bottom = Math.min(Math.max(0, bottom), src.rows - 1);
+
+    if (left < right && top < bottom) {
+      faces.push({
+        x: left,
+        y: top,
+        width: right - left,
+        height: bottom - top,
+        x1: out.data32F[i + 4] < 0 || out.data32F[i + 4] > src.cols - 1 ? -1 : out.data32F[i + 4],
+        y1: out.data32F[i + 5] < 0 || out.data32F[i + 5] > src.rows - 1 ? -1 : out.data32F[i + 5],
+        x2: out.data32F[i + 6] < 0 || out.data32F[i + 6] > src.cols - 1 ? -1 : out.data32F[i + 6],
+        y2: out.data32F[i + 7] < 0 || out.data32F[i + 7] > src.rows - 1 ? -1 : out.data32F[i + 7],
+        x3: out.data32F[i + 8] < 0 || out.data32F[i + 8] > src.cols - 1 ? -1 : out.data32F[i + 8],
+        y3: out.data32F[i + 9] < 0 || out.data32F[i + 9] > src.rows - 1 ? -1 : out.data32F[i + 9],
+        x4: out.data32F[i + 10] < 0 || out.data32F[i + 10] > src.cols - 1 ? -1 : out.data32F[i + 10],
+        y4: out.data32F[i + 11] < 0 || out.data32F[i + 11] > src.rows - 1 ? -1 : out.data32F[i + 11],
+        x5: out.data32F[i + 12] < 0 || out.data32F[i + 12] > src.cols - 1 ? -1 : out.data32F[i + 12],
+        y5: out.data32F[i + 13] < 0 || out.data32F[i + 13] > src.rows - 1 ? -1 : out.data32F[i + 13],
+        confidence: out.data32F[i + 14]
+      })
+    }
+  }
+  out.delete();
+
+  faces.forEach(function(rect) {
+    cv.rectangle(src, {x: rect.x, y: rect.y}, {x: rect.x + rect.width, y: rect.y + rect.height}, [0, 255, 0, 255]);
+    if(rect.x1>0 && rect.y1>0)
+      cv.circle(src, {x: rect.x1, y: rect.y1}, 2, [255, 0, 0, 255], 2)
+    if(rect.x2>0 && rect.y2>0)
+      cv.circle(src, {x: rect.x2, y: rect.y2}, 2, [0, 0, 255, 255], 2)
+    if(rect.x3>0 && rect.y3>0)
+      cv.circle(src, {x: rect.x3, y: rect.y3}, 2, [0, 255, 0, 255], 2)
+    if(rect.x4>0 && rect.y4>0)
+      cv.circle(src, {x: rect.x4, y: rect.y4}, 2, [255, 0, 255, 255], 2)
+    if(rect.x5>0 && rect.y5>0)
+      cv.circle(src, {x: rect.x5, y: rect.y5}, 2, [0, 255, 255, 255], 2)
+  });
+
+  const canvas = createCanvas(image.width, image.height);
+  cv.imshow(canvas, src);
+  writeFileSync('output3.jpg', canvas.toBuffer('image/jpeg'));
+  console.log('The result is saved.')
+  src.delete(); srcBGR.delete();
+})();
 
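Since the new example indexes out.data32F by hand, the layout is worth spelling out: detect() fills out with one row of 15 floats per face — the bounding box (x, y, w, h), five (x, y) landmark points, then the confidence score. An equivalent, more explicit reader of the same buffer (the helper name is ours, not the tutorial's):

@code{.js}
// Equivalent to the parsing loop above: one detection per 15 floats.
function readDetections(out) {
    const detections = [];
    for (let i = 0; i < out.data32F.length; i += 15) {
        detections.push({
            box: Array.from(out.data32F.slice(i, i + 4)),            // x, y, width, height
            landmarks: Array.from(out.data32F.slice(i + 4, i + 14)), // five (x, y) pairs
            confidence: out.data32F[i + 14]
        });
    }
    return detections;
}
@endcode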
 /**
@@ -287,7 +363,7 @@ const { writeFileSync, existsSync, mkdirSync } = require('fs');
  */
 function loadOpenCV(rootDir = '/work', localRootDir = process.cwd()) {
   if(global.Module && global.Module.onRuntimeInitialized && global.cv && global.cv.imread) {
-    Promise.resolve()
+    return Promise.resolve()
   }
   return new Promise(resolve => {
     installDOM()
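The hunk above is cut off after installDOM(). For context, a hedged sketch of how the body of that Promise plausibly continues — reconstructed from standard Emscripten conventions (Module.onRuntimeInitialized fires once the WASM runtime is ready), not from text in this diff:

@code{.js}
// Inside the `new Promise(resolve => { ... })` shown above (sketch only):
installDOM()
global.Module = {
    onRuntimeInitialized() {
        resolve()  // opencv.js calls this once the runtime is usable
    }
}
global.cv = require('./opencv.js')  // loading the build populates global.cv
@endcode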
@@ -333,13 +409,12 @@ function installDOM(){
 ### Execute it ###
 
 - Save the file as `exampleNodeCanvasData.js`.
-- Make sure the files `haarcascade_frontalface_default.xml` and `haarcascade_eye.xml` are present in the project's directory. They can be obtained from [OpenCV sources](https://github.com/opencv/opencv/tree/5.x/data/haarcascades).
-- Make sure a sample image file `lena.jpg` exists in the project's directory. It should display people's faces for this example to make sense. The following image is known to work:
+- The files `face_detection_yunet_2023mar.onnx`, `lena.jpg` and `opencv.js` will be downloaded if they are not present in the project's directory.
 
-
-
-The following command should generate the file `output3.jpg`:
+The following command should generate the file `output3.jpg`, which should look like the image below:
 
 @code{.bash}
 node exampleNodeCanvasData.js
 @endcode
 
+
@@ -112,17 +112,6 @@ foreach(f ${test_files})
   list(APPEND opencv_test_js_file_deps "${test_dir}/${f}" "${opencv_test_js_bin_dir}/${f}")
 endforeach()
 
-# copy test data
-set(test_data "haarcascade_frontalface_default.xml")
-set(test_data_path "${PROJECT_SOURCE_DIR}/../../data/haarcascades/${test_data}")
-
-add_custom_command(OUTPUT "${opencv_test_js_bin_dir}/${test_data}"
-                   COMMAND ${CMAKE_COMMAND} -E copy_if_different "${test_data_path}" "${opencv_test_js_bin_dir}/${test_data}"
-                   DEPENDS "${test_data_path}"
-                   COMMENT "Copying ${test_data}"
-)
-list(APPEND opencv_test_js_file_deps "${test_data_path}" "${opencv_test_js_bin_dir}/${test_data}")
-
 add_custom_target(${PROJECT_NAME}_test
                   DEPENDS ${OCV_JS_PATH} ${opencv_test_js_file_deps})
 
@@ -43,9 +43,7 @@
 
 // Helper for opencv.js (see below)
 var Module = {
-  preRun: [function() {
-    Module.FS_createPreloadedFile('/', 'haarcascade_frontalface_default.xml', 'haarcascade_frontalface_default.xml', true, false);
-  }],
+  preRun: [],
   postRun: [],
   onRuntimeInitialized: function() {
     console.log("Emscripten runtime is ready, launching QUnit tests...");