Mirror of https://github.com/opencv/opencv.git (synced 2025-07-25 14:47:07 +08:00)

Merge pull request #26934 from BenjaminKnecht/new_4.x

Extend ArUcoDetector to run multiple dictionaries in an efficient manner.

Commit: d9956fc24f
@@ -285,6 +285,16 @@ public:
                  const DetectorParameters &detectorParams = DetectorParameters(),
                  const RefineParameters& refineParams = RefineParameters());

    /** @brief ArucoDetector constructor for multiple dictionaries
     *
     * @param dictionaries indicates the type of markers that will be searched. Empty dictionaries will throw an error.
     * @param detectorParams marker detection parameters
     * @param refineParams marker refine detection parameters
     */
    CV_WRAP ArucoDetector(const std::vector<Dictionary> &dictionaries,
                          const DetectorParameters &detectorParams = DetectorParameters(),
                          const RefineParameters& refineParams = RefineParameters());
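A minimal usage sketch for the new multi-dictionary constructor (the dictionary choices below are illustrative only, not part of this change):

    #include <opencv2/objdetect/aruco_detector.hpp>
    #include <vector>

    int main() {
        std::vector<cv::aruco::Dictionary> dicts = {
            cv::aruco::getPredefinedDictionary(cv::aruco::DICT_4X4_50),
            cv::aruco::getPredefinedDictionary(cv::aruco::DICT_5X5_100)
        };
        // One detector instance now searches for markers from both dictionaries.
        cv::aruco::ArucoDetector detector(dicts);
        return 0;
    }
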
    /** @brief Basic marker detection
     *
     * @param image input image

@@ -297,7 +307,7 @@ public:
     * @param rejectedImgPoints contains the imgPoints of those squares whose inner code does not have a
     * correct codification. Useful for debugging purposes.
     *
     * Performs marker detection in the input image. Only markers included in the specific dictionary
     * Performs marker detection in the input image. Only markers included in the first specified dictionary
     * are searched. For each detected marker, it returns the 2D position of its corner in the image
     * and its corresponding identifier.
     * Note that this function does not perform pose estimation.

@@ -329,6 +339,8 @@ public:
     * If camera parameters and distortion coefficients are provided, missing markers are reprojected
     * using the projectPoint function. If not, missing marker projections are interpolated using global
     * homography, and all the marker corners in the board must have the same Z coordinate.
     * @note This function assumes that the board only contains markers from one dictionary, so only the
     * first configured dictionary is used. It has to match the dictionary of the board to work properly.
     */
    CV_WRAP void refineDetectedMarkers(InputArray image, const Board &board,
                                       InputOutputArrayOfArrays detectedCorners,

@@ -336,9 +348,59 @@ public:
                                       InputArray cameraMatrix = noArray(), InputArray distCoeffs = noArray(),
                                       OutputArray recoveredIdxs = noArray()) const;

    /** @brief Basic marker detection
     *
     * @param image input image
     * @param corners vector of detected marker corners. For each marker, its four corners
     * are provided (e.g. std::vector<std::vector<cv::Point2f> >). For N detected markers,
     * the dimensions of this array are Nx4. The order of the corners is clockwise.
     * @param ids vector of identifiers of the detected markers. The identifier is of type int
     * (e.g. std::vector<int>). For N detected markers, the size of ids is also N.
     * The identifiers have the same order as the markers in the imgPoints array.
     * @param rejectedImgPoints contains the imgPoints of those squares whose inner code does not have a
     * correct codification. Useful for debugging purposes.
     * @param dictIndices vector of dictionary indices for each detected marker. Use getDictionaries() to get the
     * list of corresponding dictionaries.
     *
     * Performs marker detection in the input image. Only markers included in the specified dictionaries
     * are searched. For each detected marker, it returns the 2D position of its corner in the image
     * and its corresponding identifier.
     * Note that this function does not perform pose estimation.
     * @note The function does not correct lens distortion or take it into account. It is recommended to undistort
     * the input image with the corresponding camera model, if camera parameters are known.
     * @sa undistort, estimatePoseSingleMarkers, estimatePoseBoard
     */
    CV_WRAP void detectMarkersMultiDict(InputArray image, OutputArrayOfArrays corners, OutputArray ids,
                                        OutputArrayOfArrays rejectedImgPoints = noArray(), OutputArray dictIndices = noArray()) const;
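A usage sketch for detectMarkersMultiDict (the helper name and the assumption of an already undistorted input image are illustrative only):

    #include <opencv2/objdetect/aruco_detector.hpp>
    #include <vector>

    // Run multi-dictionary detection and resolve which dictionary matched each marker.
    static void detectWithAllDictionaries(const cv::aruco::ArucoDetector& detector, const cv::Mat& image) {
        std::vector<std::vector<cv::Point2f> > corners, rejected;
        std::vector<int> ids, dictIndices;
        detector.detectMarkersMultiDict(image, corners, ids, rejected, dictIndices);

        const std::vector<cv::aruco::Dictionary> dicts = detector.getDictionaries();
        for (size_t i = 0; i < ids.size(); i++) {
            // dictIndices[i] indexes into dicts; ids[i] is the marker id within that dictionary.
            const cv::aruco::Dictionary& matched = dicts[dictIndices[i]];
            (void)matched;
        }
    }
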
    /** @brief Returns the first dictionary from the internal list used for marker detection.
     *
     * @return The first dictionary from the configured ArucoDetector.
     */
    CV_WRAP const Dictionary& getDictionary() const;

    /** @brief Sets and replaces the first dictionary in the internal list to be used for marker detection.
     *
     * @param dictionary The new dictionary that will replace the first dictionary in the internal list.
     */
    CV_WRAP void setDictionary(const Dictionary& dictionary);

    /** @brief Returns all dictionaries currently used for marker detection as a vector.
     *
     * @return A std::vector<Dictionary> containing all dictionaries used by the ArucoDetector.
     */
    CV_WRAP std::vector<Dictionary> getDictionaries() const;

    /** @brief Sets the entire collection of dictionaries to be used for marker detection, replacing any existing dictionaries.
     *
     * @param dictionaries A std::vector<Dictionary> containing the new set of dictionaries to be used.
     *
     * Configures the ArucoDetector to use the provided vector of dictionaries for marker detection.
     * This method replaces any dictionaries that were previously set.
     * @note Setting an empty vector of dictionaries will throw an error.
     */
    CV_WRAP void setDictionaries(const std::vector<Dictionary>& dictionaries);
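A short sketch of the accessor semantics described above (dictionary choices are arbitrary):

    #include <opencv2/objdetect/aruco_detector.hpp>
    #include <vector>

    static void dictionaryAccessorsSketch() {
        cv::aruco::ArucoDetector det(std::vector<cv::aruco::Dictionary>{
            cv::aruco::getPredefinedDictionary(cv::aruco::DICT_6X6_100),
            cv::aruco::getPredefinedDictionary(cv::aruco::DICT_7X7_250)});

        // setDictionary() replaces only the first entry; the second stays configured.
        det.setDictionary(cv::aruco::getPredefinedDictionary(cv::aruco::DICT_4X4_50));

        // setDictionaries() replaces the whole list; passing an empty vector throws.
        det.setDictionaries({cv::aruco::getPredefinedDictionary(cv::aruco::DICT_5X5_50)});
    }
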

    CV_WRAP const DetectorParameters& getDetectorParameters() const;
    CV_WRAP void setDetectorParameters(const DetectorParameters& detectorParameters);
@@ -458,5 +458,30 @@ class aruco_objdetect_test(NewOpenCVTests):
        with self.assertRaises(Exception):
            img = cv.aruco.drawDetectedDiamonds(img, points2, borderColor=255)

    def test_multi_dict_arucodetector(self):
        aruco_params = cv.aruco.DetectorParameters()
        aruco_dicts = [
            cv.aruco.getPredefinedDictionary(cv.aruco.DICT_4X4_250),
            cv.aruco.getPredefinedDictionary(cv.aruco.DICT_5X5_250)
        ]
        aruco_detector = cv.aruco.ArucoDetector(aruco_dicts, aruco_params)
        id = 2
        marker_size = 100
        offset = 10
        img_marker1 = cv.aruco.generateImageMarker(aruco_dicts[0], id, marker_size, aruco_params.markerBorderBits)
        img_marker1 = np.pad(img_marker1, pad_width=offset, mode='constant', constant_values=255)
        img_marker2 = cv.aruco.generateImageMarker(aruco_dicts[1], id, marker_size, aruco_params.markerBorderBits)
        img_marker2 = np.pad(img_marker2, pad_width=offset, mode='constant', constant_values=255)
        img_markers = np.concatenate((img_marker1, img_marker2), axis=1)

        corners, ids, rejected, dictIndices = aruco_detector.detectMarkersMultiDict(img_markers)

        self.assertEqual(2, len(ids))
        self.assertEqual(id, ids[0])
        self.assertEqual(id, ids[1])
        self.assertEqual(2, len(dictIndices))
        self.assertEqual(0, dictIndices[0])
        self.assertEqual(1, dictIndices[1])

if __name__ == '__main__':
    NewOpenCVTests.bootstrap()
@@ -10,6 +10,7 @@
#include "apriltag/apriltag_quad_thresh.hpp"
#include "aruco_utils.hpp"
#include <cmath>
#include <map>

namespace cv {
namespace aruco {

@@ -640,9 +641,14 @@ static inline void findCornerInPyrImage(const float scale_init, const int closes
    }
}

enum class DictionaryMode {
    Single,
    Multi
};

struct ArucoDetector::ArucoDetectorImpl {
    /// dictionary indicates the type of markers that will be searched
    Dictionary dictionary;
    /// dictionaries indicates the types of markers that will be searched
    vector<Dictionary> dictionaries;

    /// marker detection parameters, check DetectorParameters docs to see available settings
    DetectorParameters detectorParams;
@@ -651,9 +657,210 @@ struct ArucoDetector::ArucoDetectorImpl {
    RefineParameters refineParams;
    ArucoDetectorImpl() {}

    ArucoDetectorImpl(const Dictionary &_dictionary, const DetectorParameters &_detectorParams,
                      const RefineParameters& _refineParams): dictionary(_dictionary),
                      detectorParams(_detectorParams), refineParams(_refineParams) {}
    ArucoDetectorImpl(const vector<Dictionary>& _dictionaries, const DetectorParameters &_detectorParams,
                      const RefineParameters& _refineParams): dictionaries(_dictionaries),
                      detectorParams(_detectorParams), refineParams(_refineParams) {
        CV_Assert(!dictionaries.empty());
    }

    /*
     * @brief Detect markers using either all configured dictionaries or just the first one
     */
    void detectMarkers(InputArray _image, OutputArrayOfArrays _corners, OutputArray _ids,
                       OutputArrayOfArrays _rejectedImgPoints, OutputArray _dictIndices, DictionaryMode dictMode) {
        CV_Assert(!_image.empty());

        CV_Assert(detectorParams.markerBorderBits > 0);
        // check that the parameters are set correctly if Aruco3 is used
        CV_Assert(!(detectorParams.useAruco3Detection == true &&
                    detectorParams.minSideLengthCanonicalImg == 0 &&
                    detectorParams.minMarkerLengthRatioOriginalImg == 0.0));

        Mat grey;
        _convertToGrey(_image, grey);

        // Aruco3 functionality is the extension of Aruco.
        // The description can be found in:
        // [1] Speeded up detection of squared fiducial markers, 2018, FJ Romero-Ramirez et al.
        // if Aruco3 functionality is not wanted,
        // change some parameters to be sure to turn it off
        if (!detectorParams.useAruco3Detection) {
            detectorParams.minMarkerLengthRatioOriginalImg = 0.0;
            detectorParams.minSideLengthCanonicalImg = 0;
        }
        else {
            // always turn on corner refinement in case of Aruco3, due to upsampling
            detectorParams.cornerRefinementMethod = (int)CORNER_REFINE_SUBPIX;
            // only CORNER_REFINE_SUBPIX is implemented correctly for useAruco3Detection
            // Todo: update other CORNER_REFINE methods
        }

        /// Step 0: equation (2) from paper [1]
        const float fxfy = (!detectorParams.useAruco3Detection ? 1.f : detectorParams.minSideLengthCanonicalImg /
                            (detectorParams.minSideLengthCanonicalImg + std::max(grey.cols, grey.rows)*
                            detectorParams.minMarkerLengthRatioOriginalImg));
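        // fxfy is the factor used to shrink the image for the segmentation stage: assuming the
        // smallest marker of interest spans about minMarkerLengthRatioOriginalImg * max(width, height)
        // pixels in the original image, this choice keeps its side length on the order of
        // minSideLengthCanonicalImg pixels after resizing.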

        /// Step 1: create image pyramid. Section 3.4. in [1]
        vector<Mat> grey_pyramid;
        int closest_pyr_image_idx = 0, num_levels = 0;
        //// Step 1.1: resize image with equation (1) from paper [1]
        if (detectorParams.useAruco3Detection) {
            const float scale_pyr = 2.f;
            const float img_area = static_cast<float>(grey.rows*grey.cols);
            const float min_area_marker = static_cast<float>(detectorParams.minSideLengthCanonicalImg*
                                                             detectorParams.minSideLengthCanonicalImg);
            // find max level
            num_levels = static_cast<int>(log2(img_area / min_area_marker)/scale_pyr);
            // the closest pyramid image to the downsampled segmentation image
            // will later be used as start index for corner upsampling
            const float scale_img_area = img_area * fxfy * fxfy;
            closest_pyr_image_idx = cvRound(log2(img_area / scale_img_area)/scale_pyr);
        }
        buildPyramid(grey, grey_pyramid, num_levels);

        // resize to segmentation image
        // the contours will be detected at this reduced size
        if (fxfy != 1.f)
            resize(grey, grey, Size(cvRound(fxfy * grey.cols), cvRound(fxfy * grey.rows)));

        /// STEP 2: Detect marker candidates
        vector<vector<Point2f> > candidates;
        vector<vector<Point> > contours;
        vector<int> ids;

        /// STEP 2.a Detect marker candidates :: using AprilTag
        if(detectorParams.cornerRefinementMethod == (int)CORNER_REFINE_APRILTAG){
            _apriltag(grey, detectorParams, candidates, contours);
        }
        /// STEP 2.b Detect marker candidates :: traditional way
        else {
            detectCandidates(grey, candidates, contours);
        }

        /// STEP 2.c FILTER OUT NEAR CANDIDATE PAIRS
        vector<int> dictIndices;
        vector<vector<Point2f>> rejectedImgPoints;
        if (DictionaryMode::Single == dictMode) {
            Dictionary& dictionary = dictionaries.at(0);
            auto selectedCandidates = filterTooCloseCandidates(candidates, contours, dictionary.markerSize);
            candidates.clear();
            contours.clear();

            /// STEP 2: Check candidate codification (identify markers)
            identifyCandidates(grey, grey_pyramid, selectedCandidates, candidates, contours,
                               ids, dictionary, rejectedImgPoints);

            /// STEP 3: Corner refinement :: use corner subpix
            if (detectorParams.cornerRefinementMethod == (int)CORNER_REFINE_SUBPIX) {
                performCornerSubpixRefinement(grey, grey_pyramid, closest_pyr_image_idx, candidates, dictionary);
            }
        } else if (DictionaryMode::Multi == dictMode) {
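            // Multi-dictionary mode: the dictionary-independent square candidates found above are
            // reused for every dictionary. They are grouped once per distinct marker size, and only
            // the identification and corner-refinement stages below run per dictionary, with
            // dictIndex recording which dictionary each accepted marker came from.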
            map<int, vector<MarkerCandidateTree>> candidatesPerDictionarySize;
            for (const Dictionary& dictionary : dictionaries) {
                candidatesPerDictionarySize.emplace(dictionary.markerSize, vector<MarkerCandidateTree>());
            }

            // create candidate trees for each dictionary size
            for (auto& candidatesTreeEntry : candidatesPerDictionarySize) {
                // copy candidates
                vector<vector<Point2f>> candidatesCopy = candidates;
                vector<vector<Point> > contoursCopy = contours;
                candidatesTreeEntry.second = filterTooCloseCandidates(candidatesCopy, contoursCopy, candidatesTreeEntry.first);
            }
            candidates.clear();
            contours.clear();

            /// STEP 2: Check candidate codification (identify markers)
            int dictIndex = 0;
            for (const Dictionary& currentDictionary : dictionaries) {
                // temporary variable to store the current candidates
                vector<vector<Point2f>> currentCandidates;
                identifyCandidates(grey, grey_pyramid, candidatesPerDictionarySize.at(currentDictionary.markerSize), currentCandidates, contours,
                                   ids, currentDictionary, rejectedImgPoints);
                if (_dictIndices.needed()) {
                    dictIndices.insert(dictIndices.end(), currentCandidates.size(), dictIndex);
                }

                /// STEP 3: Corner refinement :: use corner subpix
                if (detectorParams.cornerRefinementMethod == (int)CORNER_REFINE_SUBPIX) {
                    performCornerSubpixRefinement(grey, grey_pyramid, closest_pyr_image_idx, currentCandidates, currentDictionary);
                }
                candidates.insert(candidates.end(), currentCandidates.begin(), currentCandidates.end());
                dictIndex++;
            }

            // Clean up rejectedImgPoints by comparing it to itself and to all accepted candidates
            const float epsilon = 0.000001f;
            auto compareCandidates = [epsilon](vector<Point2f> a, vector<Point2f> b) {
                for (int i = 0; i < 4; i++) {
                    if (std::abs(a[i].x - b[i].x) > epsilon || std::abs(a[i].y - b[i].y) > epsilon) {
                        return false;
                    }
                }
                return true;
            };
            std::sort(rejectedImgPoints.begin(), rejectedImgPoints.end(), [](const vector<Point2f>& a, const vector<Point2f>&b){
                float avgX = (a[0].x + a[1].x + a[2].x + a[3].x)*.25f;
                float avgY = (a[0].y + a[1].y + a[2].y + a[3].y)*.25f;
                float aDist = avgX*avgX + avgY*avgY;
                avgX = (b[0].x + b[1].x + b[2].x + b[3].x)*.25f;
                avgY = (b[0].y + b[1].y + b[2].y + b[3].y)*.25f;
                float bDist = avgX*avgX + avgY*avgY;
                return aDist < bDist;
            });
            auto last = std::unique(rejectedImgPoints.begin(), rejectedImgPoints.end(), compareCandidates);
            rejectedImgPoints.erase(last, rejectedImgPoints.end());

            for (auto it = rejectedImgPoints.begin(); it != rejectedImgPoints.end();) {
                bool erased = false;
                for (const auto& candidate : candidates) {
                    if (compareCandidates(candidate, *it)) {
                        it = rejectedImgPoints.erase(it);
                        erased = true;
                        break;
                    }
                }
                if (!erased) {
                    it++;
                }
            }
        }

        /// STEP 3, Optional : Corner refinement :: use contour container
        if (detectorParams.cornerRefinementMethod == (int)CORNER_REFINE_CONTOUR){

            if (!ids.empty()) {

                // do corner refinement using the contours for each detected marker
                parallel_for_(Range(0, (int)candidates.size()), [&](const Range& range) {
                    for (int i = range.start; i < range.end; i++) {
                        _refineCandidateLines(contours[i], candidates[i]);
                    }
                });
            }
        }

        if (detectorParams.cornerRefinementMethod != (int)CORNER_REFINE_SUBPIX && fxfy != 1.f) {
            // only CORNER_REFINE_SUBPIX is implemented correctly for useAruco3Detection
            // Todo: update other CORNER_REFINE methods

            // scale to original size; this, however, will lead to inaccurate detections!
            for (auto &vecPoints : candidates)
                for (auto &point : vecPoints)
                    point *= 1.f/fxfy;
        }

        // copy to output arrays
        _copyVector2Output(candidates, _corners);
        Mat(ids).copyTo(_ids);
        if(_rejectedImgPoints.needed()) {
            _copyVector2Output(rejectedImgPoints, _rejectedImgPoints);
        }
        if (_dictIndices.needed()) {
            Mat(dictIndices).copyTo(_dictIndices);
        }
    }

    /**
     * @brief Detect square candidates in the input image
     */
@@ -671,14 +878,12 @@ struct ArucoDetector::ArucoDetectorImpl {
     * clear candidates and contours
     */
    vector<MarkerCandidateTree>
    filterTooCloseCandidates(vector<vector<Point2f> > &candidates, vector<vector<Point> > &contours) {
    filterTooCloseCandidates(vector<vector<Point2f> > &candidates, vector<vector<Point> > &contours, int markerSize) {
        CV_Assert(detectorParams.minMarkerDistanceRate >= 0.);
        vector<MarkerCandidateTree> candidateTree(candidates.size());
        for(size_t i = 0ull; i < candidates.size(); i++) {
            candidateTree[i] = MarkerCandidateTree(std::move(candidates[i]), std::move(contours[i]));
        }
        candidates.clear();
        contours.clear();

        // sort candidates from big to small
        std::stable_sort(candidateTree.begin(), candidateTree.end());

@@ -735,7 +940,7 @@ struct ArucoDetector::ArucoDetectorImpl {
        for (size_t i = 1ull; i < grouped.size(); i++) {
            size_t id = grouped[i];
            float dist = getAverageDistance(candidateTree[id].corners, candidateTree[currId].corners);
            float moduleSize = getAverageModuleSize(candidateTree[id].corners, dictionary.markerSize, detectorParams.markerBorderBits);
            float moduleSize = getAverageModuleSize(candidateTree[id].corners, markerSize, detectorParams.markerBorderBits);
            if (dist > detectorParams.minGroupDistance*moduleSize) {
                currId = id;
                candidateTree[grouped[0]].closeContours.push_back(candidateTree[id]);

@@ -770,9 +975,8 @@ struct ArucoDetector::ArucoDetectorImpl {
     */
    void identifyCandidates(const Mat& grey, const vector<Mat>& image_pyr, vector<MarkerCandidateTree>& selectedContours,
                            vector<vector<Point2f> >& accepted, vector<vector<Point> >& contours,
                            vector<int>& ids, OutputArrayOfArrays _rejected = noArray()) {
                            vector<int>& ids, const Dictionary& currentDictionary, vector<vector<Point2f>>& rejected) const {
        size_t ncandidates = selectedContours.size();
        vector<vector<Point2f> > rejected;

        vector<int> idsTmp(ncandidates, -1);
        vector<int> rotated(ncandidates, 0);

@@ -807,11 +1011,11 @@ struct ArucoDetector::ArucoDetectorImpl {
        }
        const float scale = detectorParams.useAruco3Detection ? img.cols / static_cast<float>(grey.cols) : 1.f;

        validCandidates[v] = _identifyOneCandidate(dictionary, img, selectedContours[v].corners, idsTmp[v], detectorParams, rotated[v], scale);
        validCandidates[v] = _identifyOneCandidate(currentDictionary, img, selectedContours[v].corners, idsTmp[v], detectorParams, rotated[v], scale);

        if (validCandidates[v] == 0 && checkCloseContours) {
            for (const MarkerCandidate& closeMarkerCandidate: selectedContours[v].closeContours) {
                validCandidates[v] = _identifyOneCandidate(dictionary, img, closeMarkerCandidate.corners, idsTmp[v], detectorParams, rotated[v], scale);
                validCandidates[v] = _identifyOneCandidate(currentDictionary, img, closeMarkerCandidate.corners, idsTmp[v], detectorParams, rotated[v], scale);
                if (validCandidates[v] > 0) {
                    selectedContours[v].corners = closeMarkerCandidate.corners;
                    selectedContours[v].contour = closeMarkerCandidate.contour;
@@ -852,105 +1056,11 @@ struct ArucoDetector::ArucoDetectorImpl {
            rejected.push_back(selectedContours[i].corners);
        }
    }

    // parse output
    if(_rejected.needed()) {
        _copyVector2Output(rejected, _rejected);
    }
}

};

ArucoDetector::ArucoDetector(const Dictionary &_dictionary,
                             const DetectorParameters &_detectorParams,
                             const RefineParameters& _refineParams) {
    arucoDetectorImpl = makePtr<ArucoDetectorImpl>(_dictionary, _detectorParams, _refineParams);
}

void ArucoDetector::detectMarkers(InputArray _image, OutputArrayOfArrays _corners, OutputArray _ids,
                                  OutputArrayOfArrays _rejectedImgPoints) const {
    CV_Assert(!_image.empty());
    DetectorParameters& detectorParams = arucoDetectorImpl->detectorParams;
    const Dictionary& dictionary = arucoDetectorImpl->dictionary;

    CV_Assert(detectorParams.markerBorderBits > 0);
    // check that the parameters are set correctly if Aruco3 is used
    CV_Assert(!(detectorParams.useAruco3Detection == true &&
                detectorParams.minSideLengthCanonicalImg == 0 &&
                detectorParams.minMarkerLengthRatioOriginalImg == 0.0));

    Mat grey;
    _convertToGrey(_image, grey);

    // Aruco3 functionality is the extension of Aruco.
    // The description can be found in:
    // [1] Speeded up detection of squared fiducial markers, 2018, FJ Romero-Ramirez et al.
    // if Aruco3 functionality is not wanted,
    // change some parameters to be sure to turn it off
    if (!detectorParams.useAruco3Detection) {
        detectorParams.minMarkerLengthRatioOriginalImg = 0.0;
        detectorParams.minSideLengthCanonicalImg = 0;
    }
    else {
        // always turn on corner refinement in case of Aruco3, due to upsampling
        detectorParams.cornerRefinementMethod = (int)CORNER_REFINE_SUBPIX;
        // only CORNER_REFINE_SUBPIX is implemented correctly for useAruco3Detection
        // Todo: update other CORNER_REFINE methods
    }

    /// Step 0: equation (2) from paper [1]
    const float fxfy = (!detectorParams.useAruco3Detection ? 1.f : detectorParams.minSideLengthCanonicalImg /
                        (detectorParams.minSideLengthCanonicalImg + std::max(grey.cols, grey.rows)*
                        detectorParams.minMarkerLengthRatioOriginalImg));

    /// Step 1: create image pyramid. Section 3.4. in [1]
    vector<Mat> grey_pyramid;
    int closest_pyr_image_idx = 0, num_levels = 0;
    //// Step 1.1: resize image with equation (1) from paper [1]
    if (detectorParams.useAruco3Detection) {
        const float scale_pyr = 2.f;
        const float img_area = static_cast<float>(grey.rows*grey.cols);
        const float min_area_marker = static_cast<float>(detectorParams.minSideLengthCanonicalImg*
                                                         detectorParams.minSideLengthCanonicalImg);
        // find max level
        num_levels = static_cast<int>(log2(img_area / min_area_marker)/scale_pyr);
        // the closest pyramid image to the downsampled segmentation image
        // will later be used as start index for corner upsampling
        const float scale_img_area = img_area * fxfy * fxfy;
        closest_pyr_image_idx = cvRound(log2(img_area / scale_img_area)/scale_pyr);
    }
    buildPyramid(grey, grey_pyramid, num_levels);

    // resize to segmentation image
    // the contours will be detected at this reduced size
    if (fxfy != 1.f)
        resize(grey, grey, Size(cvRound(fxfy * grey.cols), cvRound(fxfy * grey.rows)));

    /// STEP 2: Detect marker candidates
    vector<vector<Point2f> > candidates;
    vector<vector<Point> > contours;
    vector<int> ids;

    /// STEP 2.a Detect marker candidates :: using AprilTag
    if(detectorParams.cornerRefinementMethod == (int)CORNER_REFINE_APRILTAG){
        _apriltag(grey, detectorParams, candidates, contours);
    }
    /// STEP 2.b Detect marker candidates :: traditional way
    else {
        arucoDetectorImpl->detectCandidates(grey, candidates, contours);
    }

    /// STEP 2.c FILTER OUT NEAR CANDIDATE PAIRS
    auto selectedCandidates = arucoDetectorImpl->filterTooCloseCandidates(candidates, contours);

    /// STEP 2: Check candidate codification (identify markers)
    arucoDetectorImpl->identifyCandidates(grey, grey_pyramid, selectedCandidates, candidates, contours,
                                          ids, _rejectedImgPoints);

    /// STEP 3: Corner refinement :: use corner subpix
    if (detectorParams.cornerRefinementMethod == (int)CORNER_REFINE_SUBPIX) {
    void performCornerSubpixRefinement(const Mat& grey, const vector<Mat>& grey_pyramid, int closest_pyr_image_idx, const vector<vector<Point2f>>& candidates, const Dictionary& dictionary) const {
        CV_Assert(detectorParams.cornerRefinementWinSize > 0 && detectorParams.cornerRefinementMaxIterations > 0 &&
                  detectorParams.cornerRefinementMinAccuracy > 0);
                  detectorParams.cornerRefinementMinAccuracy > 0);
        // Do subpixel estimation. In Aruco3 start on the lowest pyramid level and upscale the corners
        parallel_for_(Range(0, (int)candidates.size()), [&](const Range& range) {
            const int begin = range.start;

@@ -960,47 +1070,40 @@ void ArucoDetector::detectMarkers(InputArray _image, OutputArrayOfArrays _corner
            if (detectorParams.useAruco3Detection) {
                const float scale_init = (float) grey_pyramid[closest_pyr_image_idx].cols / grey.cols;
                findCornerInPyrImage(scale_init, closest_pyr_image_idx, grey_pyramid, Mat(candidates[i]), detectorParams);
            }
            else {
            } else {
                int cornerRefinementWinSize = std::max(1, cvRound(detectorParams.relativeCornerRefinmentWinSize*
                    getAverageModuleSize(candidates[i], dictionary.markerSize, detectorParams.markerBorderBits)));
                    getAverageModuleSize(candidates[i], dictionary.markerSize, detectorParams.markerBorderBits)));
                cornerRefinementWinSize = min(cornerRefinementWinSize, detectorParams.cornerRefinementWinSize);
                cornerSubPix(grey, Mat(candidates[i]), Size(cornerRefinementWinSize, cornerRefinementWinSize), Size(-1, -1),
                             TermCriteria(TermCriteria::MAX_ITER | TermCriteria::EPS,
                                          detectorParams.cornerRefinementMaxIterations,
                                          detectorParams.cornerRefinementMinAccuracy));
                             TermCriteria(TermCriteria::MAX_ITER | TermCriteria::EPS,
                                          detectorParams.cornerRefinementMaxIterations,
                                          detectorParams.cornerRefinementMinAccuracy));
            }
        }
        });
    }
};

    /// STEP 3, Optional : Corner refinement :: use contour container
    if (detectorParams.cornerRefinementMethod == (int)CORNER_REFINE_CONTOUR){
ArucoDetector::ArucoDetector(const Dictionary &_dictionary,
                             const DetectorParameters &_detectorParams,
                             const RefineParameters& _refineParams) {
    arucoDetectorImpl = makePtr<ArucoDetectorImpl>(vector<Dictionary>{_dictionary}, _detectorParams, _refineParams);
}

        if (!ids.empty()) {
ArucoDetector::ArucoDetector(const vector<Dictionary> &_dictionaries,
                             const DetectorParameters &_detectorParams,
                             const RefineParameters& _refineParams) {
    arucoDetectorImpl = makePtr<ArucoDetectorImpl>(_dictionaries, _detectorParams, _refineParams);
}

            // do corner refinement using the contours for each detected marker
            parallel_for_(Range(0, (int)candidates.size()), [&](const Range& range) {
                for (int i = range.start; i < range.end; i++) {
                    _refineCandidateLines(contours[i], candidates[i]);
                }
            });
        }
    }
void ArucoDetector::detectMarkers(InputArray _image, OutputArrayOfArrays _corners, OutputArray _ids,
                                  OutputArrayOfArrays _rejectedImgPoints) const {
    arucoDetectorImpl->detectMarkers(_image, _corners, _ids, _rejectedImgPoints, noArray(), DictionaryMode::Single);
}

    if (detectorParams.cornerRefinementMethod != (int)CORNER_REFINE_SUBPIX && fxfy != 1.f) {
        // only CORNER_REFINE_SUBPIX is implemented correctly for useAruco3Detection
        // Todo: update other CORNER_REFINE methods

        // scale to original size; this, however, will lead to inaccurate detections!
        for (auto &vecPoints : candidates)
            for (auto &point : vecPoints)
                point *= 1.f/fxfy;
    }

    // copy to output arrays
    _copyVector2Output(candidates, _corners);
    Mat(ids).copyTo(_ids);
void ArucoDetector::detectMarkersMultiDict(InputArray _image, OutputArrayOfArrays _corners, OutputArray _ids,
                                           OutputArrayOfArrays _rejectedImgPoints, OutputArray _dictIndices) const {
    arucoDetectorImpl->detectMarkers(_image, _corners, _ids, _rejectedImgPoints, _dictIndices, DictionaryMode::Multi);
}

/**

@@ -1114,7 +1217,7 @@ void ArucoDetector::refineDetectedMarkers(InputArray _image, const Board& _board
                                          InputOutputArrayOfArrays _rejectedCorners, InputArray _cameraMatrix,
                                          InputArray _distCoeffs, OutputArray _recoveredIdxs) const {
    DetectorParameters& detectorParams = arucoDetectorImpl->detectorParams;
    const Dictionary& dictionary = arucoDetectorImpl->dictionary;
    const Dictionary& dictionary = arucoDetectorImpl->dictionaries.at(0);
    RefineParameters& refineParams = arucoDetectorImpl->refineParams;
    CV_Assert(refineParams.minRepDistance > 0);

@@ -1280,25 +1383,58 @@ void ArucoDetector::refineDetectedMarkers(InputArray _image, const Board& _board
    }
}

void ArucoDetector::write(FileStorage &fs) const
{
    arucoDetectorImpl->dictionary.writeDictionary(fs);
void ArucoDetector::write(FileStorage &fs) const {
    // preserve old format for single dictionary case
    if (1 == arucoDetectorImpl->dictionaries.size()) {
        arucoDetectorImpl->dictionaries[0].writeDictionary(fs);
    } else {
        fs << "dictionaries" << "[";
        for (auto& dictionary : arucoDetectorImpl->dictionaries) {
            fs << "{";
            dictionary.writeDictionary(fs);
            fs << "}";
        }
        fs << "]";
    }
    arucoDetectorImpl->detectorParams.writeDetectorParameters(fs);
    arucoDetectorImpl->refineParams.writeRefineParameters(fs);
}

void ArucoDetector::read(const FileNode &fn) {
    arucoDetectorImpl->dictionary.readDictionary(fn);
    arucoDetectorImpl->dictionaries.clear();
    if (!fn.empty() && !fn["dictionaries"].empty() && fn["dictionaries"].isSeq()) {
        for (const auto& dictionaryNode : fn["dictionaries"]) {
            arucoDetectorImpl->dictionaries.emplace_back();
            arucoDetectorImpl->dictionaries.back().readDictionary(dictionaryNode);
        }
    } else {
        // backward compatibility
        arucoDetectorImpl->dictionaries.emplace_back();
        arucoDetectorImpl->dictionaries.back().readDictionary(fn);
    }
    arucoDetectorImpl->detectorParams.readDetectorParameters(fn);
    arucoDetectorImpl->refineParams.readRefineParameters(fn);
}
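A small round-trip sketch of the serialization behaviour above (in-memory FileStorage; the helper function is illustrative and not part of this change):

    #include <opencv2/objdetect/aruco_detector.hpp>
    #include <string>

    // Write a detector to an in-memory FileStorage and read it back into another detector.
    static void roundTripExample(const cv::aruco::ArucoDetector& src, cv::aruco::ArucoDetector& dst) {
        cv::FileStorage out(".yml", cv::FileStorage::WRITE + cv::FileStorage::MEMORY);
        src.write(out);  // one dictionary -> legacy layout; several -> a "dictionaries" sequence
        const std::string data = out.releaseAndGetString();

        cv::FileStorage in(data, cv::FileStorage::READ + cv::FileStorage::MEMORY);
        dst.read(in.root());  // also accepts the legacy single-dictionary layout
    }
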

const Dictionary& ArucoDetector::getDictionary() const {
    return arucoDetectorImpl->dictionary;
    return arucoDetectorImpl->dictionaries[0];
}

void ArucoDetector::setDictionary(const Dictionary& dictionary) {
    arucoDetectorImpl->dictionary = dictionary;
    if (arucoDetectorImpl->dictionaries.empty()) {
        arucoDetectorImpl->dictionaries.push_back(dictionary);
    } else {
        arucoDetectorImpl->dictionaries[0] = dictionary;
    }
}

vector<Dictionary> ArucoDetector::getDictionaries() const {
    return arucoDetectorImpl->dictionaries;
}

void ArucoDetector::setDictionaries(const vector<Dictionary>& dictionaries) {
    CV_Assert(!dictionaries.empty());
    arucoDetectorImpl->dictionaries = dictionaries;
}

const DetectorParameters& ArucoDetector::getDetectorParameters() const {
@@ -6,6 +6,18 @@
#include "opencv2/objdetect/aruco_detector.hpp"
#include "opencv2/calib3d.hpp"

namespace cv {
namespace aruco {
bool operator==(const Dictionary& d1, const Dictionary& d2);
bool operator==(const Dictionary& d1, const Dictionary& d2) {
    return d1.markerSize == d2.markerSize
        && std::equal(d1.bytesList.begin<Vec<uint8_t, 4>>(), d1.bytesList.end<Vec<uint8_t, 4>>(), d2.bytesList.begin<Vec<uint8_t, 4>>())
        && std::equal(d2.bytesList.begin<Vec<uint8_t, 4>>(), d2.bytesList.end<Vec<uint8_t, 4>>(), d1.bytesList.begin<Vec<uint8_t, 4>>())
        && d1.maxCorrectionBits == d2.maxCorrectionBits;
};
}
}

namespace opencv_test { namespace {

/**

@@ -638,6 +650,155 @@ TEST(CV_ArucoDetectMarkers, regression_contour_24220)
    }
}

TEST(CV_ArucoMultiDict, setGetDictionaries)
{
    vector<aruco::Dictionary> dictionaries = {aruco::getPredefinedDictionary(aruco::DICT_4X4_50), aruco::getPredefinedDictionary(aruco::DICT_5X5_100)};
    aruco::ArucoDetector detector(dictionaries);
    vector<aruco::Dictionary> dicts = detector.getDictionaries();
    ASSERT_EQ(dicts.size(), 2ul);
    EXPECT_EQ(dicts[0].markerSize, 4);
    EXPECT_EQ(dicts[1].markerSize, 5);
    dictionaries.clear();
    dictionaries.push_back(aruco::getPredefinedDictionary(aruco::DICT_6X6_100));
    dictionaries.push_back(aruco::getPredefinedDictionary(aruco::DICT_7X7_250));
    dictionaries.push_back(aruco::getPredefinedDictionary(aruco::DICT_APRILTAG_25h9));
    detector.setDictionaries(dictionaries);
    dicts = detector.getDictionaries();
    ASSERT_EQ(dicts.size(), 3ul);
    EXPECT_EQ(dicts[0].markerSize, 6);
    EXPECT_EQ(dicts[1].markerSize, 7);
    EXPECT_EQ(dicts[2].markerSize, 5);
    auto dict = detector.getDictionary();
    EXPECT_EQ(dict.markerSize, 6);
    detector.setDictionary(aruco::getPredefinedDictionary(aruco::DICT_APRILTAG_16h5));
    dicts = detector.getDictionaries();
    ASSERT_EQ(dicts.size(), 3ul);
    EXPECT_EQ(dicts[0].markerSize, 4);
    EXPECT_EQ(dicts[1].markerSize, 7);
    EXPECT_EQ(dicts[2].markerSize, 5);
}


TEST(CV_ArucoMultiDict, noDict)
{
    aruco::ArucoDetector detector;
    EXPECT_THROW({
        detector.setDictionaries({});
    }, Exception);
}


TEST(CV_ArucoMultiDict, multiMarkerDetection)
{
    const int markerSidePixels = 100;
    const int imageSize = markerSidePixels * 2 + 3 * (markerSidePixels / 2);
    vector<aruco::Dictionary> usedDictionaries;

    // draw synthetic image
    Mat img = Mat(imageSize, imageSize, CV_8UC1, Scalar::all(255));
    for(int y = 0; y < 2; y++) {
        for(int x = 0; x < 2; x++) {
            Mat marker;
            int id = y * 2 + x;
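            // In the predefined-dictionary enum, DICT_4X4_50 = 0, DICT_5X5_50 = 4,
            // DICT_6X6_50 = 8 and DICT_7X7_50 = 12, so the expression below selects a
            // dictionary with a different marker size for each grid cell.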
            int dictId = x * 4 + y * 8;
            auto dict = aruco::getPredefinedDictionary(dictId);
            usedDictionaries.push_back(dict);
            aruco::generateImageMarker(dict, id, markerSidePixels, marker);
            Point2f firstCorner(markerSidePixels / 2.f + x * (1.5f * markerSidePixels),
                                markerSidePixels / 2.f + y * (1.5f * markerSidePixels));
            Mat aux = img(Rect((int)firstCorner.x, (int)firstCorner.y, markerSidePixels, markerSidePixels));
            marker.copyTo(aux);
        }
    }
    img.convertTo(img, CV_8UC3);

    aruco::ArucoDetector detector(usedDictionaries);

    vector<vector<Point2f> > markerCorners;
    vector<int> markerIds;
    vector<vector<Point2f> > rejectedImgPts;
    vector<int> dictIds;
    detector.detectMarkersMultiDict(img, markerCorners, markerIds, rejectedImgPts, dictIds);
    ASSERT_EQ(markerIds.size(), 4u);
    ASSERT_EQ(dictIds.size(), 4u);
    for (size_t i = 0; i < dictIds.size(); ++i) {
        EXPECT_EQ(dictIds[i], (int)i);
    }
}


TEST(CV_ArucoMultiDict, multiMarkerDoubleDetection)
{
    const int markerSidePixels = 100;
    const int imageWidth = 2 * markerSidePixels + 3 * (markerSidePixels / 2);
    const int imageHeight = markerSidePixels + 2 * (markerSidePixels / 2);
    vector<aruco::Dictionary> usedDictionaries = {
        aruco::getPredefinedDictionary(aruco::DICT_5X5_50),
        aruco::getPredefinedDictionary(aruco::DICT_5X5_100)
    };

    // draw synthetic image
    Mat img = Mat(imageHeight, imageWidth, CV_8UC1, Scalar::all(255));
    for(int y = 0; y < 2; y++) {
        Mat marker;
        int id = 49 + y;
        auto dict = aruco::getPredefinedDictionary(aruco::DICT_5X5_100);
        aruco::generateImageMarker(dict, id, markerSidePixels, marker);
        Point2f firstCorner(markerSidePixels / 2.f + y * (1.5f * markerSidePixels),
                            markerSidePixels / 2.f);
        Mat aux = img(Rect((int)firstCorner.x, (int)firstCorner.y, markerSidePixels, markerSidePixels));
        marker.copyTo(aux);
    }
    img.convertTo(img, CV_8UC3);

    aruco::ArucoDetector detector(usedDictionaries);

    vector<vector<Point2f> > markerCorners;
    vector<int> markerIds;
    vector<vector<Point2f> > rejectedImgPts;
    vector<int> dictIds;
    detector.detectMarkersMultiDict(img, markerCorners, markerIds, rejectedImgPts, dictIds);
    ASSERT_EQ(markerIds.size(), 3u);
    ASSERT_EQ(dictIds.size(), 3u);
    EXPECT_EQ(dictIds[0], 0); // 5X5_50
    EXPECT_EQ(dictIds[1], 1); // 5X5_100
    EXPECT_EQ(dictIds[2], 1); // 5X5_100
}


TEST(CV_ArucoMultiDict, serialization)
{
    aruco::ArucoDetector detector;
    {
        FileStorage fs_out(".json", FileStorage::WRITE + FileStorage::MEMORY);
        ASSERT_TRUE(fs_out.isOpened());
        detector.write(fs_out);
        std::string serialized_string = fs_out.releaseAndGetString();
        FileStorage test_fs(serialized_string, FileStorage::Mode::READ + FileStorage::MEMORY);
        ASSERT_TRUE(test_fs.isOpened());
        aruco::ArucoDetector test_detector;
        test_detector.read(test_fs.root());
        // compare default constructor result
        EXPECT_EQ(aruco::getPredefinedDictionary(aruco::DICT_4X4_50), test_detector.getDictionary());
    }
    detector.setDictionaries({aruco::getPredefinedDictionary(aruco::DICT_4X4_50), aruco::getPredefinedDictionary(aruco::DICT_5X5_100)});
    {
        FileStorage fs_out(".json", FileStorage::WRITE + FileStorage::MEMORY);
        ASSERT_TRUE(fs_out.isOpened());
        detector.write(fs_out);
        std::string serialized_string = fs_out.releaseAndGetString();
        FileStorage test_fs(serialized_string, FileStorage::Mode::READ + FileStorage::MEMORY);
        ASSERT_TRUE(test_fs.isOpened());
        aruco::ArucoDetector test_detector;
        test_detector.read(test_fs.root());
        // check for one additional dictionary
        auto dicts = test_detector.getDictionaries();
        ASSERT_EQ(2ul, dicts.size());
        EXPECT_EQ(aruco::getPredefinedDictionary(aruco::DICT_4X4_50), dicts[0]);
        EXPECT_EQ(aruco::getPredefinedDictionary(aruco::DICT_5X5_100), dicts[1]);
    }
}


struct ArucoThreading: public testing::TestWithParam<aruco::CornerRefineMethod>
{
@@ -134,14 +134,17 @@ class CV_ArucoRefine : public cvtest::BaseTest {
public:
    CV_ArucoRefine(ArucoAlgParams arucoAlgParams)
    {
        aruco::Dictionary dictionary = aruco::getPredefinedDictionary(aruco::DICT_6X6_250);
        vector<aruco::Dictionary> dictionaries = {aruco::getPredefinedDictionary(aruco::DICT_6X6_250),
                                                  aruco::getPredefinedDictionary(aruco::DICT_5X5_250),
                                                  aruco::getPredefinedDictionary(aruco::DICT_4X4_250),
                                                  aruco::getPredefinedDictionary(aruco::DICT_7X7_250)};
        aruco::DetectorParameters params;
        params.minDistanceToBorder = 3;
        params.cornerRefinementMethod = (int)aruco::CORNER_REFINE_SUBPIX;
        if (arucoAlgParams == ArucoAlgParams::USE_ARUCO3)
            params.useAruco3Detection = true;
        aruco::RefineParameters refineParams(10.f, 3.f, true);
        detector = aruco::ArucoDetector(dictionary, params, refineParams);
        detector = aruco::ArucoDetector(dictionaries, params, refineParams);
    }

protected:
@@ -45,6 +45,7 @@ typedef std::vector<DMatch> vector_DMatch;
typedef std::vector<String> vector_String;
typedef std::vector<std::string> vector_string;
typedef std::vector<Scalar> vector_Scalar;
typedef std::vector<aruco::Dictionary> vector_Dictionary;

typedef std::vector<std::vector<char> > vector_vector_char;
typedef std::vector<std::vector<Point> > vector_vector_Point;