Merge pull request #22368 from AleksandrPanov:move_contrib_aruco_to_main_objdetect

Merge together with https://github.com/opencv/opencv_contrib/pull/3325

1. Move aruco_detector, aruco_board, aruco_dictionary, aruco_utils to objdetect
1.1 add virtual Board::draw(), virtual ~Board()
1.2 move `testCharucoCornersCollinear` to Board classes (and rename to `checkCharucoCornersCollinear`)
1.3 add wrappers to keep the old api working
3. Reduce includes
4. Fix java tests (add objdetect import)
5. Refactoring

### Pull Request Readiness Checklist

See details at https://github.com/opencv/opencv/wiki/How_to_contribute#making-a-good-pull-request

- [x] I agree to contribute to the project under Apache 2 License.
- [x] To the best of my knowledge, the proposed patch is not based on a code under GPL or another license that is incompatible with OpenCV
- [x] The PR is proposed to the proper branch
- [x] There is a reference to the original bug report and related work
- [x] There is accuracy test, performance test and test data in opencv_extra repository, if applicable
      Patch to opencv_extra has the same branch name.
- [x] The feature is well documented and sample code can be built with the project CMake

```
**WIP**
force_builders=linux,win64,docs,Linux x64 Debug,Custom
Xbuild_contrib:Docs=OFF

build_image:Custom=ubuntu:22.04
build_worker:Custom=linux-1
```
This commit is contained in:
Alexander Panov 2022-12-16 12:28:47 +03:00 committed by GitHub
parent 47fb79bd8c
commit b4b35cff15
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
27 changed files with 41433 additions and 11 deletions

View File

@ -79,7 +79,7 @@ bool CalibProcessor::detectAndParseChAruco(const cv::Mat &frame)
std::vector<std::vector<cv::Point2f> > corners, rejected;
std::vector<int> ids;
cv::aruco::detectMarkers(frame, mArucoDictionary, corners, ids, cv::aruco::DetectorParameters::create(), rejected);
cv::aruco::detectMarkers(frame, cv::makePtr<cv::aruco::Dictionary>(mArucoDictionary), corners, ids, cv::makePtr<cv::aruco::DetectorParameters>(), rejected);
cv::aruco::refineDetectedMarkers(frame, board, corners, ids, rejected);
cv::Mat currentCharucoCorners, currentCharucoIds;
if(ids.size() > 0)
@ -266,8 +266,8 @@ bool CalibProcessor::checkLastFrame()
allObjPoints.reserve(mCurrentCharucoIds.total());
for(size_t i = 0; i < mCurrentCharucoIds.total(); i++) {
int pointID = mCurrentCharucoIds.at<int>((int)i);
CV_Assert(pointID >= 0 && pointID < (int)mCharucoBoard->chessboardCorners.size());
allObjPoints.push_back(mCharucoBoard->chessboardCorners[pointID]);
CV_Assert(pointID >= 0 && pointID < (int)mCharucoBoard->getChessboardCorners().size());
allObjPoints.push_back(mCharucoBoard->getChessboardCorners()[pointID]);
}
cv::solvePnP(allObjPoints, mCurrentCharucoCorners, tmpCamMatrix, mCalibData->distCoeffs, r, t);
@ -300,8 +300,7 @@ CalibProcessor::CalibProcessor(cv::Ptr<calibrationData> data, captureParameters
{
case chAruco:
#ifdef HAVE_OPENCV_ARUCO
mArucoDictionary = cv::aruco::getPredefinedDictionary(
cv::aruco::PREDEFINED_DICTIONARY_NAME(capParams.charucoDictName));
mArucoDictionary = cv::aruco::getPredefinedDictionary(cv::aruco::PredefinedDictionaryType(capParams.charucoDictName));
mCharucoBoard = cv::aruco::CharucoBoard::create(mBoardSize.width, mBoardSize.height, capParams.charucoSquareLength,
capParams.charucoMarkerSize, mArucoDictionary);
#endif

View File

@ -40,7 +40,7 @@ protected:
cv::Ptr<cv::SimpleBlobDetector> mBlobDetectorPtr;
#ifdef HAVE_OPENCV_ARUCO
cv::Ptr<cv::aruco::Dictionary> mArucoDictionary;
cv::aruco::Dictionary mArucoDictionary;
cv::Ptr<cv::aruco::CharucoBoard> mCharucoBoard;
#endif

View File

@ -180,8 +180,8 @@ int main(int argc, char** argv)
}
else {
#ifdef HAVE_OPENCV_ARUCO
cv::Ptr<cv::aruco::Dictionary> dictionary =
cv::aruco::getPredefinedDictionary(cv::aruco::PREDEFINED_DICTIONARY_NAME(capParams.charucoDictName));
cv::aruco::Dictionary dictionary =
cv::aruco::getPredefinedDictionary(cv::aruco::PredefinedDictionaryType(capParams.charucoDictName));
cv::Ptr<cv::aruco::CharucoBoard> charucoboard =
cv::aruco::CharucoBoard::create(capParams.boardSize.width, capParams.boardSize.height,
capParams.charucoSquareLength, capParams.charucoMarkerSize, dictionary);

View File

@ -59,7 +59,7 @@ pixels. Then to calculate the rotation and translation, we use the function,
**cv.solvePnPRansac()**. Once we get those transformation matrices, we use them to project our **axis
points** to the image plane. In simple words, we find the points on image plane corresponding to
each of (3,0,0),(0,3,0),(0,0,3) in 3D space. Once we get them, we draw lines from the first corner
to each of these points using our draw() function. Done !!!
to each of these points using our generateImage() function. Done !!!
@code{.py}
for fname in glob.glob('left*.jpg'):
img = cv.imread(fname)
@ -89,9 +89,9 @@ See some results below. Notice that each axis is 3 squares long.:
### Render a Cube
If you want to draw a cube, modify the draw() function and axis points as follows.
If you want to draw a cube, modify the generateImage() function and axis points as follows.
Modified draw() function:
Modified generateImage() function:
@code{.py}
def draw(img, corners, imgpts):
imgpts = np.int32(imgpts).reshape(-1,2)

View File

@ -0,0 +1,20 @@
@article{Aruco2014,
author = {S. Garrido-Jurado and R. Mu\~noz-Salinas and F.J. Madrid-Cuevas and M.J. Mar\'in-Jim\'enez},
title = {Automatic generation and detection of highly reliable fiducial markers under occlusion},
year = {2014},
pages = {2280 - 2292},
journal = {Pattern Recognition},
volume = {47},
number = {6},
issn = {0031-3203},
doi = {http://dx.doi.org/10.1016/j.patcog.2014.01.005},
url = {http://www.sciencedirect.com/science/article/pii/S0031320314000235}
}
@inproceedings{wang2016iros,
author = {John Wang and Edwin Olson},
title = {{AprilTag} 2: Efficient and robust fiducial detection},
booktitle = {Proceedings of the {IEEE/RSJ} International Conference on Intelligent Robots and Systems {(IROS)}},
year = {2016},
month = {October}
}

View File

@ -851,5 +851,6 @@ protected:
#include "opencv2/objdetect/detection_based_tracker.hpp"
#include "opencv2/objdetect/face.hpp"
#include "opencv2/objdetect/aruco_detector.hpp"
#endif

View File

@ -0,0 +1,234 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html
#ifndef OPENCV_OBJDETECT_ARUCO_BOARD_HPP
#define OPENCV_OBJDETECT_ARUCO_BOARD_HPP
#include <opencv2/core.hpp>
namespace cv {
namespace aruco {
//! @addtogroup aruco
//! @{
class Dictionary;
/** @brief Board of ArUco markers
*
* A board is a set of markers in the 3D space with a common coordinate system.
* The common form of a board of marker is a planar (2D) board, however any 3D layout can be used.
* A Board object is composed by:
* - The object points of the marker corners, i.e. their coordinates respect to the board system.
* - The dictionary which indicates the type of markers of the board
* - The identifier of all the markers in the board.
*/
class CV_EXPORTS_W Board {
protected:
Board(); // not constructible directly: use Board::create() (or a derived class factory)
public:
/** @brief Draw the board image
*
* @param outSize size of the output image in pixels.
* @param img output image with the board. The size of this image will be outSize
* and the board will be on the center, keeping the board proportions.
* @param marginSize minimum margins (in pixels) of the board in the output image
* @param borderBits width of the marker borders.
*
* This function returns the image of the board, ready to be printed.
*/
CV_WRAP virtual void generateImage(Size outSize, OutputArray img, int marginSize = 0, int borderBits = 1) const;
/** @brief Provide way to create Board by passing necessary data. Specially needed in Python.
*
* @param objPoints array of object points of all the marker corners in the board
* @param dictionary the dictionary of markers employed for this board
* @param ids vector of the identifiers of the markers in the board
*/
CV_WRAP static Ptr<Board> create(InputArrayOfArrays objPoints, const Dictionary &dictionary, InputArray ids);
/** @brief return the Dictionary of markers employed for this board
*/
CV_WRAP const Dictionary& getDictionary() const;
/** @brief return array of object points of all the marker corners in the board.
*
* Each marker include its 4 corners in this order:
* - objPoints[i][0] - left-top point of i-th marker
* - objPoints[i][1] - right-top point of i-th marker
* - objPoints[i][2] - right-bottom point of i-th marker
* - objPoints[i][3] - left-bottom point of i-th marker
*
* Markers are placed in a certain order - row by row, left to right in every row. For M markers, the size is Mx4.
*/
CV_WRAP const std::vector<std::vector<Point3f> >& getObjPoints() const;
/** @brief vector of the identifiers of the markers in the board (should be the same size as objPoints)
* @return vector of the identifiers of the markers
*/
CV_WRAP const std::vector<int>& getIds() const;
/** @brief get coordinate of the bottom right corner of the board, is set when calling the function create()
*/
CV_WRAP const Point3f& getRightBottomCorner() const;
/** @brief Given a board configuration and a set of detected markers, returns the corresponding
* image points and object points to call solvePnP
*
* @param detectedCorners List of detected marker corners of the board.
* @param detectedIds List of identifiers for each marker.
* @param objPoints Vector of vectors of board marker points in the board coordinate space.
* @param imgPoints Vector of vectors of the projections of board marker corner points.
*/
CV_WRAP void matchImagePoints(InputArrayOfArrays detectedCorners, InputArray detectedIds,
OutputArray objPoints, OutputArray imgPoints) const;
virtual ~Board();
protected:
// Implementation details (object points, ids, dictionary, ...) are hidden behind
// a forward-declared impl struct (pimpl); subclasses access state through it.
struct BoardImpl;
Ptr<BoardImpl> boardImpl;
};
/** @brief Planar board with grid arrangement of markers
*
* More common type of board. All markers are placed in the same plane in a grid arrangement.
* The board image can be drawn using generateImage() method.
*/
class CV_EXPORTS_W GridBoard : public Board {
protected:
GridBoard(); // not constructible directly: use GridBoard::create()
public:
/** @brief Draw a GridBoard
*
* @param outSize size of the output image in pixels.
* @param img output image with the board. The size of this image will be outSize
* and the board will be on the center, keeping the board proportions.
* @param marginSize minimum margins (in pixels) of the board in the output image
* @param borderBits width of the marker borders.
*
* This function returns the image of the GridBoard, ready to be printed.
*/
CV_WRAP void generateImage(Size outSize, OutputArray img, int marginSize = 0, int borderBits = 1) const CV_OVERRIDE;
/**
* @brief Create a GridBoard object
*
* @param markersX number of markers in X direction
* @param markersY number of markers in Y direction
* @param markerLength marker side length (normally in meters)
* @param markerSeparation separation between two markers (same unit as markerLength)
* @param dictionary dictionary of markers indicating the type of markers
* @param ids set marker ids in dictionary to use on board.
* @return the output GridBoard object
*
* This function creates a GridBoard object given the number of markers in each direction and
* the marker size and marker separation.
*/
CV_WRAP static Ptr<GridBoard> create(int markersX, int markersY, float markerLength, float markerSeparation,
const Dictionary &dictionary, InputArray ids);
/**
* @overload
* @brief Create a GridBoard object
*
* @param markersX number of markers in X direction
* @param markersY number of markers in Y direction
* @param markerLength marker side length (normally in meters)
* @param markerSeparation separation between two markers (same unit as markerLength)
* @param dictionary dictionary of markers indicating the type of markers
* @param firstMarker id of first marker in dictionary to use on board.
* @return the output GridBoard object
*/
CV_WRAP static Ptr<GridBoard> create(int markersX, int markersY, float markerLength, float markerSeparation,
const Dictionary &dictionary, int firstMarker = 0);
/// number of markers in X and Y directions, as passed to create()
CV_WRAP Size getGridSize() const;
/// marker side length (same unit as passed to create(), normally meters)
CV_WRAP float getMarkerLength() const;
/// separation between two consecutive markers (same unit as the marker length)
CV_WRAP float getMarkerSeparation() const;
protected:
// grid-specific state hidden behind a forward-declared impl struct (pimpl)
struct GridImpl;
Ptr<GridImpl> gridImpl;
friend class CharucoBoard; // grants CharucoBoard access to the grid internals
};
/**
* @brief ChArUco board is a planar chessboard where the markers are placed inside the white squares of a chessboard.
*
* The benefits of ChArUco boards is that they provide both, ArUco markers versatility and chessboard corner precision,
* which is important for calibration and pose estimation. The board image can be drawn using generateImage() method.
*/
class CV_EXPORTS_W CharucoBoard : public Board {
protected:
CharucoBoard(); // not constructible directly: use CharucoBoard::create()
public:
/** @brief Draw a ChArUco board
*
* @param outSize size of the output image in pixels.
* @param img output image with the board. The size of this image will be outSize
* and the board will be on the center, keeping the board proportions.
* @param marginSize minimum margins (in pixels) of the board in the output image
* @param borderBits width of the marker borders.
*
* This function returns the image of the ChArUco board, ready to be printed.
*/
CV_WRAP void generateImage(Size outSize, OutputArray img, int marginSize = 0, int borderBits = 1) const CV_OVERRIDE;
/** @brief Create a CharucoBoard object
*
* @param squaresX number of chessboard squares in X direction
* @param squaresY number of chessboard squares in Y direction
* @param squareLength chessboard square side length (normally in meters)
* @param markerLength marker side length (same unit as squareLength)
* @param dictionary dictionary of markers indicating the type of markers.
* @param ids array of id used markers
* The first markers in the dictionary are used to fill the white chessboard squares.
* @return the output CharucoBoard object
*
* This function creates a CharucoBoard object given the number of squares in each direction
* and the size of the markers and chessboard squares.
*/
CV_WRAP static Ptr<CharucoBoard> create(int squaresX, int squaresY, float squareLength, float markerLength,
const Dictionary &dictionary, InputArray ids = noArray());
/// number of chessboard squares in X and Y directions, as passed to create()
CV_WRAP Size getChessboardSize() const;
/// chessboard square side length (same unit as passed to create(), normally meters)
CV_WRAP float getSquareLength() const;
/// marker side length (same unit as the square length)
CV_WRAP float getMarkerLength() const;
/** @brief get CharucoBoard::chessboardCorners
*/
CV_WRAP std::vector<Point3f> getChessboardCorners() const;
/** @brief get CharucoBoard::nearestMarkerIdx
*/
// NOTE(review): CV_PROP on a getter method is unusual (CV_WRAP is the typical marker
// for wrapped methods) -- confirm the bindings generator handles these two as intended.
CV_PROP std::vector<std::vector<int> > getNearestMarkerIdx() const;
/** @brief get CharucoBoard::nearestMarkerCorners
*/
CV_PROP std::vector<std::vector<int> > getNearestMarkerCorners() const;
/** @brief check whether the ChArUco markers are collinear
*
* @param charucoIds list of identifiers for each corner in charucoCorners per frame.
* @return bool value, 1 (true) if detected corners form a line, 0 (false) if they do not.
* solvePnP, calibration functions will fail if the corners are collinear (true).
*
* The number of ids in charucoIds should be <= the number of chessboard corners in the board.
* This function checks whether the charuco corners are on a straight line (returns true, if so), or not (false).
* Axis parallel, as well as diagonal and other straight lines detected. Degenerate cases:
* for number of charucoIds <= 2, the function returns true.
*/
CV_WRAP bool checkCharucoCornersCollinear(InputArray charucoIds) const;
protected:
// charuco-specific state hidden behind a forward-declared impl struct (pimpl)
struct CharucoImpl;
friend struct CharucoImpl;
Ptr<CharucoImpl> charucoImpl;
};
//! @}
}
}
#endif

View File

@ -0,0 +1,396 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html
#ifndef OPENCV_OBJDETECT_ARUCO_DETECTOR_HPP
#define OPENCV_OBJDETECT_ARUCO_DETECTOR_HPP
#include <opencv2/objdetect/aruco_dictionary.hpp>
#include <opencv2/objdetect/aruco_board.hpp>
namespace cv {
namespace aruco {
/** @defgroup aruco ArUco Marker Detection
* Square fiducial markers (also known as Augmented Reality Markers) are useful for easy,
* fast and robust camera pose estimation.
*
* The main functionality of ArucoDetector class is detection of markers in an image. There are even more
* functionalities implemented in the aruco contrib module (files aruco.hpp, charuco.hpp, aruco_calib.hpp):
* - Pose estimation from a single marker or from a board/set of markers
* - Detection of ChArUco board for high subpixel accuracy
* - Camera calibration from both, ArUco boards and ChArUco boards.
* - Detection of ChArUco diamond markers
* The functionalities from the aruco contrib module are planned to be transferred to the main repository.
*
* The implementation is based on the ArUco Library by R. Muñoz-Salinas and S. Garrido-Jurado @cite Aruco2014.
*
* Markers can also be detected based on the AprilTag 2 @cite wang2016iros fiducial detection method.
*
* @sa @cite Aruco2014
* This code has been originally developed by Sergio Garrido-Jurado as a project
* for Google Summer of Code 2015 (GSoC 15).
*/
//! @addtogroup aruco
//! @{
/** @brief Corner refinement strategy applied after initial marker detection
* (selected via DetectorParameters::cornerRefinementMethod).
*/
enum CornerRefineMethod{
CORNER_REFINE_NONE, ///< Tag and corners detection based on the ArUco approach
CORNER_REFINE_SUBPIX, ///< ArUco approach and refine the corners locations using corner subpixel accuracy
CORNER_REFINE_CONTOUR, ///< ArUco approach and refine the corners locations using the contour-points line fitting
CORNER_REFINE_APRILTAG, ///< Tag and corners detection based on the AprilTag 2 approach @cite wang2016iros
};
/** @brief struct DetectorParameters is used by ArucoDetector
*/
struct CV_EXPORTS_W_SIMPLE DetectorParameters {
CV_WRAP DetectorParameters() {
adaptiveThreshWinSizeMin = 3;
adaptiveThreshWinSizeMax = 23;
adaptiveThreshWinSizeStep = 10;
adaptiveThreshConstant = 7;
minMarkerPerimeterRate = 0.03;
maxMarkerPerimeterRate = 4.;
polygonalApproxAccuracyRate = 0.03;
minCornerDistanceRate = 0.05;
minDistanceToBorder = 3;
minMarkerDistanceRate = 0.05;
cornerRefinementMethod = CORNER_REFINE_NONE;
cornerRefinementWinSize = 5;
cornerRefinementMaxIterations = 30;
cornerRefinementMinAccuracy = 0.1;
markerBorderBits = 1;
perspectiveRemovePixelPerCell = 4;
perspectiveRemoveIgnoredMarginPerCell = 0.13;
maxErroneousBitsInBorderRate = 0.35;
minOtsuStdDev = 5.0;
errorCorrectionRate = 0.6;
aprilTagQuadDecimate = 0.0;
aprilTagQuadSigma = 0.0;
aprilTagMinClusterPixels = 5;
aprilTagMaxNmaxima = 10;
aprilTagCriticalRad = (float)(10* CV_PI /180);
aprilTagMaxLineFitMse = 10.0;
aprilTagMinWhiteBlackDiff = 5;
aprilTagDeglitch = 0;
detectInvertedMarker = false;
useAruco3Detection = false;
minSideLengthCanonicalImg = 32;
minMarkerLengthRatioOriginalImg = 0.0;
}
/** @brief Read a new set of DetectorParameters from FileNode (use FileStorage.root()).
*/
CV_WRAP bool readDetectorParameters(const FileNode& fn);
/** @brief Write a set of DetectorParameters to FileStorage
*/
bool writeDetectorParameters(FileStorage& fs);
/** @brief simplified API for language bindings
*/
CV_WRAP bool writeDetectorParameters(const Ptr<FileStorage>& fs, const String& name = String());
/// minimum window size for adaptive thresholding before finding contours (default 3).
CV_PROP_RW int adaptiveThreshWinSizeMin;
/// maximum window size for adaptive thresholding before finding contours (default 23).
CV_PROP_RW int adaptiveThreshWinSizeMax;
/// increments from adaptiveThreshWinSizeMin to adaptiveThreshWinSizeMax during the thresholding (default 10).
CV_PROP_RW int adaptiveThreshWinSizeStep;
/// constant for adaptive thresholding before finding contours (default 7)
CV_PROP_RW double adaptiveThreshConstant;
/** @brief determine minimum perimeter for marker contour to be detected.
*
* This is defined as a rate respect to the maximum dimension of the input image (default 0.03).
*/
CV_PROP_RW double minMarkerPerimeterRate;
/** @brief determine maximum perimeter for marker contour to be detected.
*
* This is defined as a rate respect to the maximum dimension of the input image (default 4.0).
*/
CV_PROP_RW double maxMarkerPerimeterRate;
/// minimum accuracy during the polygonal approximation process to determine which contours are squares. (default 0.03)
CV_PROP_RW double polygonalApproxAccuracyRate;
/// minimum distance between corners for detected markers relative to its perimeter (default 0.05)
CV_PROP_RW double minCornerDistanceRate;
/// minimum distance of any corner to the image border for detected markers (in pixels) (default 3)
CV_PROP_RW int minDistanceToBorder;
/** @brief minimum mean distance between two marker corners to be considered similar, so that the smaller one is removed.
*
* The rate is relative to the smaller perimeter of the two markers (default 0.05).
*/
CV_PROP_RW double minMarkerDistanceRate;
/** @brief default value CORNER_REFINE_NONE */
CV_PROP_RW CornerRefineMethod cornerRefinementMethod;
/// window size for the corner refinement process (in pixels) (default 5).
CV_PROP_RW int cornerRefinementWinSize;
/// maximum number of iterations for stop criteria of the corner refinement process (default 30).
CV_PROP_RW int cornerRefinementMaxIterations;
/// minimum error for the stop criteria of the corner refinement process (default: 0.1)
CV_PROP_RW double cornerRefinementMinAccuracy;
/// number of bits of the marker border, i.e. marker border width (default 1).
CV_PROP_RW int markerBorderBits;
/// number of bits (per dimension) for each cell of the marker when removing the perspective (default 4).
CV_PROP_RW int perspectiveRemovePixelPerCell;
/** @brief width of the margin of pixels on each cell not considered for the determination of the cell bit.
*
* Represents the rate respect to the total size of the cell, i.e. perspectiveRemovePixelPerCell (default 0.13)
*/
CV_PROP_RW double perspectiveRemoveIgnoredMarginPerCell;
/** @brief maximum number of accepted erroneous bits in the border (i.e. number of allowed white bits in the border).
*
* Represented as a rate respect to the total number of bits per marker (default 0.35).
*/
CV_PROP_RW double maxErroneousBitsInBorderRate;
/** @brief minimum standard deviation in pixels values during the decodification step to apply Otsu
* thresholding (otherwise, all the bits are set to 0 or 1 depending on mean higher than 128 or not) (default 5.0)
*/
CV_PROP_RW double minOtsuStdDev;
/// error correction rate respect to the maximum error correction capability for each dictionary (default 0.6).
CV_PROP_RW double errorCorrectionRate;
/** @brief April :: User-configurable parameters.
*
* Detection of quads can be done on a lower-resolution image, improving speed at a cost of
* pose accuracy and a slight decrease in detection rate. Decoding the binary payload is still
* done at full resolution. (default 0.0)
*/
CV_PROP_RW float aprilTagQuadDecimate;
/// what Gaussian blur should be applied to the segmented image (used for quad detection?)
CV_PROP_RW float aprilTagQuadSigma;
// April :: Internal variables
/// reject quads containing too few pixels (default 5).
CV_PROP_RW int aprilTagMinClusterPixels;
/// how many corner candidates to consider when segmenting a group of pixels into a quad (default 10).
CV_PROP_RW int aprilTagMaxNmaxima;
/** @brief reject quads where pairs of edges have angles that are close to straight or close to 180 degrees.
*
* Zero means that no quads are rejected. (In radians) (default 10*PI/180)
*/
CV_PROP_RW float aprilTagCriticalRad;
/// when fitting lines to the contours, what is the maximum mean squared error
CV_PROP_RW float aprilTagMaxLineFitMse;
/** @brief add an extra check that the white model must be (overall) brighter than the black model.
*
* When we build our model of black & white pixels, we add an extra check that the white model must be (overall)
* brighter than the black model. How much brighter? (in pixel values, [0,255]), (default 5)
*/
CV_PROP_RW int aprilTagMinWhiteBlackDiff;
/// should the thresholded image be deglitched? Only useful for very noisy images (default 0).
CV_PROP_RW int aprilTagDeglitch;
/** @brief to check if there is a white marker.
*
* In order to generate a "white" marker just invert a normal marker by using a tilde, ~markerImage. (default false)
*/
CV_PROP_RW bool detectInvertedMarker;
/** @brief enable the new and faster Aruco detection strategy.
*
* Proposed in the paper:
* Romero-Ramirez et al: Speeded up detection of squared fiducial markers (2018)
* https://www.researchgate.net/publication/325787310_Speeded_Up_Detection_of_Squared_Fiducial_Markers
*/
CV_PROP_RW bool useAruco3Detection;
/// minimum side length of a marker in the canonical image. Latter is the binarized image in which contours are searched.
CV_PROP_RW int minSideLengthCanonicalImg;
/// range [0,1], eq (2) from paper. The parameter tau_i has a direct influence on the processing speed.
CV_PROP_RW float minMarkerLengthRatioOriginalImg;
};
/** @brief struct RefineParameters is used by ArucoDetector
*/
struct CV_EXPORTS_W_SIMPLE RefineParameters {
CV_WRAP RefineParameters(float minRepDistance = 10.f, float errorCorrectionRate = 3.f, bool checkAllOrders = true);
/** @brief Read a new set of RefineParameters from FileNode (use FileStorage.root()).
*/
CV_WRAP bool readRefineParameters(const FileNode& fn);
/** @brief Write a set of RefineParameters to FileStorage
*/
bool writeRefineParameters(FileStorage& fs);
/** @brief simplified API for language bindings
*/
CV_WRAP bool writeRefineParameters(const Ptr<FileStorage>& fs, const String& name = String());
/** @brief minRepDistance minimum distance between the corners of the rejected candidate and the reprojected marker
in order to consider it as a correspondence.
*/
CV_PROP_RW float minRepDistance;
/** @brief errorCorrectionRate rate of allowed erroneous bits respect to the error correction capability of the used dictionary.
*
* -1 ignores the error correction step.
*/
CV_PROP_RW float errorCorrectionRate;
/** @brief checkAllOrders consider the four possible corner orders in the rejectedCorners array.
*
* If it is set to false, only the provided corner order is considered (default true).
*/
CV_PROP_RW bool checkAllOrders;
};
/** @brief The main functionality of ArucoDetector class is detection of markers in an image with detectMarkers() method.
*
* After detecting some markers in the image, you can try to find undetected markers from this dictionary with
* refineDetectedMarkers() method.
*
* @see DetectorParameters, RefineParameters
*/
class CV_EXPORTS_W ArucoDetector : public Algorithm
{
public:
/** @brief Basic ArucoDetector constructor
*
* @param dictionary indicates the type of markers that will be searched
* @param detectorParams marker detection parameters
* @param refineParams marker refine detection parameters
*/
CV_WRAP ArucoDetector(const Dictionary &dictionary = getPredefinedDictionary(cv::aruco::DICT_4X4_50),
const DetectorParameters &detectorParams = DetectorParameters(),
const RefineParameters& refineParams = RefineParameters());
/** @brief Basic marker detection
*
* @param image input image
* @param corners vector of detected marker corners. For each marker, its four corners
* are provided, (e.g std::vector<std::vector<cv::Point2f> > ). For N detected markers,
* the dimensions of this array is Nx4. The order of the corners is clockwise.
* @param ids vector of identifiers of the detected markers. The identifier is of type int
* (e.g. std::vector<int>). For N detected markers, the size of ids is also N.
* The identifiers have the same order as the markers in the imgPoints array.
* @param rejectedImgPoints contains the imgPoints of those squares whose inner code has not a
* correct codification. Useful for debugging purposes.
*
* Performs marker detection in the input image. Only markers included in the specific dictionary
* are searched. For each detected marker, it returns the 2D position of its corner in the image
* and its corresponding identifier.
* Note that this function does not perform pose estimation.
* @note The function does not correct lens distortion or take it into account. It's recommended to undistort
* input image with corresponding camera model, if camera parameters are known
* @sa undistort, estimatePoseSingleMarkers, estimatePoseBoard
*/
CV_WRAP void detectMarkers(InputArray image, OutputArrayOfArrays corners, OutputArray ids,
OutputArrayOfArrays rejectedImgPoints = noArray());
/** @brief Refind not detected markers based on the already detected and the board layout
*
* @param image input image
* @param board layout of markers in the board.
* @param detectedCorners vector of already detected marker corners.
* @param detectedIds vector of already detected marker identifiers.
* @param rejectedCorners vector of rejected candidates during the marker detection process.
* @param cameraMatrix optional input 3x3 floating-point camera matrix
* \f$A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$
* @param distCoeffs optional vector of distortion coefficients
* \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])\f$ of 4, 5, 8 or 12 elements
* @param recoveredIdxs Optional array to returns the indexes of the recovered candidates in the
* original rejectedCorners array.
*
* This function tries to find markers that were not detected in the basic detectMarkers function.
* First, based on the current detected marker and the board layout, the function interpolates
* the position of the missing markers. Then it tries to find correspondence between the reprojected
* markers and the rejected candidates based on the minRepDistance and errorCorrectionRate parameters.
* If camera parameters and distortion coefficients are provided, missing markers are reprojected
* using projectPoint function. If not, missing marker projections are interpolated using global
* homography, and all the marker corners in the board must have the same Z coordinate.
*/
CV_WRAP void refineDetectedMarkers(InputArray image, const Ptr<Board> &board,
InputOutputArrayOfArrays detectedCorners,
InputOutputArray detectedIds, InputOutputArrayOfArrays rejectedCorners,
InputArray cameraMatrix = noArray(), InputArray distCoeffs = noArray(),
OutputArray recoveredIdxs = noArray());
/// return the dictionary of markers used for detection
CV_WRAP const Dictionary& getDictionary() const;
/// set a new dictionary of markers to search for
CV_WRAP void setDictionary(const Dictionary& dictionary);
/// return the current marker detection parameters
CV_WRAP const DetectorParameters& getDetectorParameters() const;
/// set new marker detection parameters
CV_WRAP void setDetectorParameters(const DetectorParameters& detectorParameters);
/// return the current marker refinement parameters
CV_WRAP const RefineParameters& getRefineParameters() const;
/// set new marker refinement parameters
CV_WRAP void setRefineParameters(const RefineParameters& refineParameters);
/** @brief Stores algorithm parameters in a file storage
*/
virtual void write(FileStorage& fs) const override;
/** @brief simplified API for language bindings
*/
CV_WRAP inline void write(const Ptr<FileStorage>& fs, const String& name = String()) { Algorithm::write(fs, name); }
/** @brief Reads algorithm parameters from a file storage
*/
CV_WRAP virtual void read(const FileNode& fn) override;
protected:
// detector state (dictionary, parameters) hidden behind a forward-declared impl struct (pimpl)
struct ArucoDetectorImpl;
Ptr<ArucoDetectorImpl> arucoDetectorImpl;
};
/** @brief Draw detected markers in image
*
* @param image input/output image. It must have 1 or 3 channels. The number of channels is not altered.
* @param corners positions of marker corners on input image.
* (e.g std::vector<std::vector<cv::Point2f> > ). For N detected markers, the dimensions of
* this array should be Nx4. The order of the corners should be clockwise.
* @param ids vector of identifiers for markers in markersCorners.
* Optional, if not provided, ids are not painted.
* @param borderColor color of marker borders. Rest of colors (text color and first corner color)
* are calculated based on this one to improve visualization.
*
* Given an array of detected marker corners and its corresponding ids, this function draws
* the markers in the image. The marker borders are painted and the markers identifiers if provided.
* Useful for debugging purposes.
*/
CV_EXPORTS_W void drawDetectedMarkers(InputOutputArray image, InputArrayOfArrays corners,
InputArray ids = noArray(), Scalar borderColor = Scalar(0, 255, 0));
/** @brief Generate a canonical marker image
*
* @param dictionary dictionary of markers indicating the type of markers
* @param id identifier of the marker that will be returned. It has to be a valid id in the specified dictionary.
* @param sidePixels size of the image in pixels
* @param img output image with the marker
* @param borderBits width of the marker border (default 1).
*
* This function returns a marker image in its canonical form (i.e. ready to be printed)
*/
CV_EXPORTS_W void generateImageMarker(const Dictionary &dictionary, int id, int sidePixels, OutputArray img,
int borderBits = 1);
//! @}
}
}
#endif

View File

@ -0,0 +1,151 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html
#ifndef OPENCV_OBJDETECT_DICTIONARY_HPP
#define OPENCV_OBJDETECT_DICTIONARY_HPP
#include <opencv2/core.hpp>
namespace cv {
namespace aruco {
//! @addtogroup aruco
//! @{
/** @brief Dictionary/Set of markers, it contains the inner codification
*
* BytesList contains the marker codewords where:
* - bytesList.rows is the dictionary size
* - each marker is encoded using `nbytes = ceil(markerSize*markerSize/8.)`
* - each row contains all 4 rotations of the marker, so its length is `4*nbytes`
*
* `bytesList.ptr(i)[k*nbytes + j]` is then the j-th byte of i-th marker, in its k-th rotation.
*/
class CV_EXPORTS_W_SIMPLE Dictionary {
    public:
    CV_PROP_RW Mat bytesList;         // marker code information: one row per marker, all 4 rotations
    CV_PROP_RW int markerSize;        // number of bits per dimension
    CV_PROP_RW int maxCorrectionBits; // maximum number of bits that can be corrected

    CV_WRAP Dictionary();

    CV_WRAP Dictionary(const Mat &bytesList, int _markerSize, int maxcorr = 0);

    /** @brief Read a new dictionary from FileNode.
     *
     * Dictionary format:\n
     * nmarkers: 35\n
     * markersize: 6\n
     * maxCorrectionBits: 5\n
     * marker_0: "101011111011111001001001101100000000"\n
     * ...\n
     * marker_34: "011111010000111011111110110101100101"
     */
    CV_WRAP bool readDictionary(const cv::FileNode& fn);

    /** @brief Write a dictionary to FileStorage, format is the same as in readDictionary().
     */
    void writeDictionary(FileStorage& fs);

    /** @brief simplified API for language bindings
     */
    CV_WRAP void writeDictionary(Ptr<FileStorage>& fs, const String& name = String());

    /** @brief Given a matrix of bits, returns whether the marker is identified or not.
     *
     * It returns by reference the correct id (if any) and the correct rotation
     */
    CV_WRAP bool identify(const Mat &onlyBits, CV_OUT int &idx, CV_OUT int &rotation, double maxCorrectionRate) const;

    /** @brief Returns the distance of the input bits to the specific id.
     *
     * If allRotations is true, the four possible rotations of the bits are considered
     */
    CV_WRAP int getDistanceToId(InputArray bits, int id, bool allRotations = true) const;

    /** @brief Generate a canonical marker image
     */
    CV_WRAP void generateImageMarker(int id, int sidePixels, OutputArray _img, int borderBits = 1) const;

    /** @brief Transform matrix of bits to list of bytes in the 4 rotations
     */
    CV_WRAP static Mat getByteListFromBits(const Mat &bits);

    /** @brief Transform list of bytes to matrix of bits
     */
    CV_WRAP static Mat getBitsFromByteList(const Mat &byteList, int markerSize);
};
/** @brief Predefined markers dictionaries/sets
 *
 * Each dictionary indicates the number of bits and the number of markers contained
 * - DICT_ARUCO_ORIGINAL: standard ArUco Library Markers. 1024 markers, 5x5 bits, 0 minimum
 *   distance
 *   (NOTE(review): the per-value comment below describes DICT_ARUCO_ORIGINAL as 6x6 bits with
 *   minimum hamming distance 3 — the two descriptions disagree; confirm which is correct.)
 */
enum PredefinedDictionaryType {
    DICT_4X4_50 = 0,        ///< 4x4 bits, minimum hamming distance between any two codes = 4, 50 codes
    DICT_4X4_100,           ///< 4x4 bits, minimum hamming distance between any two codes = 3, 100 codes
    DICT_4X4_250,           ///< 4x4 bits, minimum hamming distance between any two codes = 3, 250 codes
    DICT_4X4_1000,          ///< 4x4 bits, minimum hamming distance between any two codes = 2, 1000 codes
    DICT_5X5_50,            ///< 5x5 bits, minimum hamming distance between any two codes = 8, 50 codes
    DICT_5X5_100,           ///< 5x5 bits, minimum hamming distance between any two codes = 7, 100 codes
    DICT_5X5_250,           ///< 5x5 bits, minimum hamming distance between any two codes = 6, 250 codes
    DICT_5X5_1000,          ///< 5x5 bits, minimum hamming distance between any two codes = 5, 1000 codes
    DICT_6X6_50,            ///< 6x6 bits, minimum hamming distance between any two codes = 13, 50 codes
    DICT_6X6_100,           ///< 6x6 bits, minimum hamming distance between any two codes = 12, 100 codes
    DICT_6X6_250,           ///< 6x6 bits, minimum hamming distance between any two codes = 11, 250 codes
    DICT_6X6_1000,          ///< 6x6 bits, minimum hamming distance between any two codes = 9, 1000 codes
    DICT_7X7_50,            ///< 7x7 bits, minimum hamming distance between any two codes = 19, 50 codes
    DICT_7X7_100,           ///< 7x7 bits, minimum hamming distance between any two codes = 18, 100 codes
    DICT_7X7_250,           ///< 7x7 bits, minimum hamming distance between any two codes = 17, 250 codes
    DICT_7X7_1000,          ///< 7x7 bits, minimum hamming distance between any two codes = 14, 1000 codes
    DICT_ARUCO_ORIGINAL,    ///< 6x6 bits, minimum hamming distance between any two codes = 3, 1024 codes
    DICT_APRILTAG_16h5,     ///< 4x4 bits, minimum hamming distance between any two codes = 5, 30 codes
    DICT_APRILTAG_25h9,     ///< 5x5 bits, minimum hamming distance between any two codes = 9, 35 codes
    DICT_APRILTAG_36h10,    ///< 6x6 bits, minimum hamming distance between any two codes = 10, 2320 codes
    DICT_APRILTAG_36h11     ///< 6x6 bits, minimum hamming distance between any two codes = 11, 587 codes
};
/** @brief Returns one of the predefined dictionaries defined in PredefinedDictionaryType
 */
CV_EXPORTS Dictionary getPredefinedDictionary(PredefinedDictionaryType name);

/** @brief Returns one of the predefined dictionaries referenced by DICT_*.
 */
CV_EXPORTS_W Dictionary getPredefinedDictionary(int dict);

/** @brief Extend base dictionary by new nMarkers
 *
 * @param nMarkers number of markers in the dictionary
 * @param markerSize number of bits per dimension of each marker
 * @param baseDictionary Include the markers in this dictionary at the beginning (optional)
 * @param randomSeed a user supplied seed for theRNG()
 *
 * This function creates a new dictionary composed of nMarkers markers, each marker composed
 * of markerSize x markerSize bits. If baseDictionary is provided, its markers are directly
 * included and the rest are generated based on them. If the size of baseDictionary is higher
 * than nMarkers, only the first nMarkers in baseDictionary are taken and no new marker is added.
 */
CV_EXPORTS_W Dictionary extendDictionary(int nMarkers, int markerSize, const Dictionary &baseDictionary = Dictionary(),
                                         int randomSeed=0);
//! @}
}
}
#endif

View File

@ -0,0 +1,83 @@
package org.opencv.test.aruco;
import java.util.ArrayList;
import java.util.List;
import org.opencv.test.OpenCVTestCase;
import org.opencv.core.Scalar;
import org.opencv.core.Mat;
import org.opencv.core.Size;
import org.opencv.core.CvType;
import org.opencv.objdetect.*;
public class ArucoTest extends OpenCVTestCase {

    /** A Board built from explicit object points must render a non-empty image. */
    public void testGenerateBoards() {
        Dictionary dictionary = Objdetect.getPredefinedDictionary(Objdetect.DICT_4X4_50);

        Mat point1 = new Mat(4, 3, CvType.CV_32FC1);
        int row = 0, col = 0;
        double squareLength = 40.;
        // One marker: four (x, y, z) corners, one per row.
        // NOTE(review): the 2nd and 4th corners are both (0, squareLength, 0), making the
        // quad degenerate — presumably the last corner was meant to be (squareLength, 0, 0);
        // confirm against the C++ board tests before changing.
        point1.put(row, col, 0, 0, 0,
                0, squareLength, 0,
                squareLength, squareLength, 0,
                0, squareLength, 0);
        List<Mat>objPoints = new ArrayList<Mat>();
        objPoints.add(point1);

        Mat ids = new Mat(1, 1, CvType.CV_32SC1);
        ids.put(row, col, 0);

        Board board = Board.create(objPoints, dictionary, ids);

        Mat image = new Mat();
        board.generateImage(new Size(80, 80), image, 2);
        assertTrue(image.total() > 0);
    }

    /** Regression for issue 3133: bytesList must be writable through the wrapper. */
    public void testArucoIssue3133() {
        byte[][] marker = {{0,1,1},{1,1,1},{0,1,1}};
        Dictionary dictionary = Objdetect.extendDictionary(1, 3);
        dictionary.set_maxCorrectionBits(0);
        Mat markerBits = new Mat(3, 3, CvType.CV_8UC1);
        for (int i = 0; i < 3; i++) {
            for (int j = 0; j < 3; j++) {
                markerBits.put(i, j, marker[i][j]);
            }
        }

        Mat markerCompressed = Dictionary.getByteListFromBits(markerBits);
        assertMatNotEqual(markerCompressed, dictionary.get_bytesList());

        dictionary.set_bytesList(markerCompressed);
        assertMatEqual(markerCompressed, dictionary.get_bytesList());
    }

    /** Detect a single generated marker and verify the exact corner coordinates. */
    public void testArucoDetector() {
        Dictionary dictionary = Objdetect.getPredefinedDictionary(0);
        DetectorParameters detectorParameters = new DetectorParameters();
        ArucoDetector detector = new ArucoDetector(dictionary, detectorParameters);

        Mat markerImage = new Mat();
        int id = 1, offset = 5, size = 40;
        Objdetect.generateImageMarker(dictionary, id, size, markerImage, detectorParameters.get_markerBorderBits());

        // paste the marker into a white canvas with an `offset` pixel margin
        Mat image = new Mat(markerImage.rows() + 2*offset, markerImage.cols() + 2*offset,
                CvType.CV_8UC1, new Scalar(255));
        Mat m = image.submat(offset, size+offset, offset, size+offset);
        markerImage.copyTo(m);

        List<Mat> corners = new ArrayList();
        Mat ids = new Mat();
        detector.detectMarkers(image, corners, ids);

        assertEquals(1, corners.size());
        Mat res = corners.get(0);
        // corners are returned clockwise from the top-left
        assertArrayEquals(new double[]{offset, offset}, res.get(0, 0), 0.0);
        assertArrayEquals(new double[]{size + offset - 1, offset}, res.get(0, 1), 0.0);
        assertArrayEquals(new double[]{size + offset - 1, size + offset - 1}, res.get(0, 2), 0.0);
        assertArrayEquals(new double[]{offset, size + offset - 1}, res.get(0, 3), 0.0);
    }
}

View File

@ -0,0 +1,94 @@
#!/usr/bin/env python
# Python 2/3 compatibility
from __future__ import print_function
import os, numpy as np
import cv2 as cv
from tests_common import NewOpenCVTests
class aruco_objdetect_test(NewOpenCVTests):
    """Smoke tests for the aruco API after its move from opencv_contrib to objdetect."""

    def test_idsAccessibility(self):
        """Ids passed to the legacy CharucoBoard factory must round-trip via getIds()."""
        ids = np.arange(17)
        rev_ids = ids[::-1]

        aruco_dict = cv.aruco.getPredefinedDictionary(cv.aruco.DICT_5X5_250)
        board = cv.aruco.CharucoBoard_create(7, 5, 1, 0.5, aruco_dict)
        np.testing.assert_array_equal(board.getIds().squeeze(), ids)

        board = cv.aruco.CharucoBoard_create(7, 5, 1, 0.5, aruco_dict, rev_ids)
        np.testing.assert_array_equal(board.getIds().squeeze(), rev_ids)

        board = cv.aruco.CharucoBoard_create(7, 5, 1, 0.5, aruco_dict, ids)
        np.testing.assert_array_equal(board.getIds().squeeze(), ids)

    def test_identify(self):
        """Dictionary.identify() must recover id and rotation of a known bit pattern."""
        aruco_dict = cv.aruco.getPredefinedDictionary(cv.aruco.DICT_4X4_50)
        expected_idx = 9
        expected_rotation = 2
        bit_marker = np.array([[0, 1, 1, 0], [1, 0, 1, 0], [1, 1, 1, 1], [0, 0, 1, 1]],
                              dtype=np.uint8)

        check, idx, rotation = aruco_dict.identify(bit_marker, 0)

        # Fixed: `assertTrue(check, True)` passed True as the failure *message*
        # (assertTrue's second positional argument), which was a no-op.
        self.assertTrue(check)
        self.assertEqual(idx, expected_idx)
        self.assertEqual(rotation, expected_rotation)

    def test_getDistanceToId(self):
        """A bit pattern equal to marker 7 (rotated) must have distance 0 to id 7."""
        aruco_dict = cv.aruco.getPredefinedDictionary(cv.aruco.DICT_4X4_50)
        idx = 7
        rotation = 3
        bit_marker = np.array([[0, 1, 0, 1], [0, 1, 1, 1], [1, 1, 0, 0], [0, 1, 0, 0]],
                              dtype=np.uint8)
        dist = aruco_dict.getDistanceToId(bit_marker, idx)
        self.assertEqual(dist, 0)

    def test_aruco_detector(self):
        """detectMarkers() must find a single padded marker at exact corner positions."""
        aruco_params = cv.aruco.DetectorParameters()
        aruco_dict = cv.aruco.getPredefinedDictionary(cv.aruco.DICT_4X4_250)
        aruco_detector = cv.aruco.ArucoDetector(aruco_dict, aruco_params)
        id = 2
        marker_size = 100
        offset = 10
        img_marker = cv.aruco.generateImageMarker(aruco_dict, id, marker_size, aruco_params.markerBorderBits)
        # surround the marker with a white border so it can be detected
        img_marker = np.pad(img_marker, pad_width=offset, mode='constant', constant_values=255)
        gold_corners = np.array([[offset, offset], [marker_size+offset-1.0, offset],
                                 [marker_size+offset-1.0, marker_size+offset-1.0],
                                 [offset, marker_size+offset-1.0]], dtype=np.float32)
        corners, ids, rejected = aruco_detector.detectMarkers(img_marker)

        self.assertEqual(1, len(ids))
        self.assertEqual(id, ids[0])
        for i in range(0, len(corners)):
            np.testing.assert_array_equal(gold_corners, corners[i].reshape(4, 2))

    def test_aruco_detector_refine(self):
        """refineDetectedMarkers() must recover a marker moved to the rejected list."""
        aruco_params = cv.aruco.DetectorParameters()
        aruco_dict = cv.aruco.getPredefinedDictionary(cv.aruco.DICT_4X4_250)
        aruco_detector = cv.aruco.ArucoDetector(aruco_dict, aruco_params)
        board_size = (3, 4)
        board = cv.aruco.GridBoard_create(board_size[0], board_size[1], 5.0, 1.0, aruco_dict)
        board_image = board.generateImage((board_size[0]*50, board_size[1]*50), marginSize=10)

        corners, ids, rejected = aruco_detector.detectMarkers(board_image)
        self.assertEqual(board_size[0]*board_size[1], len(ids))

        # drop the last detection and pretend it was rejected
        part_corners, part_ids, part_rejected = corners[:-1], ids[:-1], list(rejected)
        part_rejected.append(corners[-1])

        refine_corners, refine_ids, refine_rejected, recovered_ids = aruco_detector.refineDetectedMarkers(board_image, board, part_corners, part_ids, part_rejected)

        self.assertEqual(board_size[0] * board_size[1], len(refine_ids))
        self.assertEqual(1, len(recovered_ids))
        self.assertEqual(ids[-1], refine_ids[-1])
        self.assertEqual((1, 4, 2), refine_corners[0].shape)
        np.testing.assert_array_equal(corners, refine_corners)


if __name__ == '__main__':
    NewOpenCVTests.bootstrap()

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,117 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2013-2016, The Regents of The University of Michigan.
//
// This software was developed in the APRIL Robotics Lab under the
// direction of Edwin Olson, ebolson@umich.edu. This software may be
// available under alternative licensing terms; contact the address above.
//
// The views and conclusions contained in the software and documentation are those
// of the authors and should not be interpreted as representing official policies,
// either expressed or implied, of the Regents of The University of Michigan.
// limitation: image size must be <32768 in width and height. This is
// because we use a fixed-point 16 bit integer representation with one
// fractional bit.
#ifndef _OPENCV_APRIL_QUAD_THRESH_HPP_
#define _OPENCV_APRIL_QUAD_THRESH_HPP_
#include "unionfind.hpp"
#include "zmaxheap.hpp"
#include "zarray.hpp"
namespace cv {
namespace aruco {
// Multiplicative hash folding a 64-bit key down to 32 bits
// (the constant is the Knuth/Fibonacci multiplier 2654435761).
static inline uint32_t u64hash_2(uint64_t x) {
    return uint32_t((2654435761UL * x) >> 32);
}

// Hash-table entry chaining point clusters keyed by a 64-bit id.
struct uint64_zarray_entry{
    uint64_t id;
    zarray_t *cluster;

    struct uint64_zarray_entry *next;
};

// Boundary point sample with orientation and gradient.
struct pt{
    // Note: these represent 2*actual value.
    uint16_t x, y;
    float theta;
    int16_t gx, gy;
};

// Candidate vertex removal considered while simplifying a boundary polygon.
struct remove_vertex{
    int i;           // which vertex to remove?
    int left, right; // left vertex, right vertex

    double err;
};

// A run of boundary points between two (potential) quad vertices.
struct segment{
    int is_vertex;

    // always greater than zero, but right can be > size, which denotes
    // a wrap around back to the beginning of the points. and left < right.
    int left, right;
};

// Cumulative line-fit moments over a prefix of boundary points.
struct line_fit_pt{
    double Mx, My;
    double Mxx, Myy, Mxy;
    double W; // total weight
};

/**
 * lfps contains *cumulative* moments for N points, with
 * index j reflecting points [0,j] (inclusive).
 * fit a line to the points [i0, i1] (inclusive). i0, i1 are both (0, sz)
 * if i1 < i0, we treat this as a wrap around.
 */
void fit_line(struct line_fit_pt *lfps, int sz, int i0, int i1, double *lineparm, double *err, double *mse);

// qsort() comparator — presumably orders candidates by descending err;
// NOTE(review): confirm against the definition in the corresponding .cpp.
int err_compare_descending(const void *_a, const void *_b);

/**
  1. Identify A) white points near a black point and B) black points near a white point.

  2. Find the connected components within each of the classes above,
  yielding clusters of "white-near-black" and
  "black-near-white". (These two classes are kept separate). Each
  segment has a unique id.

  3. For every pair of "white-near-black" and "black-near-white"
  clusters, find the set of points that are in one and adjacent to the
  other. In other words, a "boundary" layer between the two
  clusters. (This is actually performed by iterating over the pixels,
  rather than pairs of clusters.) Critically, this helps keep nearby
  edges from becoming connected.
**/
int quad_segment_maxima(const DetectorParameters &td, int sz, struct line_fit_pt *lfps, int indices[4]);

/**
 * returns 0 if the cluster looks bad.
 */
int quad_segment_agg(int sz, struct line_fit_pt *lfps, int indices[4]);

/**
 * return 1 if the quad looks okay, 0 if it should be discarded
 * quad
 **/
int fit_quad(const DetectorParameters &_params, const Mat im, zarray_t *cluster, struct sQuad *quad);

// Thresholding step of the AprilTag pipeline; semantics are defined in the .cpp.
void threshold(const Mat mIm, const DetectorParameters &parameters, Mat& mThresh);

// Extract quad candidates (and their contours) from a thresholded image.
zarray_t *apriltag_quad_thresh(const DetectorParameters &parameters, const Mat & mImg,
                               std::vector<std::vector<Point> > &contours);

// Full AprilTag quad detection entry point: fills marker candidates and contours.
void _apriltag(Mat im_orig, const DetectorParameters &_params, std::vector<std::vector<Point2f> > &candidates,
               std::vector<std::vector<Point> > &contours);
}}
#endif

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,131 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2013-2016, The Regents of The University of Michigan.
//
// This software was developed in the APRIL Robotics Lab under the
// direction of Edwin Olson, ebolson@umich.edu. This software may be
// available under alternative licensing terms; contact the address above.
//
// The views and conclusions contained in the software and documentation are those
// of the authors and should not be interpreted as representing official policies,
// either expressed or implied, of the Regents of The University of Michigan.
#ifndef _OPENCV_UNIONFIND_HPP_
#define _OPENCV_UNIONFIND_HPP_
namespace cv {
namespace aruco {
typedef struct unionfind unionfind_t;

// Disjoint-set forest over the ids [0, maxid].
// A node whose parent field equals its own index is the root of its set.
struct unionfind{
    uint32_t maxid;
    struct ufrec *data;
};

struct ufrec{
    // index of this node's parent; a root points at itself
    uint32_t parent;

    // element count of the whole component; meaningful only at the root
    uint32_t size;
};

// Allocate a forest of maxid+1 singleton sets.
static inline unionfind_t *unionfind_create(uint32_t maxid){
    unionfind_t *uf = (unionfind_t*) calloc(1, sizeof(unionfind_t));
    uf->maxid = maxid;
    uf->data = (struct ufrec*) malloc((maxid+1) * sizeof(struct ufrec));
    for (uint32_t i = 0; i <= maxid; i++) {
        uf->data[i].parent = i;
        uf->data[i].size = 1;
    }
    return uf;
}

// Release the forest and its node table.
static inline void unionfind_destroy(unionfind_t *uf){
    free(uf->data);
    free(uf);
}

// Find the root of id's set, compressing the traversed path so that
// subsequent lookups are (nearly) constant time.
static inline uint32_t unionfind_get_representative(unionfind_t *uf, uint32_t id){
    // walk up to the root
    uint32_t rep = id;
    while (uf->data[rep].parent != rep)
        rep = uf->data[rep].parent;

    // second pass: repoint every node on the walked path directly at the root
    for (uint32_t cur = id; cur != rep; ) {
        uint32_t up = uf->data[cur].parent;
        uf->data[cur].parent = rep;
        cur = up;
    }

    return rep;
}

// Number of elements in the set containing id.
static inline uint32_t unionfind_get_set_size(unionfind_t *uf, uint32_t id){
    return uf->data[unionfind_get_representative(uf, id)].size;
}

// Merge the sets containing aid and bid (union by size: the smaller tree is
// grafted under the larger one). Returns the representative of the merged set.
static inline uint32_t unionfind_connect(unionfind_t *uf, uint32_t aid, uint32_t bid){
    uint32_t ra = unionfind_get_representative(uf, aid);
    uint32_t rb = unionfind_get_representative(uf, bid);

    if (ra == rb)
        return ra;

    if (uf->data[ra].size > uf->data[rb].size) {
        uf->data[ra].size += uf->data[rb].size;
        uf->data[rb].parent = ra;
        return ra;
    }

    uf->data[rb].size += uf->data[ra].size;
    uf->data[ra].parent = rb;
    return rb;
}
}}
#endif

View File

@ -0,0 +1,148 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2013-2016, The Regents of The University of Michigan.
//
// This software was developed in the APRIL Robotics Lab under the
// direction of Edwin Olson, ebolson@umich.edu. This software may be
// available under alternative licensing terms; contact the address above.
//
// The views and conclusions contained in the software and documentation are those
// of the authors and should not be interpreted as representing official policies,
// either expressed or implied, of the Regents of The University of Michigan.
#ifndef _OPENCV_ZARRAY_HPP_
#define _OPENCV_ZARRAY_HPP_
namespace cv {
namespace aruco {
// Quad candidate: four corner coordinates, in pixels.
struct sQuad{
    float p[4][2]; // corners
};

/**
 * Defines a structure which acts as a resize-able array ala Java's ArrayList.
 */
typedef struct zarray zarray_t;
struct zarray{
    size_t el_sz; // size of each element

    int size;     // how many elements?
    int alloc;    // we've allocated storage for how many elements?
    char *data;
};

/**
 * Creates and returns a variable array structure capable of holding elements of
 * the specified size. It is the caller's responsibility to call zarray_destroy()
 * on the returned array when it is no longer needed.
 */
inline static zarray_t *_zarray_create(size_t el_sz){
    zarray_t *za = (zarray_t*) calloc(1, sizeof(zarray_t));
    za->el_sz = el_sz;
    return za;
}

/**
 * Frees all resources associated with the variable array structure which was
 * created by zarray_create(). After calling, 'za' will no longer be valid for storage.
 */
inline static void _zarray_destroy(zarray_t *za){
    if (za == NULL)
        return;

    if (za->data != NULL)
        free(za->data);
    memset(za, 0, sizeof(zarray_t)); // poison the struct to catch use-after-destroy
    free(za);
}

/**
 * Retrieves the number of elements currently being contained by the passed
 * array, which may be different from its capacity. The index of the last element
 * in the array will be one less than the returned value.
 */
inline static int _zarray_size(const zarray_t *za){
    return za->size;
}

/**
 * Allocates enough internal storage in the supplied variable array structure to
 * guarantee that the supplied number of elements (capacity) can be safely stored.
 * Growth is geometric (doubling, minimum 8 slots), so repeated adds are amortized O(1).
 */
inline static void _zarray_ensure_capacity(zarray_t *za, int capacity){
    if (capacity <= za->alloc)
        return;

    while (za->alloc < capacity) {
        za->alloc *= 2;
        if (za->alloc < 8)
            za->alloc = 8;
    }

    // realloc(NULL, n) behaves like malloc(n), covering the very first growth too
    za->data = (char*) realloc(za->data, za->alloc * za->el_sz);
}

/**
 * Adds a new element to the end of the supplied array, and sets its value
 * (by copying) from the data pointed to by the supplied pointer 'p'.
 * Automatically ensures that enough storage space is available for the new element.
 */
inline static void _zarray_add(zarray_t *za, const void *p){
    _zarray_ensure_capacity(za, za->size + 1);

    memcpy(&za->data[za->size*za->el_sz], p, za->el_sz);
    za->size++;
}

/**
 * Retrieves the element from the supplied array located at the zero-based
 * index of 'idx' and copies its value into the variable pointed to by the pointer
 * 'p'.
 */
inline static void _zarray_get(const zarray_t *za, int idx, void *p){
    CV_DbgAssert(idx >= 0);
    CV_DbgAssert(idx < za->size);

    memcpy(p, &za->data[idx*za->el_sz], za->el_sz);
}

/**
 * Similar to zarray_get(), but returns a "live" pointer to the internal
 * storage, avoiding a memcpy. This pointer is not valid across
 * operations which might move memory around (i.e. zarray_remove_value(),
 * zarray_remove_index(), zarray_insert(), zarray_sort(), zarray_clear()).
 * 'p' should be a pointer to the pointer which will be set to the internal address.
 */
inline static void _zarray_get_volatile(const zarray_t *za, int idx, void *p){
    CV_DbgAssert(idx >= 0);
    CV_DbgAssert(idx < za->size);

    *((void**) p) = &za->data[idx*za->el_sz];
}

// Shrinks the logical size only; capacity and element storage are left untouched.
inline static void _zarray_truncate(zarray_t *za, int sz){
    za->size = sz;
}

/**
 * Sets the value of the current element at index 'idx' by copying its value from
 * the data pointed to by 'p'. The previous value of the changed element will be
 * copied into the data pointed to by 'outp' if it is not null.
 */
static inline void _zarray_set(zarray_t *za, int idx, const void *p, void *outp){
    CV_DbgAssert(idx >= 0);
    CV_DbgAssert(idx < za->size);

    if (outp != NULL)
        memcpy(outp, &za->data[idx*za->el_sz], za->el_sz);

    memcpy(&za->data[idx*za->el_sz], p, za->el_sz);
}
}
}
#endif

View File

@ -0,0 +1,207 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2013-2016, The Regents of The University of Michigan.
//
// This software was developed in the APRIL Robotics Lab under the
// direction of Edwin Olson, ebolson@umich.edu. This software may be
// available under alternative licensing terms; contact the address above.
//
// The views and conclusions contained in the software and documentation are those
// of the authors and should not be interpreted as representing official policies,
// either expressed or implied, of the Regents of The University of Michigan.
#include "../../precomp.hpp"
#include "zmaxheap.hpp"
// 0
// 1 2
// 3 4 5 6
// 7 8 9 10 11 12 13 14
//
// Children of node i: 2*i+1, 2*i+2
// Parent of node i: (i-1) / 2
//
// Heap property: a parent is greater than (or equal to) its children.
#define MIN_CAPACITY 16
namespace cv {
namespace aruco {
// Binary max-heap over untyped el_sz-byte elements, each keyed by a float value.
struct zmaxheap
{
    size_t el_sz;  // size of each element payload, in bytes

    int size;      // number of elements currently stored
    int alloc;     // number of element slots allocated

    float *values; // heap-ordered keys
    char *data;    // element payloads, parallel to 'values'

    // swap strategy, selected at create time based on el_sz
    void (*swap)(zmaxheap_t *heap, int a, int b);
};

// Generic swap: exchanges the keys and memcpy()s the two payloads via a temp buffer.
static inline void _swap_default(zmaxheap_t *heap, int a, int b)
{
    float t = heap->values[a];
    heap->values[a] = heap->values[b];
    heap->values[b] = t;

    cv::AutoBuffer<char> tmp(heap->el_sz);
    memcpy(tmp.data(), &heap->data[a*heap->el_sz], heap->el_sz);
    memcpy(&heap->data[a*heap->el_sz], &heap->data[b*heap->el_sz], heap->el_sz);
    memcpy(&heap->data[b*heap->el_sz], tmp.data(), heap->el_sz);
}

// Fast-path swap used when elements are exactly pointer-sized.
static inline void _swap_pointer(zmaxheap_t *heap, int a, int b)
{
    float t = heap->values[a];
    heap->values[a] = heap->values[b];
    heap->values[b] = t;

    void **pp = (void**) heap->data;
    void *tmp = pp[a];
    pp[a] = pp[b];
    pp[b] = tmp;
}

// Create an empty max-heap whose elements are el_sz bytes each.
zmaxheap_t *zmaxheap_create(size_t el_sz)
{
    zmaxheap_t *heap = (zmaxheap_t*)calloc(1, sizeof(zmaxheap_t));
    heap->el_sz = el_sz;

    heap->swap = _swap_default;

    if (el_sz == sizeof(void*))
        heap->swap = _swap_pointer;

    return heap;
}

// Release the heap and all of its storage.
void zmaxheap_destroy(zmaxheap_t *heap)
{
    free(heap->values);
    free(heap->data);
    memset(heap, 0, sizeof(zmaxheap_t)); // poison to catch use-after-destroy
    free(heap);
}

// Grow backing storage geometrically (doubling, minimum MIN_CAPACITY slots).
static void _zmaxheap_ensure_capacity(zmaxheap_t *heap, int capacity)
{
    if (heap->alloc >= capacity)
        return;

    int newcap = heap->alloc;

    while (newcap < capacity) {
        if (newcap < MIN_CAPACITY) {
            newcap = MIN_CAPACITY;
            continue;
        }

        newcap *= 2;
    }

    heap->values = (float*)realloc(heap->values, newcap * sizeof(float));
    heap->data = (char*)realloc(heap->data, newcap * heap->el_sz);
    heap->alloc = newcap;
}

// Insert element *p with key v, then sift it up to restore the heap property.
void zmaxheap_add(zmaxheap_t *heap, void *p, float v)
{
    _zmaxheap_ensure_capacity(heap, heap->size + 1);

    int idx = heap->size;

    heap->values[idx] = v;
    memcpy(&heap->data[idx*heap->el_sz], p, heap->el_sz);

    heap->size++;

    while (idx > 0) {

        int parent = (idx - 1) / 2;

        // we're done!
        if (heap->values[parent] >= v)
            break;

        // else, swap and recurse upwards.
        heap->swap(heap, idx, parent);
        idx = parent;
    }
}

// Removes the item in the heap at the given index.  Returns 1 if the
// item existed. 0 Indicates an invalid idx (heap is smaller than
// idx). This is mostly intended to be used by zmaxheap_remove_max.
static int zmaxheap_remove_index(zmaxheap_t *heap, int idx, void *p, float *v)
{
    if (idx >= heap->size)
        return 0;

    // copy out the requested element from the heap.
    if (v != NULL)
        *v = heap->values[idx];
    if (p != NULL)
        memcpy(p, &heap->data[idx*heap->el_sz], heap->el_sz);

    heap->size--;

    // If this element is already the last one, then there's nothing
    // for us to do.
    if (idx == heap->size)
        return 1;

    // copy last element to first element. (which probably upsets
    // the heap property).
    heap->values[idx] = heap->values[heap->size];
    memcpy(&heap->data[idx*heap->el_sz], &heap->data[heap->el_sz * heap->size], heap->el_sz);

    // now fix the heap. Note, as we descend, we're "pushing down"
    // the same node the entire time. Thus, while the index of the
    // parent might change, the parent_score doesn't.
    int parent = idx;
    float parent_score = heap->values[idx];

    // descend, fixing the heap.
    while (parent < heap->size) {

        int left = 2*parent + 1;
        int right = left + 1;

        // assert(parent_score == heap->values[parent]);

        float left_score = (left < heap->size) ? heap->values[left] : -INFINITY;
        float right_score = (right < heap->size) ? heap->values[right] : -INFINITY;

        // put the biggest of (parent, left, right) as the parent.

        // already okay?
        if (parent_score >= left_score && parent_score >= right_score)
            break;

        // if we got here, then one of the children is bigger than the parent.
        if (left_score >= right_score) {
            CV_Assert(left < heap->size);
            heap->swap(heap, parent, left);
            parent = left;
        } else {
            // right_score can't be less than left_score if right_score is -INFINITY.
            CV_Assert(right < heap->size);
            heap->swap(heap, parent, right);
            parent = right;
        }
    }

    return 1;
}

// Pop the maximum element; returns 0 when the heap is empty.
int zmaxheap_remove_max(zmaxheap_t *heap, void *p, float *v)
{
    return zmaxheap_remove_index(heap, 0, p, v);
}
}}

View File

@ -0,0 +1,38 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2013-2016, The Regents of The University of Michigan.
//
// This software was developed in the APRIL Robotics Lab under the
// direction of Edwin Olson, ebolson@umich.edu. This software may be
// available under alternative licensing terms; contact the address above.
//
// The views and conclusions contained in the software and documentation are those
// of the authors and should not be interpreted as representing official policies,
// either expressed or implied, of the Regents of The University of Michigan.
#ifndef _OPENCV_ZMAXHEAP_HPP_
#define _OPENCV_ZMAXHEAP_HPP_
namespace cv {
namespace aruco {
typedef struct zmaxheap zmaxheap_t;

typedef struct zmaxheap_iterator zmaxheap_iterator_t;
// Iterator cursor state. NOTE(review): no iterator functions are declared in this
// header — presumably a remnant of the upstream AprilTag API; confirm before use.
struct zmaxheap_iterator {
    zmaxheap_t *heap;
    int in, out;
};

// Create a max-heap whose element payloads are el_sz bytes each.
zmaxheap_t *zmaxheap_create(size_t el_sz);

// Release the heap and all of its storage.
void zmaxheap_destroy(zmaxheap_t *heap);

// Insert element *p with priority v.
void zmaxheap_add(zmaxheap_t *heap, void *p, float v);

// returns 0 if the heap is empty, so you can do
// while (zmaxheap_remove_max(...)) { }
int zmaxheap_remove_max(zmaxheap_t *heap, void *p, float *v);
}}
#endif

View File

@ -0,0 +1,507 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html
#include "../precomp.hpp"
#include <opencv2/objdetect/aruco_dictionary.hpp>
#include <numeric>
namespace cv {
namespace aruco {
using namespace std;
// Pimpl holding the board state: marker corner coordinates, the dictionary the
// marker ids belong to, the marker ids, and the maximal corner coordinate.
struct Board::BoardImpl {
    std::vector<std::vector<Point3f> > objPoints; // 4 corners per marker, board coordinates
    Dictionary dictionary;
    Point3f rightBottomBorder;                    // per-axis maximum over all corners
    std::vector<int> ids;                         // one id per objPoints entry

    BoardImpl() {
        // default-constructed boards get the smallest predefined dictionary
        dictionary = Dictionary(getPredefinedDictionary(PredefinedDictionaryType::DICT_4X4_50));
    }
};

// Default board: empty points/ids, DICT_4X4_50 dictionary (see BoardImpl()).
Board::Board(): boardImpl(makePtr<BoardImpl>()) {}

Board::~Board() {}
/** @brief Create a Board from per-marker object points, a dictionary and marker ids.
 *
 * @param objPoints one entry per marker, each with 4 corners (CV_32FC3, or a 4x3 CV_32FC1
 *        matrix which is reshaped to 3 channels); must have as many entries as ids.
 * @param dictionary dictionary the marker ids belong to.
 * @param ids marker id for every entry of objPoints.
 */
Ptr<Board> Board::create(InputArrayOfArrays objPoints, const Dictionary &dictionary, InputArray ids) {
    CV_Assert(objPoints.total() == ids.total());
    CV_Assert(objPoints.type() == CV_32FC3 || objPoints.type() == CV_32FC1);
    vector<vector<Point3f> > obj_points_vector;
    // track the per-axis maximum over all corners: the bottom-right border of the board
    Point3f rightBottomBorder = Point3f(0.f, 0.f, 0.f);
    for (unsigned int i = 0; i < objPoints.total(); i++) {
        vector<Point3f> corners;
        Mat corners_mat = objPoints.getMat(i);
        // normalize a 4x3 CV_32FC1 matrix to a 4x1 CV_32FC3 one
        if (corners_mat.type() == CV_32FC1)
            corners_mat = corners_mat.reshape(3);
        CV_Assert(corners_mat.total() == 4);
        for (int j = 0; j < 4; j++) {
            const Point3f &corner = corners_mat.at<Point3f>(j);
            corners.push_back(corner);
            rightBottomBorder.x = std::max(rightBottomBorder.x, corner.x);
            rightBottomBorder.y = std::max(rightBottomBorder.y, corner.y);
            rightBottomBorder.z = std::max(rightBottomBorder.z, corner.z);
        }
        obj_points_vector.push_back(corners);
    }
    // construct the result directly instead of copying a stack Board into makePtr
    Ptr<Board> res = makePtr<Board>();
    ids.copyTo(res->boardImpl->ids);
    res->boardImpl->objPoints = obj_points_vector;
    res->boardImpl->dictionary = dictionary;
    res->boardImpl->rightBottomBorder = rightBottomBorder;
    return res;
}
// Dictionary the board's marker ids refer to.
const Dictionary& Board::getDictionary() const {
    return this->boardImpl->dictionary;
}
// Per-marker object-space corners (4 Point3f per marker).
const vector<vector<Point3f> >& Board::getObjPoints() const {
    return this->boardImpl->objPoints;
}
// Component-wise maximum corner coordinate (board extent), cached at creation.
const Point3f& Board::getRightBottomCorner() const {
    return this->boardImpl->rightBottomBorder;
}
// Marker ids, aligned with getObjPoints().
const vector<int>& Board::getIds() const {
    return this->boardImpl->ids;
}
/** @brief Draw all the board's markers into a grayscale image of size outSize.
 *
 * The markers' XY object coordinates are scaled to fit the drawable area
 * (outSize minus marginSize on each side) preserving aspect ratio; each marker
 * is rendered with `borderBits` bits of black border.
 */
void Board::generateImage(Size outSize, OutputArray img, int marginSize, int borderBits) const {
    CV_Assert(!outSize.empty());
    CV_Assert(marginSize >= 0);
    img.create(outSize, CV_8UC1);
    Mat out = img.getMat();
    out.setTo(Scalar::all(255));
    // shrink the working view by the requested margin on every side
    out.adjustROI(-marginSize, -marginSize, -marginSize, -marginSize);
    // calculate max and min values in XY plane
    CV_Assert(this->getObjPoints().size() > 0);
    float minX, maxX, minY, maxY;
    minX = maxX = this->getObjPoints()[0][0].x;
    minY = maxY = this->getObjPoints()[0][0].y;
    for(unsigned int i = 0; i < this->getObjPoints().size(); i++) {
        for(int j = 0; j < 4; j++) {
            minX = min(minX, this->getObjPoints()[i][j].x);
            maxX = max(maxX, this->getObjPoints()[i][j].x);
            minY = min(minY, this->getObjPoints()[i][j].y);
            maxY = max(maxY, this->getObjPoints()[i][j].y);
        }
    }
    float sizeX = maxX - minX;
    float sizeY = maxY - minY;
    // proportion transformations
    float xReduction = sizeX / float(out.cols);
    float yReduction = sizeY / float(out.rows);
    // determine the zone where the markers are placed: center the board along
    // the axis that would otherwise be stretched, to keep the aspect ratio
    if(xReduction > yReduction) {
        int nRows = int(sizeY / xReduction);
        int rowsMargins = (out.rows - nRows) / 2;
        out.adjustROI(-rowsMargins, -rowsMargins, 0, 0);
    } else {
        int nCols = int(sizeX / yReduction);
        int colsMargins = (out.cols - nCols) / 2;
        out.adjustROI(0, 0, -colsMargins, -colsMargins);
    }
    // now paint each marker
    Mat marker;
    Point2f outCorners[3];
    Point2f inCorners[3];
    for(unsigned int m = 0; m < this->getObjPoints().size(); m++) {
        // transform corners to markerZone coordinates
        for(int j = 0; j < 3; j++) {
            Point2f pf = Point2f(this->getObjPoints()[m][j].x, this->getObjPoints()[m][j].y);
            // move top left to 0, 0
            pf -= Point2f(minX, minY);
            pf.x = pf.x / sizeX * float(out.cols);
            pf.y = pf.y / sizeY * float(out.rows);
            outCorners[j] = pf;
        }
        // get marker
        Size dst_sz(outCorners[2] - outCorners[0]); // assuming CCW order
        dst_sz.width = dst_sz.height = std::min(dst_sz.width, dst_sz.height); //marker should be square
        getDictionary().generateImageMarker(this->getIds()[m], dst_sz.width, marker, borderBits);
        if((outCorners[0].y == outCorners[1].y) && (outCorners[1].x == outCorners[2].x)) {
            // marker is aligned to image axes: plain copy is enough (and exact)
            marker.copyTo(out(Rect(outCorners[0], dst_sz)));
            continue;
        }
        // interpolate tiny marker to marker position in markerZone
        inCorners[0] = Point2f(-0.5f, -0.5f);
        inCorners[1] = Point2f(marker.cols - 0.5f, -0.5f);
        inCorners[2] = Point2f(marker.cols - 0.5f, marker.rows - 0.5f);
        // remove perspective
        Mat transformation = getAffineTransform(inCorners, outCorners);
        warpAffine(marker, out, transformation, out.size(), INTER_LINEAR,
                   BORDER_TRANSPARENT);
    }
}
/** @brief Match detected markers against the board layout.
 *
 * For every detected id that belongs to the board, the 4 object-space corners
 * and the 4 detected image corners are appended (in corresponding order) to
 * _objPoints / imgPoints, ready for solvePnP-style pose estimation.
 * Note: the scan is O(detected * board) and keeps every board entry with a
 * matching id, so duplicate ids on the board all contribute.
 */
void Board::matchImagePoints(InputArray detectedCorners, InputArray detectedIds,
                             OutputArray _objPoints, OutputArray imgPoints) const {
    CV_Assert(getIds().size() == getObjPoints().size());
    CV_Assert(detectedIds.total() == detectedCorners.total());
    size_t nDetectedMarkers = detectedIds.total();
    vector<Point3f> objPnts;
    objPnts.reserve(nDetectedMarkers);
    vector<Point2f> imgPnts;
    imgPnts.reserve(nDetectedMarkers);
    // look for detected markers that belong to the board and get their information
    for(unsigned int i = 0; i < nDetectedMarkers; i++) {
        int currentId = detectedIds.getMat().ptr< int >(0)[i];
        for(unsigned int j = 0; j < getIds().size(); j++) {
            if(currentId == getIds()[j]) {
                for(int p = 0; p < 4; p++) {
                    objPnts.push_back(getObjPoints()[j][p]);
                    imgPnts.push_back(detectedCorners.getMat(i).ptr<Point2f>(0)[p]);
                }
            }
        }
    }
    // create output
    Mat(objPnts).copyTo(_objPoints);
    Mat(imgPnts).copyTo(imgPoints);
}
// Hidden state of GridBoard (pimpl idiom); Board geometry itself lives in BoardImpl.
struct GridBoard::GridImpl {
    GridImpl(){};
    // number of markers in X and Y directions
    int sizeX = 3, sizeY = 3;
    // marker side length (normally in meters)
    float markerLength = 1.f;
    // separation between markers in the grid
    float markerSeparation = .5f;
};
// Default GridBoard: 3x3 grid with GridImpl's default lengths; use create() to configure.
GridBoard::GridBoard(): gridImpl(makePtr<GridImpl>()) {}
/** @brief Build a markersX x markersY planar grid board.
 *
 * Markers are laid out row by row (y outer, x inner), each a square of side
 * markerLength separated by markerSeparation, in the Z=0 plane.
 * `ids` must contain exactly markersX*markersY marker ids, assigned in layout order.
 */
Ptr<GridBoard> GridBoard::create(int markersX, int markersY, float markerLength, float markerSeparation,
                                 const Dictionary &dictionary, InputArray ids) {
    CV_Assert(markersX > 0 && markersY > 0 && markerLength > 0 && markerSeparation > 0);
    // NOTE(review): local + copy keeps makePtr on the copy constructor; see Board::create.
    GridBoard board;
    Ptr<GridBoard> res = makePtr<GridBoard>(board);
    res->gridImpl->sizeX = markersX;
    res->gridImpl->sizeY = markersY;
    res->gridImpl->markerLength = markerLength;
    res->gridImpl->markerSeparation = markerSeparation;
    res->boardImpl->dictionary = dictionary;
    size_t totalMarkers = (size_t) markersX * markersY;
    CV_Assert(totalMarkers == ids.total());
    vector<vector<Point3f> > objPoints;
    objPoints.reserve(totalMarkers);
    ids.copyTo(res->boardImpl->ids);
    // calculate Board objPoints: top-left corner first, then CW in the XY plane
    for (int y = 0; y < markersY; y++) {
        for (int x = 0; x < markersX; x++) {
            vector <Point3f> corners(4);
            corners[0] = Point3f(x * (markerLength + markerSeparation),
                                 y * (markerLength + markerSeparation), 0);
            corners[1] = corners[0] + Point3f(markerLength, 0, 0);
            corners[2] = corners[0] + Point3f(markerLength, markerLength, 0);
            corners[3] = corners[0] + Point3f(0, markerLength, 0);
            objPoints.push_back(corners);
        }
    }
    res->boardImpl->objPoints = objPoints;
    // board extent: markers plus the separations between them (no outer separation)
    res->boardImpl->rightBottomBorder = Point3f(markersX * markerLength + markerSeparation * (markersX - 1),
                                                markersY * markerLength + markerSeparation * (markersY - 1), 0.f);
    return res;
}
/** @brief Convenience overload: grid board with consecutive ids starting at firstMarker. */
Ptr<GridBoard> GridBoard::create(int markersX, int markersY, float markerLength, float markerSeparation,
                                 const Dictionary &dictionary, int firstMarker) {
    // Build [firstMarker, firstMarker + markersX*markersY) and delegate.
    vector<int> consecutiveIds(markersX*markersY);
    std::iota(consecutiveIds.begin(), consecutiveIds.end(), firstMarker);
    return GridBoard::create(markersX, markersY, markerLength, markerSeparation, dictionary, consecutiveIds);
}
// Grid boards have no extra decoration: delegate straight to Board::generateImage.
void GridBoard::generateImage(Size outSize, OutputArray _img, int marginSize, int borderBits) const {
    Board::generateImage(outSize, _img, marginSize, borderBits);
}
// Grid dimensions in markers (columns x rows).
Size GridBoard::getGridSize() const {
    return Size(gridImpl->sizeX, gridImpl->sizeY);
}
// Marker side length, in the same unit used at creation (normally meters).
float GridBoard::getMarkerLength() const {
    return gridImpl->markerLength;
}
// Gap between adjacent markers, same unit as markerLength.
float GridBoard::getMarkerSeparation() const {
    return gridImpl->markerSeparation;
}
// Hidden state of CharucoBoard; reuses GridImpl's sizeX/sizeY for the chessboard squares.
// NOTE(review): markerLength here shadows GridImpl::markerLength — CharucoBoard code
// reads/writes the derived member only; confirm before touching either field.
struct CharucoBoard::CharucoImpl : GridBoard::GridImpl {
    // size of chessboard squares side (normally in meters)
    float squareLength;
    // marker side length (normally in meters)
    float markerLength;
    static void _getNearestMarkerCorners(CharucoBoard &board, float squareLength);
    // vector of chessboard 3D corners precalculated
    std::vector<Point3f> chessboardCorners;
    // for each charuco corner, nearest marker id and nearest marker corner id of each marker
    std::vector<std::vector<int> > nearestMarkerIdx;
    std::vector<std::vector<int> > nearestMarkerCorners;
};
// Default CharucoBoard; use create() to set sizes, lengths and precomputed corners.
CharucoBoard::CharucoBoard(): charucoImpl(makePtr<CharucoImpl>()) {}
/** @brief Draw the ChArUco board: markers first, then black chessboard squares on top.
 *
 * The chessboard is scaled to fit outSize minus the margin, preserving aspect
 * ratio; markers are inset inside white squares by half the square/marker
 * length difference.
 */
void CharucoBoard::generateImage(Size outSize, OutputArray _img, int marginSize, int borderBits) const {
    CV_Assert(!outSize.empty());
    CV_Assert(marginSize >= 0);
    _img.create(outSize, CV_8UC1);
    _img.setTo(255);
    Mat out = _img.getMat();
    Mat noMarginsImg =
        out.colRange(marginSize, out.cols - marginSize).rowRange(marginSize, out.rows - marginSize);
    double totalLengthX, totalLengthY;
    totalLengthX = charucoImpl->squareLength * charucoImpl->sizeX;
    totalLengthY = charucoImpl->squareLength * charucoImpl->sizeY;
    // proportional transformation
    double xReduction = totalLengthX / double(noMarginsImg.cols);
    double yReduction = totalLengthY / double(noMarginsImg.rows);
    // determine the zone where the chessboard is placed (centered on the looser axis)
    Mat chessboardZoneImg;
    if(xReduction > yReduction) {
        int nRows = int(totalLengthY / xReduction);
        int rowsMargins = (noMarginsImg.rows - nRows) / 2;
        chessboardZoneImg = noMarginsImg.rowRange(rowsMargins, noMarginsImg.rows - rowsMargins);
    } else {
        int nCols = int(totalLengthX / yReduction);
        int colsMargins = (noMarginsImg.cols - nCols) / 2;
        chessboardZoneImg = noMarginsImg.colRange(colsMargins, noMarginsImg.cols - colsMargins);
    }
    // determine the margins to draw only the markers
    // take the minimum just to be sure
    double squareSizePixels = min(double(chessboardZoneImg.cols) / double(charucoImpl->sizeX),
                                  double(chessboardZoneImg.rows) / double(charucoImpl->sizeY));
    double diffSquareMarkerLength = (charucoImpl->squareLength - charucoImpl->markerLength) / 2;
    int diffSquareMarkerLengthPixels =
        int(diffSquareMarkerLength * squareSizePixels / charucoImpl->squareLength);
    // draw markers
    Mat markersImg;
    Board::generateImage(chessboardZoneImg.size(), markersImg, diffSquareMarkerLengthPixels, borderBits);
    markersImg.copyTo(chessboardZoneImg);
    // now draw black squares
    for(int y = 0; y < charucoImpl->sizeY; y++) {
        for(int x = 0; x < charucoImpl->sizeX; x++) {
            if(y % 2 != x % 2) continue; // white corner, dont do anything
            double startX, startY;
            startX = squareSizePixels * double(x);
            startY = squareSizePixels * double(y);
            Mat squareZone = chessboardZoneImg.rowRange(int(startY), int(startY + squareSizePixels))
                                 .colRange(int(startX), int(startX + squareSizePixels));
            squareZone.setTo(0);
        }
    }
}
/**
 * Fill nearestMarkerIdx and nearestMarkerCorners arrays.
 *
 * For every chessboard corner: collect the marker(s) whose center (XY plane) is
 * closest — ties within 1% of the square length are all kept — then, for each
 * kept marker, record which of its 4 corners is closest to the chessboard corner.
 */
void CharucoBoard::CharucoImpl::_getNearestMarkerCorners(CharucoBoard &board, float squareLength) {
    board.charucoImpl->nearestMarkerIdx.resize(board.charucoImpl->chessboardCorners.size());
    board.charucoImpl->nearestMarkerCorners.resize(board.charucoImpl->chessboardCorners.size());
    unsigned int nMarkers = (unsigned int)board.getIds().size();
    unsigned int nCharucoCorners = (unsigned int)board.charucoImpl->chessboardCorners.size();
    // Tie threshold on squared distances; hoisted out of the loops (it only
    // depends on squareLength) and written as a plain product instead of pow().
    const double tieThreshold = (0.01 * squareLength) * (0.01 * squareLength);
    for(unsigned int i = 0; i < nCharucoCorners; i++) {
        double minDist = -1; // squared distance of the closest marker(s) so far
        Point3f charucoCorner = board.charucoImpl->chessboardCorners[i];
        for(unsigned int j = 0; j < nMarkers; j++) {
            // calculate squared XY distance from marker center to charuco corner
            Point3f center = Point3f(0, 0, 0);
            for(unsigned int k = 0; k < 4; k++)
                center += board.getObjPoints()[j][k];
            center /= 4.;
            Point3f distVector = charucoCorner - center;
            double sqDistance = distVector.x * distVector.x + distVector.y * distVector.y;
            if(j == 0 || fabs(sqDistance - minDist) < tieThreshold) {
                // if same minimum distance (or first iteration), add to nearestMarkerIdx vector
                board.charucoImpl->nearestMarkerIdx[i].push_back(j);
                minDist = sqDistance;
            } else if(sqDistance < minDist) {
                // strictly closer marker found: restart the candidate list with it
                board.charucoImpl->nearestMarkerIdx[i].clear();
                board.charucoImpl->nearestMarkerIdx[i].push_back(j);
                minDist = sqDistance;
            }
        }
        // for each of the closest markers, search the marker corner index closer
        // to the charuco corner (resize hoisted: the size is loop-invariant)
        board.charucoImpl->nearestMarkerCorners[i].resize(board.charucoImpl->nearestMarkerIdx[i].size());
        for(unsigned int j = 0; j < board.charucoImpl->nearestMarkerIdx[i].size(); j++) {
            double minDistCorner = -1;
            for(unsigned int k = 0; k < 4; k++) {
                Point3f distVector = charucoCorner - board.getObjPoints()[board.charucoImpl->nearestMarkerIdx[i][j]][k];
                double sqDistance = distVector.x * distVector.x + distVector.y * distVector.y;
                if(k == 0 || sqDistance < minDistCorner) {
                    // if this corner is closer to the charuco corner, assign its index
                    // to nearestMarkerCorners
                    minDistCorner = sqDistance;
                    board.charucoImpl->nearestMarkerCorners[i][j] = k;
                }
            }
        }
    }
}
/** @brief Build a ChArUco board: chessboard of squaresX x squaresY squares with
 * markers centered inside the white squares.
 *
 * If `ids` is empty, consecutive ids starting from 0 are assigned; otherwise it
 * must contain exactly one id per white square, in layout order.
 * Also precomputes the inner chessboard corners and the nearest-marker tables.
 */
Ptr<CharucoBoard> CharucoBoard::create(int squaresX, int squaresY, float squareLength, float markerLength,
                                       const Dictionary &dictionary, InputArray ids) {
    CV_Assert(squaresX > 1 && squaresY > 1 && markerLength > 0 && squareLength > markerLength);
    // NOTE(review): local + copy keeps makePtr on the copy constructor; see Board::create.
    CharucoBoard board;
    Ptr<CharucoBoard> res = makePtr<CharucoBoard>(board);
    res->charucoImpl->sizeX = squaresX;
    res->charucoImpl->sizeY = squaresY;
    res->charucoImpl->squareLength = squareLength;
    res->charucoImpl->markerLength = markerLength;
    res->boardImpl->dictionary = dictionary;
    vector<vector<Point3f> > objPoints;
    // markers are centered in their square: inset by half the length difference
    float diffSquareMarkerLength = (squareLength - markerLength) / 2;
    int totalMarkers = (int)(ids.total());
    ids.copyTo(res->boardImpl->ids);
    // calculate Board objPoints
    int nextId = 0;
    for(int y = 0; y < squaresY; y++) {
        for(int x = 0; x < squaresX; x++) {
            if(y % 2 == x % 2) continue; // black corner, no marker here
            vector<Point3f> corners(4);
            corners[0] = Point3f(x * squareLength + diffSquareMarkerLength,
                                 y * squareLength + diffSquareMarkerLength, 0);
            corners[1] = corners[0] + Point3f(markerLength, 0, 0);
            corners[2] = corners[0] + Point3f(markerLength, markerLength, 0);
            corners[3] = corners[0] + Point3f(0, markerLength, 0);
            objPoints.push_back(corners);
            // first ids in dictionary
            if (totalMarkers == 0)
                res->boardImpl->ids.push_back(nextId);
            nextId++;
        }
    }
    // when ids were supplied, their count must match the number of white squares
    if (totalMarkers > 0 && nextId != totalMarkers)
        CV_Error(cv::Error::StsBadSize, "Size of ids must be equal to the number of markers: "+std::to_string(nextId));
    res->boardImpl->objPoints = objPoints;
    // now fill chessboardCorners: the (squaresX-1)*(squaresY-1) inner corners
    for(int y = 0; y < squaresY - 1; y++) {
        for(int x = 0; x < squaresX - 1; x++) {
            Point3f corner;
            corner.x = (x + 1) * squareLength;
            corner.y = (y + 1) * squareLength;
            corner.z = 0;
            res->charucoImpl->chessboardCorners.push_back(corner);
        }
    }
    res->boardImpl->rightBottomBorder = Point3f(squaresX * squareLength, squaresY * squareLength, 0.f);
    CharucoBoard::CharucoImpl::_getNearestMarkerCorners(*res, res->charucoImpl->squareLength);
    return res;
}
// Chessboard dimensions in squares (columns x rows).
Size CharucoBoard::getChessboardSize() const { return Size(charucoImpl->sizeX, charucoImpl->sizeY); }
// Square side length, same unit as used at creation (normally meters).
float CharucoBoard::getSquareLength() const { return charucoImpl->squareLength; }
// Marker side length, same unit as squareLength.
float CharucoBoard::getMarkerLength() const { return charucoImpl->markerLength; }
/** @brief Return true when all the given charuco corners lie on a single line.
 *
 * Collinear detections cannot constrain a pose, so callers use this as a
 * degeneracy check. Two or fewer corners are trivially collinear.
 */
bool CharucoBoard::checkCharucoCornersCollinear(InputArray charucoIds) const {
    const Mat idsMat = charucoIds.getMat();
    const unsigned int cornerCount = (unsigned int)idsMat.total();
    if (cornerCount <= 2)
        return true;
    // only test if there are 3 or more corners
    CV_Assert(charucoImpl->chessboardCorners.size() >= idsMat.total());
    const std::vector<Point3f>& corners = charucoImpl->chessboardCorners;
    // homogeneous line through the first two corners
    Vec<double, 3> first(corners[idsMat.at<int>(0)].x, corners[idsMat.at<int>(0)].y, 1);
    Vec<double, 3> second(corners[idsMat.at<int>(1)].x, corners[idsMat.at<int>(1)].y, 1);
    Vec<double, 3> line = first.cross(second);
    const double normal = sqrt(line[0]*line[0] + line[1]*line[1]);
    CV_Assert(normal != 0.0);
    // normalize so that point.dot(line) is the point-to-line distance
    line /= normal;
    Vec<double, 3> probe(0, 0, 1);
    for (unsigned int i = 2; i < cornerCount; i++) {
        probe(0) = corners[idsMat.at<int>(i)].x;
        probe(1) = corners[idsMat.at<int>(i)].y;
        // a point on the line gives a (near-)zero dot product
        if (std::abs(probe.dot(line)) > 1e-6)
            return false;
    }
    // every corner sits on the line through the first two
    return true;
}
// Copy of the precomputed inner chessboard corners (board space, Z=0).
std::vector<Point3f> CharucoBoard::getChessboardCorners() const {
    return charucoImpl->chessboardCorners;
}
// For each chessboard corner, the indices of its nearest marker(s).
std::vector<std::vector<int> > CharucoBoard::getNearestMarkerIdx() const {
    return charucoImpl->nearestMarkerIdx;
}
// For each chessboard corner, the closest corner (0..3) of each nearest marker.
std::vector<std::vector<int> > CharucoBoard::getNearestMarkerCorners() const {
    return charucoImpl->nearestMarkerCorners;
}
}
}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,441 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html
#include "../precomp.hpp"
#include "opencv2/core/hal/hal.hpp"
#include "aruco_utils.hpp"
#include "predefined_dictionaries.hpp"
#include "apriltag/predefined_dictionaries_apriltag.hpp"
#include <opencv2/objdetect/aruco_dictionary.hpp>
namespace cv {
namespace aruco {
using namespace std;
// Empty dictionary: no markers, zero-sized codes.
Dictionary::Dictionary(): markerSize(0), maxCorrectionBits(0) {}

// Wrap an existing byte list (one row per marker, 4 rotations packed per row).
Dictionary::Dictionary(const Mat &_bytesList, int _markerSize, int _maxcorr):
        markerSize(_markerSize),
        maxCorrectionBits(_maxcorr) {
    bytesList = _bytesList;
}
/** @brief Load a dictionary from a FileNode written by writeDictionary().
 *
 * Expects "nmarkers", "markersize" and one "marker_<i>" string of '0'/'1'
 * characters per marker; "maxCorrectionBits" is optional (defaults to 0).
 * @return false (leaving *this untouched) if any required entry is missing
 *         or a marker string has the wrong length.
 */
bool Dictionary::readDictionary(const cv::FileNode& fn) {
    int nMarkers = 0, _markerSize = 0;
    if (fn.empty() || !readParameter("nmarkers", nMarkers, fn) || !readParameter("markersize", _markerSize, fn))
        return false;
    Mat bytes(0, 0, CV_8UC1), marker(_markerSize, _markerSize, CV_8UC1);
    std::string markerString;
    for (int i = 0; i < nMarkers; i++) {
        if (!readParameter("marker_" + std::to_string(i), markerString, fn))
            return false;
        // reject malformed entries: a wrong-length string would otherwise
        // read/write out of the marker matrix bounds below
        if ((int)markerString.size() != _markerSize * _markerSize)
            return false;
        // any character other than '0' is treated as a set bit
        for (int j = 0; j < (int) markerString.size(); j++)
            marker.at<unsigned char>(j) = (markerString[j] == '0') ? 0 : 1;
        bytes.push_back(Dictionary::getByteListFromBits(marker));
    }
    int _maxCorrectionBits = 0;
    readParameter("maxCorrectionBits", _maxCorrectionBits, fn);
    *this = Dictionary(bytes, _markerSize, _maxCorrectionBits);
    return true;
}
/** @brief Serialize the dictionary in the format readDictionary() parses:
 * marker count, marker size, correction bits, then one '0'/'1' string per marker.
 */
void Dictionary::writeDictionary(FileStorage &fs) {
    fs << "nmarkers" << bytesList.rows;
    fs << "markersize" << markerSize;
    fs << "maxCorrectionBits" << maxCorrectionBits;
    for (int i = 0; i < bytesList.rows; i++) {
        Mat row = bytesList.row(i);  // fixed stray ';;'
        // decode the packed bytes back to a markerSize x markerSize bit matrix
        Mat bitMarker = getBitsFromByteList(row, markerSize);
        string markerName = "marker_" + std::to_string(i);
        string marker;
        marker.reserve((size_t)markerSize * markerSize);
        for (int j = 0; j < markerSize * markerSize; j++)
            marker.push_back(bitMarker.at<uint8_t>(j) + '0');
        fs << markerName << marker;
    }
}
/** @brief Legacy-API wrapper: optionally nest the dictionary under `name` as a map. */
void Dictionary::writeDictionary(Ptr<FileStorage>& fs, const String &name) {
    const bool nested = !name.empty();
    if (nested)
        *fs << name << "{";
    writeDictionary(*fs);
    if (nested)
        *fs << "}";
}
/** @brief Identify a candidate bit matrix against the dictionary.
 *
 * Tries all 4 rotations of the candidate against every marker; the first marker
 * whose best-rotation Hamming distance is within maxCorrectionBits*maxCorrectionRate
 * is accepted. On success idx/rotation are set; otherwise idx stays -1.
 * @return true when a marker was identified.
 */
bool Dictionary::identify(const Mat &onlyBits, int &idx, int &rotation, double maxCorrectionRate) const {
    CV_Assert(onlyBits.rows == markerSize && onlyBits.cols == markerSize);
    const int allowedErrors = int(double(maxCorrectionBits) * maxCorrectionRate);
    // pack the candidate bits once; rotations are precomputed in bytesList rows
    const Mat candidateBytes = getByteListFromBits(onlyBits);
    idx = -1; // by default, not found
    for (int m = 0; m < bytesList.rows; m++) {
        int bestDistance = markerSize * markerSize + 1;
        int bestRotation = -1;
        for (int r = 0; r < 4; r++) {
            const int hamming = cv::hal::normHamming(
                    bytesList.ptr(m) + r * candidateBytes.cols,
                    candidateBytes.ptr(),
                    candidateBytes.cols);
            if (hamming < bestDistance) {
                bestDistance = hamming;
                bestRotation = r;
            }
        }
        // accept the first marker within the allowed correction budget
        if (bestDistance <= allowedErrors) {
            idx = m;
            rotation = bestRotation;
            break;
        }
    }
    return idx != -1;
}
/** @brief Hamming distance between a bit matrix and marker `id`.
 *
 * Considers all 4 rotations unless allRotations is false (rotation 0 only)
 * and returns the minimum distance found.
 */
int Dictionary::getDistanceToId(InputArray bits, int id, bool allRotations) const {
    CV_Assert(id >= 0 && id < bytesList.rows);
    const unsigned int rotations = allRotations ? 4u : 1u;
    const Mat candidateBytes = getByteListFromBits(bits.getMat());
    // start above any reachable Hamming distance
    int best = int(bits.total() * bits.total());
    for (unsigned int r = 0; r < rotations; r++) {
        const int hamming = cv::hal::normHamming(
                bytesList.ptr(id) + r * candidateBytes.cols,
                candidateBytes.ptr(),
                candidateBytes.cols);
        best = min(best, hamming);
    }
    return best;
}
/** @brief Render marker `id` as a sidePixels x sidePixels grayscale image.
 *
 * The marker is first drawn at 1 pixel per bit (including `borderBits` of black
 * border on each side) and then scaled up with nearest-neighbor interpolation,
 * so sidePixels must be at least markerSize + 2*borderBits.
 */
void Dictionary::generateImageMarker(int id, int sidePixels, OutputArray _img, int borderBits) const {
    CV_Assert(sidePixels >= (markerSize + 2*borderBits));
    CV_Assert(id < bytesList.rows);
    CV_Assert(borderBits > 0);
    _img.create(sidePixels, sidePixels, CV_8UC1);
    // create small marker with 1 pixel per bin
    Mat tinyMarker(markerSize + 2 * borderBits, markerSize + 2 * borderBits, CV_8UC1,
                   Scalar::all(0));
    Mat innerRegion = tinyMarker.rowRange(borderBits, tinyMarker.rows - borderBits)
                          .colRange(borderBits, tinyMarker.cols - borderBits);
    // put inner bits (scaled to 0/255)
    Mat bits = 255 * getBitsFromByteList(bytesList.rowRange(id, id + 1), markerSize);
    CV_Assert(innerRegion.total() == bits.total());
    bits.copyTo(innerRegion);
    // resize tiny marker to output size; INTER_NEAREST keeps bit edges sharp
    cv::resize(tinyMarker, _img.getMat(), _img.getMat().size(), 0, 0, INTER_NEAREST);
}
/** @brief Pack a bit matrix into the dictionary's byte representation.
 *
 * Returns a 1 x nbytes CV_8UC4 row whose data holds the marker bits packed
 * MSB-first, once for each of the 4 rotations (rotation r starts at byte
 * offset r*nbytes), so Hamming comparisons can address any rotation directly.
 */
Mat Dictionary::getByteListFromBits(const Mat &bits) {
    // integer ceil
    int nbytes = (bits.cols * bits.rows + 8 - 1) / 8;
    Mat candidateByteList(1, nbytes, CV_8UC4, Scalar::all(0));
    unsigned char currentBit = 0;
    int currentByte = 0;
    // the 4 rotations, laid out back-to-back in the same buffer
    uchar* rot0 = candidateByteList.ptr();
    uchar* rot1 = candidateByteList.ptr() + 1*nbytes;
    uchar* rot2 = candidateByteList.ptr() + 2*nbytes;
    uchar* rot3 = candidateByteList.ptr() + 3*nbytes;
    for(int row = 0; row < bits.rows; row++) {
        for(int col = 0; col < bits.cols; col++) {
            // circular shift: make room for the next bit in every rotation
            rot0[currentByte] <<= 1;
            rot1[currentByte] <<= 1;
            rot2[currentByte] <<= 1;
            rot3[currentByte] <<= 1;
            // set bit, reading the source at the position each rotation maps to
            rot0[currentByte] |= bits.at<uchar>(row, col);
            rot1[currentByte] |= bits.at<uchar>(col, bits.cols - 1 - row);
            rot2[currentByte] |= bits.at<uchar>(bits.rows - 1 - row, bits.cols - 1 - col);
            rot3[currentByte] |= bits.at<uchar>(bits.rows - 1 - col, row);
            currentBit++;
            if(currentBit == 8) {
                // next byte
                currentBit = 0;
                currentByte++;
            }
        }
    }
    return candidateByteList;
}
/** @brief Unpack a byte row (as produced by getByteListFromBits) back into a
 * markerSize x markerSize bit matrix. Only the rotation-0 bytes are read.
 */
Mat Dictionary::getBitsFromByteList(const Mat &byteList, int markerSize) {
    CV_Assert(byteList.total() > 0 &&
              byteList.total() >= (unsigned int)markerSize * markerSize / 8 &&
              byteList.total() <= (unsigned int)markerSize * markerSize / 8 + 1);
    Mat bits(markerSize, markerSize, CV_8UC1, Scalar::all(0));
    // bit masks, MSB first, matching the packing order of getByteListFromBits
    unsigned char base2List[] = { 128, 64, 32, 16, 8, 4, 2, 1 };
    int currentByteIdx = 0;
    // we only need the bytes in normal rotation
    unsigned char currentByte = byteList.ptr()[0];
    int currentBit = 0;
    for(int row = 0; row < bits.rows; row++) {
        for(int col = 0; col < bits.cols; col++) {
            // subtract-compare walks the byte from its most significant bit down
            if(currentByte >= base2List[currentBit]) {
                bits.at<unsigned char>(row, col) = 1;
                currentByte -= base2List[currentBit];
            }
            currentBit++;
            if(currentBit == 8) {
                currentByteIdx++;
                currentByte = byteList.ptr()[currentByteIdx];
                // if not enough bits for one more byte, we are in the end
                // update bit position accordingly (last byte is padded at the top)
                if(8 * (currentByteIdx + 1) > (int)bits.total())
                    currentBit = 8 * (currentByteIdx + 1) - (int)bits.total();
                else
                    currentBit = 0; // ok, bits enough for next byte
            }
        }
    }
    return bits;
}
/** @brief Return a copy of one of the built-in dictionaries.
 *
 * The static tables reference the compiled-in byte arrays (no copy of the bit
 * data is made at table construction). Unknown enum values fall back to
 * DICT_4X4_50 (see the return after the switch).
 */
Dictionary getPredefinedDictionary(PredefinedDictionaryType name) {
    // DictionaryData constructors calls
    // moved out of globals so construted on first use, which allows lazy-loading of opencv dll
    static const Dictionary DICT_ARUCO_DATA = Dictionary(Mat(1024, (5 * 5 + 7) / 8, CV_8UC4, (uchar*)DICT_ARUCO_BYTES), 5, 0);
    static const Dictionary DICT_4X4_50_DATA = Dictionary(Mat(50, (4 * 4 + 7) / 8, CV_8UC4, (uchar*)DICT_4X4_1000_BYTES), 4, 1);
    static const Dictionary DICT_4X4_100_DATA = Dictionary(Mat(100, (4 * 4 + 7) / 8, CV_8UC4, (uchar*)DICT_4X4_1000_BYTES), 4, 1);
    static const Dictionary DICT_4X4_250_DATA = Dictionary(Mat(250, (4 * 4 + 7) / 8, CV_8UC4, (uchar*)DICT_4X4_1000_BYTES), 4, 1);
    static const Dictionary DICT_4X4_1000_DATA = Dictionary(Mat(1000, (4 * 4 + 7) / 8, CV_8UC4, (uchar*)DICT_4X4_1000_BYTES), 4, 0);
    static const Dictionary DICT_5X5_50_DATA = Dictionary(Mat(50, (5 * 5 + 7) / 8, CV_8UC4, (uchar*)DICT_5X5_1000_BYTES), 5, 3);
    static const Dictionary DICT_5X5_100_DATA = Dictionary(Mat(100, (5 * 5 + 7) / 8, CV_8UC4, (uchar*)DICT_5X5_1000_BYTES), 5, 3);
    static const Dictionary DICT_5X5_250_DATA = Dictionary(Mat(250, (5 * 5 + 7) / 8, CV_8UC4, (uchar*)DICT_5X5_1000_BYTES), 5, 2);
    static const Dictionary DICT_5X5_1000_DATA = Dictionary(Mat(1000, (5 * 5 + 7) / 8, CV_8UC4, (uchar*)DICT_5X5_1000_BYTES), 5, 2);
    static const Dictionary DICT_6X6_50_DATA = Dictionary(Mat(50, (6 * 6 + 7) / 8, CV_8UC4, (uchar*)DICT_6X6_1000_BYTES), 6, 6);
    static const Dictionary DICT_6X6_100_DATA = Dictionary(Mat(100, (6 * 6 + 7) / 8, CV_8UC4, (uchar*)DICT_6X6_1000_BYTES), 6, 5);
    static const Dictionary DICT_6X6_250_DATA = Dictionary(Mat(250, (6 * 6 + 7) / 8, CV_8UC4, (uchar*)DICT_6X6_1000_BYTES), 6, 5);
    static const Dictionary DICT_6X6_1000_DATA = Dictionary(Mat(1000, (6 * 6 + 7) / 8, CV_8UC4, (uchar*)DICT_6X6_1000_BYTES), 6, 4);
    static const Dictionary DICT_7X7_50_DATA = Dictionary(Mat(50, (7 * 7 + 7) / 8, CV_8UC4, (uchar*)DICT_7X7_1000_BYTES), 7, 9);
    static const Dictionary DICT_7X7_100_DATA = Dictionary(Mat(100, (7 * 7 + 7) / 8, CV_8UC4, (uchar*)DICT_7X7_1000_BYTES), 7, 8);
    static const Dictionary DICT_7X7_250_DATA = Dictionary(Mat(250, (7 * 7 + 7) / 8, CV_8UC4, (uchar*)DICT_7X7_1000_BYTES), 7, 8);
    static const Dictionary DICT_7X7_1000_DATA = Dictionary(Mat(1000, (7 * 7 + 7) / 8, CV_8UC4, (uchar*)DICT_7X7_1000_BYTES), 7, 6);
    static const Dictionary DICT_APRILTAG_16h5_DATA = Dictionary(Mat(30, (4 * 4 + 7) / 8, CV_8UC4, (uchar*)DICT_APRILTAG_16h5_BYTES), 4, 0);
    static const Dictionary DICT_APRILTAG_25h9_DATA = Dictionary(Mat(35, (5 * 5 + 7) / 8, CV_8UC4, (uchar*)DICT_APRILTAG_25h9_BYTES), 5, 0);
    static const Dictionary DICT_APRILTAG_36h10_DATA = Dictionary(Mat(2320, (6 * 6 + 7) / 8, CV_8UC4, (uchar*)DICT_APRILTAG_36h10_BYTES), 6, 0);
    static const Dictionary DICT_APRILTAG_36h11_DATA = Dictionary(Mat(587, (6 * 6 + 7) / 8, CV_8UC4, (uchar*)DICT_APRILTAG_36h11_BYTES), 6, 0);
    switch(name) {
    case DICT_ARUCO_ORIGINAL:
        return Dictionary(DICT_ARUCO_DATA);
    case DICT_4X4_50:
        return Dictionary(DICT_4X4_50_DATA);
    case DICT_4X4_100:
        return Dictionary(DICT_4X4_100_DATA);
    case DICT_4X4_250:
        return Dictionary(DICT_4X4_250_DATA);
    case DICT_4X4_1000:
        return Dictionary(DICT_4X4_1000_DATA);
    case DICT_5X5_50:
        return Dictionary(DICT_5X5_50_DATA);
    case DICT_5X5_100:
        return Dictionary(DICT_5X5_100_DATA);
    case DICT_5X5_250:
        return Dictionary(DICT_5X5_250_DATA);
    case DICT_5X5_1000:
        return Dictionary(DICT_5X5_1000_DATA);
    case DICT_6X6_50:
        return Dictionary(DICT_6X6_50_DATA);
    case DICT_6X6_100:
        return Dictionary(DICT_6X6_100_DATA);
    case DICT_6X6_250:
        return Dictionary(DICT_6X6_250_DATA);
    case DICT_6X6_1000:
        return Dictionary(DICT_6X6_1000_DATA);
    case DICT_7X7_50:
        return Dictionary(DICT_7X7_50_DATA);
    case DICT_7X7_100:
        return Dictionary(DICT_7X7_100_DATA);
    case DICT_7X7_250:
        return Dictionary(DICT_7X7_250_DATA);
    case DICT_7X7_1000:
        return Dictionary(DICT_7X7_1000_DATA);
    case DICT_APRILTAG_16h5:
        return Dictionary(DICT_APRILTAG_16h5_DATA);
    case DICT_APRILTAG_25h9:
        return Dictionary(DICT_APRILTAG_25h9_DATA);
    case DICT_APRILTAG_36h10:
        return Dictionary(DICT_APRILTAG_36h10_DATA);
    case DICT_APRILTAG_36h11:
        return Dictionary(DICT_APRILTAG_36h11_DATA);
    }
    // unreachable for valid enum values; fallback for out-of-range ints
    return Dictionary(DICT_4X4_50_DATA);
}
// Legacy-API overload: accept the dictionary id as a plain int.
Dictionary getPredefinedDictionary(int dict) {
    return getPredefinedDictionary(PredefinedDictionaryType(dict));
}
/**
 * @brief Generates a random marker Mat of size markerSize x markerSize
 * (each cell independently 0 or 1, consuming one rng.uniform() per cell).
 */
static Mat _generateRandomMarker(int markerSize, RNG &rng) {
    Mat marker(markerSize, markerSize, CV_8UC1, Scalar::all(0));
    for (int r = 0; r < markerSize; r++)
        for (int c = 0; c < markerSize; c++)
            marker.at<unsigned char>(r, c) = (unsigned char)rng.uniform(0, 2);
    return marker;
}
/**
* @brief Calculate selfDistance of the codification of a marker Mat. Self distance is the Hamming
* distance of the marker to itself in the other rotations.
* See S. Garrido-Jurado, R. Muñoz-Salinas, F. J. Madrid-Cuevas, and M. J. Marín-Jiménez. 2014.
* "Automatic generation and detection of highly reliable fiducial markers under occlusion".
* Pattern Recogn. 47, 6 (June 2014), 2280-2292. DOI=10.1016/j.patcog.2014.01.005
*/
static int _getSelfDistance(const Mat &marker) {
Mat bytes = Dictionary::getByteListFromBits(marker);
int minHamming = (int)marker.total() + 1;
for(int r = 1; r < 4; r++) {
int currentHamming = cv::hal::normHamming(bytes.ptr(), bytes.ptr() + bytes.cols*r, bytes.cols);
if(currentHamming < minHamming) minHamming = currentHamming;
}
return minHamming;
}
Dictionary extendDictionary(int nMarkers, int markerSize, const Dictionary &baseDictionary, int randomSeed) {
RNG rng((uint64)(randomSeed));
Dictionary out = Dictionary(Mat(), markerSize);
out.markerSize = markerSize;
// theoretical maximum intermarker distance
// See S. Garrido-Jurado, R. Muñoz-Salinas, F. J. Madrid-Cuevas, and M. J. Marín-Jiménez. 2014.
// "Automatic generation and detection of highly reliable fiducial markers under occlusion".
// Pattern Recogn. 47, 6 (June 2014), 2280-2292. DOI=10.1016/j.patcog.2014.01.005
int C = (int)std::floor(float(markerSize * markerSize) / 4.f);
int tau = 2 * (int)std::floor(float(C) * 4.f / 3.f);
// if baseDictionary is provided, calculate its intermarker distance
if(baseDictionary.bytesList.rows > 0) {
CV_Assert(baseDictionary.markerSize == markerSize);
out.bytesList = baseDictionary.bytesList.clone();
int minDistance = markerSize * markerSize + 1;
for(int i = 0; i < out.bytesList.rows; i++) {
Mat markerBytes = out.bytesList.rowRange(i, i + 1);
Mat markerBits = Dictionary::getBitsFromByteList(markerBytes, markerSize);
minDistance = min(minDistance, _getSelfDistance(markerBits));
for(int j = i + 1; j < out.bytesList.rows; j++) {
minDistance = min(minDistance, out.getDistanceToId(markerBits, j));
}
}
tau = minDistance;
}
// current best option
int bestTau = 0;
Mat bestMarker;
// after these number of unproductive iterations, the best option is accepted
const int maxUnproductiveIterations = 5000;
int unproductiveIterations = 0;
while(out.bytesList.rows < nMarkers) {
Mat currentMarker = _generateRandomMarker(markerSize, rng);
int selfDistance = _getSelfDistance(currentMarker);
int minDistance = selfDistance;
// if self distance is better or equal than current best option, calculate distance
// to previous accepted markers
if(selfDistance >= bestTau) {
for(int i = 0; i < out.bytesList.rows; i++) {
int currentDistance = out.getDistanceToId(currentMarker, i);
minDistance = min(currentDistance, minDistance);
if(minDistance <= bestTau) {
break;
}
}
}
// if distance is high enough, accept the marker
if(minDistance >= tau) {
unproductiveIterations = 0;
bestTau = 0;
Mat bytes = Dictionary::getByteListFromBits(currentMarker);
out.bytesList.push_back(bytes);
} else {
unproductiveIterations++;
// if distance is not enough, but is better than the current best option
if(minDistance > bestTau) {
bestTau = minDistance;
bestMarker = currentMarker;
}
// if number of unproductive iterarions has been reached, accept the current best option
if(unproductiveIterations == maxUnproductiveIterations) {
unproductiveIterations = 0;
tau = bestTau;
bestTau = 0;
Mat bytes = Dictionary::getByteListFromBits(bestMarker);
out.bytesList.push_back(bytes);
}
}
}
// update the maximum number of correction bits for the generated dictionary
out.maxCorrectionBits = (tau - 1) / 2;
return out;
}
}
}

View File

@ -0,0 +1,50 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html
#include "../precomp.hpp"
#include "aruco_utils.hpp"
namespace cv {
namespace aruco {
using namespace std;
// Copy a vector of 4-corner point lists into the user-supplied OutputArrayOfArrays,
// scaling every coordinate by 'scale'. Supports vector<Mat>, vector<UMat> and
// vector<vector<Point2f>> destinations; anything else raises StsNotImplemented.
void _copyVector2Output(vector<vector<Point2f> > &vec, OutputArrayOfArrays out, const float scale) {
    const int nContours = (int)vec.size();
    out.create(nContours, 1, CV_32FC2);
    if(out.isMatVector()) {
        for(int i = 0; i < nContours; i++) {
            // each entry becomes a 4x1 two-channel Mat (one row per corner)
            out.create(4, 1, CV_32FC2, i);
            Mat &dst = out.getMatRef(i);
            Mat(Mat(vec[i]).t()*scale).copyTo(dst);
        }
    }
    else if(out.isUMatVector()) {
        for(int i = 0; i < nContours; i++) {
            out.create(4, 1, CV_32FC2, i);
            UMat &dst = out.getUMatRef(i);
            Mat(Mat(vec[i]).t()*scale).copyTo(dst);
        }
    }
    else if(out.kind() == _OutputArray::STD_VECTOR_VECTOR) {
        for(int i = 0; i < nContours; i++) {
            out.create(4, 1, CV_32FC2, i);
            Mat dst = out.getMat(i);
            Mat(Mat(vec[i]).t()*scale).copyTo(dst);
        }
    }
    else {
        // unsupported OutputArray kind
        CV_Error(cv::Error::StsNotImplemented,
                 "Only Mat vector, UMat vector, and vector<vector> OutputArrays are currently supported.");
    }
}
// Produce a single-channel 8-bit image: BGR input is converted to gray,
// already-gray input is copied through unchanged. Any other type asserts.
void _convertToGrey(InputArray _in, OutputArray _out) {
    const int srcType = _in.type();
    CV_Assert(srcType == CV_8UC1 || srcType == CV_8UC3);
    if(srcType == CV_8UC1)
        _in.copyTo(_out);
    else
        cvtColor(_in, _out, COLOR_BGR2GRAY);
}
}
}

View File

@ -0,0 +1,43 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html
#ifndef __OPENCV_OBJDETECT_ARUCO_UTILS_HPP__
#define __OPENCV_OBJDETECT_ARUCO_UTILS_HPP__
#include <opencv2/core.hpp>
#include <vector>
namespace cv {
namespace aruco {
/**
 * @brief Copy the contents of a corners vector to an OutputArray, setting its size.
 * Each element of @p vec becomes a 4x1 CV_32FC2 entry, multiplied by @p scale.
 */
void _copyVector2Output(std::vector<std::vector<Point2f> > &vec, OutputArrayOfArrays out, const float scale = 1.f);

/**
 * @brief Convert input image to gray if it is a 3-channels image; gray input is copied as-is.
 */
void _convertToGrey(InputArray _in, OutputArray _out);
template<typename T>
inline bool readParameter(const std::string& name, T& parameter, const FileNode& node)
{
if (!node.empty() && !node[name].empty()) {
node[name] >> parameter;
return true;
}
return false;
}
/**
 * @brief Either read @p name from @p readNode (when it is non-empty) or
 *        serialize the current value of @p parameter into @p writeStorage.
 * @return result of readParameter() when reading; true when writing.
 */
template<typename T>
inline bool readWriteParameter(const std::string& name, T& parameter, const FileNode& readNode, FileStorage& writeStorage) {
    if (readNode.empty()) {
        // write mode: persist the current value
        writeStorage << name << parameter;
        return true;
    }
    // read mode
    return readParameter(name, parameter, readNode);
}
}
}
#endif

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,704 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#include "test_precomp.hpp"
#include "opencv2/objdetect/aruco_detector.hpp"
#include "opencv2/calib3d.hpp"
namespace opencv_test { namespace {
/**
 * @brief Draw 2D synthetic markers (axis-aligned, unprojected) and detect them.
 */
class CV_ArucoDetectionSimple : public cvtest::BaseTest {
    public:
    CV_ArucoDetectionSimple() {}

    protected:
    void run(int);
};
// Render 4 markers per image on a white canvas in a 2x2 grid, run the detector,
// and require every marker to be found with sub-0.001px corner accuracy
// (exact accuracy is expected because the markers are drawn axis-aligned).
void CV_ArucoDetectionSimple::run(int) {
    aruco::ArucoDetector detector(aruco::getPredefinedDictionary(aruco::DICT_6X6_250));
    // 20 images
    for(int i = 0; i < 20; i++) {
        const int markerSidePixels = 100;
        // canvas: 2 markers per row plus half-marker margins between/around them
        int imageSize = markerSidePixels * 2 + 3 * (markerSidePixels / 2);

        // draw synthetic image and store marker corners and ids
        vector<vector<Point2f> > groundTruthCorners;
        vector<int> groundTruthIds;
        Mat img = Mat(imageSize, imageSize, CV_8UC1, Scalar::all(255));
        for(int y = 0; y < 2; y++) {
            for(int x = 0; x < 2; x++) {
                Mat marker;
                // ids are unique across all 20 images: 4 per image
                int id = i * 4 + y * 2 + x;
                aruco::generateImageMarker(detector.getDictionary(), id, markerSidePixels, marker);
                Point2f firstCorner =
                    Point2f(markerSidePixels / 2.f + x * (1.5f * markerSidePixels),
                            markerSidePixels / 2.f + y * (1.5f * markerSidePixels));
                // blit the marker bitmap into its grid cell
                Mat aux = img.colRange((int)firstCorner.x, (int)firstCorner.x + markerSidePixels)
                              .rowRange((int)firstCorner.y, (int)firstCorner.y + markerSidePixels);
                marker.copyTo(aux);
                groundTruthIds.push_back(id);
                // corners in clockwise order starting at the top-left
                groundTruthCorners.push_back(vector<Point2f>());
                groundTruthCorners.back().push_back(firstCorner);
                groundTruthCorners.back().push_back(firstCorner + Point2f(markerSidePixels - 1, 0));
                groundTruthCorners.back().push_back(
                    firstCorner + Point2f(markerSidePixels - 1, markerSidePixels - 1));
                groundTruthCorners.back().push_back(firstCorner + Point2f(0, markerSidePixels - 1));
            }
        }
        // NOTE(review): convertTo() changes only the depth, not the channel count,
        // so this presumably does not actually produce a 3-channel image —
        // cvtColor(GRAY2BGR) may have been intended; confirm.
        if(i % 2 == 1) img.convertTo(img, CV_8UC3);

        // detect markers
        vector<vector<Point2f> > corners;
        vector<int> ids;
        detector.detectMarkers(img, corners, ids);

        // check detection results: every ground-truth id must appear, and each
        // matched marker's corners must coincide with the drawn positions
        for(unsigned int m = 0; m < groundTruthIds.size(); m++) {
            int idx = -1;
            for(unsigned int k = 0; k < ids.size(); k++) {
                if(groundTruthIds[m] == ids[k]) {
                    idx = (int)k;
                    break;
                }
            }
            if(idx == -1) {
                ts->printf(cvtest::TS::LOG, "Marker not detected");
                ts->set_failed_test_info(cvtest::TS::FAIL_MISMATCH);
                return;
            }
            for(int c = 0; c < 4; c++) {
                double dist = cv::norm(groundTruthCorners[m][c] - corners[idx][c]); // TODO cvtest
                if(dist > 0.001) {
                    ts->printf(cvtest::TS::LOG, "Incorrect marker corners position");
                    ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY);
                    return;
                }
            }
        }
    }
}
static double deg2rad(double deg) { return deg * CV_PI / 180.; }
/**
 * @brief Get rvec and tvec from yaw, pitch and distance.
 * Builds a camera-style pose: first aligns the axes to a camera frame, then
 * applies pitch and yaw rotations, and finally places the object at @p distance
 * along the camera Z axis. The composeRT call order is significant.
 */
static void getSyntheticRT(double yaw, double pitch, double distance, Mat &rvec, Mat &tvec) {
    rvec = Mat(3, 1, CV_64FC1);
    tvec = Mat(3, 1, CV_64FC1);

    // Rvec
    // first put the Z axis aiming to -X (like the camera axis system)
    Mat rotZ(3, 1, CV_64FC1);
    rotZ.ptr<double>(0)[0] = 0;
    rotZ.ptr<double>(0)[1] = 0;
    rotZ.ptr<double>(0)[2] = -0.5 * CV_PI;

    Mat rotX(3, 1, CV_64FC1);
    rotX.ptr<double>(0)[0] = 0.5 * CV_PI;
    rotX.ptr<double>(0)[1] = 0;
    rotX.ptr<double>(0)[2] = 0;

    // combine the two axis-alignment rotations (translations are zero here)
    Mat camRvec, camTvec;
    composeRT(rotZ, Mat(3, 1, CV_64FC1, Scalar::all(0)), rotX, Mat(3, 1, CV_64FC1, Scalar::all(0)),
              camRvec, camTvec);

    // now pitch and yaw angles
    Mat rotPitch(3, 1, CV_64FC1);
    rotPitch.ptr<double>(0)[0] = 0;
    rotPitch.ptr<double>(0)[1] = pitch;
    rotPitch.ptr<double>(0)[2] = 0;

    Mat rotYaw(3, 1, CV_64FC1);
    rotYaw.ptr<double>(0)[0] = yaw;
    rotYaw.ptr<double>(0)[1] = 0;
    rotYaw.ptr<double>(0)[2] = 0;

    composeRT(rotPitch, Mat(3, 1, CV_64FC1, Scalar::all(0)), rotYaw,
              Mat(3, 1, CV_64FC1, Scalar::all(0)), rvec, tvec);

    // compose both rotations
    composeRT(camRvec, Mat(3, 1, CV_64FC1, Scalar::all(0)), rvec,
              Mat(3, 1, CV_64FC1, Scalar::all(0)), rvec, tvec);

    // Tvec, just move in z (camera) direction the specific distance
    tvec.ptr<double>(0)[0] = 0.;
    tvec.ptr<double>(0)[1] = 0.;
    tvec.ptr<double>(0)[2] = distance;
}
/**
 * @brief Create a synthetic image of a marker with perspective.
 * Renders marker @p id from @p dictionary, optionally padded with a white
 * frame (@p encloseMarker), warps it with the pose derived from
 * yaw/pitch/distance, and returns the resulting image. The projected 2D
 * corner positions are written to @p corners.
 */
static Mat projectMarker(const aruco::Dictionary &dictionary, int id, Mat cameraMatrix, double yaw,
                         double pitch, double distance, Size imageSize, int markerBorder,
                         vector<Point2f> &corners, int encloseMarker=0) {
    // canonical image
    Mat marker, markerImg;
    const int markerSizePixels = 100;

    aruco::generateImageMarker(dictionary, id, markerSizePixels, marker, markerBorder);
    marker.copyTo(markerImg);

    if(encloseMarker){ //to enclose the marker
        // surround the marker bitmap with a white border of 1/4 marker size
        int enclose = int(marker.rows/4);
        markerImg = Mat::zeros(marker.rows+(2*enclose), marker.cols+(enclose*2), CV_8UC1);

        Mat field= markerImg.rowRange(int(enclose), int(markerImg.rows-enclose))
                            .colRange(int(0), int(markerImg.cols));
        field.setTo(255);
        field= markerImg.rowRange(int(0), int(markerImg.rows))
                        .colRange(int(enclose), int(markerImg.cols-enclose));
        field.setTo(255);

        field = markerImg(Rect(enclose,enclose,marker.rows,marker.cols));
        marker.copyTo(field);
    }

    // get rvec and tvec for the perspective
    Mat rvec, tvec;
    getSyntheticRT(yaw, pitch, distance, rvec, tvec);

    const float markerLength = 0.05f;
    vector<Point3f> markerObjPoints;
    // 3D marker corners centered at the origin, clockwise from top-left
    markerObjPoints.push_back(Point3f(-markerLength / 2.f, +markerLength / 2.f, 0));
    markerObjPoints.push_back(markerObjPoints[0] + Point3f(markerLength, 0, 0));
    markerObjPoints.push_back(markerObjPoints[0] + Point3f(markerLength, -markerLength, 0));
    markerObjPoints.push_back(markerObjPoints[0] + Point3f(0, -markerLength, 0));

    // project markers and draw them
    Mat distCoeffs(5, 1, CV_64FC1, Scalar::all(0));
    projectPoints(markerObjPoints, rvec, tvec, cameraMatrix, distCoeffs, corners);

    // homography from canonical marker pixels to the projected corner positions
    vector<Point2f> originalCorners;
    originalCorners.push_back(Point2f(0+float(encloseMarker*markerSizePixels/4), 0+float(encloseMarker*markerSizePixels/4)));
    originalCorners.push_back(originalCorners[0]+Point2f((float)markerSizePixels, 0));
    originalCorners.push_back(originalCorners[0]+Point2f((float)markerSizePixels, (float)markerSizePixels));
    originalCorners.push_back(originalCorners[0]+Point2f(0, (float)markerSizePixels));

    Mat transformation = getPerspectiveTransform(originalCorners, corners);

    Mat img(imageSize, CV_8UC1, Scalar::all(255));
    Mat aux;
    const char borderValue = 127;
    // borderValue marks pixels outside the warped marker so they can be skipped
    warpPerspective(markerImg, aux, transformation, imageSize, INTER_NEAREST, BORDER_CONSTANT,
                    Scalar::all(borderValue));

    // copy only not-border pixels
    for(int y = 0; y < aux.rows; y++) {
        for(int x = 0; x < aux.cols; x++) {
            if(aux.at<unsigned char>(y, x) == borderValue) continue;
            img.at<unsigned char>(y, x) = aux.at<unsigned char>(y, x);
        }
    }

    return img;
}
// Selects which detector variant the parameterized perspective test exercises.
enum class ArucoAlgParams
{
    USE_DEFAULT = 0,
    USE_APRILTAG=1,         /// Detect marker candidates :: using AprilTag
    DETECT_INVERTED_MARKER, /// Check if there is a white marker
    USE_ARUCO3              /// Check if aruco3 should be used
};
/**
 * @brief Draws markers in perspective and detect them.
 * The algorithm variant under test is selected by the ArucoAlgParams
 * value passed to the constructor.
 */
class CV_ArucoDetectionPerspective : public cvtest::BaseTest {
    public:
    CV_ArucoDetectionPerspective(ArucoAlgParams arucoAlgParam) : arucoAlgParams(arucoAlgParam) {}

    protected:
    void run(int);
    ArucoAlgParams arucoAlgParams;  // which detector configuration to exercise
};
// Project a marker at a grid of distances/pitches/yaws, detect it with the
// configured algorithm variant, and require the single expected id with
// corners within 5px of the projected ground truth.
void CV_ArucoDetectionPerspective::run(int) {
    int iter = 0;
    int szEnclosed = 0;  // non-zero: render the marker with a white enclosing frame
    Mat cameraMatrix = Mat::eye(3, 3, CV_64FC1);
    Size imgSize(500, 500);
    cameraMatrix.at<double>(0, 0) = cameraMatrix.at<double>(1, 1) = 650;
    cameraMatrix.at<double>(0, 2) = imgSize.width / 2;
    cameraMatrix.at<double>(1, 2) = imgSize.height / 2;
    aruco::DetectorParameters params;
    params.minDistanceToBorder = 1;
    aruco::ArucoDetector detector(aruco::getPredefinedDictionary(aruco::DICT_6X6_250), params);

    // detect from different positions
    // NOTE(review): the loop relies on exact double comparison (distance == 0.1)
    // both for the pitch step and for the enclosed-marker re-run below;
    // this happens to work with these literals but is fragile — confirm.
    for(double distance = 0.1; distance < 0.7; distance += 0.2) {
        for(int pitch = 0; pitch < 360; pitch += (distance == 0.1? 60:180)) {
            for(int yaw = 70; yaw <= 120; yaw += 40){
                int currentId = iter % 250;
                int markerBorder = iter % 2 + 1;  // alternate 1- and 2-bit borders
                iter++;
                vector<Point2f> groundTruthCorners;

                aruco::DetectorParameters detectorParameters = params;
                detectorParameters.markerBorderBits = markerBorder;

                /// create synthetic image
                Mat img=
                    projectMarker(detector.getDictionary(), currentId, cameraMatrix, deg2rad(yaw), deg2rad(pitch),
                                  distance, imgSize, markerBorder, groundTruthCorners, szEnclosed);
                // marker :: Inverted
                if(ArucoAlgParams::DETECT_INVERTED_MARKER == arucoAlgParams){
                    img = ~img;
                    detectorParameters.detectInvertedMarker = true;
                }

                if(ArucoAlgParams::USE_APRILTAG == arucoAlgParams){
                    detectorParameters.cornerRefinementMethod = aruco::CORNER_REFINE_APRILTAG;
                }

                if (ArucoAlgParams::USE_ARUCO3 == arucoAlgParams) {
                    detectorParameters.useAruco3Detection = true;
                    detectorParameters.cornerRefinementMethod = aruco::CORNER_REFINE_SUBPIX;
                }
                detector.setDetectorParameters(detectorParameters);

                // detect markers
                vector<vector<Point2f> > corners;
                vector<int> ids;
                detector.detectMarkers(img, corners, ids);

                // check results: exactly one marker with the expected id
                if(ids.size() != 1 || (ids.size() == 1 && ids[0] != currentId)) {
                    if(ids.size() != 1)
                        ts->printf(cvtest::TS::LOG, "Incorrect number of detected markers");
                    else
                        ts->printf(cvtest::TS::LOG, "Incorrect marker id");
                    ts->set_failed_test_info(cvtest::TS::FAIL_MISMATCH);
                    return;
                }
                for(int c = 0; c < 4; c++) {
                    double dist = cv::norm(groundTruthCorners[c] - corners[0][c]); // TODO cvtest
                    if(dist > 5) {
                        ts->printf(cvtest::TS::LOG, "Incorrect marker corners position");
                        ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY);
                        return;
                    }
                }
            }
        }
        // change the state :: to detect an enclosed inverted marker
        // (repeats the distance == 0.1 pass once more with an enclosing frame)
        if(ArucoAlgParams::DETECT_INVERTED_MARKER == arucoAlgParams && distance == 0.1){
            distance -= 0.1;
            szEnclosed++;
        }
    }
}
/**
 * @brief Check max and min size in marker detection parameters
 * (minMarkerPerimeterRate / maxMarkerPerimeterRate filtering).
 */
class CV_ArucoDetectionMarkerSize : public cvtest::BaseTest {
    public:
    CV_ArucoDetectionMarkerSize() {}

    protected:
    void run(int);
};
void CV_ArucoDetectionMarkerSize::run(int) {
aruco::DetectorParameters params;
aruco::ArucoDetector detector(aruco::getPredefinedDictionary(aruco::DICT_6X6_250), params);
int markerSide = 20;
int imageSize = 200;
// 10 cases
for(int i = 0; i < 10; i++) {
Mat marker;
int id = 10 + i * 20;
// create synthetic image
Mat img = Mat(imageSize, imageSize, CV_8UC1, Scalar::all(255));
aruco::generateImageMarker(detector.getDictionary(), id, markerSide, marker);
Mat aux = img.colRange(30, 30 + markerSide).rowRange(50, 50 + markerSide);
marker.copyTo(aux);
vector<vector<Point2f> > corners;
vector<int> ids;
// set a invalid minMarkerPerimeterRate
aruco::DetectorParameters detectorParameters = params;
detectorParameters.minMarkerPerimeterRate = min(4., (4. * markerSide) / float(imageSize) + 0.1);
detector.setDetectorParameters(detectorParameters);
detector.detectMarkers(img, corners, ids);
if(corners.size() != 0) {
ts->printf(cvtest::TS::LOG, "Error in DetectorParameters::minMarkerPerimeterRate");
ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY);
return;
}
// set an valid minMarkerPerimeterRate
detectorParameters = params;
detectorParameters.minMarkerPerimeterRate = max(0., (4. * markerSide) / float(imageSize) - 0.1);
detector.setDetectorParameters(detectorParameters);
detector.detectMarkers(img, corners, ids);
if(corners.size() != 1 || (corners.size() == 1 && ids[0] != id)) {
ts->printf(cvtest::TS::LOG, "Error in DetectorParameters::minMarkerPerimeterRate");
ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY);
return;
}
// set a invalid maxMarkerPerimeterRate
detectorParameters = params;
detectorParameters.maxMarkerPerimeterRate = min(4., (4. * markerSide) / float(imageSize) - 0.1);
detector.setDetectorParameters(detectorParameters);
detector.detectMarkers(img, corners, ids);
if(corners.size() != 0) {
ts->printf(cvtest::TS::LOG, "Error in DetectorParameters::maxMarkerPerimeterRate");
ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY);
return;
}
// set an valid maxMarkerPerimeterRate
detectorParameters = params;
detectorParameters.maxMarkerPerimeterRate = max(0., (4. * markerSide) / float(imageSize) + 0.1);
detector.setDetectorParameters(detectorParameters);
detector.detectMarkers(img, corners, ids);
if(corners.size() != 1 || (corners.size() == 1 && ids[0] != id)) {
ts->printf(cvtest::TS::LOG, "Error in DetectorParameters::maxMarkerPerimeterRate");
ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY);
return;
}
}
}
/**
 * @brief Check error correction in marker bits
 * (detector must recover markers with few bit errors and reject ones with many).
 */
class CV_ArucoBitCorrection : public cvtest::BaseTest {
    public:
    CV_ArucoBitCorrection() {}

    protected:
    void run(int);
};
// Flip a controlled number of bits in a marker's code and check the detector's
// errorCorrectionRate behavior: markers with errors below the correction
// capacity must still be identified, markers with errors above it must be
// rejected. dictionary2 is mutated in place to render the corrupted markers.
void CV_ArucoBitCorrection::run(int) {
    aruco::Dictionary dictionary1 = aruco::getPredefinedDictionary(aruco::DICT_6X6_250);
    aruco::Dictionary dictionary2 = aruco::getPredefinedDictionary(aruco::DICT_6X6_250);
    aruco::DetectorParameters params;
    aruco::ArucoDetector detector1(dictionary1, params);
    int markerSide = 50;
    int imageSize = 150;

    // 10 markers
    for(int l = 0; l < 10; l++) {
        Mat marker;
        int id = 10 + l * 20;

        // the original (uncorrupted) byte code of this marker
        Mat currentCodeBytes = dictionary1.bytesList.rowRange(id, id + 1);
        aruco::DetectorParameters detectorParameters = detector1.getDetectorParameters();
        // 5 valid cases
        for(int i = 0; i < 5; i++) {
            // how many bit errors (the error is low enough so it can be corrected)
            detectorParameters.errorCorrectionRate = 0.2 + i * 0.1;
            detector1.setDetectorParameters(detectorParameters);
            int errors =
                (int)std::floor(dictionary1.maxCorrectionBits * detector1.getDetectorParameters().errorCorrectionRate - 1.);

            // create erroneous marker in currentCodeBits (flip every other bit
            // up to 'errors' flips)
            Mat currentCodeBits =
                aruco::Dictionary::getBitsFromByteList(currentCodeBytes, dictionary1.markerSize);
            for(int e = 0; e < errors; e++) {
                currentCodeBits.ptr<unsigned char>()[2 * e] =
                    !currentCodeBits.ptr<unsigned char>()[2 * e];
            }

            // add erroneous marker to dictionary2 in order to create the erroneous marker image
            Mat currentCodeBytesError = aruco::Dictionary::getByteListFromBits(currentCodeBits);
            currentCodeBytesError.copyTo(dictionary2.bytesList.rowRange(id, id + 1));
            Mat img = Mat(imageSize, imageSize, CV_8UC1, Scalar::all(255));
            dictionary2.generateImageMarker(id, markerSide, marker);
            Mat aux = img.colRange(30, 30 + markerSide).rowRange(50, 50 + markerSide);
            marker.copyTo(aux);

            // try to detect using original dictionary: the errors are within the
            // correction budget, so detection must succeed with the right id
            vector<vector<Point2f> > corners;
            vector<int> ids;
            detector1.detectMarkers(img, corners, ids);
            if(corners.size() != 1 || (corners.size() == 1 && ids[0] != id)) {
                ts->printf(cvtest::TS::LOG, "Error in bit correction");
                ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY);
                return;
            }
        }

        // 5 invalid cases
        for(int i = 0; i < 5; i++) {
            // how many bit errors (the error is too high to be corrected)
            detectorParameters.errorCorrectionRate = 0.2 + i * 0.1;
            detector1.setDetectorParameters(detectorParameters);
            int errors =
                (int)std::floor(dictionary1.maxCorrectionBits * detector1.getDetectorParameters().errorCorrectionRate + 1.);

            // create erroneous marker in currentCodeBits
            Mat currentCodeBits =
                aruco::Dictionary::getBitsFromByteList(currentCodeBytes, dictionary1.markerSize);
            for(int e = 0; e < errors; e++) {
                currentCodeBits.ptr<unsigned char>()[2 * e] =
                    !currentCodeBits.ptr<unsigned char>()[2 * e];
            }

            // dictionary3 is only composed by the modified marker (in its original form)
            aruco::Dictionary _dictionary3 = aruco::Dictionary(
                    dictionary2.bytesList.rowRange(id, id + 1).clone(),
                    dictionary1.markerSize,
                    dictionary1.maxCorrectionBits);
            aruco::ArucoDetector detector3(_dictionary3, detector1.getDetectorParameters());
            // add erroneous marker to dictionary2 in order to create the erroneous marker image
            Mat currentCodeBytesError = aruco::Dictionary::getByteListFromBits(currentCodeBits);
            currentCodeBytesError.copyTo(dictionary2.bytesList.rowRange(id, id + 1));
            Mat img = Mat(imageSize, imageSize, CV_8UC1, Scalar::all(255));
            dictionary2.generateImageMarker(id, markerSide, marker);
            Mat aux = img.colRange(30, 30 + markerSide).rowRange(50, 50 + markerSide);
            marker.copyTo(aux);

            // try to detect using dictionary3, it should fail
            vector<vector<Point2f> > corners;
            vector<int> ids;
            detector3.detectMarkers(img, corners, ids);
            if(corners.size() != 0) {
                ts->printf(cvtest::TS::LOG, "Error in DetectorParameters::errorCorrectionRate");
                ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY);
                return;
            }
        }
    }
}
// The perspective test is reused under distinct names so each algorithm
// variant shows up as its own test case in the report.
typedef CV_ArucoDetectionPerspective CV_AprilTagDetectionPerspective;
typedef CV_ArucoDetectionPerspective CV_InvertedArucoDetectionPerspective;
typedef CV_ArucoDetectionPerspective CV_Aruco3DetectionPerspective;

// white-on-black (inverted) markers
TEST(CV_InvertedArucoDetectionPerspective, algorithmic) {
    CV_InvertedArucoDetectionPerspective test(ArucoAlgParams::DETECT_INVERTED_MARKER);
    test.safe_run();
}

// AprilTag candidate detection path
TEST(CV_AprilTagDetectionPerspective, algorithmic) {
    CV_AprilTagDetectionPerspective test(ArucoAlgParams::USE_APRILTAG);
    test.safe_run();
}

// aruco3 (scaled) detection path
TEST(CV_Aruco3DetectionPerspective, algorithmic) {
    CV_Aruco3DetectionPerspective test(ArucoAlgParams::USE_ARUCO3);
    test.safe_run();
}

TEST(CV_ArucoDetectionSimple, algorithmic) {
    CV_ArucoDetectionSimple test;
    test.safe_run();
}

TEST(CV_ArucoDetectionPerspective, algorithmic) {
    CV_ArucoDetectionPerspective test(ArucoAlgParams::USE_DEFAULT);
    test.safe_run();
}

TEST(CV_ArucoDetectionMarkerSize, algorithmic) {
    CV_ArucoDetectionMarkerSize test;
    test.safe_run();
}

TEST(CV_ArucoBitCorrection, algorithmic) {
    CV_ArucoBitCorrection test;
    test.safe_run();
}
// Regression for opencv_contrib#3192: detect two 4x4 markers in a stored image
// and compare every corner against hard-coded gold positions (1px tolerance).
TEST(CV_ArucoDetectMarkers, regression_3192)
{
    aruco::ArucoDetector detector(aruco::getPredefinedDictionary(aruco::DICT_4X4_50));
    vector<int> markerIds;
    vector<vector<Point2f> > markerCorners;
    string imgPath = cvtest::findDataFile("aruco/regression_3192.png");
    Mat image = imread(imgPath);
    const size_t N = 2ull;
    // gold corners as x,y pairs, clockwise, one row per marker
    const int goldCorners[N][8] = { {345,120, 520,120, 520,295, 345,295}, {101,114, 270,112, 276,287, 101,287} };
    const int goldCornersIds[N] = { 6, 4 };
    // id -> gold corner row (detection order is not guaranteed)
    map<int, const int*> mapGoldCorners;
    for (size_t i = 0; i < N; i++)
        mapGoldCorners[goldCornersIds[i]] = goldCorners[i];

    detector.detectMarkers(image, markerCorners, markerIds);

    ASSERT_EQ(N, markerIds.size());
    for (size_t i = 0; i < N; i++)
    {
        int arucoId = markerIds[i];
        ASSERT_EQ(4ull, markerCorners[i].size());
        ASSERT_TRUE(mapGoldCorners.find(arucoId) != mapGoldCorners.end());
        for (int j = 0; j < 4; j++)
        {
            EXPECT_NEAR(static_cast<float>(mapGoldCorners[arucoId][j * 2]), markerCorners[i][j].x, 1.f);
            EXPECT_NEAR(static_cast<float>(mapGoldCorners[arucoId][j * 2 + 1]), markerCorners[i][j].y, 1.f);
        }
    }
}
// Regression for opencv_contrib#2492: an image with duplicate marker ids.
// Since several markers share an id, each detection is matched against the
// closest of the gold corner sets registered for that id.
TEST(CV_ArucoDetectMarkers, regression_2492)
{
    aruco::ArucoDetector detector(aruco::getPredefinedDictionary(aruco::DICT_5X5_50));
    aruco::DetectorParameters detectorParameters = detector.getDetectorParameters();
    detectorParameters.minMarkerDistanceRate = 0.026;
    detector.setDetectorParameters(detectorParameters);
    vector<int> markerIds;
    vector<vector<Point2f> > markerCorners;
    string imgPath = cvtest::findDataFile("aruco/regression_2492.png");
    Mat image = imread(imgPath);
    const size_t N = 8ull;
    // gold corners as x,y pairs, one row per marker instance
    const int goldCorners[N][8] = { {179,139, 179,95, 223,95, 223,139}, {99,139, 99,95, 143,95, 143,139},
                                    {19,139, 19,95, 63,95, 63,139},     {256,140, 256,93, 303,93, 303,140},
                                    {256,62, 259,21, 300,23, 297,64},   {99,21, 143,17, 147,60, 103,64},
                                    {69,61, 28,61, 14,21, 58,17},       {174,62, 182,13, 230,19, 223,68} };
    const int goldCornersIds[N] = {13, 13, 13, 13, 1, 15, 14, 4};
    // id -> all gold corner sets with that id (id 13 appears four times)
    map<int, vector<const int*> > mapGoldCorners;
    for (size_t i = 0; i < N; i++)
        mapGoldCorners[goldCornersIds[i]].push_back(goldCorners[i]);

    detector.detectMarkers(image, markerCorners, markerIds);

    ASSERT_EQ(N, markerIds.size());
    for (size_t i = 0; i < N; i++)
    {
        int arucoId = markerIds[i];
        ASSERT_EQ(4ull, markerCorners[i].size());
        ASSERT_TRUE(mapGoldCorners.find(arucoId) != mapGoldCorners.end());
        // accept the detection if its summed corner deviation from the best
        // matching gold set stays below 8px
        float totalDist = 8.f;
        for (size_t k = 0ull; k < mapGoldCorners[arucoId].size(); k++)
        {
            float dist = 0.f;
            for (int j = 0; j < 4; j++) // total distance up to 4 points
            {
                dist += abs(mapGoldCorners[arucoId][k][j * 2] - markerCorners[i][j].x);
                dist += abs(mapGoldCorners[arucoId][k][j * 2 + 1] - markerCorners[i][j].y);
            }
            totalDist = min(totalDist, dist);
        }
        EXPECT_LT(totalDist, 8.f);
    }
}
// Parameterized fixture: runs the thread-count invariance test once per
// corner-refinement method.
struct ArucoThreading: public testing::TestWithParam<aruco::CornerRefineMethod>
{
    // RAII guard: sets the global OpenCV thread count for the enclosing scope
    // and restores the previous value on destruction.
    struct NumThreadsSetter {
        NumThreadsSetter(const int num_threads)
                : original_num_threads_(getNumThreads()) {
            setNumThreads(num_threads);
        }

        ~NumThreadsSetter() {
            setNumThreads(original_num_threads_);
        }
     private:
        int original_num_threads_;  // value to restore
    };
};
// Detection results must not depend on the global thread count: detect once
// with 1 thread as the reference, then repeat with various counts (including
// counts around the image height) and require identical ids and near-identical
// corners.
TEST_P(ArucoThreading, number_of_threads_does_not_change_results)
{
    // We are not testing against different dictionaries
    // As we are interested mostly in small images, smaller
    // markers is better -> 4x4
    aruco::ArucoDetector detector(aruco::getPredefinedDictionary(aruco::DICT_4X4_50));

    // Height of the test image can be chosen quite freely
    // We aim to test against small images as in those the
    // number of threads has most effect
    const int height_img = 20;
    // Just to get nice white boarder
    const int shift = height_img > 10 ? 5 : 1;
    const int height_marker = height_img-2*shift;

    // Create a test image
    Mat img_marker;
    aruco::generateImageMarker(detector.getDictionary(), 23, height_marker, img_marker, 1);

    // Copy to bigger image to get a white border
    Mat img(height_img, height_img, CV_8UC1, Scalar(255));
    img_marker.copyTo(img(Rect(shift, shift, height_marker, height_marker)));

    // corner refinement method comes from the test parameter
    aruco::DetectorParameters detectorParameters = detector.getDetectorParameters();
    detectorParameters.cornerRefinementMethod = GetParam();
    detector.setDetectorParameters(detectorParameters);

    vector<vector<Point2f> > original_corners;
    vector<int> original_ids;
    {
        // reference run with a single thread
        NumThreadsSetter thread_num_setter(1);
        detector.detectMarkers(img, original_corners, original_ids);
    }

    ASSERT_EQ(original_ids.size(), 1ull);
    ASSERT_EQ(original_corners.size(), 1ull);

    int num_threads_to_test[] = { 2, 8, 16, 32, height_img-1, height_img, height_img+1};

    for (size_t i_num_threads = 0; i_num_threads < sizeof(num_threads_to_test)/sizeof(int); ++i_num_threads) {
        NumThreadsSetter thread_num_setter(num_threads_to_test[i_num_threads]);

        vector<vector<Point2f> > corners;
        vector<int> ids;
        detector.detectMarkers(img, corners, ids);

        // If we don't find any markers, the test is broken
        ASSERT_EQ(ids.size(), 1ull);

        // Make sure we got the same result as the first time
        ASSERT_EQ(corners.size(), original_corners.size());
        ASSERT_EQ(ids.size(), original_ids.size());
        ASSERT_EQ(ids.size(), corners.size());
        for (size_t i = 0; i < corners.size(); ++i) {
            EXPECT_EQ(ids[i], original_ids[i]);
            for (size_t j = 0; j < corners[i].size(); ++j) {
                EXPECT_NEAR(corners[i][j].x, original_corners[i][j].x, 0.1f);
                EXPECT_NEAR(corners[i][j].y, original_corners[i][j].y, 0.1f);
            }
        }
    }
}
// Instantiate the threading test for every corner refinement method.
INSTANTIATE_TEST_CASE_P(
        CV_ArucoDetectMarkers, ArucoThreading,
        ::testing::Values(
            aruco::CORNER_REFINE_NONE,
            aruco::CORNER_REFINE_SUBPIX,
            aruco::CORNER_REFINE_CONTOUR,
            aruco::CORNER_REFINE_APRILTAG
        ));
}} // namespace

Binary file not shown.

After

Width:  |  Height:  |  Size: 75 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 78 KiB