opencv/samples/swift/ios/FaceDetection/FaceDetection/DetectionBasedTracker.mm

//
//  DetectionBasedTracker.mm
//
//  Created by Giles Payne on 2020/04/05.
//

#import "DetectionBasedTracker.h"

#import "Mat.h"
#import "Rect2i.h"
#import "CVObjcUtil.h"

// Adapts cv::CascadeClassifier to the IDetector interface that
// cv::DetectionBasedTracker expects for its main and tracking detectors.
class CascadeDetectorAdapter: public cv::DetectionBasedTracker::IDetector
{
public:
    CascadeDetectorAdapter(cv::Ptr<cv::CascadeClassifier> detector):IDetector(), Detector(detector) {}

    void detect(const cv::Mat &Image, std::vector<cv::Rect> &objects)
    {
        Detector->detectMultiScale(Image, objects, scaleFactor, minNeighbours, 0, minObjSize, maxObjSize);
    }

    virtual ~CascadeDetectorAdapter() {}

private:
    CascadeDetectorAdapter();
    cv::Ptr<cv::CascadeClassifier> Detector;
};

// Bundles the two detector adapters together with the tracker that owns them.
struct DetectorAgregator
{
    cv::Ptr<CascadeDetectorAdapter> mainDetector;
    cv::Ptr<CascadeDetectorAdapter> trackingDetector;
    cv::Ptr<cv::DetectionBasedTracker> tracker;

    DetectorAgregator(cv::Ptr<CascadeDetectorAdapter>& _mainDetector, cv::Ptr<CascadeDetectorAdapter>& _trackingDetector):mainDetector(_mainDetector), trackingDetector(_trackingDetector) {
        CV_Assert(_mainDetector);
        CV_Assert(_trackingDetector);

        cv::DetectionBasedTracker::Parameters DetectorParams;
        tracker = cv::makePtr<cv::DetectionBasedTracker>(mainDetector, trackingDetector, DetectorParams);
    }
};

@implementation DetectionBasedTracker {
    DetectorAgregator* agregator;
}

- (instancetype)initWithCascadeName:(NSString*)cascadeName minFaceSize:(int)faceSize {
    self = [super init];
    if (self) {
        // Both detectors load the same cascade file: one runs full-frame detection,
        // the other re-verifies already tracked objects.
        auto mainDetector = cv::makePtr<CascadeDetectorAdapter>(cv::makePtr<cv::CascadeClassifier>(cascadeName.UTF8String));
        auto trackingDetector = cv::makePtr<CascadeDetectorAdapter>(cv::makePtr<cv::CascadeClassifier>(cascadeName.UTF8String));
        agregator = new DetectorAgregator(mainDetector, trackingDetector);
        if (faceSize > 0) {
            agregator->mainDetector->setMinObjectSize(cv::Size(faceSize, faceSize));
        }
    }
    return self;
}

- (void)dealloc
{
    delete agregator;
}

- (void)start {
    agregator->tracker->run();
}

- (void)stop {
    agregator->tracker->stop();
}

- (void)setFaceSize:(int)size {
    agregator->mainDetector->setMinObjectSize(cv::Size(size, size));
}

- (void)detect:(Mat*)imageGray faces:(NSMutableArray<Rect2i*>*)faces {
    std::vector<cv::Rect> rectFaces;
    agregator->tracker->process(*((cv::Mat*)imageGray.nativePtr));
    agregator->tracker->getObjects(rectFaces);
    // Convert the native cv::Rect results into Objective-C Rect2i objects for the caller.
    CV2OBJC(cv::Rect, Rect2i, rectFaces, faces);
}
@end
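
A minimal usage sketch from the Swift side of the sample, assuming the class above is exposed through the app's bridging header and that a face cascade XML file is bundled with the app; the resource name, the per-frame Mat source, and the helper function names here are illustrative placeholders, not part of the file above.

import Foundation

// Hypothetical helper: load the bundled cascade and start the background tracker.
func makeTracker() -> DetectionBasedTracker? {
    guard let cascadePath = Bundle.main.path(forResource: "lbpcascade_frontalface",
                                             ofType: "xml") else { return nil }
    let tracker = DetectionBasedTracker(cascadeName: cascadePath, minFaceSize: 0)
    tracker?.start()
    return tracker
}

// Hypothetical helper: called once per grayscale camera frame
// (Mat and Rect2i come from the opencv2 Swift/Objective-C wrapper).
func detectFaces(in gray: Mat, with tracker: DetectionBasedTracker) -> [Rect2i] {
    let faces = NSMutableArray()
    tracker.detect(gray, faces: faces)          // fills the array with Rect2i objects
    return faces.compactMap { $0 as? Rect2i }
}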