diff --git a/doc/opencv.bib b/doc/opencv.bib
index 73c4668ff8..bdfbc8cf1e 100644
--- a/doc/opencv.bib
+++ b/doc/opencv.bib
@@ -1215,3 +1215,16 @@
   year = {1996},
   publisher = {Elsevier}
 }
+@Article{Wu2009,
+  author={Wu, Kesheng
+  and Otoo, Ekow
+  and Suzuki, Kenji},
+  title={Optimizing two-pass connected-component labeling algorithms},
+  journal={Pattern Analysis and Applications},
+  year={2009},
+  month={Jun},
+  day={01},
+  volume={12},
+  number={2},
+  pages={117-135},
+}
diff --git a/modules/dnn/src/layers/elementwise_layers.cpp b/modules/dnn/src/layers/elementwise_layers.cpp
index 3765472d80..ed87a3e2fc 100644
--- a/modules/dnn/src/layers/elementwise_layers.cpp
+++ b/modules/dnn/src/layers/elementwise_layers.cpp
@@ -765,9 +765,14 @@ struct MishFunctor : public BaseFunctor
         {
             // Use fast approximation introduced in https://github.com/opencv/opencv/pull/17200
             float x = srcptr[i];
-            float eX = exp(std::min(x, 20.f));
-            float n = (eX + 2) * eX;
-            dstptr[i] = (x * n) / (n + 2);
+            if (x >= 8.f)
+                dstptr[i] = x;
+            else
+            {
+                float eX = exp(x);
+                float n = (eX + 2) * eX;
+                dstptr[i] = (x * n) / (n + 2);
+            }
         }
     }
 }
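Note on the Mish fast path: for x >= 8, tanh(softplus(x)) is within float rounding of 1, so mish(x) reduces to x and the exponential (and the old min(x, 20) clamp) can be skipped. A standalone sanity-check sketch, not part of the patch, comparing the fast path against the reference definition mish(x) = x * tanh(ln(1 + e^x)):

    // Sanity check: fast Mish approximation vs. reference definition.
    #include <cmath>
    #include <cstdio>

    int main()
    {
        for (float x = -20.f; x <= 20.f; x += 0.5f)
        {
            float reference = x * std::tanh(std::log1p(std::exp((double)x)));
            float approx;
            if (x >= 8.f)
                approx = x; // tanh(softplus(x)) ~= 1 beyond this point
            else
            {
                float eX = std::exp(x);
                float n = (eX + 2) * eX;    // equals (1 + e^x)^2 - 1
                approx = (x * n) / (n + 2); // x * tanh(softplus(x))
            }
            std::printf("x=%6.2f ref=%10.6f approx=%10.6f diff=%g\n",
                        x, reference, approx, reference - approx);
        }
        return 0;
    }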
diff --git a/modules/dnn/src/onnx/onnx_importer.cpp b/modules/dnn/src/onnx/onnx_importer.cpp
index eac422f6b0..2e1c185bbe 100644
--- a/modules/dnn/src/onnx/onnx_importer.cpp
+++ b/modules/dnn/src/onnx/onnx_importer.cpp
@@ -1342,32 +1342,64 @@ void ONNXImporter::populateNet(Net dstNet)
         else if (layer_type == "Gather")
         {
             CV_Assert(node_proto.input_size() == 2);
-            Mat input = getBlob(node_proto, constBlobs, 0);
             Mat indexMat = getBlob(node_proto, constBlobs, 1);
             CV_Assert_N(indexMat.type() == CV_32S, indexMat.total() == 1);
             int index = indexMat.at<int>(0);
+            int axis = layerParams.get<int>("axis", 0);
 
-            Mat out;
-            if (layerParams.has("axis"))
+            if ((constBlobs.find(node_proto.input(0)) != constBlobs.end()))
             {
-                int axis = layerParams.get<int>("axis");
-
+                Mat input = getBlob(node_proto, constBlobs, 0);
+                Mat out;
                 std::vector<Range> ranges(input.dims, Range::all());
                 ranges[axis] = Range(index, index + 1);
 
                 out = input(ranges);
+                MatShape outShape = shape(out);
+                if (outShape.size() > 1)
+                {
+                    outShape.erase(outShape.begin() + axis);
+                    out = out.reshape(0, outShape);
+                }
+                addConstant(layerParams.name, out, constBlobs, outShapes);
+                continue;
             }
             else
             {
-                CV_Assert(index < input.total());
-                const int dims = input.dims;
-                input = input.reshape(1, 1);
-                input.dims = 2;
-                out = input.reshape(1, 1).colRange(index, index + 1);
-                out.dims = dims;
+                shapeIt = outShapes.find(node_proto.input(0));
+                CV_Assert(shapeIt != outShapes.end());
+                MatShape inpShape = shapeIt->second;
+
+                LayerParams sliceLp;
+                sliceLp.type = "Slice";
+                sliceLp.name = inpShape.size() > 1 ? layerParams.name + "/slice" : layerParams.name;
+                std::vector<int> begin(inpShape.size(), 0);
+                std::vector<int> end(inpShape.size(), -1);
+                begin[axis] = index;
+                end[axis] = index + 1;
+
+                cv::dnn::DictValue paramBegin = cv::dnn::DictValue::arrayInt(begin.data(), begin.size());
+                cv::dnn::DictValue paramEnd = cv::dnn::DictValue::arrayInt(end.data(), end.size());
+                sliceLp.set("begin", paramBegin);
+                sliceLp.set("end", paramEnd);
+
+                if (inpShape.size() > 1)
+                {
+                    opencv_onnx::NodeProto proto;
+                    proto.add_input(node_proto.input(0));
+                    proto.add_output(sliceLp.name);
+                    addLayer(dstNet, sliceLp, proto, layer_id, outShapes);
+
+                    inpShape.erase(inpShape.begin() + axis);
+                    layerParams.type = "Reshape";
+                    layerParams.set("dim", DictValue::arrayInt(&inpShape[0], inpShape.size()));
+                    node_proto.set_input(0, sliceLp.name);
+                }
+                else
+                {
+                    layerParams = sliceLp;
+                }
             }
-            addConstant(layerParams.name, out, constBlobs, outShapes);
-            continue;
         }
         else if (layer_type == "Concat")
         {
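Note on the Gather rework above: a constant input is still folded at import time, while a runtime tensor now becomes a Slice layer plus, for multi-dimensional inputs, a Reshape that drops the gathered axis. A standalone sketch (plain Mat code, not importer code) of the slice semantics involved:

    // Gathering `index` along `axis` is a one-element Range slice; the
    // importer additionally erases that axis from the output shape.
    #include <opencv2/core.hpp>
    #include <iostream>

    int main()
    {
        cv::Mat input = (cv::Mat_<float>(3, 4) << 0, 1, 2,  3,
                                                  4, 5, 6,  7,
                                                  8, 9, 10, 11);
        const int axis = 0, index = 2;

        std::vector<cv::Range> ranges(input.dims, cv::Range::all());
        ranges[axis] = cv::Range(index, index + 1); // the "Slice" step
        cv::Mat out = input(ranges);                // 1 x 4 view of row 2

        std::cout << out << std::endl;              // [8, 9, 10, 11]
        return 0;
    }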
diff --git a/modules/dnn/test/test_onnx_importer.cpp b/modules/dnn/test/test_onnx_importer.cpp
index 7bb5a6fef2..86dfcae080 100644
--- a/modules/dnn/test/test_onnx_importer.cpp
+++ b/modules/dnn/test/test_onnx_importer.cpp
@@ -114,6 +114,17 @@ TEST_P(Test_ONNX_layers, Convolution)
     testONNXModels("convolution");
 }
 
+TEST_P(Test_ONNX_layers, Gather)
+{
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_MYRIAD)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
+    testONNXModels("gather");
+    // GPU plugin does not support Slice for constant inputs
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL, CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
+    testONNXModels("gather_scalar", npy, 0, 0, false, false);
+}
+
 TEST_P(Test_ONNX_layers, Convolution3D)
 {
 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
diff --git a/modules/imgproc/include/opencv2/imgproc.hpp b/modules/imgproc/include/opencv2/imgproc.hpp
index d23f2b0db9..6c22ea4f8f 100644
--- a/modules/imgproc/include/opencv2/imgproc.hpp
+++ b/modules/imgproc/include/opencv2/imgproc.hpp
@@ -401,7 +401,7 @@ enum ConnectedComponentsTypes {
 //! connected components algorithm
 enum ConnectedComponentsAlgorithmsTypes {
-    CCL_WU      = 0,  //!< SAUF algorithm for 8-way connectivity, SAUF algorithm for 4-way connectivity
+    CCL_WU      = 0,  //!< SAUF @cite Wu2009 algorithm for 8-way connectivity, SAUF algorithm for 4-way connectivity
     CCL_DEFAULT = -1, //!< BBDT algorithm for 8-way connectivity, SAUF algorithm for 4-way connectivity
     CCL_GRANA   = 1   //!< BBDT algorithm for 8-way connectivity, SAUF algorithm for 4-way connectivity
 };
@@ -3747,7 +3747,7 @@
 image with 4 or 8 way connectivity - returns N, the total number of labels [0, N-1] where 0
 represents the background label. ltype specifies the output label image type, an important
 consideration based on the total number of labels or alternatively the total number of pixels in
 the source image. ccltype specifies the connected components labeling algorithm to use, currently
-Grana (BBDT) and Wu's (SAUF) algorithms are supported, see the #ConnectedComponentsAlgorithmsTypes
+Grana (BBDT) and Wu's (SAUF) @cite Wu2009 algorithms are supported, see the #ConnectedComponentsAlgorithmsTypes
 for details. Note that SAUF algorithm forces a row major ordering of labels while BBDT does not.
 This function uses parallel version of both Grana and Wu's algorithms if at least one allowed
 parallel framework is enabled and if the rows of the image are at least twice the number returned by #getNumberOfCPUs.
@@ -3779,7 +3779,7 @@
 image with 4 or 8 way connectivity - returns N, the total number of labels [0, N-1] where 0
 represents the background label. ltype specifies the output label image type, an important
 consideration based on the total number of labels or alternatively the total number of pixels in
 the source image. ccltype specifies the connected components labeling algorithm to use, currently
-Grana's (BBDT) and Wu's (SAUF) algorithms are supported, see the #ConnectedComponentsAlgorithmsTypes
+Grana's (BBDT) and Wu's (SAUF) @cite Wu2009 algorithms are supported, see the #ConnectedComponentsAlgorithmsTypes
 for details. Note that SAUF algorithm forces a row major ordering of labels while BBDT does not.
 This function uses parallel version of both Grana and Wu's algorithms (statistics included) if
 at least one allowed parallel framework is enabled and if the rows of the image are at least twice the number returned by #getNumberOfCPUs.
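Note on the ccltype parameter documented above: it is exposed through the connectedComponentsWithAlgorithm overload of connectedComponents. A minimal usage sketch (binary 8-bit single-channel input, as the API requires):

    // Label a tiny binary image with the SAUF (CCL_WU) algorithm.
    #include <opencv2/imgproc.hpp>
    #include <iostream>

    int main()
    {
        cv::Mat bw = (cv::Mat_<uchar>(3, 5) << 255, 255, 0,   0,   0,
                                               0,   0,   0,   255, 0,
                                               0,   0,   0,   255, 255);
        cv::Mat labels;
        int n = cv::connectedComponents(bw, labels, 8, CV_32S, cv::CCL_WU);
        std::cout << n << " labels (background included)\n" << labels << std::endl;
        return 0;
    }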
diff --git a/modules/videoio/include/opencv2/videoio.hpp b/modules/videoio/include/opencv2/videoio.hpp
index 7aa71857da..eb5645ab77 100644
--- a/modules/videoio/include/opencv2/videoio.hpp
+++ b/modules/videoio/include/opencv2/videoio.hpp
@@ -178,6 +178,8 @@ enum VideoCaptureProperties {
        CAP_PROP_WB_TEMPERATURE=45, //!< white-balance color temperature
        CAP_PROP_CODEC_PIXEL_FORMAT =46,    //!< (read-only) codec's pixel format. 4-character code - see VideoWriter::fourcc . Subset of [AV_PIX_FMT_*](https://github.com/FFmpeg/FFmpeg/blob/master/libavcodec/raw.c) or -1 if unknown
        CAP_PROP_BITRATE       =47, //!< (read-only) Video bitrate in kbits/s
+       CAP_PROP_ORIENTATION_META=48, //!< (read-only) Frame rotation defined by stream meta (applicable for FFmpeg back-end only)
+       CAP_PROP_ORIENTATION_AUTO=49, //!< if true - rotates output frames of CvCapture considering video file's metadata (applicable for FFmpeg back-end only) (https://github.com/opencv/opencv/issues/15499)
 #ifndef CV_DOXYGEN
        CV__CAP_PROP_LATEST
 #endif
diff --git a/modules/videoio/src/cap_ffmpeg.cpp b/modules/videoio/src/cap_ffmpeg.cpp
index b436b75e11..1c73f6a09c 100644
--- a/modules/videoio/src/cap_ffmpeg.cpp
+++ b/modules/videoio/src/cap_ffmpeg.cpp
@@ -90,7 +90,11 @@ public:
         if (!ffmpegCapture ||
             !icvRetrieveFrame_FFMPEG_p(ffmpegCapture, &data, &step, &width, &height, &cn))
             return false;
-        cv::Mat(height, width, CV_MAKETYPE(CV_8U, cn), data, step).copyTo(frame);
+
+        cv::Mat tmp(height, width, CV_MAKETYPE(CV_8U, cn), data, step);
+        this->rotateFrame(tmp);
+        tmp.copyTo(frame);
+
         return true;
     }
     virtual bool open( const cv::String& filename )
@@ -113,6 +117,30 @@ public:
 
 protected:
     CvCapture_FFMPEG* ffmpegCapture;
+
+    void rotateFrame(cv::Mat &mat) const
+    {
+        bool rotation_auto = 0 != getProperty(CAP_PROP_ORIENTATION_AUTO);
+        int rotation_angle = static_cast<int>(getProperty(CAP_PROP_ORIENTATION_META));
+
+        if(!rotation_auto || rotation_angle%360 == 0)
+        {
+            return;
+        }
+
+        cv::RotateFlags flag;
+        if(rotation_angle == 90 || rotation_angle == -270) { // Rotate clockwise 90 degrees
+            flag = cv::ROTATE_90_CLOCKWISE;
+        } else if(rotation_angle == 270 || rotation_angle == -90) { // Rotate clockwise 270 degrees
+            flag = cv::ROTATE_90_COUNTERCLOCKWISE;
+        } else if(rotation_angle == 180 || rotation_angle == -180) { // Rotate clockwise 180 degrees
+            flag = cv::ROTATE_180;
+        } else { // Unsupported rotation
+            return;
+        }
+
+        cv::rotate(mat, mat, flag);
+    }
 };
 
 } // namespace
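Note on the two new properties from the user's side: CAP_PROP_ORIENTATION_META reports the stream's rotation tag, and CAP_PROP_ORIENTATION_AUTO asks the FFmpeg backend to apply it before returning frames. A minimal sketch ("video.mp4" is a placeholder path):

    #include <opencv2/videoio.hpp>
    #include <iostream>

    int main()
    {
        cv::VideoCapture cap("video.mp4", cv::CAP_FFMPEG);
        if (!cap.isOpened())
            return 1;

        std::cout << "stream rotation: "
                  << cap.get(cv::CAP_PROP_ORIENTATION_META) << " degrees\n";

        cap.set(cv::CAP_PROP_ORIENTATION_AUTO, 1); // rotate decoded frames

        cv::Mat frame;
        cap >> frame; // already upright if the backend honors the property
        return 0;
    }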
diff --git a/modules/videoio/src/cap_ffmpeg_impl.hpp b/modules/videoio/src/cap_ffmpeg_impl.hpp
index fb1a927fe8..97524efaa3 100644
--- a/modules/videoio/src/cap_ffmpeg_impl.hpp
+++ b/modules/videoio/src/cap_ffmpeg_impl.hpp
@@ -485,6 +485,7 @@ struct CvCapture_FFMPEG
     bool setProperty(int, double);
     bool grabFrame();
     bool retrieveFrame(int, unsigned char** data, int* step, int* width, int* height, int* cn);
+    void rotateFrame(cv::Mat &mat) const;
 
     void init();
@@ -500,6 +501,7 @@ struct CvCapture_FFMPEG
     double r2d(AVRational r) const;
     int64_t dts_to_frame_number(int64_t dts);
     double dts_to_sec(int64_t dts) const;
+    void get_rotation_angle();
 
     AVFormatContext * ic;
     AVCodec * avcodec;
@@ -515,6 +517,8 @@ struct CvCapture_FFMPEG
 
     int64_t frame_number, first_frame_number;
 
+    bool   rotation_auto;
+    int    rotation_angle; // valid 0, 90, 180, 270
     double eps_zero;
 /*
    'filename' contains the filename of the videosource,
@@ -563,8 +567,17 @@ void CvCapture_FFMPEG::init()
     frame_number = 0;
     eps_zero = 0.000025;
 
-#if LIBAVFORMAT_BUILD >= CALC_FFMPEG_VERSION(52, 111, 0)
+    rotation_angle = 0;
+
+#if (LIBAVFORMAT_BUILD >= CALC_FFMPEG_VERSION(52, 111, 0))
+#if (LIBAVUTIL_BUILD >= CALC_FFMPEG_VERSION(52, 92, 100))
+    rotation_auto = true;
+#else
+    rotation_auto = false;
+#endif
     dict = NULL;
+#else
+    rotation_auto = false;
 #endif
 
     rawMode = false;
@@ -1033,6 +1046,7 @@ bool CvCapture_FFMPEG::open( const char* _filename )
                 frame.cn = 3;
                 frame.step = 0;
                 frame.data = NULL;
+                get_rotation_angle();
                 break;
             }
         }
@@ -1283,7 +1297,6 @@ bool CvCapture_FFMPEG::grabFrame()
     return valid;
 }
 
-
 bool CvCapture_FFMPEG::retrieveFrame(int, unsigned char** data, int* step, int* width, int* height, int* cn)
 {
     if (!video_st)
@@ -1392,9 +1405,9 @@ double CvCapture_FFMPEG::getProperty( int property_id ) const
     case CAP_PROP_FRAME_COUNT:
         return (double)get_total_frames();
     case CAP_PROP_FRAME_WIDTH:
-        return (double)frame.width;
+        return (double)((rotation_auto && rotation_angle%180) ? frame.height : frame.width);
     case CAP_PROP_FRAME_HEIGHT:
-        return (double)frame.height;
+        return (double)((rotation_auto && rotation_angle%180) ? frame.width : frame.height);
     case CAP_PROP_FPS:
         return get_fps();
     case CAP_PROP_FOURCC:
@@ -1438,6 +1451,15 @@ double CvCapture_FFMPEG::getProperty( int property_id ) const
         break;
     case CAP_PROP_BITRATE:
         return static_cast<double>(get_bitrate());
+    case CAP_PROP_ORIENTATION_META:
+        return static_cast<double>(rotation_angle);
+    case CAP_PROP_ORIENTATION_AUTO:
+#if ((LIBAVFORMAT_BUILD >= CALC_FFMPEG_VERSION(52, 111, 0)) && \
+     (LIBAVUTIL_BUILD >= CALC_FFMPEG_VERSION(52, 94, 100)))
+        return static_cast<double>(rotation_auto);
+#else
+        return 0;
+#endif
     default:
         break;
     }
@@ -1516,6 +1538,17 @@ double CvCapture_FFMPEG::dts_to_sec(int64_t dts) const
         r2d(ic->streams[video_stream]->time_base);
 }
 
+void CvCapture_FFMPEG::get_rotation_angle()
+{
+    rotation_angle = 0;
+#if ((LIBAVFORMAT_BUILD >= CALC_FFMPEG_VERSION(52, 111, 0)) && \
+     (LIBAVUTIL_BUILD >= CALC_FFMPEG_VERSION(52, 94, 100)))
+    AVDictionaryEntry *rotate_tag = av_dict_get(video_st->metadata, "rotate", NULL, 0);
+    if (rotate_tag != NULL)
+        rotation_angle = atoi(rotate_tag->value);
+#endif
+}
+
 void CvCapture_FFMPEG::seek(int64_t _frame_number)
 {
     _frame_number = std::min(_frame_number, get_total_frames());
@@ -1611,6 +1644,16 @@ bool CvCapture_FFMPEG::setProperty( int property_id, double value )
         if (value == -1)
             return setRaw();
         return false;
+    case CAP_PROP_ORIENTATION_AUTO:
+#if ((LIBAVFORMAT_BUILD >= CALC_FFMPEG_VERSION(52, 111, 0)) && \
+     (LIBAVUTIL_BUILD >= CALC_FFMPEG_VERSION(52, 94, 100)))
+        rotation_auto = static_cast<bool>(value);
+        return true;
+#else
+        rotation_auto = false;
+        return false;
+#endif
+        break;
     default:
         return false;
     }
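Note on the rotation_angle % 180 test in getProperty above: it swaps the reported width and height exactly when auto-rotation is enabled and the stream is rotated 90 or 270 degrees. A tiny standalone check of that logic (names are illustrative, not backend code):

    #include <iostream>

    static void reportSize(int w, int h, int rotation_angle, bool rotation_auto)
    {
        int outW = (rotation_auto && rotation_angle % 180) ? h : w;
        int outH = (rotation_auto && rotation_angle % 180) ? w : h;
        std::cout << outW << "x" << outH << std::endl;
    }

    int main()
    {
        reportSize(672, 384, 90, true);   // 384x672 - dimensions swapped
        reportSize(672, 384, 90, false);  // 672x384 - auto-rotation off
        reportSize(672, 384, 180, true);  // 672x384 - 180 keeps dimensions
        return 0;
    }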
diff --git a/modules/videoio/test/test_ffmpeg.cpp b/modules/videoio/test/test_ffmpeg.cpp
index 7fb8339dea..4bda7f37a8 100644
--- a/modules/videoio/test/test_ffmpeg.cpp
+++ b/modules/videoio/test/test_ffmpeg.cpp
@@ -399,4 +399,64 @@ const ffmpeg_cap_properties_param_t videoio_ffmpeg_properties[] = {
 
 INSTANTIATE_TEST_CASE_P(videoio, ffmpeg_cap_properties, testing::ValuesIn(videoio_ffmpeg_properties));
 
+
+// related issue: https://github.com/opencv/opencv/issues/15499
+TEST(videoio, mp4_orientation_meta_auto)
+{
+    if (!videoio_registry::hasBackend(CAP_FFMPEG))
+        throw SkipTestException("FFmpeg backend was not found");
+
+    string video_file = string(cvtest::TS::ptr()->get_data_path()) + "video/big_buck_bunny_rotated.mp4";
+
+    VideoCapture cap;
+    EXPECT_NO_THROW(cap.open(video_file, CAP_FFMPEG));
+    ASSERT_TRUE(cap.isOpened()) << "Can't open the video: " << video_file << " with backend " << CAP_FFMPEG << std::endl;
+
+    cap.set(CAP_PROP_ORIENTATION_AUTO, true);
+    if (cap.get(CAP_PROP_ORIENTATION_AUTO) == 0)
+        throw SkipTestException("FFmpeg frame rotation metadata is not supported");
+
+    Size actual;
+    EXPECT_NO_THROW(actual = Size((int)cap.get(CAP_PROP_FRAME_WIDTH),
+                                  (int)cap.get(CAP_PROP_FRAME_HEIGHT)));
+    EXPECT_EQ(384, actual.width);
+    EXPECT_EQ(672, actual.height);
+
+    Mat frame;
+
+    cap >> frame;
+
+    ASSERT_EQ(384, frame.cols);
+    ASSERT_EQ(672, frame.rows);
+}
+
+// related issue: https://github.com/opencv/opencv/issues/15499
+TEST(videoio, mp4_orientation_no_rotation)
+{
+    if (!videoio_registry::hasBackend(CAP_FFMPEG))
+        throw SkipTestException("FFmpeg backend was not found");
+
+    string video_file = string(cvtest::TS::ptr()->get_data_path()) + "video/big_buck_bunny_rotated.mp4";
+
+    VideoCapture cap;
+    EXPECT_NO_THROW(cap.open(video_file, CAP_FFMPEG));
+    cap.set(CAP_PROP_ORIENTATION_AUTO, 0);
+    ASSERT_TRUE(cap.isOpened()) << "Can't open the video: " << video_file << " with backend " << CAP_FFMPEG << std::endl;
+    ASSERT_FALSE(cap.get(CAP_PROP_ORIENTATION_AUTO));
+
+    Size actual;
+    EXPECT_NO_THROW(actual = Size((int)cap.get(CAP_PROP_FRAME_WIDTH),
+                                  (int)cap.get(CAP_PROP_FRAME_HEIGHT)));
+    EXPECT_EQ(672, actual.width);
+    EXPECT_EQ(384, actual.height);
+
+    Mat frame;
+
+    cap >> frame;
+
+    ASSERT_EQ(672, frame.cols);
+    ASSERT_EQ(384, frame.rows);
+}
+
 }} // namespace
diff --git a/samples/cpp/create_mask.cpp b/samples/cpp/create_mask.cpp
index dc54953678..67b0ec9bbd 100644
--- a/samples/cpp/create_mask.cpp
+++ b/samples/cpp/create_mask.cpp
@@ -74,9 +74,7 @@ void mouseHandler(int event, int x, int y, int, void*)
         final = Mat::zeros(src.size(), CV_8UC3);
         mask = Mat::zeros(src.size(), CV_8UC1);
 
-        vector<vector<Point> > vpts;
-        vpts.push_back(pts);
-        fillPoly(mask, vpts, Scalar(255, 255, 255), 8, 0);
+        fillPoly(mask, pts, Scalar(255, 255, 255), 8, 0);
         bitwise_and(src, src, final, mask);
         imshow("Mask", mask);
         imshow("Result", final);
diff --git a/samples/cpp/intersectExample.cpp b/samples/cpp/intersectExample.cpp
index a8a897241f..187aebbea9 100644
--- a/samples/cpp/intersectExample.cpp
+++ b/samples/cpp/intersectExample.cpp
@@ -50,9 +50,7 @@ static float drawIntersection(Mat &image, vector<Point> polygon1, vector<Point>
         {
             fillColor = Scalar(0, 0, 255);
         }
-        vector<vector<Point> > pp;
-        pp.push_back(intersectionPolygon);
-        fillPoly(image, pp, fillColor);
+        fillPoly(image, intersectionPolygon, fillColor);
     }
     polylines(image, polygons, true, Scalar(0, 0, 0));
diff --git a/samples/cpp/squares.cpp b/samples/cpp/squares.cpp
index 36535d1ee0..042a716f90 100644
--- a/samples/cpp/squares.cpp
+++ b/samples/cpp/squares.cpp
@@ -121,21 +121,6 @@ static void findSquares( const Mat& image, vector<vector<Point> >& squares )
     }
 }
 
-
-// the function draws all the squares in the image
-static void drawSquares( Mat& image, const vector<vector<Point> >& squares )
-{
-    for( size_t i = 0; i < squares.size(); i++ )
-    {
-        const Point* p = &squares[i][0];
-        int n = (int)squares[i].size();
-        polylines(image, &p, &n, 1, true, Scalar(0,255,0), 3, LINE_AA);
-    }
-
-    imshow(wndname, image);
-}
-
-
 int main(int argc, char** argv)
 {
     static const char* names[] = { "pic1.png", "pic2.png", "pic3.png",
@@ -148,8 +133,6 @@ int main(int argc, char** argv)
         names[1] = "0";
     }
 
-    vector<vector<Point> > squares;
-
     for( int i = 0; names[i] != 0; i++ )
     {
         string filename = samples::findFile(names[i]);
@@ -160,8 +143,11 @@ int main(int argc, char** argv)
             continue;
         }
 
+        vector<vector<Point> > squares;
         findSquares(image, squares);
-        drawSquares(image, squares);
+
+        polylines(image, squares, true, Scalar(0, 255, 0), 3, LINE_AA);
+        imshow(wndname, image);
 
         int c = waitKey();
         if( c == 27 )
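Note on the three sample simplifications above: they rely on fillPoly and polylines accepting a single polygon (a plain vector<Point>) instead of requiring a vector of polygons. A minimal sketch of the simplified call pattern, assuming that overload behavior:

    #include <opencv2/imgproc.hpp>
    #include <opencv2/highgui.hpp>
    #include <vector>

    int main()
    {
        cv::Mat canvas = cv::Mat::zeros(200, 200, CV_8UC3);
        std::vector<cv::Point> triangle = {
            cv::Point(100, 20), cv::Point(20, 180), cv::Point(180, 180)
        };

        cv::fillPoly(canvas, triangle, cv::Scalar(255, 255, 255)); // no wrapper vector
        cv::polylines(canvas, triangle, true, cv::Scalar(0, 0, 255), 2, cv::LINE_AA);

        cv::imshow("polygon", canvas);
        cv::waitKey();
        return 0;
    }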
diff --git a/samples/cpp/train_HOG.cpp b/samples/cpp/train_HOG.cpp
index 356ff0ec3f..4a160fe4eb 100644
--- a/samples/cpp/train_HOG.cpp
+++ b/samples/cpp/train_HOG.cpp
@@ -74,9 +74,9 @@ void load_images( const String & dirname, vector< Mat > & img_lst, bool showImages = false )
     for ( size_t i = 0; i < files.size(); ++i )
     {
         Mat img = imread( files[i] ); // load the image
-        if ( img.empty() )            // invalid image, skip it.
+        if ( img.empty() )
         {
-            cout << files[i] << " is invalid!" << endl;
+            cout << files[i] << " is invalid!" << endl; // invalid image, skip it.
             continue;
         }
 
@@ -95,16 +95,13 @@ void sample_neg( const vector< Mat > & full_neg_lst, vector< Mat > & neg_lst, const Size & size )
     box.width = size.width;
     box.height = size.height;
 
-    const int size_x = box.width;
-    const int size_y = box.height;
-
     srand( (unsigned int)time( NULL ) );
 
     for ( size_t i = 0; i < full_neg_lst.size(); i++ )
         if ( full_neg_lst[i].cols > box.width && full_neg_lst[i].rows > box.height )
         {
-            box.x = rand() % ( full_neg_lst[i].cols - size_x );
-            box.y = rand() % ( full_neg_lst[i].rows - size_y );
+            box.x = rand() % ( full_neg_lst[i].cols - box.width );
+            box.y = rand() % ( full_neg_lst[i].rows - box.height );
             Mat roi = full_neg_lst[i]( box );
             neg_lst.push_back( roi.clone() );
         }
@@ -259,7 +256,7 @@ int main( int argc, char** argv )
     load_images( pos_dir, pos_lst, visualization );
     if ( pos_lst.size() > 0 )
    {
-        clog << "...[done]" << endl;
+        clog << "...[done] " << pos_lst.size() << " files." << endl;
     }
     else
     {
@@ -287,22 +284,25 @@ int main( int argc, char** argv )
     }
 
     clog << "Negative images are being loaded...";
-    load_images( neg_dir, full_neg_lst, false );
+    load_images( neg_dir, full_neg_lst, visualization );
+    clog << "...[done] " << full_neg_lst.size() << " files." << endl;
+
+    clog << "Negative images are being processed...";
     sample_neg( full_neg_lst, neg_lst, pos_image_size );
-    clog << "...[done]" << endl;
+    clog << "...[done] " << neg_lst.size() << " files." << endl;
 
     clog << "Histogram of Gradients are being calculated for positive images...";
     computeHOGs( pos_image_size, pos_lst, gradient_lst, flip_samples );
     size_t positive_count = gradient_lst.size();
     labels.assign( positive_count, +1 );
-    clog << "...[done] ( positive count : " << positive_count << " )" << endl;
+    clog << "...[done] ( positive images count : " << positive_count << " )" << endl;
 
     clog << "Histogram of Gradients are being calculated for negative images...";
     computeHOGs( pos_image_size, neg_lst, gradient_lst, flip_samples );
     size_t negative_count = gradient_lst.size() - positive_count;
     labels.insert( labels.end(), negative_count, -1 );
     CV_Assert( positive_count < labels.size() );
-    clog << "...[done] ( negative count : " << negative_count << " )" << endl;
+    clog << "...[done] ( negative images count : " << negative_count << " )" << endl;
 
     Mat train_data;
     convert_to_ml( gradient_lst, train_data );
@@ -324,7 +324,7 @@ int main( int argc, char** argv )
 
     if ( train_twice )
     {
-        clog << "Testing trained detector on negative images. This may take a few minutes...";
+        clog << "Testing trained detector on negative images. This might take a few minutes...";
         HOGDescriptor my_hog;
         my_hog.winSize = pos_image_size;
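Note on sample_neg(): the crop offsets now derive directly from box.width and box.height, but the sample still seeds with srand() and draws with rand(). A sketch of the same random-crop logic written with cv::RNG instead (an alternative, not part of the patch; names are illustrative):

    #include <opencv2/core.hpp>

    // Same crop logic as sample_neg(), with cv::RNG in place of srand()/rand().
    static cv::Mat randomCrop(const cv::Mat& img, cv::Size size, cv::RNG& rng)
    {
        CV_Assert(img.cols > size.width && img.rows > size.height);
        cv::Rect box(rng.uniform(0, img.cols - size.width),
                     rng.uniform(0, img.rows - size.height),
                     size.width, size.height);
        return img(box).clone(); // clone so the crop owns its pixels
    }

    int main()
    {
        cv::RNG rng((uint64)cv::getTickCount());
        cv::Mat negative(480, 640, CV_8UC3, cv::Scalar::all(127));
        cv::Mat crop = randomCrop(negative, cv::Size(96, 160), rng);
        return crop.empty() ? 1 : 0;
    }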