Merge pull request #10859 from luzpaz:misc-modules-typos-cont-2

Author: Alexander Alekhin
Date:   2018-02-14 09:56:11 +00:00
Commit: cfe84b953c
75 changed files with 195 additions and 195 deletions


@ -65,7 +65,7 @@ namespace cv { namespace cudacodec {
////////////////////////////////// Video Encoding //////////////////////////////////
// Works only under Windows.
// Supports olny H264 video codec and AVI files.
// Supports only H264 video codec and AVI files.
enum SurfaceFormat
{


@ -107,7 +107,7 @@ namespace
return ARGBpixel;
}
// CUDA kernel for outputing the final ARGB output from NV12
// CUDA kernel for outputting the final ARGB output from NV12
#define COLOR_COMPONENT_BIT_SIZE 10
#define COLOR_COMPONENT_MASK 0x3FF


@ -68,7 +68,7 @@ public:
// Spins until frame becomes available or decoding gets canceled.
// If the requested frame is available the method returns true.
// If decoding was interupted before the requested frame becomes
// If decoding was interrupted before the requested frame becomes
// available, the method returns false.
bool waitUntilFrameAvailable(int pictureIndex);


@ -179,7 +179,7 @@ namespace
GpuMat decodedFrame = videoDecoder_->mapFrame(frameInfo.first.picture_index, frameInfo.second);
// perform post processing on the CUDA surface (performs colors space conversion and post processing)
// comment this out if we inclue the line of code seen above
// comment this out if we include the line of code seen above
videoDecPostProcessFrame(decodedFrame, frame, videoDecoder_->targetWidth(), videoDecoder_->targetHeight());
// unmap video frame


@ -257,7 +257,7 @@ PARAM_TEST_CASE(BruteForceMatcher, cv::cuda::DeviceInfo, NormCode, DescriptorSiz
rng.fill(queryBuf, cv::RNG::UNIFORM, cv::Scalar::all(0), cv::Scalar::all(3));
queryBuf.convertTo(queryBuf, CV_32FC1);
// Generate train decriptors as follows:
// Generate train descriptors as follows:
// copy each query descriptor to train set countFactor times
// and perturb some one element of the copied descriptors in
// in ascending order. General boundaries of the perturbation
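The scheme this comment describes, as a minimal sketch (countFactor and the perturbation step are illustrative values, not the test's exact constants):

    #include <opencv2/core.hpp>

    // Replicate each query descriptor countFactor times and perturb one
    // element of every copy, so each query has a graded set of near matches.
    cv::Mat makeTrainSet(const cv::Mat& query, int countFactor)
    {
        cv::Mat train(query.rows * countFactor, query.cols, CV_32FC1);
        for (int qi = 0; qi < query.rows; ++qi)
            for (int c = 0; c < countFactor; ++c)
            {
                cv::Mat row = train.row(qi * countFactor + c);
                query.row(qi).copyTo(row);
                // larger c => larger perturbation (ascending order)
                row.at<float>(0, c % query.cols) += 0.25f * (c + 1);
            }
        return train;
    }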


@ -460,7 +460,7 @@ Ptr<Filter> cv::cuda::createDerivFilter(int srcType, int dstType, int dx, int dy
if (scale != 1)
{
// usually the smoothing part is the slowest to compute,
// so try to scale it instead of the faster differenciating part
// so try to scale it instead of the faster differentiating part
if (dx == 0)
kx *= scale;
else
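What the comment is getting at: the filter is separable, so a global scale can be folded into whichever 1-D kernel sits on the smoothing (non-derivative) axis. A sketch with the CPU API, assuming src, dst, ddepth, dx, dy, ksize and scale as in the surrounding function:

    #include <opencv2/imgproc.hpp>

    cv::Mat kx, ky;
    cv::getDerivKernels(kx, ky, dx, dy, ksize, /*normalize=*/false, CV_32F);
    if (scale != 1)
    {
        // scaling one 1-D kernel scales the whole separable result
        if (dx == 0)
            kx *= scale;   // kx is the smoothing kernel when dx == 0
        else
            ky *= scale;
    }
    cv::sepFilter2D(src, dst, ddepth, kx, ky);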


@ -700,7 +700,7 @@ CV_EXPORTS Ptr<TemplateMatching> createTemplateMatching(int srcType, int method,
/** @brief Performs bilateral filtering of passed image
@param src Source image. Supports only (channles != 2 && depth() != CV_8S && depth() != CV_32S
@param src Source image. Supports only (channels != 2 && depth() != CV_8S && depth() != CV_32S
&& depth() != CV_64F).
@param dst Destination imagwe.
@param kernel_size Kernel window size.


@ -134,7 +134,7 @@ namespace clahe
cudaSafeCall( cudaDeviceSynchronize() );
}
__global__ void tranformKernel(const PtrStepSzb src, PtrStepb dst, const PtrStepb lut, const int2 tileSize, const int tilesX, const int tilesY)
__global__ void transformKernel(const PtrStepSzb src, PtrStepb dst, const PtrStepb lut, const int2 tileSize, const int tilesX, const int tilesY)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
@ -173,9 +173,9 @@ namespace clahe
const dim3 block(32, 8);
const dim3 grid(divUp(src.cols, block.x), divUp(src.rows, block.y));
cudaSafeCall( cudaFuncSetCacheConfig(tranformKernel, cudaFuncCachePreferL1) );
cudaSafeCall( cudaFuncSetCacheConfig(transformKernel, cudaFuncCachePreferL1) );
tranformKernel<<<grid, block, 0, stream>>>(src, dst, lut, tileSize, tilesX, tilesY);
transformKernel<<<grid, block, 0, stream>>>(src, dst, lut, tileSize, tilesX, tilesY);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)


@ -330,7 +330,7 @@ void cv::cuda::meanShiftSegmentation(InputArray _src, OutputArray _dst, int sp,
std::vector<SegmLink> edges;
edges.reserve(g.numv);
// Prepare edges connecting differnet components
// Prepare edges connecting different components
for (int v = 0; v < g.numv; ++v)
{
int c1 = comps.find(v);
@ -342,7 +342,7 @@ void cv::cuda::meanShiftSegmentation(InputArray _src, OutputArray _dst, int sp,
}
}
// Sort all graph's edges connecting differnet components (in asceding order)
// Sort all graph's edges connecting different components (in asceding order)
std::sort(edges.begin(), edges.end());
// Exclude small components (starting from the nearest couple)


@ -2147,8 +2147,8 @@ static NCVStatus loadFromXML(const cv::String &filename,
haar.ClassifierSize.width = oldCascade->orig_window_size.width;
haar.ClassifierSize.height = oldCascade->orig_window_size.height;
int stagesCound = oldCascade->count;
for(int s = 0; s < stagesCound; ++s) // by stages
int stagesCount = oldCascade->count;
for(int s = 0; s < stagesCount; ++s) // by stages
{
HaarStage64 curStage;
curStage.setStartClassifierRootNodeOffset(static_cast<Ncv32u>(haarClassifierNodes.size()));


@ -115,7 +115,7 @@ public:
virtual int getNumLevels() const = 0;
//! Threshold for the distance between features and SVM classifying plane.
//! Usually it is 0 and should be specfied in the detector coefficients (as the last free
//! Usually it is 0 and should be specified in the detector coefficients (as the last free
//! coefficient). But if the free coefficient is omitted (which is allowed), you can specify it
//! manually here.
virtual void setHitThreshold(double hit_threshold) = 0;


@ -574,7 +574,7 @@ namespace
int totalWidth = level.workArea.width / step;
total += totalWidth * (level.workArea.height / step);
// go to next pyramide level
// go to next pyramid level
level = level.next(scaleFactor_, image.size(), NxM, minObjectSize_);
area = level.workArea;


@ -104,7 +104,7 @@ __device__ __forceinline__ static void blockCopy(InIt beg, InIt end, OutIt out)
}
template <class InIt, class OutIt, class UnOp>
__device__ __forceinline__ static void blockTransfrom(InIt beg, InIt end, OutIt out, const UnOp& op)
__device__ __forceinline__ static void blockTransform(InIt beg, InIt end, OutIt out, const UnOp& op)
{
uint STRIDE = Block::blockSize();
InIt t = beg + Block::threadLineId();
@ -115,7 +115,7 @@ __device__ __forceinline__ static void blockTransfrom(InIt beg, InIt end, OutIt
}
template <class InIt1, class InIt2, class OutIt, class BinOp>
__device__ __forceinline__ static void blockTransfrom(InIt1 beg1, InIt1 end1, InIt2 beg2, OutIt out, const BinOp& op)
__device__ __forceinline__ static void blockTransform(InIt1 beg1, InIt1 end1, InIt2 beg2, OutIt out, const BinOp& op)
{
uint STRIDE = Block::blockSize();
InIt1 t1 = beg1 + Block::threadLineId();
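Both overloads implement the same block-wide strided loop: each thread starts at its lane index and advances by the block size, so the block as a whole covers the range with coalesced accesses. A host-side sketch of that loop shape (tid and stride stand in for Block::threadLineId() and Block::blockSize()):

    // thread 'tid' of 'stride' threads transforms elements tid, tid+stride, ...
    template <class InIt, class OutIt, class UnOp>
    void blockTransformSketch(InIt beg, InIt end, OutIt out,
                              const UnOp& op, unsigned tid, unsigned stride)
    {
        InIt t = beg + tid;
        OutIt o = out + (t - beg);
        for (; t < end; t += stride, o += stride)
            *o = op(*t);
    }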


@ -76,7 +76,7 @@ public:
//! copy constructor
__host__ GpuMat_(const GpuMat_& m);
//! copy/conversion contructor. If m is of different type, it's converted
//! copy/conversion constructor. If m is of different type, it's converted
__host__ explicit GpuMat_(const GpuMat& m, Allocator* allocator = defaultAllocator());
//! constructs a matrix on top of user-allocated data. step is in bytes(!!!), regardless of the type


@ -382,7 +382,7 @@ public:
@param _delta it compares \f$(size_{i}-size_{i-delta})/size_{i-delta}\f$
@param _min_area prune the area which smaller than minArea
@param _max_area prune the area which bigger than maxArea
@param _max_variation prune the area have simliar size to its children
@param _max_variation prune the area have similar size to its children
@param _min_diversity for color image, trace back to cut off mser with diversity less than min_diversity
@param _max_evolution for color image, the evolution steps
@param _area_threshold for color image, the area threshold to cause re-initialize


@ -142,7 +142,7 @@ void generateData( Mat& query, Mat& train, const int sourceType )
rng.fill( buf, RNG::UNIFORM, Scalar::all(0), Scalar(3) );
buf.convertTo( query, sourceType );
// Generate train decriptors as follows:
// Generate train descriptors as follows:
// copy each query descriptor to train set countFactor times
// and perturb some one element of the copied descriptors in
// in ascending order. General boundaries of the perturbation


@ -78,7 +78,7 @@ void KeyPointsFilter::retainBest(std::vector<KeyPoint>& keypoints, int n_points)
}
//first use nth element to partition the keypoints into the best and worst.
std::nth_element(keypoints.begin(), keypoints.begin() + n_points, keypoints.end(), KeypointResponseGreater());
//this is the boundary response, and in the case of FAST may be ambigous
//this is the boundary response, and in the case of FAST may be ambiguous
float ambiguous_response = keypoints[n_points - 1].response;
//use std::partition to grab all of the keypoints with the boundary response.
std::vector<KeyPoint>::const_iterator new_end =
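Condensed, the whole retainBest step looks roughly like the sketch below (not the exact library code; the comparator and tie handling follow the comments above):

    #include <algorithm>
    #include <vector>
    #include <opencv2/core.hpp>

    void retainBestSketch(std::vector<cv::KeyPoint>& kps, int n)
    {
        if (n <= 0 || (size_t)n >= kps.size())
            return;
        // partition so the n strongest responses come first
        std::nth_element(kps.begin(), kps.begin() + n - 1, kps.end(),
                         [](const cv::KeyPoint& a, const cv::KeyPoint& b)
                         { return a.response > b.response; });
        // the boundary response may be shared by several keypoints
        // (ambiguous), so also keep any tail keypoint that ties with it
        const float boundary = kps[n - 1].response;
        auto new_end = std::partition(kps.begin() + n, kps.end(),
                                      [boundary](const cv::KeyPoint& k)
                                      { return k.response >= boundary; });
        kps.resize(new_end - kps.begin());
    }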


@ -83,7 +83,7 @@ PARAM_TEST_CASE(BruteForceMatcher, int, int)
rng.fill(queryBuf, cv::RNG::UNIFORM, cv::Scalar::all(0), cv::Scalar::all(3));
queryBuf.convertTo(queryBuf, CV_32FC1);
// Generate train decriptors as follows:
// Generate train descriptors as follows:
// copy each query descriptor to train set countFactor times
// and perturb some one element of the copied descriptors in
// in ascending order. General boundaries of the perturbation


@ -227,7 +227,7 @@ protected:
if (!fs.isOpened())
{
fs.open(keypoints_filename, FileStorage::WRITE);
ASSERT_TRUE(fs.isOpened()) << "File for writting keypoints can not be opened.";
ASSERT_TRUE(fs.isOpened()) << "File for writing keypoints can not be opened.";
if (detector.empty())
{
Ptr<ORB> fd = ORB::create();


@ -170,7 +170,7 @@ void CV_DescriptorMatcherTest::generateData( Mat& query, Mat& train )
rng.fill( buf, RNG::UNIFORM, Scalar::all(0), Scalar(3) );
buf.convertTo( query, CV_32FC1 );
// Generate train decriptors as follows:
// Generate train descriptors as follows:
// copy each query descriptor to train set countFactor times
// and perturb some one element of the copied descriptors in
// in ascending order. General boundaries of the perturbation


@ -60,7 +60,7 @@ class ROISelector
Scalar(255, 0, 0), 2, 1);
}
// show the image bouding box
// show the image bounding box
imshow(windowName, selectorParams.image);
// reset the image
@ -177,7 +177,7 @@ class ROISelector
}
}
// save the keypressed characted
// save the keypressed character
int key;
Size imageSize;
};


@ -1596,7 +1596,7 @@ void CvWinProperties::showEvent(QShowEvent* evnt)
{
//why -1,-1 ?: do this trick because the first time the code is run,
//no value pos was saved so we let Qt move the window in the middle of its parent (event ignored).
//then hide will save the last position and thus, we want to retreive it (event accepted).
//then hide will save the last position and thus, we want to retrieve it (event accepted).
QPoint mypos(-1, -1);
QSettings settings("OpenCV2", objectName());
mypos = settings.value("pos", mypos).toPoint();


@ -258,7 +258,7 @@ private:
void* userdata;
};
//Both are top level window, so that a way to differenciate them.
//Both are top level window, so that a way to differentiate them.
//if (obj->metaObject ()->className () == "CvWindow") does not give me robust result
enum typeWindow { type_CvWindow = 1, type_CvWinProperties = 2 };


@ -910,7 +910,7 @@ CV_IMPL int cvNamedWindow( const char* name, int flags )
}
else
{
fprintf(stderr, "Failed to tranform process type: %d\n", (int) ret);
fprintf(stderr, "Failed to transform process type: %d\n", (int) ret);
fflush (stderr);
}
}


@ -469,7 +469,7 @@ bool GdalDecoder::readHeader(){
return false;
}
//extract the driver infomation
//extract the driver information
m_driver = m_dataset->GetDriver();
// if the driver failed, then exit


@ -517,7 +517,7 @@ bool PAMDecoder::readData( Mat& img )
/* the case where data fits the opencv matrix */
if (m_sampledepth == img.depth() && target_channels == m_channels && !bit_mode) {
/* special case for 16bit images with wrong endianess */
/* special case for 16bit images with wrong endianness */
if (m_sampledepth == CV_16U && !isBigEndian())
{
for (y = 0; y < m_height; y++, data += imp_stride )
@ -564,7 +564,7 @@ bool PAMDecoder::readData( Mat& img )
{
m_strm.getBytes( src, src_stride );
/* endianess correction */
/* endianness correction */
if( m_sampledepth == CV_16U && !isBigEndian() )
{
for( x = 0; x < src_elems_per_row; x++ )
@ -698,7 +698,7 @@ bool PAMEncoder::write( const Mat& img, const std::vector<int>& params )
if (img.depth() == CV_8U)
strm.putBytes( data, stride*height );
else if (img.depth() == CV_16U) {
/* fix endianess */
/* fix endianness */
if (!isBigEndian()) {
for( y = 0; y < height; y++ ) {
memcpy( buffer, img.ptr(y), stride );
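The endianness fix applied in these hunks is a per-sample byte swap for 16-bit data whenever the file's byte order differs from the host's; a minimal sketch:

    #include <cstdint>
    #include <cstddef>

    // swap the two bytes of each 16-bit sample in place
    static void swapBytes16(uint16_t* p, size_t n)
    {
        for (size_t i = 0; i < n; ++i)
            p[i] = (uint16_t)((p[i] << 8) | (p[i] >> 8));
    }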


@ -205,7 +205,7 @@ int RGBE_ReadHeader(FILE *fp, int *width, int *height, rgbe_header_info *info)
return rgbe_error(rgbe_read_error,NULL);
if (buf[0] == '\n') // end of the header
break;
else if (buf[0] == '#') // commment
else if (buf[0] == '#') // comment
continue;
else if (strcmp(buf,"FORMAT=32-bit_rle_rgbe\n") == 0)
hasFormat = true;


@ -4825,7 +4825,7 @@ for(int i = 0; i < it2.count; i++, ++it2)
class CV_EXPORTS LineIterator
{
public:
/** @brief intializes the iterator
/** @brief initializes the iterator
creates iterators for the line connecting pt1 and pt2
the line will be clipped on the image boundaries


@ -91,13 +91,13 @@
" return result;",
"\n",
" } catch(const cv::Exception& e) {",
" LOGD(\"Imgproc::n_1getTextSize() catched cv::Exception: %s\", e.what());",
" LOGD(\"Imgproc::n_1getTextSize() caught cv::Exception: %s\", e.what());",
" jclass je = env->FindClass(\"org/opencv/core/CvException\");",
" if(!je) je = env->FindClass(\"java/lang/Exception\");",
" env->ThrowNew(je, e.what());",
" return NULL;",
" } catch (...) {",
" LOGD(\"Imgproc::n_1getTextSize() catched unknown exception (...)\");",
" LOGD(\"Imgproc::n_1getTextSize() caught unknown exception (...)\");",
" jclass je = env->FindClass(\"java/lang/Exception\");",
" env->ThrowNew(je, \"Unknown exception in JNI code {core::getTextSize()}\");",
" return NULL;",


@ -754,7 +754,7 @@ cvApproxPoly( const void* array, int header_size,
}
else
{
CV_Error( CV_StsBadArg, "Input curves have uknown type" );
CV_Error( CV_StsBadArg, "Input curves have unknown type" );
}
}


@ -6961,7 +6961,7 @@ struct Lab2RGBinteger
bo = tab[bo];
}
// L, a, b shoule be in their natural range
// L, a, b should be in their natural range
inline void processLabToXYZ(const v_uint8x16& lv, const v_uint8x16& av, const v_uint8x16& bv,
v_int32x4& xiv00, v_int32x4& yiv00, v_int32x4& ziv00,
v_int32x4& xiv01, v_int32x4& yiv01, v_int32x4& ziv01,


@ -585,7 +585,7 @@ namespace cv{
const int h = img.rows;
const int w = img.cols;
//A quick and dirty upper bound for the maximimum number of labels.
//A quick and dirty upper bound for the maximum number of labels.
//Following formula comes from the fact that a 2x2 block in 4-way connectivity
//labeling can never have more than 2 new labels and 1 label for background.
//Worst case image example pattern:
@ -663,7 +663,7 @@ namespace cv{
const int h = img.rows;
const int w = img.cols;
//A quick and dirty upper bound for the maximimum number of labels.
//A quick and dirty upper bound for the maximum number of labels.
//Following formula comes from the fact that a 2x2 block in 4-way connectivity
//labeling can never have more than 2 new labels and 1 label for background.
//Worst case image example pattern:
@ -2536,7 +2536,7 @@ namespace cv{
const int h = img.rows;
const int w = img.cols;
//A quick and dirty upper bound for the maximimum number of labels.
//A quick and dirty upper bound for the maximum number of labels.
//Following formula comes from the fact that a 2x2 block in 8-connectivity case
//can never have more than 1 new label and 1 label for background.
//Worst case image example pattern:
@ -2598,7 +2598,7 @@ namespace cv{
const int h = img.rows;
const int w = img.cols;
//A quick and dirty upper bound for the maximimum number of labels.
//A quick and dirty upper bound for the maximum number of labels.
//Following formula comes from the fact that a 2x2 block in 8-connectivity case
//can never have more than 1 new label and 1 label for background.
//Worst case image example pattern:
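Written out, the bounds these comments derive (a sketch; the constant bookkeeping in the source may differ slightly):

    // Each 2x2 block contributes at most 2 new labels under 4-connectivity
    // and at most 1 under 8-connectivity, plus one background label.
    inline int maxLabels4Way(int h, int w) { return 2 * ((h + 1) / 2) * ((w + 1) / 2) + 1; }
    inline int maxLabels8Way(int h, int w) { return     ((h + 1) / 2) * ((w + 1) / 2) + 1; }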


@ -51,7 +51,7 @@ void cv::cornerSubPix( InputArray _image, InputOutputArray _corners,
int i, j, k;
int max_iters = (criteria.type & CV_TERMCRIT_ITER) ? MIN(MAX(criteria.maxCount, 1), MAX_ITERS) : MAX_ITERS;
double eps = (criteria.type & CV_TERMCRIT_EPS) ? MAX(criteria.epsilon, 0.) : 0;
eps *= eps; // use square of error in comparsion operations
eps *= eps; // use square of error in comparison operations
cv::Mat src = _image.getMat(), cornersmat = _corners.getMat();
int count = cornersmat.checkVector(2, CV_32F);
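The eps *= eps line is the usual trick of comparing squared distances so the loop never calls sqrt; the equivalent test, as a sketch:

    // dx, dy stand for the per-iteration corner shift
    bool smallEnough(double dx, double dy, double eps)
    {
        double eps2 = eps * eps;          // hoisted out of the loop in practice
        return dx * dx + dy * dy < eps2;  // same as sqrt(dx*dx + dy*dy) < eps
    }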


@ -237,7 +237,7 @@ namespace cv
ivx::Image::createAddressing(dst.cols, dst.rows, 2, (vx_int32)(dst.step)), dst.data);
//ATTENTION: VX_CONTEXT_IMMEDIATE_BORDER attribute change could lead to strange issues in multi-threaded environments
//since OpenVX standart says nothing about thread-safety for now
//since OpenVX standard says nothing about thread-safety for now
ivx::border_t prevBorder = ctx.immediateBorder();
ctx.setImmediateBorder(border, (vx_uint8)(0));
if(dx)
@ -481,7 +481,7 @@ void cv::Scharr( InputArray _src, OutputArray _dst, int ddepth, int dx, int dy,
if( scale != 1 )
{
// usually the smoothing part is the slowest to compute,
// so try to scale it instead of the faster differenciating part
// so try to scale it instead of the faster differentiating part
if( dx == 0 )
kx *= scale;
else


@ -1395,7 +1395,7 @@ FillEdgeCollection( Mat& img, std::vector<PolyEdge>& edges, const void* color )
{
if( last && last->y1 == y )
{
// exclude edge if y reachs its lower point
// exclude edge if y reaches its lower point
prelast->next = last->next;
last = last->next;
continue;
@ -1409,7 +1409,7 @@ FillEdgeCollection( Mat& img, std::vector<PolyEdge>& edges, const void* color )
}
else if( i < total )
{
// insert new edge into active list if y reachs its upper point
// insert new edge into active list if y reaches its upper point
prelast->next = e;
e->next = last;
prelast = e;


@ -184,7 +184,7 @@ public:
Mat Iyy( DELTA + kd.rows - 1, src.cols, dst.type() );
// inside the loop we always pass DELTA rows to the filter
// (note that the "proceed" method takes care of possibe overflow, since
// (note that the "proceed" method takes care of possible overflow, since
// it was given the actual image height in the "start" method)
// on output we can get:
// * < DELTA rows (the initial buffer accumulation stage)


@ -279,7 +279,7 @@ static int areaSign( Point2f a, Point2f b, Point2f c )
}
//---------------------------------------------------------------------
// Returns true iff point c lies on the closed segement ab.
// Returns true iff point c lies on the closed segment ab.
// Assumes it is already known that abc are collinear.
//---------------------------------------------------------------------
static bool between( Point2f a, Point2f b, Point2f c )
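Given that a, b, c are already known to be collinear, the test reduces to coordinate-wise bounds; a minimal sketch of the idea:

    #include <algorithm>
    #include <opencv2/core.hpp>

    static bool betweenSketch(cv::Point2f a, cv::Point2f b, cv::Point2f c)
    {
        // c lies on the closed segment ab iff its coordinates fall inside
        // the axis-aligned box spanned by a and b (valid when collinear)
        return std::min(a.x, b.x) <= c.x && c.x <= std::max(a.x, b.x) &&
               std::min(a.y, b.y) <= c.y && c.y <= std::max(a.y, b.y);
    }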


@ -1602,7 +1602,7 @@ static bool openvx_remap(Mat src, Mat dst, Mat map1, Mat map2, int interpolation
ivx::Image::createAddressing(dst.cols, dst.rows, 1, (vx_int32)(dst.step)), dst.data);
//ATTENTION: VX_CONTEXT_IMMEDIATE_BORDER attribute change could lead to strange issues in multi-threaded environments
//since OpenVX standart says nothing about thread-safety for now
//since OpenVX standard says nothing about thread-safety for now
ivx::border_t prevBorder = ctx.immediateBorder();
ctx.setImmediateBorder(VX_BORDER_CONSTANT, (vx_uint8)(borderValue[0]));


@ -1112,9 +1112,9 @@ static void advance(unsigned int &index, unsigned int nrOfPoints) {
index = successor(index, nrOfPoints);
}
//! Return the succesor of the provided point index
//! Return the successor of the provided point index
/*!
* The succesor of the last polygon point is the first polygon point
* The successor of the last polygon point is the first polygon point
* (circular referencing)
*
* @param index Index of the point
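The circular referencing mentioned in the doc comment is plain modular arithmetic; a minimal sketch:

    // successor of a polygon point index, wrapping last -> first
    static unsigned int successorSketch(unsigned int index, unsigned int nrOfPoints)
    {
        return (index + 1) % nrOfPoints;
    }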


@ -467,7 +467,7 @@ __kernel void stage2_hysteresis(__global uchar *map_ptr, int map_step, int map_o
#elif defined GET_EDGES
// Get the edge result. egde type of value 2 will be marked as an edge point and set to 255. Otherwise 0.
// Get the edge result. edge type of value 2 will be marked as an edge point and set to 255. Otherwise 0.
// map edge type mappings
// dst edge output


@ -243,7 +243,7 @@ static void rotatingCalipers( const Point2f* points, int n, int mode, float* out
{
case CALIPERS_MAXHEIGHT:
{
/* now main element lies on edge alligned to calipers side */
/* now main element lies on edge aligned to calipers side */
/* find opposite element i.e. transform */
/* 0->2, 1->3, 2->0, 3->1 */


@ -1458,7 +1458,7 @@ namespace cv
ivx::Image::createAddressing(dst.cols, dst.rows, 1, (vx_int32)(dst.step)), dst.data);
//ATTENTION: VX_CONTEXT_IMMEDIATE_BORDER attribute change could lead to strange issues in multi-threaded environments
//since OpenVX standart says nothing about thread-safety for now
//since OpenVX standard says nothing about thread-safety for now
ivx::border_t prevBorder = ctx.immediateBorder();
ctx.setImmediateBorder(border, (vx_uint8)(0));
ivx::IVX_CHECK_STATUS(vxuBox3x3(ctx, ia, ib));
@ -3345,7 +3345,7 @@ static bool openvx_gaussianBlur(InputArray _src, OutputArray _dst, Size ksize,
ivx::Image::createAddressing(dst.cols, dst.rows, 1, (vx_int32)(dst.step)), dst.data);
//ATTENTION: VX_CONTEXT_IMMEDIATE_BORDER attribute change could lead to strange issues in multi-threaded environments
//since OpenVX standart says nothing about thread-safety for now
//since OpenVX standard says nothing about thread-safety for now
ivx::border_t prevBorder = ctx.immediateBorder();
ctx.setImmediateBorder(border, (vx_uint8)(0));
ivx::IVX_CHECK_STATUS(vxuGaussian3x3(ctx, ia, ib));
@ -4416,7 +4416,7 @@ namespace cv
ivx::Image::createAddressing(dst.cols, dst.rows, 1, (vx_int32)(dst.step)), dst.data);
//ATTENTION: VX_CONTEXT_IMMEDIATE_BORDER attribute change could lead to strange issues in multi-threaded environments
//since OpenVX standart says nothing about thread-safety for now
//since OpenVX standard says nothing about thread-safety for now
ivx::border_t prevBorder = ctx.immediateBorder();
ctx.setImmediateBorder(VX_BORDER_REPLICATE);
#ifdef VX_VERSION_1_1


@ -126,7 +126,7 @@ OCL_INSTANTIATE_TEST_CASE_P(ImageProc, Filter2D,
Values(CV_8U, CV_16U, CV_32F),
OCL_ALL_CHANNELS,
Values(3, 5, 7), // Kernel size
Values(1, 4, 8), // Width mutiple
Values(1, 4, 8), // Width multiple
Values((BorderType)BORDER_CONSTANT,
(BorderType)BORDER_REPLICATE,
(BorderType)BORDER_REFLECT,


@ -2053,7 +2053,7 @@ static void validateResult(const Mat& reference, const Mat& actual, const Mat& s
int cn = reference.channels();
ssize.width *= cn;
bool next = true;
//RGB2Lab_f works throug LUT and brings additional error
//RGB2Lab_f works through LUT and brings additional error
static const float maxErr = 1.f/192.f;
for (int y = 0; y < ssize.height && next; ++y)


@ -126,7 +126,7 @@ String CV_ImageWarpBaseTest::interpolation_to_string(int inter) const
if (inverse)
str += " | WARP_INVERSE_MAP";
return str.empty() ? "Unsupported/Unkown interpolation type" : str;
return str.empty() ? "Unsupported/Unknown interpolation type" : str;
}
Size CV_ImageWarpBaseTest::randSize(RNG& rng) const
@ -851,7 +851,7 @@ const char* CV_Remap_Test::borderType_to_string() const
return "BORDER_WRAP";
if (borderType == BORDER_REFLECT_101)
return "BORDER_REFLECT_101";
return "Unsupported/Unkown border type";
return "Unsupported/Unknown border type";
}
void CV_Remap_Test::prepare_test_data_for_reference_func()


@ -110,7 +110,7 @@ class AsyncServiceHelper
public void wait_install()
{
Log.e(TAG, "Instalation was not started! Nothing to wait!");
Log.e(TAG, "Installation was not started! Nothing to wait!");
}
};


@ -175,7 +175,7 @@ class ClassInfo(GeneralInfo):
self.cname = self.name.replace(".", "::")
self.methods = []
self.methods_suffixes = {}
self.consts = [] # using a list to save the occurence order
self.consts = [] # using a list to save the occurrence order
self.private_consts = []
self.imports = set()
self.props= []


@ -54,14 +54,14 @@ JNIEXPORT void JNICALL Java_org_opencv_android_Utils_nBitmapToMat2
return;
} catch(const cv::Exception& e) {
AndroidBitmap_unlockPixels(env, bitmap);
LOGE("nBitmapToMat catched cv::Exception: %s", e.what());
LOGE("nBitmapToMat caught cv::Exception: %s", e.what());
jclass je = env->FindClass("org/opencv/core/CvException");
if(!je) je = env->FindClass("java/lang/Exception");
env->ThrowNew(je, e.what());
return;
} catch (...) {
AndroidBitmap_unlockPixels(env, bitmap);
LOGE("nBitmapToMat catched unknown exception (...)");
LOGE("nBitmapToMat caught unknown exception (...)");
jclass je = env->FindClass("java/lang/Exception");
env->ThrowNew(je, "Unknown exception in JNI code {nBitmapToMat}");
return;
@ -136,14 +136,14 @@ JNIEXPORT void JNICALL Java_org_opencv_android_Utils_nMatToBitmap2
return;
} catch(const cv::Exception& e) {
AndroidBitmap_unlockPixels(env, bitmap);
LOGE("nMatToBitmap catched cv::Exception: %s", e.what());
LOGE("nMatToBitmap caught cv::Exception: %s", e.what());
jclass je = env->FindClass("org/opencv/core/CvException");
if(!je) je = env->FindClass("java/lang/Exception");
env->ThrowNew(je, e.what());
return;
} catch (...) {
AndroidBitmap_unlockPixels(env, bitmap);
LOGE("nMatToBitmap catched unknown exception (...)");
LOGE("nMatToBitmap caught unknown exception (...)");
jclass je = env->FindClass("java/lang/Exception");
env->ThrowNew(je, "Unknown exception in JNI code {nMatToBitmap}");
return;


@ -220,7 +220,7 @@ PyObject* pyopencv_from(const T& src);
enum { ARG_NONE = 0, ARG_MAT = 1, ARG_SCALAR = 2 };
// special case, when the convertor needs full ArgInfo structure
// special case, when the converter needs full ArgInfo structure
static bool pyopencv_to(PyObject* o, Mat& m, const ArgInfo info)
{
bool allowND = true;


@ -758,7 +758,7 @@ class CppHeaderParser(object):
def find_next_token(self, s, tlist, p=0):
"""
Finds the next token from the 'tlist' in the input 's', starting from position 'p'.
Returns the first occured token and its position, or ("", len(s)) when no token is found
Returns the first occurred token and its position, or ("", len(s)) when no token is found
"""
token = ""
tpos = len(s)


@ -2652,7 +2652,7 @@ inline void FlushInfoLog() { fflush(NULL); }
//
// GTEST_CHECK_ is an all-mode assert. It aborts the program if the condition
// is not satisfied.
// Synopsys:
// Synopsis:
// GTEST_CHECK_(boolean_condition);
// or
// GTEST_CHECK_(boolean_condition) << "Additional message";
@ -2696,7 +2696,7 @@ const T& move(const T& t) {
// const Foo*). When you use ImplicitCast_, the compiler checks that
// the cast is safe. Such explicit ImplicitCast_s are necessary in
// surprisingly many situations where C++ demands an exact type match
// instead of an argument type convertable to a target type.
// instead of an argument type convertible to a target type.
//
// The syntax for using ImplicitCast_ is the same as for static_cast:
//
@ -4552,7 +4552,7 @@ class GTEST_API_ FilePath {
void Normalize();
// Returns a pointer to the last occurence of a valid path separator in
// Returns a pointer to the last occurrence of a valid path separator in
// the FilePath. On Windows, for example, both '/' and '\' are valid path
// separators. Returns NULL if no path separator was found.
const char* FindLastPathSeparator() const;
@ -19985,7 +19985,7 @@ class GTEST_API_ UnitTest {
internal::UnitTestImpl* impl() { return impl_; }
const internal::UnitTestImpl* impl() const { return impl_; }
// These classes and funcions are friends as they need to access private
// These classes and functions are friends as they need to access private
// members of UnitTest.
friend class Test;
friend class internal::AssertHelper;


@ -2635,7 +2635,7 @@ class Hunk {
// Print a unified diff header for one hunk.
// The format is
// "@@ -<left_start>,<left_length> +<right_start>,<right_length> @@"
// where the left/right parts are ommitted if unnecessary.
// where the left/right parts are omitted if unnecessary.
void PrintHeader(std::ostream* ss) const {
*ss << "@@ ";
if (removes_) {
@ -8337,7 +8337,7 @@ FilePath FilePath::RemoveExtension(const char* extension) const {
return *this;
}
// Returns a pointer to the last occurence of a valid path separator in
// Returns a pointer to the last occurrence of a valid path separator in
// the FilePath. On Windows, for example, both '/' and '\' are valid path
// separators. Returns NULL if no path separator was found.
const char* FilePath::FindLastPathSeparator() const {
@ -9968,7 +9968,7 @@ namespace internal {
// Depending on the value of a char (or wchar_t), we print it in one
// of three formats:
// - as is if it's a printable ASCII (e.g. 'a', '2', ' '),
// - as a hexidecimal escape sequence (e.g. '\x7F'), or
// - as a hexadecimal escape sequence (e.g. '\x7F'), or
// - as a special escape sequence (e.g. '\r', '\n').
enum CharFormat {
kAsIs,
@ -10072,7 +10072,7 @@ void PrintCharAndCodeTo(Char c, ostream* os) {
return;
*os << " (" << static_cast<int>(c);
// For more convenience, we print c's code again in hexidecimal,
// For more convenience, we print c's code again in hexadecimal,
// unless c was already printed in the form '\x##' or the code is in
// [1, 9].
if (format == kHexEscape || (1 <= c && c <= 9)) {


@ -221,9 +221,9 @@ CV_EXPORTS_W Ptr<BackgroundSubtractorMOG2>
createBackgroundSubtractorMOG2(int history=500, double varThreshold=16,
bool detectShadows=true);
/** @brief K-nearest neigbours - based Background/Foreground Segmentation Algorithm.
/** @brief K-nearest neighbours - based Background/Foreground Segmentation Algorithm.
The class implements the K-nearest neigbours background subtraction described in @cite Zivkovic2006 .
The class implements the K-nearest neighbours background subtraction described in @cite Zivkovic2006 .
Very efficient if number of foreground pixels is low.
*/
class CV_EXPORTS_W BackgroundSubtractorKNN : public BackgroundSubtractor
@ -261,7 +261,7 @@ public:
pixel is matching the kNN background model.
*/
CV_WRAP virtual int getkNNSamples() const = 0;
/** @brief Sets the k in the kNN. How many nearest neigbours need to match.
/** @brief Sets the k in the kNN. How many nearest neighbours need to match.
*/
CV_WRAP virtual void setkNNSamples(int _nkNN) = 0;
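A minimal usage sketch for the KNN subtractor documented here (parameter values are the documented defaults; camera index 0 is illustrative):

    #include <opencv2/video.hpp>
    #include <opencv2/videoio.hpp>
    #include <opencv2/highgui.hpp>

    int main()
    {
        cv::Ptr<cv::BackgroundSubtractorKNN> knn =
            cv::createBackgroundSubtractorKNN(/*history=*/500,
                                              /*dist2Threshold=*/400.0,
                                              /*detectShadows=*/true);
        knn->setkNNSamples(2);            // k neighbours that must match
        cv::VideoCapture cap(0);
        cv::Mat frame, fgMask;
        while (cap.read(frame))
        {
            knn->apply(frame, fgMask);    // update model, emit foreground mask
            cv::imshow("foreground", fgMask);
            if (cv::waitKey(30) == 27) break;
        }
        return 0;
    }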


@ -281,7 +281,7 @@ protected:
////////////////////////
int history;
//alpha=1/history - speed of update - if the time interval you want to average over is T
//set alpha=1/history. It is also usefull at start to make T slowly increase
//set alpha=1/history. It is also useful at start to make T slowly increase
//from 1 until the desired T
float fTb;
//Tb - threshold on the squared distance from the sample used to decide if it is well described
@ -289,7 +289,7 @@ protected:
//and that is Tb=2*2*10*10 =400; where we take typical pixel level sigma=10
/////////////////////////
//less important parameters - things you might change but be carefull
//less important parameters - things you might change but be careful
////////////////////////
int nN;//totlal number of samples
int nkNN;//number on NN for detcting background - default K=[0.1*nN]
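The alpha=1/history remark describes an exponential running average: a fixed alpha averages over roughly T = 1/alpha frames, and letting alpha start at 1/n for the first frames makes T grow from 1 to the desired value. A sketch:

    #include <algorithm>

    // one update step; n is the number of frames seen so far
    double runningAverage(double avg, double sample, long n, double T)
    {
        double alpha = std::max(1.0 / std::max<long>(n, 1), 1.0 / T);
        return (1.0 - alpha) * avg + alpha * sample;
    }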


@ -42,7 +42,7 @@
/*//Implementation of the Gaussian mixture model background subtraction from:
//
//"Improved adaptive Gausian mixture model for background subtraction"
//"Improved adaptive Gaussian mixture model for background subtraction"
//Z.Zivkovic
//International Conference Pattern Recognition, UK, August, 2004
//http://www.zoranz.net/Publications/zivkovic2004ICPR.pdf
@ -91,7 +91,7 @@ namespace cv
/*
Interface of Gaussian mixture algorithm from:
"Improved adaptive Gausian mixture model for background subtraction"
"Improved adaptive Gaussian mixture model for background subtraction"
Z.Zivkovic
International Conference Pattern Recognition, UK, August, 2004
http://www.zoranz.net/Publications/zivkovic2004ICPR.pdf
@ -351,7 +351,7 @@ protected:
// and that is varThreshold=4*4=16; Corresponds to Tb in the paper.
/////////////////////////
// less important parameters - things you might change but be carefull
// less important parameters - things you might change but be careful
////////////////////////
float backgroundRatio;
// corresponds to fTB=1-cf from the paper
@ -407,7 +407,7 @@ struct GaussBGStatModel2Params
int nHeight;
int nND;//number of data dimensions (image channels)
bool bPostFiltering;//defult 1 - do postfiltering - will make shadow detection results also give value 255
bool bPostFiltering;//default 1 - do postfiltering - will make shadow detection results also give value 255
double minArea; // for postfiltering
bool bInit;//default 1, faster updates at start
@ -417,7 +417,7 @@ struct GaussBGStatModel2Params
////////////////////////
float fAlphaT;
//alpha - speed of update - if the time interval you want to average over is T
//set alpha=1/T. It is also usefull at start to make T slowly increase
//set alpha=1/T. It is also useful at start to make T slowly increase
//from 1 until the desired T
float fTb;
//Tb - threshold on the squared Mahalan. dist. to decide if it is well described
@ -426,7 +426,7 @@ struct GaussBGStatModel2Params
//and that is Tb=4*4=16;
/////////////////////////
//less important parameters - things you might change but be carefull
//less important parameters - things you might change but be careful
////////////////////////
float fTg;
//Tg - threshold on the squared Mahalan. dist. to decide
@ -471,7 +471,7 @@ struct GMM
};
// shadow detection performed per pixel
// should work for rgb data, could be usefull for gray scale and depth data as well
// should work for rgb data, could be useful for gray scale and depth data as well
// See: Prati,Mikic,Trivedi,Cucchiara,"Detecting Moving Shadows...",IEEE PAMI,2003.
CV_INLINE bool
detectShadowGMM(const float* data, int nchannels, int nmodes,


@ -876,7 +876,7 @@ namespace
std::vector<UMat> prevPyr; prevPyr.resize(maxLevel + 1);
std::vector<UMat> nextPyr; nextPyr.resize(maxLevel + 1);
// allocate buffers with aligned pitch to be able to use cl_khr_image2d_from_buffer extention
// allocate buffers with aligned pitch to be able to use cl_khr_image2d_from_buffer extension
// This is the required pitch alignment in pixels
int pitchAlign = (int)ocl::Device::getDefault().imagePitchAlignment();
if (pitchAlign>0)
@ -886,7 +886,7 @@ namespace
for (int level = 1; level <= maxLevel; ++level)
{
int cols,rows;
// allocate buffers with aligned pitch to be able to use image on buffer extention
// allocate buffers with aligned pitch to be able to use image on buffer extension
cols = (prevPyr[level - 1].cols+1)/2;
rows = (prevPyr[level - 1].rows+1)/2;
prevPyr[level] = UMat(rows,(cols+pitchAlign-1)&(-pitchAlign),prevPyr[level-1].type()).colRange(0,cols);
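The pitch-alignment idiom used above: round the row width up to a multiple of pitchAlign (a power of two), allocate that wide, then narrow the view back with colRange. A sketch of the round-up expression:

    // round x up to a multiple of align; align must be a power of two
    static int roundUpPow2(int x, int align)
    {
        return (x + align - 1) & -align;   // bump past boundary, clear low bits
    }
    // e.g. roundUpPow2(1001, 64) == 1024; a UMat of that width, narrowed with
    // colRange(0, cols), keeps an aligned row pitch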


@ -784,7 +784,7 @@ public:
`VideoCapture -> API Backend -> Operating System -> Device Driver -> Device Hardware`
@endcode
The returned value might be different from what really used by the device or it could be encoded
using device dependant rules (eg. steps or percentage). Effective behaviour depends from device
using device dependent rules (eg. steps or percentage). Effective behaviour depends from device
driver and API Backend
*/


@ -185,7 +185,7 @@ enum
CV_CAP_PROP_MONOCHROME =19,
CV_CAP_PROP_SHARPNESS =20,
CV_CAP_PROP_AUTO_EXPOSURE =21, // exposure control done by camera,
// user can adjust refernce level
// user can adjust reference level
// using this feature
CV_CAP_PROP_GAMMA =22,
CV_CAP_PROP_TEMPERATURE =23,
@ -328,7 +328,7 @@ enum
CV_CAP_PROP_XI_COLOR_FILTER_ARRAY = 475, // Returns color filter array type of RAW data.
CV_CAP_PROP_XI_GAMMAY = 476, // Luminosity gamma
CV_CAP_PROP_XI_GAMMAC = 477, // Chromaticity gamma
CV_CAP_PROP_XI_SHARPNESS = 478, // Sharpness Strenght
CV_CAP_PROP_XI_SHARPNESS = 478, // Sharpness Strength
CV_CAP_PROP_XI_CC_MATRIX_00 = 479, // Color Correction Matrix element [0][0]
CV_CAP_PROP_XI_CC_MATRIX_01 = 480, // Color Correction Matrix element [0][1]
CV_CAP_PROP_XI_CC_MATRIX_02 = 481, // Color Correction Matrix element [0][2]


@ -129,12 +129,12 @@ protected:
unsigned int payload; // Width x height x Pixel width.
int widthMin; // Camera sensor minium width.
int widthMin; // Camera sensor minimum width.
int widthMax; // Camera sensor maximum width.
int heightMin; // Camera sensor minium height.
int heightMin; // Camera sensor minimum height.
int heightMax; // Camera sensor maximum height.
bool fpsAvailable;
double fpsMin; // Camera minium fps.
double fpsMin; // Camera minimum fps.
double fpsMax; // Camera maximum fps.
bool gainAvailable;
double gainMin; // Camera minimum gain.
@ -392,7 +392,7 @@ void CvCaptureCAM_Aravis::autoExposureControl(IplImage* image)
ng = CLIP( gain + ev + exposureCompensation, gainMin, gainMax);
if( ng < gain ) {
// piority 1 - reduce gain
// priority 1 - reduce gain
arv_camera_set_gain(camera, (gain = ng));
return;
}


@ -853,7 +853,7 @@ IplImage* CvCaptureFile::retrieveFramePixelBuffer() {
return 0;
}
// Output image paramaters.
// Output image parameters.
int outChannels;
if (mMode == CV_CAP_MODE_BGR || mMode == CV_CAP_MODE_RGB) {
outChannels = 3;
@ -887,7 +887,7 @@ IplImage* CvCaptureFile::retrieveFramePixelBuffer() {
mOutImage->imageData = reinterpret_cast<char *>(mOutImagedata);
mOutImage->imageSize = int(currSize);
// Device image paramaters and conversion code.
// Device image parameters and conversion code.
// (Not all of these conversions are used in production, but they were all tested to find the fastest options.)
int deviceChannels;
int cvtCode;


@ -278,7 +278,7 @@ CvCaptureCAM_DC1394_v2_CPP::CvCaptureCAM_DC1394_v2_CPP()
dcCam = 0;
isoSpeed = 400;
fps = 15;
// Resetted the value here to 1 in order to ensure only a single frame is stored in the buffer!
// Reset the value here to 1 in order to ensure only a single frame is stored in the buffer!
nDMABufs = 8;
started = false;
cameraId = 0;


@ -257,7 +257,7 @@ interface ISampleGrabber : public IUnknown
//optionally setup a second (or third, fourth ...) device - same options as above
VI.setupDevice(device2);
//As requested width and height can not always be accomodated
//As requested width and height can not always be accommodated
//make sure to check the size once the device is setup
int width = VI.getWidth(device1);
@ -595,7 +595,7 @@ static void MyFreeMediaType(AM_MEDIA_TYPE& mt){
}
if (mt.pUnk != NULL)
{
// Unecessary because pUnk should not be used, but safest.
// Unnecessary because pUnk should not be used, but safest.
mt.pUnk->Release();
mt.pUnk = NULL;
}
@ -1292,7 +1292,7 @@ char * videoInput::getDeviceName(int deviceID){
int videoInput::listDevices(bool silent){
//COM Library Intialization
//COM Library Initialization
comInit();
if(!silent) DebugPrintOut("\nVIDEOINPUT SPY MODE!\n\n");
@ -1548,7 +1548,7 @@ bool videoInput::isFrameNew(int id){
EnterCriticalSection(&VDList[id]->sgCallback->critSection);
result = VDList[id]->sgCallback->newFrame;
//we need to give it some time at the begining to start up so lets check after 400 frames
//we need to give it some time at the beginning to start up so lets check after 400 frames
if(VDList[id]->nFramesRunning > 400 && VDList[id]->sgCallback->freezeCheck > VDList[id]->nFramesForReconnect ){
freeze = true;
}
@ -1588,7 +1588,7 @@ bool videoInput::isDeviceSetup(int id) const
// ----------------------------------------------------------------------
// Gives us a little pop up window to adjust settings
// We do this in a seperate thread now!
// We do this in a separate thread now!
// ----------------------------------------------------------------------
@ -1763,7 +1763,7 @@ bool videoInput::setVideoSettingFilter(int deviceID, long Property, long lValue,
pAMVideoProcAmp->Set(Property, Default, VideoProcAmp_Flags_Auto);
}
else{
// Perhaps add a check that lValue and Flags are within the range aquired from GetRange above
// Perhaps add a check that lValue and Flags are within the range acquired from GetRange above
pAMVideoProcAmp->Set(Property, lValue, Flags);
}
@ -1845,7 +1845,7 @@ bool videoInput::setVideoSettingCamera(int deviceID, long Property, long lValue,
}
else
{
// Perhaps add a check that lValue and Flags are within the range aquired from GetRange above
// Perhaps add a check that lValue and Flags are within the range acquired from GetRange above
pIAMCameraControl->Set(Property, lValue, Flags);
}
pIAMCameraControl->Release();
@ -1971,7 +1971,7 @@ videoInput::~videoInput(){
{
delete VDList[i];
}
//Unitialize com
//Uninitialize com
comUnInit();
}
@ -2012,7 +2012,7 @@ bool videoInput::comInit(){
// ----------------------------------------------------------------------
// Same as above but to unitialize com, decreases counter and frees com
// Same as above but to uninitialize com, decreases counter and frees com
// if no one else is using it
// ----------------------------------------------------------------------
@ -2512,7 +2512,7 @@ int videoInput::start(int deviceID, videoDevice *VD){
return hr;
}
//FITLER GRAPH MANAGER//
//FILTER GRAPH MANAGER//
// Create the Filter Graph Manager.
hr = CoCreateInstance(CLSID_FilterGraph, 0, CLSCTX_INPROC_SERVER,IID_IGraphBuilder, (void**)&VD->pGraph);
if (FAILED(hr))
@ -3144,7 +3144,7 @@ HRESULT videoInput::routeCrossbar(ICaptureGraphBuilder2 **ppBuild, IBaseFilter *
}
Crossbar->Route(pOIndex,pIndex);
}else{
DebugPrintOut("SETUP: Didn't find specified Physical Connection type. Using Defualt.\n");
DebugPrintOut("SETUP: Didn't find specified Physical Connection type. Using Default.\n");
}
//we only free the crossbar when we close or restart the device


@ -1547,8 +1547,8 @@ static AVStream *icv_add_video_stream_FFMPEG(AVFormatContext *oc,
}
if (c->codec_id == CV_CODEC(CODEC_ID_MPEG1VIDEO) || c->codec_id == CV_CODEC(CODEC_ID_MSMPEG4V3)){
/* needed to avoid using macroblocks in which some coeffs overflow
this doesnt happen with normal video, it just happens here as the
motion of the chroma plane doesnt match the luma plane */
this doesn't happen with normal video, it just happens here as the
motion of the chroma plane doesn't match the luma plane */
/* avoid FFMPEG warning 'clipping 1 dct coefficients...' */
c->mb_decision=2;
}
@ -1568,7 +1568,7 @@ static AVStream *icv_add_video_stream_FFMPEG(AVFormatContext *oc,
#endif
#if LIBAVCODEC_VERSION_INT>0x000409
// some formats want stream headers to be seperate
// some formats want stream headers to be separate
if(oc->oformat->flags & AVFMT_GLOBALHEADER)
{
#if LIBAVCODEC_BUILD > CALC_FFMPEG_VERSION(56, 35, 0)
@ -2387,8 +2387,8 @@ AVStream* OutputMediaStream_FFMPEG::addVideoStream(AVFormatContext *oc, CV_CODEC
if (c->codec_id == CV_CODEC(CODEC_ID_MPEG1VIDEO) || c->codec_id == CV_CODEC(CODEC_ID_MSMPEG4V3))
{
// needed to avoid using macroblocks in which some coeffs overflow
// this doesnt happen with normal video, it just happens here as the
// motion of the chroma plane doesnt match the luma plane
// this doesn't happen with normal video, it just happens here as the
// motion of the chroma plane doesn't match the luma plane
// avoid FFMPEG warning 'clipping 1 dct coefficients...'
@ -2396,7 +2396,7 @@ AVStream* OutputMediaStream_FFMPEG::addVideoStream(AVFormatContext *oc, CV_CODEC
}
#if LIBAVCODEC_VERSION_INT > 0x000409
// some formats want stream headers to be seperate
// some formats want stream headers to be separate
if (oc->oformat->flags & AVFMT_GLOBALHEADER)
{
#if LIBAVCODEC_BUILD > CALC_FFMPEG_VERSION(56, 35, 0)


@ -150,7 +150,7 @@ wrprGetGigEVisionAPI()
\brief Wrapper to GigEVisionAPI function
\param api
\param eventHandler
\return true - succsess, else - false
\return true - success, else - false
See \a wrprInitGigEVisionAPI, \a gige::IGigEVisionAPI
*/


@ -1159,7 +1159,7 @@ int DigitalCameraCapture::collectWidgets(std::ostream & os,
/**
* Write message to @field msgsBuffer if user want to store them
* (@field collectMsgs).
* Print debug informations on screen.
* Print debug information on screen.
*/
template<typename OsstreamPrintable>
void DigitalCameraCapture::message(MsgType msgType, const char * msg,


@ -280,7 +280,7 @@ bool CvCapture_GStreamer::grabFrame()
/*!
* \brief CvCapture_GStreamer::retrieveFrame
* \return IplImage pointer. [Transfer Full]
* Retreive the previously grabbed buffer, and wrap it in an IPLImage structure
* Retrieve the previously grabbed buffer, and wrap it in an IPLImage structure
*/
IplImage * CvCapture_GStreamer::retrieveFrame(int)
{
@ -922,7 +922,7 @@ bool CvCapture_GStreamer::open( int type, const char* filename )
if (!gst_structure_get_int (structure, "height", &height))
{
CV_WARN("Cannot query video heigth\n");
CV_WARN("Cannot query video height\n");
}
gint num = 0, denom=1;
@ -967,11 +967,11 @@ bool CvCapture_GStreamer::open( int type, const char* filename )
}
/*!
* \brief CvCapture_GStreamer::getProperty retreive the requested property from the pipeline
* \brief CvCapture_GStreamer::getProperty retrieve the requested property from the pipeline
* \param propId requested property
* \return property value
*
* There are two ways the properties can be retreived. For seek-based properties we can query the pipeline.
* There are two ways the properties can be retrieved. For seek-based properties we can query the pipeline.
* For frame-based properties, we use the caps of the lasst receivef sample. This means that some properties
* are not available until a first frame was received
*/


@ -61,7 +61,7 @@ Second Patch: August 28, 2004 Sfuncia Fabio fiblan@yahoo.it
For Release: OpenCV-Linux Beta4 Opencv-0.9.6
FS: this patch fix not sequential index of device (unplugged device), and real numCameras.
for -1 index (icvOpenCAM_V4L) i dont use /dev/video but real device available, because
for -1 index (icvOpenCAM_V4L) I don't use /dev/video but real device available, because
if /dev/video is a link to /dev/video0 and i unplugged device on /dev/video0, /dev/video
is a bad link. I search the first available device with indexList.
@ -428,7 +428,7 @@ static int try_init_v4l(CvCaptureCAM_V4L* capture, const char *deviceName)
int detect = 0;
// Test device for V4L compability
// Test device for V4L compatibility
/* Test using an open to see if this new device name really does exists. */
/* No matter what the name - it still must be opened! */
@ -471,7 +471,7 @@ static int try_init_v4l2(CvCaptureCAM_V4L* capture, const char *deviceName)
int detect = 0;
// Test device for V4L2 compability
// Test device for V4L2 compatibility
/* Open and test V4L2 device */
capture->deviceHandle = v4l2_open (deviceName, O_RDWR /* required */ | O_NONBLOCK, 0);
@ -1033,7 +1033,7 @@ static CvCaptureCAM_V4L * icvCaptureFromCAM_V4L (int index)
char deviceName[MAX_DEVICE_DRIVER_NAME];
if (!numCameras)
icvInitCapture_V4L(); /* Havent called icvInitCapture yet - do it now! */
icvInitCapture_V4L(); /* Haven't called icvInitCapture yet - do it now! */
if (!numCameras)
return NULL; /* Are there any /dev/video input sources? */
@ -1073,10 +1073,10 @@ static CvCaptureCAM_V4L * icvCaptureFromCAM_V4L (const char* deviceName)
capture->buffers[MAX_V4L_BUFFERS].start = NULL;
#endif
/* w/o memset some parts arent initialized - AKA: Fill it with zeros so it is clean */
/* w/o memset some parts aren't initialized - AKA: Fill it with zeros so it is clean */
memset(capture,0,sizeof(CvCaptureCAM_V4L));
/* Present the routines needed for V4L funtionality. They are inserted as part of
/* Present the routines needed for V4L functionality. They are inserted as part of
the standard set of cv calls promoting transparency. "Vector Table" insertion. */
capture->FirstCapture = 1;
@ -1580,7 +1580,7 @@ static int icvSetVideoSize( CvCaptureCAM_V4L* capture, int w, int h) {
CLEAR (capture->form);
capture->form.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
/* read the current setting, mainly to retreive the pixelformat information */
/* read the current setting, mainly to retrieve the pixelformat information */
xioctl (capture->deviceHandle, VIDIOC_G_FMT, &capture->form);
/* set the values we want to change */
@ -1788,7 +1788,7 @@ static int icvSetControl (CvCaptureCAM_V4L* capture, int property_id, double val
}
if (v4l1_ioctl(capture->deviceHandle, VIDIOCSPICT, &capture->imageProperties) < 0){
fprintf(stderr, "VIDEOIO ERROR: V4L: Unable to set video informations\n");
fprintf(stderr, "VIDEOIO ERROR: V4L: Unable to set video information\n");
icvCloseCAM_V4L(capture);
return -1;
}


@ -128,7 +128,7 @@ struct BitmapInfoHeader
{
DWORD biSize; // Write header size of BITMAPINFO header structure
LONG biWidth; // width in pixels
LONG biHeight; // heigth in pixels
LONG biHeight; // height in pixels
WORD biPlanes; // Number of color planes in which the data is stored
WORD biBitCount; // Number of bits per pixel
DWORD biCompression; // Type of compression used (uncompressed: NO_COMPRESSION=0)


@ -1047,7 +1047,7 @@ static const int idct_prescale[] =
static const char jpegHeader[] =
"\xFF\xD8" // SOI - start of image
"\xFF\xE0" // APP0 - jfif extention
"\xFF\xE0" // APP0 - jfif extension
"\x00\x10" // 2 bytes: length of APP0 segment
"JFIF\x00" // JFIF signature
"\x01\x02" // version of JFIF


@ -44,7 +44,7 @@
Media Foundation-based Video Capturing module is based on
videoInput library by Evgeny Pereguda:
http://www.codeproject.com/Articles/559437/Capturing-of-video-from-web-camera-on-Windows-7-an
Originaly licensed under The Code Project Open License (CPOL) 1.02:
Originally licensed under The Code Project Open License (CPOL) 1.02:
http://www.codeproject.com/info/cpol10.aspx
*/
//require Windows 8 for some of the formats defined otherwise could baseline on lower version
@ -1666,7 +1666,7 @@ void ImageGrabberThread::setEmergencyStopEvent(void *userData, void(*func)(int,
ImageGrabberThread::~ImageGrabberThread(void)
{
DebugPrintOut(L"IMAGEGRABBERTHREAD VIDEODEVICE %i: Destroing ImageGrabberThread\n", igt_DeviceID);
DebugPrintOut(L"IMAGEGRABBERTHREAD VIDEODEVICE %i: Destroying ImageGrabberThread\n", igt_DeviceID);
if (igt_Handle)
WaitForSingleObject(igt_Handle, INFINITE);
delete igt_pImageGrabber;


@ -977,7 +977,7 @@ bool CvCapture_OpenNI::setDepthGeneratorProperty( int propIdx, double propValue
if( propValue != 0.0 ) // "on"
{
// if there isn't image generator (i.e. ASUS XtionPro doesn't have it)
// then the property isn't avaliable
// then the property isn't available
if( imageGenerator.IsValid() )
{
if( !depthGenerator.GetAlternativeViewPointCap().IsViewPointAs(imageGenerator) )


@ -605,7 +605,7 @@ bool CvCapture_OpenNI2::setDepthGeneratorProperty( int propIdx, double propValue
if( propValue != 0.0 ) // "on"
{
// if there isn't image generator (i.e. ASUS XtionPro doesn't have it)
// then the property isn't avaliable
// then the property isn't available
if ( streams[CV_COLOR_STREAM].isValid() )
{
openni::ImageRegistrationMode mode = propValue != 0.0 ? openni::IMAGE_REGISTRATION_DEPTH_TO_COLOR : openni::IMAGE_REGISTRATION_OFF;


@ -155,7 +155,7 @@ static int icvOpenFile_QT_Movie (CvCapture_QT_Movie * capture, const char * file
// we would use CFStringCreateWithFileSystemRepresentation (kCFAllocatorDefault, filename) on Mac OS X 10.4
CFStringRef inPath = CFStringCreateWithCString (kCFAllocatorDefault, filename, kCFStringEncodingISOLatin1);
OPENCV_ASSERT ((inPath != nil), "icvOpenFile_QT_Movie", "couldnt create CFString from a string");
OPENCV_ASSERT ((inPath != nil), "icvOpenFile_QT_Movie", "couldn't create CFString from a string");
// create the data reference
myErr = QTNewDataReferenceFromFullPathCFString (inPath, kQTPOSIXPathStyle, 0, & myDataRef, & myDataRefType);
@ -216,7 +216,7 @@ static int icvOpenFile_QT_Movie (CvCapture_QT_Movie * capture, const char * file
// create gworld for decompressed image
myErr = QTNewGWorld (& capture->myGWorld, k32ARGBPixelFormat /* k24BGRPixelFormat unfortunately doesn't work */,
& myRect, nil, nil, 0);
OPENCV_ASSERT (myErr == noErr, "icvOpenFile_QT_Movie", "couldnt create QTNewGWorld() for output image");
OPENCV_ASSERT (myErr == noErr, "icvOpenFile_QT_Movie", "couldn't create QTNewGWorld() for output image");
SetMovieGWorld (capture->myMovie, capture->myGWorld, nil);
// build IplImage header that will point to the PixMap of the Movie's GWorld later on
@ -510,7 +510,7 @@ static const void * icvRetrieveFrame_QT_Movie (CvCapture_QT_Movie * capture, int
// update IplImage header that points to PixMap of the Movie's GWorld.
// unfortunately, cvCvtColor doesn't know ARGB, the QuickTime pixel format,
// so we pass a modfied address.
// so we pass a modified address.
// ATTENTION: don't access the last pixel's alpha entry, it's inexistant
myPixMapHandle = GetGWorldPixMap (capture->myGWorld);
LockPixels (myPixMapHandle);
@ -662,7 +662,7 @@ static int icvOpenCamera_QT (CvCapture_QT_Cam * capture, const int index)
char nameBuffer [255];
result = GetComponentInfo (component, & desc, nameHandle, nil, nil);
OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldnt GetComponentInfo()");
OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldn't GetComponentInfo()");
OPENCV_ASSERT (*nameHandle, "icvOpenCamera_QT", "No name returned by GetComponentInfo()");
snprintf (nameBuffer, (**nameHandle) + 1, "%s", (char *) (* nameHandle + 1));
printf ("- Videodevice: %s\n", nameBuffer);
@ -675,7 +675,7 @@ static int icvOpenCamera_QT (CvCapture_QT_Cam * capture, const int index)
{
result = VDGetNumberOfInputs (capture->grabber, & capture->channel);
if (result != noErr)
fprintf (stderr, "Couldnt GetNumberOfInputs: %d\n", (int) result);
fprintf (stderr, "Couldn't GetNumberOfInputs: %d\n", (int) result);
else
{
#ifndef NDEBUG
@ -699,7 +699,7 @@ static int icvOpenCamera_QT (CvCapture_QT_Cam * capture, const int index)
Str255 nameBuffer;
result = VDGetInputName (capture->grabber, capture->channel, nameBuffer);
OPENCV_ASSERT (result == noErr, "ictOpenCamera_QT", "couldnt GetInputName()");
OPENCV_ASSERT (result == noErr, "ictOpenCamera_QT", "couldn't GetInputName()");
snprintf (name, *nameBuffer, "%s", (char *) (nameBuffer + 1));
printf (" Choosing input %d - %s\n", (int) capture->channel, name);
#endif
@ -729,37 +729,37 @@ static int icvOpenCamera_QT (CvCapture_QT_Cam * capture, const int index)
// Select the desired input
result = VDSetInput (capture->grabber, capture->channel);
OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldnt select video digitizer input");
OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldn't select video digitizer input");
// get the bounding rectangle of the video digitizer
result = VDGetActiveSrcRect (capture->grabber, capture->channel, & myRect);
OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldnt create VDGetActiveSrcRect from digitizer");
OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldn't create VDGetActiveSrcRect from digitizer");
myRect.right = 640; myRect.bottom = 480;
capture->size = cvSize (myRect.right - myRect.left, myRect.bottom - myRect.top);
printf ("Source rect is %d, %d -- %d, %d\n", (int) myRect.left, (int) myRect.top, (int) myRect.right, (int) myRect.bottom);
// create offscreen GWorld
result = QTNewGWorld (& capture->myGWorld, k32ARGBPixelFormat, & myRect, nil, nil, 0);
OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldnt create QTNewGWorld() for output image");
OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldn't create QTNewGWorld() for output image");
// get pixmap
capture->pixmap = GetGWorldPixMap (capture->myGWorld);
result = GetMoviesError ();
OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldnt get pixmap");
OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldn't get pixmap");
// set digitizer rect
result = VDSetDigitizerRect (capture->grabber, & myRect);
OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldnt create VDGetActiveSrcRect from digitizer");
OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldn't create VDGetActiveSrcRect from digitizer");
// set destination of digitized input
result = VDSetPlayThruDestination (capture->grabber, capture->pixmap, & myRect, nil, nil);
printf ("QuickTime error: %d\n", (int) result);
OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldnt set video destination");
OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldn't set video destination");
// get destination of digitized images
result = VDGetPlayThruDestination (capture->grabber, & capture->pixmap, nil, nil, nil);
printf ("QuickTime error: %d\n", (int) result);
OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldnt get video destination");
OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldn't get video destination");
OPENCV_ASSERT (capture->pixmap != nil, "icvOpenCamera_QT", "empty set video destination");
// get the bounding rectangle of the video digitizer
@ -768,15 +768,15 @@ static int icvOpenCamera_QT (CvCapture_QT_Cam * capture, const int index)
// build IplImage header that will point to the PixMap of the Movie's GWorld later on
capture->image_rgb = cvCreateImageHeader (capture->size, IPL_DEPTH_8U, 4);
OPENCV_ASSERT (capture->image_rgb, "icvOpenCamera_QT", "couldnt create image header");
OPENCV_ASSERT (capture->image_rgb, "icvOpenCamera_QT", "couldn't create image header");
// create an IplImage that holds the correctly formatted result
capture->image_bgr = cvCreateImage (capture->size, IPL_DEPTH_8U, 3);
OPENCV_ASSERT (capture->image_bgr, "icvOpenCamera_QT", "couldnt create image");
OPENCV_ASSERT (capture->image_bgr, "icvOpenCamera_QT", "couldn't create image");
// notify the digitizer component that we will be starting grabbing soon
result = VDCaptureStateChanging (capture->grabber, vdFlagCaptureIsForRecord | vdFlagCaptureStarting | vdFlagCaptureLowLatency);
OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldnt set capture state");
OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldn't set capture state");
// yeah, we did it
@ -791,7 +791,7 @@ static int icvClose_QT_Cam (CvCapture_QT_Cam * capture)
// notify the digitizer component that we will be stopping grabbing soon
result = VDCaptureStateChanging (capture->grabber, vdFlagCaptureStopping);
OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldnt set capture state");
OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldn't set capture state");
// release memory
cvReleaseImage (& capture->image_bgr);
@ -799,7 +799,7 @@ static int icvClose_QT_Cam (CvCapture_QT_Cam * capture)
DisposeGWorld (capture->myGWorld);
CloseComponent (capture->grabber);
// sucessful
// successful
return 1;
}
@ -830,7 +830,7 @@ static const void * icvRetrieveFrame_QT_Cam (CvCapture_QT_Cam * capture, int)
// update IplImage header that points to PixMap of the Movie's GWorld.
// unfortunately, cvCvtColor doesn't know ARGB, the QuickTime pixel format,
// so we pass a modfied address.
// so we pass a modified address.
// ATTENTION: don't access the last pixel's alpha entry, it's nonexistent
//myPixMapHandle = GetGWorldPixMap (capture->myGWorld);
myPixMapHandle = capture->pixmap;
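The comment above describes the workaround only in prose; below is a minimal sketch of the retrieval step it implies, assuming the QuickDraw accessors GetPixBaseAddr/GetPixRowBytes and the image_rgb/image_bgr pair created during open (illustrative, not the literal code of this file):

// ARGB in memory is A R G B A R G B ...; reading from base + 1 yields
// R G B A R G B ..., a layout cvCvtColor does understand as RGBA
char * baseAddress = GetPixBaseAddr (myPixMapHandle);
cvSetData (capture->image_rgb, baseAddress + 1, GetPixRowBytes (myPixMapHandle));
// the shifted view ends one byte short of the buffer -- hence the warning
// above about never touching the last pixel's alpha entry
cvCvtColor (capture->image_rgb, capture->image_bgr, CV_RGBA2BGR);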
@ -869,7 +869,7 @@ static OSErr icvDataProc_QT_Cam (SGChannel channel, Ptr raw_data, long len, long
// we need a decompression sequence that fits the raw data coming from the camera
err = SGGetChannelSampleDescription (channel, (Handle) description);
OPENCV_ASSERT (err == noErr, "icvDataProc_QT_Cam", "couldnt get channel sample description");
OPENCV_ASSERT (err == noErr, "icvDataProc_QT_Cam", "couldn't get channel sample description");
//*************************************************************************************//
//This fixed a bug when Quicktime is called twice to grab a frame (black band bug) - Yannick Verdie 2010
@ -885,7 +885,7 @@ static OSErr icvDataProc_QT_Cam (SGChannel channel, Ptr raw_data, long len, long
err = DecompressSequenceBegin (&capture->sequence, description, capture->gworld, 0,&capture->bounds,&scaleMatrix, srcCopy, NULL, 0, codecNormalQuality, bestSpeedCodec);
//**************************************************************************************//
OPENCV_ASSERT (err == noErr, "icvDataProc_QT_Cam", "couldnt begin decompression sequence");
OPENCV_ASSERT (err == noErr, "icvDataProc_QT_Cam", "couldn't begin decompression sequence");
DisposeHandle ((Handle) description);
}
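Once the decompression sequence exists, each raw chunk coming from the camera is pushed through it. A hedged sketch of the call that typically follows in such a data proc (the CodecFlags local is illustrative):

CodecFlags ignore;
// decompress this frame into the GWorld the sequence was bound to above
err = DecompressSequenceFrameS (capture->sequence, raw_data, len, 0, & ignore, nil);
OPENCV_ASSERT (err == noErr, "icvDataProc_QT_Cam", "couldn't decompress frame");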
@ -919,22 +919,22 @@ static int icvOpenCamera_QT (CvCapture_QT_Cam * capture, const int index)
// open sequence grabber component
capture->grabber = OpenDefaultComponent (SeqGrabComponentType, 0);
OPENCV_ASSERT (capture->grabber, "icvOpenCamera_QT", "couldnt create image");
OPENCV_ASSERT (capture->grabber, "icvOpenCamera_QT", "couldn't create image");
// initialize sequence grabber component
result = SGInitialize (capture->grabber);
OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldnt initialize sequence grabber");
OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldn't initialize sequence grabber");
result = SGSetDataRef (capture->grabber, 0, 0, seqGrabDontMakeMovie);
OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldnt set data reference of sequence grabber");
OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldn't set data reference of sequence grabber");
// set up video channel
result = SGNewChannel (capture->grabber, VideoMediaType, & (capture->channel));
OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldnt create new video channel");
OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldn't create new video channel");
// select the camera indicated by index
SGDeviceList device_list = 0;
result = SGGetChannelDeviceList (capture->channel, 0, & device_list);
OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldnt get channel device list");
OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldn't get channel device list");
for (int i = 0, current_index = 1; i < (*device_list)->count; i++)
{
SGDeviceName device = (*device_list)->entry[i];
@ -943,33 +943,33 @@ static int icvOpenCamera_QT (CvCapture_QT_Cam * capture, const int index)
if (current_index == index)
{
result = SGSetChannelDevice (capture->channel, device.name);
OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldnt set the channel video device");
OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldn't set the channel video device");
break;
}
current_index++;
}
}
result = SGDisposeDeviceList (capture->grabber, device_list);
OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldnt dispose the channel device list");
OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldn't dispose the channel device list");
// query natural camera resolution -- this will be wrong, but will be an upper
// bound on the actual resolution -- the actual resolution is set below
// after starting the frame grabber
result = SGGetSrcVideoBounds (capture->channel, & (capture->bounds));
OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldnt set video channel bounds");
OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldn't set video channel bounds");
// create offscreen GWorld
result = QTNewGWorld (& (capture->gworld), k32ARGBPixelFormat, & (capture->bounds), 0, 0, 0);
result = SGSetGWorld (capture->grabber, capture->gworld, 0);
OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldnt set GWorld for sequence grabber");
OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldn't set GWorld for sequence grabber");
result = SGSetChannelBounds (capture->channel, & (capture->bounds));
OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldnt set video channel bounds");
OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldn't set video channel bounds");
result = SGSetChannelUsage (capture->channel, seqGrabRecord);
OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldnt set channel usage");
OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldn't set channel usage");
// start recording so we can size
result = SGStartRecord (capture->grabber);
OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldnt start recording");
OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldn't start recording");
// don't know *actual* resolution until now
ImageDescriptionHandle imageDesc = (ImageDescriptionHandle)NewHandle(0);
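The grabber's true output size is then read back through the channel's sample description; a sketch, assuming the width/height fields of ImageDescription carry the delivered frame size:

// shrink our bounds to the size the grabber actually delivers
result = SGGetChannelSampleDescription (capture->channel, (Handle) imageDesc);
OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldn't get sample description");
capture->bounds.right = (**imageDesc).width;
capture->bounds.bottom = (**imageDesc).height;
DisposeHandle ((Handle) imageDesc);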
@ -981,19 +981,19 @@ static int icvOpenCamera_QT (CvCapture_QT_Cam * capture, const int index)
// stop grabber so that we can reset the parameters to the right size
result = SGStop (capture->grabber);
OPENCV_ASSERT (result == noErr, "icveClose_QT_Cam", "couldnt stop recording");
OPENCV_ASSERT (result == noErr, "icveClose_QT_Cam", "couldn't stop recording");
// reset GWorld to correct image size
GWorldPtr tmpgworld;
result = QTNewGWorld( &tmpgworld, k32ARGBPixelFormat, &(capture->bounds), 0, 0, 0);
OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldnt create offscreen GWorld");
OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldn't create offscreen GWorld");
result = SGSetGWorld( capture->grabber, tmpgworld, 0);
OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldnt set GWorld for sequence grabber");
OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldn't set GWorld for sequence grabber");
DisposeGWorld( capture->gworld );
capture->gworld = tmpgworld;
result = SGSetChannelBounds (capture->channel, & (capture->bounds));
OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldnt set video channel bounds");
OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldn't set video channel bounds");
// allocate images
capture->size = cvSize (capture->bounds.right - capture->bounds.left, capture->bounds.bottom - capture->bounds.top);
@ -1003,7 +1003,7 @@ static int icvOpenCamera_QT (CvCapture_QT_Cam * capture, const int index)
// so we shift the base address by one byte.
// ATTENTION: don't access the last pixel's alpha entry, it's nonexistent
capture->image_rgb = cvCreateImageHeader (capture->size, IPL_DEPTH_8U, 4);
OPENCV_ASSERT (capture->image_rgb, "icvOpenCamera_QT", "couldnt create image header");
OPENCV_ASSERT (capture->image_rgb, "icvOpenCamera_QT", "couldn't create image header");
pixmap = GetGWorldPixMap (capture->gworld);
OPENCV_ASSERT (pixmap, "icvOpenCamera_QT", "didn't get GWorld PixMap handle");
LockPixels (pixmap);
@ -1011,16 +1011,16 @@ static int icvOpenCamera_QT (CvCapture_QT_Cam * capture, const int index)
// create an IplImage that holds the correctly formatted result
capture->image_bgr = cvCreateImage (capture->size, IPL_DEPTH_8U, 3);
OPENCV_ASSERT (capture->image_bgr, "icvOpenCamera_QT", "couldnt create image");
OPENCV_ASSERT (capture->image_bgr, "icvOpenCamera_QT", "couldn't create image");
// tell the sequence grabber to invoke our data proc
result = SGSetDataProc (capture->grabber, NewSGDataUPP (icvDataProc_QT_Cam), (long) capture);
OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldnt set data proc");
OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldn't set data proc");
// start recording
result = SGStartRecord (capture->grabber);
OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldnt start recording");
OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldn't start recording");
return 1;
}
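For context, a minimal sketch of how this backend is driven through the legacy highgui C API (camera index 0 assumed):

CvCapture * capture = cvCaptureFromCAM (0); // routes to icvOpenCamera_QT on QuickTime builds
IplImage * frame = cvQueryFrame (capture);  // grab and retrieve one BGR frame
// ... the frame is owned by the capture, don't release it ...
cvReleaseCapture (& capture);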
@ -1035,11 +1035,11 @@ static int icvClose_QT_Cam (CvCapture_QT_Cam * capture)
// stop recording
result = SGStop (capture->grabber);
OPENCV_ASSERT (result == noErr, "icveClose_QT_Cam", "couldnt stop recording");
OPENCV_ASSERT (result == noErr, "icveClose_QT_Cam", "couldn't stop recording");
// close sequence grabber component
result = CloseComponent (capture->grabber);
OPENCV_ASSERT (result == noErr, "icveClose_QT_Cam", "couldnt close sequence grabber component");
OPENCV_ASSERT (result == noErr, "icveClose_QT_Cam", "couldn't close sequence grabber component");
// end decompression sequence
CDSequenceEnd (capture->sequence);
@ -1049,7 +1049,7 @@ static int icvClose_QT_Cam (CvCapture_QT_Cam * capture)
cvReleaseImageHeader (& capture->image_rgb);
DisposeGWorld (capture->gworld);
// sucessful
// successful
return 1;
}

View File

@ -61,7 +61,7 @@ Second Patch: August 28, 2004 Sfuncia Fabio fiblan@yahoo.it
For Release: OpenCV-Linux Beta4 Opencv-0.9.6
FS: this patch fixes non-sequential device indices (unplugged devices), and the real numCameras.
for -1 index (icvOpenCAM_V4L) i dont use /dev/video but real device available, because
for -1 index (icvOpenCAM_V4L) I don't use /dev/video but real device available, because
if /dev/video is a link to /dev/video0 and I unplugged the device on /dev/video0, /dev/video
is a dangling link. I search for the first available device with indexList.
@ -159,7 +159,7 @@ the symptoms were damaged image and 'Corrupt JPEG data: premature end of data se
11th patch: April 2, 2013, Forrest Reiling forrest.reiling@gmail.com
Added v4l2 support for getting capture property CV_CAP_PROP_POS_MSEC.
Returns the millisecond timestamp of the last frame grabbed or 0 if no frames have been grabbed
Used to successfully synchonize 2 Logitech C310 USB webcams to within 16 ms of one another
Used to successfully synchronize 2 Logitech C310 USB webcams to within 16 ms of one another
make & enjoy!
@ -231,7 +231,7 @@ make & enjoy!
#endif
#ifdef HAVE_VIDEOIO
// NetBSD compability layer with V4L2
// NetBSD compatibility layer with V4L2
#include <sys/videoio.h>
#endif
@ -398,7 +398,7 @@ static bool try_palette_v4l2(CvCaptureCAM_V4L* capture)
static int try_init_v4l2(CvCaptureCAM_V4L* capture, const char *deviceName)
{
// Test device for V4L2 compability
// Test device for V4L2 compatibility
// Return value:
// -1 then unable to open device
// 0 then detected nothing
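A minimal sketch of such a probe, assuming VIDIOC_QUERYCAP is the compatibility test, that a positive return means a V4L2 device was detected, and the usual <linux/videodev2.h>/<fcntl.h>/<sys/ioctl.h> includes (simplified relative to the real function):

int fd = open (deviceName, O_RDWR);
if (fd < 0)
    return -1; // unable to open device
struct v4l2_capability cap;
memset (& cap, 0, sizeof (cap));
if (ioctl (fd, VIDIOC_QUERYCAP, & cap) < 0)
{
    close (fd);
    return 0; // opened, but did not respond as a V4L2 device
}
close (fd);
return 1; // V4L2 device detected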
@ -786,7 +786,7 @@ bool CvCaptureCAM_V4L::open(int _index)
char _deviceName[MAX_DEVICE_DRIVER_NAME];
if (!numCameras)
icvInitCapture_V4L(); /* Havent called icvInitCapture yet - do it now! */
icvInitCapture_V4L(); /* Haven't called icvInitCapture yet - do it now! */
if (!numCameras)
return false; /* Are there any /dev/video input sources? */

View File

@ -1707,7 +1707,7 @@ void CvCaptureCAM_XIMEA::errMsg(const char* msg, int errNum) const
case XI_WRITEREG : error_message = "Register write error"; break;
case XI_FREE_RESOURCES : error_message = "Freeing resources error"; break;
case XI_FREE_CHANNEL : error_message = "Freeing channel error"; break;
case XI_FREE_BANDWIDTH : error_message = "Freeing bandwith error"; break;
case XI_FREE_BANDWIDTH : error_message = "Freeing bandwidth error"; break;
case XI_READBLK : error_message = "Read block error"; break;
case XI_WRITEBLK : error_message = "Write block error"; break;
case XI_NO_IMAGE : error_message = "No image"; break;
@ -1744,22 +1744,22 @@ void CvCaptureCAM_XIMEA::errMsg(const char* msg, int errNum) const
case XI_ACQUISITION_ALREADY_UP : error_message = "Acquisition already started"; break;
case XI_OLD_DRIVER_VERSION : error_message = "Old version of device driver installed to the system."; break;
case XI_GET_LAST_ERROR : error_message = "To get error code please call GetLastError function."; break;
case XI_CANT_PROCESS : error_message = "Data cant be processed"; break;
case XI_CANT_PROCESS : error_message = "Data can't be processed"; break;
case XI_ACQUISITION_STOPED : error_message = "Acquisition has been stopped. It should be started before GetImage."; break;
case XI_ACQUISITION_STOPED_WERR : error_message = "Acquisition has been stoped with error."; break;
case XI_ACQUISITION_STOPED_WERR : error_message = "Acquisition has been stopped with error."; break;
case XI_INVALID_INPUT_ICC_PROFILE : error_message = "Input ICC profile missing or corrupted"; break;
case XI_INVALID_OUTPUT_ICC_PROFILE : error_message = "Output ICC profile missing or corrupted"; break;
case XI_DEVICE_NOT_READY : error_message = "Device not ready to operate"; break;
case XI_SHADING_TOOCONTRAST : error_message = "Shading too contrast"; break;
case XI_ALREADY_INITIALIZED : error_message = "Module already initialized"; break;
case XI_NOT_ENOUGH_PRIVILEGES : error_message = "Application doesnt enough privileges(one or more app"; break;
case XI_NOT_ENOUGH_PRIVILEGES : error_message = "Application doesn't enough privileges(one or more app"; break;
case XI_NOT_COMPATIBLE_DRIVER : error_message = "Installed driver not compatible with current software"; break;
case XI_TM_INVALID_RESOURCE : error_message = "TM file was not loaded successfully from resources"; break;
case XI_DEVICE_HAS_BEEN_RESETED : error_message = "Device has been reseted, abnormal initial state"; break;
case XI_DEVICE_HAS_BEEN_RESETED : error_message = "Device has been reset, abnormal initial state"; break;
case XI_NO_DEVICES_FOUND : error_message = "No Devices Found"; break;
case XI_RESOURCE_OR_FUNCTION_LOCKED : error_message = "Resource(device) or function locked by mutex"; break;
case XI_BUFFER_SIZE_TOO_SMALL : error_message = "Buffer provided by user is too small"; break;
case XI_COULDNT_INIT_PROCESSOR : error_message = "Couldnt initialize processor."; break;
case XI_COULDNT_INIT_PROCESSOR : error_message = "Couldn't initialize processor."; break;
case XI_NOT_INITIALIZED : error_message = "The object/module/procedure/process being referred to has not been started."; break;
case XI_RESOURCE_NOT_FOUND : error_message = "Resource not found (could be processor, file, item...)."; break;
case XI_UNKNOWN_PARAM : error_message = "Unknown parameter"; break;