Refactoring the image_pool for Android, and adding some common utils for camera configuration. Also experimenting with optimization - the grayscale preview is currently much faster than color.

Ethan Rublee 2010-11-27 07:59:22 +00:00
parent 077dd77757
commit 3a932b0f6c
25 changed files with 1635 additions and 952 deletions
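The headline API change in the diff below: image_pool now hands out cv::Mat headers by value instead of cv::Ptr<Mat>, and getGrey(i) returns the Y plane of the pooled YUV buffer directly instead of filling an output argument. A minimal sketch of the new calling convention (illustrative only, not part of the commit; process_frame and the frame index are hypothetical):

#include <opencv2/core/core.hpp>
#include "image_pool.h"

void process_frame(image_pool* pool, int idx)
{
  // getGrey returns a header sharing memory with the pooled YUV buffer - no copy
  cv::Mat grey = pool->getGrey(idx);
  if (grey.empty())
    return; // nothing has been captured at this index yet
  cv::Mat kept = grey.clone(); // clone() before holding a frame across callbacks
}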

View File

@@ -11,6 +11,10 @@
regular Android project.
-->
<activity android:name="com.opencv.OpenCV" />
<activity android:name="com.opencv.calibration.ChessBoardChooser"/>
<activity android:name="com.opencv.calibration.CameraConfig"/>
<activity android:name="com.opencv.calibration.CalibrationViewer"/>
<service android:name="com.opencv.calibration.services.CalibrationService"/>
</application>
<!-- set the opengl version
<uses-feature android:glEsVersion="0x00020000" />-->

View File

@@ -12,6 +12,10 @@ $(info gedit $(LOCAL_ENV_MK))
$(error Please set up the $(LOCAL_ENV_MK) - the default was just created)
endif
ifndef ARM_TARGETS
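# default: build both ARMv5TE (armeabi) and ARMv7-A (armeabi-v7a); override ARM_TARGETS in local.env.mk or on the make command line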
ARM_TARGETS=armeabi armeabi-v7a
endif
ANDROID_NDK_BASE = $(ANDROID_NDK_ROOT)
$(info OPENCV_CONFIG = $(OPENCV_CONFIG))
@@ -44,7 +48,7 @@ all: $(LIB) nogdb
#calls the ndk-build script, passing it OPENCV_ROOT and OPENCV_LIBS_DIR
$(LIB): $(SWIG_C_OUT) $(SOURCES) $(HEADERS) $(ANDROID_MKS)
$(ANDROID_NDK_BASE)/ndk-build OPENCV_CONFIG=$(OPENCV_CONFIG) \
PROJECT_PATH=$(PROJECT_PATH) V=$(V) $(NDK_FLAGS)
PROJECT_PATH=$(PROJECT_PATH) ARM_TARGETS=$(ARM_TARGETS) V=$(V) $(NDK_FLAGS)
#this creates the swig wrappers
@@ -70,5 +74,5 @@ clean-swig:
#does clean-swig and then uses the ndk-build clean
clean: clean-swig
$(ANDROID_NDK_BASE)/ndk-build OPENCV_CONFIG=$(OPENCV_CONFIG) \
PROJECT_PATH=$(PROJECT_PATH) clean V=$(V) $(NDK_FLAGS)
PROJECT_PATH=$(PROJECT_PATH) clean ARM_TARGETS=$(ARM_TARGETS) V=$(V) $(NDK_FLAGS)

View File

@@ -1,2 +1,2 @@
APP_ABI := armeabi armeabi-v7a
APP_ABI := $(ARM_TARGETS)
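# APP_ABI now comes from the ARM_TARGETS variable passed in by the top-level Makefile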
APP_MODULES := android-opencv

View File

@@ -7,255 +7,240 @@
#include "Calibration.h"
#include <sys/stat.h>
using namespace cv;
Calibration::Calibration():patternsize(6,8)
Calibration::Calibration() :
patternsize(6, 8)
{
}
Calibration::~Calibration() {
Calibration::~Calibration()
{
}
namespace
{
double computeReprojectionErrors(
const vector<vector<Point3f> >& objectPoints, const vector<vector<
Point2f> >& imagePoints, const vector<Mat>& rvecs,
const vector<Mat>& tvecs, const Mat& cameraMatrix,
const Mat& distCoeffs, vector<float>& perViewErrors) {
vector<Point2f> imagePoints2;
int i, totalPoints = 0;
double totalErr = 0, err;
perViewErrors.resize(objectPoints.size());
double computeReprojectionErrors(const vector<vector<Point3f> >& objectPoints,
const vector<vector<Point2f> >& imagePoints, const vector<Mat>& rvecs, const vector<
Mat>& tvecs, const Mat& cameraMatrix, const Mat& distCoeffs,
vector<float>& perViewErrors)
{
vector<Point2f> imagePoints2;
int i, totalPoints = 0;
double totalErr = 0, err;
perViewErrors.resize(objectPoints.size());
for (i = 0; i < (int) objectPoints.size(); i++) {
projectPoints(Mat(objectPoints[i]), rvecs[i], tvecs[i], cameraMatrix,
distCoeffs, imagePoints2);
err = norm(Mat(imagePoints[i]), Mat(imagePoints2), CV_L1 );
int n = (int) objectPoints[i].size();
perViewErrors[i] = err / n;
totalErr += err;
totalPoints += n;
}
for (i = 0; i < (int)objectPoints.size(); i++)
{
projectPoints(Mat(objectPoints[i]), rvecs[i], tvecs[i], cameraMatrix, distCoeffs, imagePoints2);
err = norm(Mat(imagePoints[i]), Mat(imagePoints2), CV_L1);
int n = (int)objectPoints[i].size();
perViewErrors[i] = err / n;
totalErr += err;
totalPoints += n;
}
return totalErr / totalPoints;
return totalErr / totalPoints;
}
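// Note: perViewErrors[i] is the mean L1 reprojection residual of view i, so the
// value returned here is the point-count-weighted average over all views:
// totalErr / totalPoints == sum_i(n_i * perViewErrors[i]) / sum_i(n_i).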
void calcChessboardCorners(Size boardSize, float squareSize, vector<Point3f>& corners)
{
corners.resize(0);
void calcChessboardCorners(Size boardSize, float squareSize, vector<
Point3f>& corners) {
corners.resize(0);
for (int i = 0; i < boardSize.height; i++)
for (int j = 0; j < boardSize.width; j++)
corners.push_back(Point3f(float(j * squareSize), float(i
* squareSize), 0));
for (int i = 0; i < boardSize.height; i++)
for (int j = 0; j < boardSize.width; j++)
corners.push_back(Point3f(float(j * squareSize), float(i * squareSize), 0));
}
/**from opencv/samples/cpp/calibration.cpp
*
*/
bool runCalibration(vector<vector<Point2f> > imagePoints,
Size imageSize, Size boardSize, float squareSize, float aspectRatio,
int flags, Mat& cameraMatrix, Mat& distCoeffs, vector<Mat>& rvecs,
vector<Mat>& tvecs, vector<float>& reprojErrs, double& totalAvgErr) {
cameraMatrix = Mat::eye(3, 3, CV_64F);
if (flags & CV_CALIB_FIX_ASPECT_RATIO)
cameraMatrix.at<double> (0, 0) = aspectRatio;
bool runCalibration(vector<vector<Point2f> > imagePoints, Size imageSize, Size boardSize, float squareSize,
float aspectRatio, int flags, Mat& cameraMatrix, Mat& distCoeffs, vector<Mat>& rvecs,
vector<Mat>& tvecs, vector<float>& reprojErrs, double& totalAvgErr)
{
cameraMatrix = Mat::eye(3, 3, CV_64F);
if (flags & CV_CALIB_FIX_ASPECT_RATIO)
cameraMatrix.at<double> (0, 0) = aspectRatio;
distCoeffs = Mat::zeros(4, 1, CV_64F);
distCoeffs = Mat::zeros(4, 1, CV_64F);
vector<vector<Point3f> > objectPoints(1);
calcChessboardCorners(boardSize, squareSize, objectPoints[0]);
for (size_t i = 1; i < imagePoints.size(); i++)
objectPoints.push_back(objectPoints[0]);
vector<vector<Point3f> > objectPoints(1);
calcChessboardCorners(boardSize, squareSize, objectPoints[0]);
for (size_t i = 1; i < imagePoints.size(); i++)
objectPoints.push_back(objectPoints[0]);
calibrateCamera(objectPoints, imagePoints, imageSize, cameraMatrix,
distCoeffs, rvecs, tvecs, flags);
calibrateCamera(objectPoints, imagePoints, imageSize, cameraMatrix, distCoeffs, rvecs, tvecs, flags);
bool ok = checkRange(cameraMatrix, CV_CHECK_QUIET ) && checkRange(
distCoeffs, CV_CHECK_QUIET );
bool ok = checkRange(cameraMatrix, CV_CHECK_QUIET) && checkRange(distCoeffs, CV_CHECK_QUIET);
totalAvgErr = computeReprojectionErrors(objectPoints, imagePoints, rvecs,
tvecs, cameraMatrix, distCoeffs, reprojErrs);
totalAvgErr
= computeReprojectionErrors(objectPoints, imagePoints, rvecs, tvecs, cameraMatrix, distCoeffs, reprojErrs);
return ok;
return ok;
}
void saveCameraParams(const string& filename, Size imageSize, Size boardSize,
float squareSize, float aspectRatio, int flags,
const Mat& cameraMatrix, const Mat& distCoeffs,
const vector<Mat>& rvecs, const vector<Mat>& tvecs,
const vector<float>& reprojErrs,
const vector<vector<Point2f> >& imagePoints, double totalAvgErr) {
FileStorage fs(filename, FileStorage::WRITE);
void saveCameraParams(const string& filename, Size imageSize, Size boardSize, float squareSize, float aspectRatio,
int flags, const Mat& cameraMatrix, const Mat& distCoeffs, const vector<Mat>& rvecs,
const vector<Mat>& tvecs, const vector<float>& reprojErrs,
const vector<vector<Point2f> >& imagePoints, double totalAvgErr)
{
FileStorage fs(filename, FileStorage::WRITE);
time_t t;
time(&t);
struct tm *t2 = localtime(&t);
char buf[1024];
strftime(buf, sizeof(buf) - 1, "%c", t2);
time_t t;
time(&t);
struct tm *t2 = localtime(&t);
char buf[1024];
strftime(buf, sizeof(buf) - 1, "%c", t2);
fs << "calibration_time" << buf;
fs << "calibration_time" << buf;
if (!rvecs.empty() || !reprojErrs.empty())
fs << "nframes" << (int) std::max(rvecs.size(), reprojErrs.size());
fs << "image_width" << imageSize.width;
fs << "image_height" << imageSize.height;
fs << "board_width" << boardSize.width;
fs << "board_height" << boardSize.height;
fs << "squareSize" << squareSize;
if (!rvecs.empty() || !reprojErrs.empty())
fs << "nframes" << (int)std::max(rvecs.size(), reprojErrs.size());
fs << "image_width" << imageSize.width;
fs << "image_height" << imageSize.height;
fs << "board_width" << boardSize.width;
fs << "board_height" << boardSize.height;
fs << "squareSize" << squareSize;
if (flags & CV_CALIB_FIX_ASPECT_RATIO)
fs << "aspectRatio" << aspectRatio;
if (flags & CV_CALIB_FIX_ASPECT_RATIO)
fs << "aspectRatio" << aspectRatio;
if (flags != 0) {
sprintf(buf, "flags: %s%s%s%s",
flags & CV_CALIB_USE_INTRINSIC_GUESS ? "+use_intrinsic_guess"
: "",
flags & CV_CALIB_FIX_ASPECT_RATIO ? "+fix_aspectRatio" : "",
flags & CV_CALIB_FIX_PRINCIPAL_POINT ? "+fix_principal_point"
: "",
flags & CV_CALIB_ZERO_TANGENT_DIST ? "+zero_tangent_dist" : "");
cvWriteComment(*fs, buf, 0);
}
if (flags != 0)
{
sprintf(buf, "flags: %s%s%s%s", flags & CV_CALIB_USE_INTRINSIC_GUESS ? "+use_intrinsic_guess" : "", flags
& CV_CALIB_FIX_ASPECT_RATIO ? "+fix_aspectRatio" : "", flags & CV_CALIB_FIX_PRINCIPAL_POINT
? "+fix_principal_point" : "", flags & CV_CALIB_ZERO_TANGENT_DIST ? "+zero_tangent_dist" : "");
cvWriteComment(*fs, buf, 0);
}
fs << "flags" << flags;
fs << "flags" << flags;
fs << "camera_matrix" << cameraMatrix;
fs << "distortion_coefficients" << distCoeffs;
fs << "camera_matrix" << cameraMatrix;
fs << "distortion_coefficients" << distCoeffs;
fs << "avg_reprojection_error" << totalAvgErr;
if (!reprojErrs.empty())
fs << "per_view_reprojection_errors" << Mat(reprojErrs);
fs << "avg_reprojection_error" << totalAvgErr;
if (!reprojErrs.empty())
fs << "per_view_reprojection_errors" << Mat(reprojErrs);
if (!rvecs.empty() && !tvecs.empty()) {
Mat bigmat(rvecs.size(), 6, CV_32F);
for (size_t i = 0; i < rvecs.size(); i++) {
Mat r = bigmat(Range(i, i + 1), Range(0, 3));
Mat t = bigmat(Range(i, i + 1), Range(3, 6));
rvecs[i].copyTo(r);
tvecs[i].copyTo(t);
}
cvWriteComment(
*fs,
"a set of 6-tuples (rotation vector + translation vector) for each view",
0);
fs << "extrinsic_parameters" << bigmat;
}
if (!rvecs.empty() && !tvecs.empty())
{
Mat bigmat(rvecs.size(), 6, CV_32F);
for (size_t i = 0; i < rvecs.size(); i++)
{
Mat r = bigmat(Range(i, i + 1), Range(0, 3));
Mat t = bigmat(Range(i, i + 1), Range(3, 6));
rvecs[i].copyTo(r);
tvecs[i].copyTo(t);
}
cvWriteComment(*fs, "a set of 6-tuples (rotation vector + translation vector) for each view", 0);
fs << "extrinsic_parameters" << bigmat;
}
if (!imagePoints.empty()) {
Mat imagePtMat(imagePoints.size(), imagePoints[0].size(), CV_32FC2);
for (size_t i = 0; i < imagePoints.size(); i++) {
Mat r = imagePtMat.row(i).reshape(2, imagePtMat.cols);
Mat(imagePoints[i]).copyTo(r);
}
fs << "image_points" << imagePtMat;
}
if (!imagePoints.empty())
{
Mat imagePtMat(imagePoints.size(), imagePoints[0].size(), CV_32FC2);
for (size_t i = 0; i < imagePoints.size(); i++)
{
Mat r = imagePtMat.row(i).reshape(2, imagePtMat.cols);
Mat(imagePoints[i]).copyTo(r);
}
fs << "image_points" << imagePtMat;
}
}
}//anon namespace
bool Calibration::detectAndDrawChessboard(int idx,image_pool* pool) {
bool Calibration::detectAndDrawChessboard(int idx, image_pool* pool)
{
Mat grey;
pool->getGrey(idx, grey);
if (grey.empty())
return false;
vector<Point2f> corners;
Mat grey = pool->getGrey(idx);
if (grey.empty())
return false;
vector<Point2f> corners;
IplImage iplgrey = grey;
if (!cvCheckChessboard(&iplgrey, patternsize))
return false;
bool patternfound = findChessboardCorners(grey, patternsize, corners);
IplImage iplgrey = grey;
if (!cvCheckChessboard(&iplgrey, patternsize))
return false;
bool patternfound = findChessboardCorners(grey, patternsize, corners);
Mat img = pool->getImage(idx);
Mat * img = pool->getImage(idx);
if (corners.size() < 1)
return false;
if (corners.size() < 1)
return false;
cornerSubPix(grey, corners, Size(11, 11), Size(-1, -1), TermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 30, 0.1));
cornerSubPix(grey, corners, Size(11, 11), Size(-1, -1), TermCriteria(
CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 30, 0.1));
if (patternfound)
imagepoints.push_back(corners);
if(patternfound)
imagepoints.push_back(corners);
drawChessboardCorners(img, patternsize, Mat(corners), patternfound);
drawChessboardCorners(*img, patternsize, Mat(corners), patternfound);
imgsize = grey.size();
imgsize = grey.size();
return patternfound;
return patternfound;
}
void Calibration::drawText(int i, image_pool* pool, const char* ctext){
// Use "y" to show that the baseLine is about
string text = ctext;
int fontFace = FONT_HERSHEY_COMPLEX_SMALL;
double fontScale = .8;
int thickness = .5;
void Calibration::drawText(int i, image_pool* pool, const char* ctext)
{
// Use "y" to show that the baseLine is about
string text = ctext;
int fontFace = FONT_HERSHEY_COMPLEX_SMALL;
double fontScale = .8;
int thickness = .5;
Mat img = *pool->getImage(i);
Mat img = pool->getImage(i);
int baseline=0;
Size textSize = getTextSize(text, fontFace,
fontScale, thickness, &baseline);
baseline += thickness;
int baseline = 0;
Size textSize = getTextSize(text, fontFace, fontScale, thickness, &baseline);
baseline += thickness;
// center the text
Point textOrg((img.cols - textSize.width)/2,
(img.rows - textSize.height *2));
// center the text
Point textOrg((img.cols - textSize.width) / 2, (img.rows - textSize.height * 2));
// draw the box
rectangle(img, textOrg + Point(0, baseline),
textOrg + Point(textSize.width, -textSize.height),
Scalar(0,0,255),CV_FILLED);
// ... and the baseline first
line(img, textOrg + Point(0, thickness),
textOrg + Point(textSize.width, thickness),
Scalar(0, 0, 255));
// draw the box
rectangle(img, textOrg + Point(0, baseline), textOrg + Point(textSize.width, -textSize.height), Scalar(0, 0, 255),
CV_FILLED);
// ... and the baseline first
line(img, textOrg + Point(0, thickness), textOrg + Point(textSize.width, thickness), Scalar(0, 0, 255));
// then put the text itself
putText(img, text, textOrg, fontFace, fontScale,
Scalar::all(255), thickness, 8);
// then put the text itself
putText(img, text, textOrg, fontFace, fontScale, Scalar::all(255), thickness, 8);
}
void Calibration::resetChess() {
void Calibration::resetChess()
{
imagepoints.clear();
imagepoints.clear();
}
void Calibration::calibrate(const char* filename) {
void Calibration::calibrate(const char* filename)
{
vector<Mat> rvecs, tvecs;
vector<float> reprojErrs;
double totalAvgErr = 0;
int flags = 0;
flags |= CV_CALIB_FIX_PRINCIPAL_POINT | CV_CALIB_FIX_ASPECT_RATIO;
bool writeExtrinsics = true;
bool writePoints = true;
vector<Mat> rvecs, tvecs;
vector<float> reprojErrs;
double totalAvgErr = 0;
int flags = 0;
flags |= CV_CALIB_FIX_PRINCIPAL_POINT | CV_CALIB_FIX_ASPECT_RATIO;
bool writeExtrinsics = true;
bool writePoints = true;
bool ok = runCalibration(imagepoints, imgsize, patternsize, 1.f, 1.f,
flags, K, distortion, rvecs, tvecs, reprojErrs, totalAvgErr);
bool ok = runCalibration(imagepoints, imgsize, patternsize, 1.f, 1.f, flags, K, distortion, rvecs, tvecs, reprojErrs,
totalAvgErr);
if (ok)
{
if (ok){
saveCameraParams(filename, imgsize, patternsize, 1.f,
1.f, flags, K, distortion, writeExtrinsics ? rvecs
: vector<Mat> (), writeExtrinsics ? tvecs
: vector<Mat> (), writeExtrinsics ? reprojErrs
: vector<float> (), writePoints ? imagepoints : vector<
vector<Point2f> > (), totalAvgErr);
}
saveCameraParams(filename, imgsize, patternsize, 1.f, 1.f, flags, K, distortion, writeExtrinsics ? rvecs : vector<
Mat> (), writeExtrinsics ? tvecs : vector<Mat> (), writeExtrinsics ? reprojErrs : vector<float> (), writePoints
? imagepoints : vector<vector<Point2f> > (), totalAvgErr);
}
}
int Calibration::getNumberDetectedChessboards() {
return imagepoints.size();
int Calibration::getNumberDetectedChessboards()
{
return imagepoints.size();
}

View File

@@ -14,8 +14,6 @@
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <vector>
#include "image_pool.h"
@@ -24,36 +22,33 @@
#define DETECT_STAR 1
#define DETECT_SURF 2
class Calibration {
std::vector<cv::KeyPoint> keypoints;
vector<vector<Point2f> > imagepoints;
cv::Mat K;
cv::Mat distortion;
cv::Size imgsize;
class Calibration
{
public:
cv::Size patternsize;
Calibration();
virtual ~Calibration();
Calibration();
virtual ~Calibration();
bool detectAndDrawChessboard(int idx, image_pool* pool);
bool detectAndDrawChessboard(int idx, image_pool* pool);
void resetChess();
void resetChess();
int getNumberDetectedChessboards();
int getNumberDetectedChessboards();
void calibrate(const char* filename);
void calibrate(const char* filename);
void drawText(int idx, image_pool* pool, const char* text);
cv::Size patternsize;
private:
std::vector<cv::KeyPoint> keypoints;
std::vector<std::vector<cv::Point2f> > imagepoints;
cv::Mat K;
cv::Mat distortion;
cv::Size imgsize;
void drawText(int idx, image_pool* pool, const char* text);
};
#endif /* PROCESSOR_H_ */
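A rough sketch of how this class is driven (illustrative only; the callback, frame index, and output path below are hypothetical, not code from this commit): the camera layer pushes frames into an image_pool, detectAndDrawChessboard() accumulates corner detections, and calibrate() writes the parameters out once enough views have been captured.

#include "Calibration.h"
#include "image_pool.h"

void on_preview_frame(Calibration& calib, image_pool* pool, int idx)
{
  // draws any detected corners into the pooled color image as a side effect
  calib.detectAndDrawChessboard(idx, pool);
  // the UI asks for "at least 10 images of the pattern" before calibrating
  if (calib.getNumberDetectedChessboards() >= 10)
    calib.calibrate("/sdcard/camera.yml"); // hypothetical output path
}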

View File

@@ -37,273 +37,286 @@ using namespace cv;
#define LOGI(...) __android_log_print(ANDROID_LOG_INFO,LOG_TAG,__VA_ARGS__)
#define LOGE(...) __android_log_print(ANDROID_LOG_ERROR,LOG_TAG,__VA_ARGS__)
static void printGLString(const char *name, GLenum s) {
const char *v = (const char *) glGetString(s);
LOGI("GL %s = %s\n", name, v);
static void printGLString(const char *name, GLenum s)
{
const char *v = (const char *)glGetString(s);
LOGI("GL %s = %s\n", name, v);
}
static void checkGlError(const char* op) {
for (GLint error = glGetError(); error; error = glGetError()) {
LOGI("after %s() glError (0x%x)\n", op, error);
}
static void checkGlError(const char* op)
{
for (GLint error = glGetError(); error; error = glGetError())
{
LOGI("after %s() glError (0x%x)\n", op, error);
}
}
static const char gVertexShader[] = "attribute vec4 a_position; \n"
"attribute vec2 a_texCoord; \n"
"varying vec2 v_texCoord; \n"
"void main() \n"
"{ \n"
" gl_Position = a_position; \n"
" v_texCoord = a_texCoord; \n"
"} \n";
"attribute vec2 a_texCoord; \n"
"varying vec2 v_texCoord; \n"
"void main() \n"
"{ \n"
" gl_Position = a_position; \n"
" v_texCoord = a_texCoord; \n"
"} \n";
static const char gFragmentShader[] =
"precision mediump float; \n"
"varying vec2 v_texCoord; \n"
"uniform sampler2D s_texture; \n"
"void main() \n"
"{ \n"
" gl_FragColor = texture2D( s_texture, v_texCoord );\n"
"} \n";
static const char gFragmentShader[] = "precision mediump float; \n"
"varying vec2 v_texCoord; \n"
"uniform sampler2D s_texture; \n"
"void main() \n"
"{ \n"
" gl_FragColor = texture2D( s_texture, v_texCoord );\n"
"} \n";
const GLfloat gTriangleVertices[] = { 0.0f, 0.5f, -0.5f, -0.5f, 0.5f, -0.5f };
GLubyte testpixels[4 * 3] = { 255, 0, 0, // Red
0, 255, 0, // Green
0, 0, 255, // Blue
255, 255, 0 // Yellow
};
const GLfloat gTriangleVertices[] = {0.0f, 0.5f, -0.5f, -0.5f, 0.5f, -0.5f};
GLubyte testpixels[4 * 3] = {255, 0, 0, // Red
0, 255, 0, // Green
0, 0, 255, // Blue
255, 255, 0 // Yellow
};
GLuint glcamera::createSimpleTexture2D(GLuint _textureid, GLubyte* pixels,
int width, int height, int channels) {
GLuint glcamera::createSimpleTexture2D(GLuint _textureid, GLubyte* pixels, int width, int height, int channels)
{
// Bind the texture
glActiveTexture(GL_TEXTURE0);
checkGlError("glActiveTexture");
// Bind the texture object
glBindTexture(GL_TEXTURE_2D, _textureid);
checkGlError("glBindTexture");
// Bind the texture
glActiveTexture( GL_TEXTURE0);
checkGlError("glActiveTexture");
// Bind the texture object
glBindTexture(GL_TEXTURE_2D, _textureid);
checkGlError("glBindTexture");
GLenum format;
switch (channels) {
case 3:
format = GL_RGB;
break;
case 1:
format = GL_LUMINANCE;
break;
case 4:
format = GL_RGBA;
break;
}
// Load the texture
glTexImage2D(GL_TEXTURE_2D, 0, format, width, height, 0, format,
GL_UNSIGNED_BYTE, pixels);
GLenum format;
switch (channels)
{
case 3:
format = GL_RGB;
break;
case 1:
format = GL_LUMINANCE;
break;
case 4:
format = GL_RGBA;
break;
}
// Load the texture
glTexImage2D(GL_TEXTURE_2D, 0, format, width, height, 0, format, GL_UNSIGNED_BYTE, pixels);
checkGlError("glTexImage2D");
// Set the filtering mode
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST );
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST );
checkGlError("glTexImage2D");
// Set the filtering mode
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
return _textureid;
return _textureid;
}
GLuint glcamera::loadShader(GLenum shaderType, const char* pSource) {
GLuint shader = glCreateShader(shaderType);
if (shader) {
glShaderSource(shader, 1, &pSource, NULL);
glCompileShader(shader);
GLint compiled = 0;
glGetShaderiv(shader, GL_COMPILE_STATUS, &compiled);
if (!compiled) {
GLint infoLen = 0;
glGetShaderiv(shader, GL_INFO_LOG_LENGTH, &infoLen);
if (infoLen) {
char* buf = (char*) malloc(infoLen);
if (buf) {
glGetShaderInfoLog(shader, infoLen, NULL, buf);
LOGE("Could not compile shader %d:\n%s\n",
shaderType, buf);
free(buf);
}
glDeleteShader(shader);
shader = 0;
}
}
}
return shader;
GLuint glcamera::loadShader(GLenum shaderType, const char* pSource)
{
GLuint shader = glCreateShader(shaderType);
if (shader)
{
glShaderSource(shader, 1, &pSource, NULL);
glCompileShader(shader);
GLint compiled = 0;
glGetShaderiv(shader, GL_COMPILE_STATUS, &compiled);
if (!compiled)
{
GLint infoLen = 0;
glGetShaderiv(shader, GL_INFO_LOG_LENGTH, &infoLen);
if (infoLen)
{
char* buf = (char*)malloc(infoLen);
if (buf)
{
glGetShaderInfoLog(shader, infoLen, NULL, buf);
LOGE("Could not compile shader %d:\n%s\n",
shaderType, buf);
free(buf);
}
glDeleteShader(shader);
shader = 0;
}
}
}
return shader;
}
GLuint glcamera::createProgram(const char* pVertexSource,
const char* pFragmentSource) {
GLuint vertexShader = loadShader(GL_VERTEX_SHADER, pVertexSource);
if (!vertexShader) {
return 0;
}
GLuint glcamera::createProgram(const char* pVertexSource, const char* pFragmentSource)
{
GLuint vertexShader = loadShader(GL_VERTEX_SHADER, pVertexSource);
if (!vertexShader)
{
return 0;
}
GLuint pixelShader = loadShader(GL_FRAGMENT_SHADER, pFragmentSource);
if (!pixelShader) {
return 0;
}
GLuint pixelShader = loadShader(GL_FRAGMENT_SHADER, pFragmentSource);
if (!pixelShader)
{
return 0;
}
GLuint program = glCreateProgram();
if (program) {
glAttachShader(program, vertexShader);
checkGlError("glAttachShader");
glAttachShader(program, pixelShader);
checkGlError("glAttachShader");
glLinkProgram(program);
GLint linkStatus = GL_FALSE;
glGetProgramiv(program, GL_LINK_STATUS, &linkStatus);
if (linkStatus != GL_TRUE) {
GLint bufLength = 0;
glGetProgramiv(program, GL_INFO_LOG_LENGTH, &bufLength);
if (bufLength) {
char* buf = (char*) malloc(bufLength);
if (buf) {
glGetProgramInfoLog(program, bufLength, NULL, buf);
LOGE("Could not link program:\n%s\n", buf);
free(buf);
}
}
glDeleteProgram(program);
program = 0;
}
}
return program;
GLuint program = glCreateProgram();
if (program)
{
glAttachShader(program, vertexShader);
checkGlError("glAttachShader");
glAttachShader(program, pixelShader);
checkGlError("glAttachShader");
glLinkProgram(program);
GLint linkStatus = GL_FALSE;
glGetProgramiv(program, GL_LINK_STATUS, &linkStatus);
if (linkStatus != GL_TRUE)
{
GLint bufLength = 0;
glGetProgramiv(program, GL_INFO_LOG_LENGTH, &bufLength);
if (bufLength)
{
char* buf = (char*)malloc(bufLength);
if (buf)
{
glGetProgramInfoLog(program, bufLength, NULL, buf);
LOGE("Could not link program:\n%s\n", buf);
free(buf);
}
}
glDeleteProgram(program);
program = 0;
}
}
return program;
}
//GLuint textureID;
bool glcamera::setupGraphics(int w, int h) {
printGLString("Version", GL_VERSION);
printGLString("Vendor", GL_VENDOR);
printGLString("Renderer", GL_RENDERER);
printGLString("Extensions", GL_EXTENSIONS);
bool glcamera::setupGraphics(int w, int h)
{
printGLString("Version", GL_VERSION);
printGLString("Vendor", GL_VENDOR);
printGLString("Renderer", GL_RENDERER);
printGLString("Extensions", GL_EXTENSIONS);
LOGI("setupGraphics(%d, %d)", w, h);
gProgram = createProgram(gVertexShader, gFragmentShader);
if (!gProgram) {
LOGE("Could not create program.");
return false;
}
gvPositionHandle = glGetAttribLocation(gProgram, "a_position");
gvTexCoordHandle = glGetAttribLocation(gProgram, "a_texCoord");
LOGI("setupGraphics(%d, %d)", w, h);
gProgram = createProgram(gVertexShader, gFragmentShader);
if (!gProgram)
{
LOGE("Could not create program.");
return false;
}
gvPositionHandle = glGetAttribLocation(gProgram, "a_position");
gvTexCoordHandle = glGetAttribLocation(gProgram, "a_texCoord");
gvSamplerHandle = glGetAttribLocation(gProgram, "s_texture");
gvSamplerHandle = glGetAttribLocation(gProgram, "s_texture");
// Use tightly packed data
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
// Use tightly packed data
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
// Generate a texture object
glGenTextures(1, &textureID);
textureID = createSimpleTexture2D(textureID, testpixels, 2, 2, 3);
// Generate a texture object
glGenTextures(1, &textureID);
textureID = createSimpleTexture2D(textureID, testpixels, 2, 2, 3);
checkGlError("glGetAttribLocation");
LOGI("glGetAttribLocation(\"vPosition\") = %d\n",
gvPositionHandle);
checkGlError("glGetAttribLocation");
LOGI("glGetAttribLocation(\"vPosition\") = %d\n",
gvPositionHandle);
glViewport(0, 0, w, h);
checkGlError("glViewport");
return true;
glViewport(0, 0, w, h);
checkGlError("glViewport");
return true;
}
void glcamera::renderFrame() {
void glcamera::renderFrame()
{
GLfloat vVertices[] = { -1.0f, 1.0f, 0.0f, // Position 0
0.0f, 0.0f, // TexCoord 0
-1.0f, -1.0f, 0.0f, // Position 1
0.0f, 1.0f, // TexCoord 1
1.0f, -1.0f, 0.0f, // Position 2
1.0f, 1.0f, // TexCoord 2
1.0f, 1.0f, 0.0f, // Position 3
1.0f, 0.0f // TexCoord 3
};
GLushort indices[] = { 0, 1, 2, 0, 2, 3 };
GLsizei stride = 5 * sizeof(GLfloat); // 3 for position, 2 for texture
GLfloat vVertices[] = {-1.0f, 1.0f, 0.0f, // Position 0
0.0f, 0.0f, // TexCoord 0
-1.0f, -1.0f, 0.0f, // Position 1
0.0f, 1.0f, // TexCoord 1
1.0f, -1.0f, 0.0f, // Position 2
1.0f, 1.0f, // TexCoord 2
1.0f, 1.0f, 0.0f, // Position 3
1.0f, 0.0f // TexCoord 3
};
GLushort indices[] = {0, 1, 2, 0, 2, 3};
GLsizei stride = 5 * sizeof(GLfloat); // 3 for position, 2 for texture
glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
checkGlError("glClearColor");
glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
checkGlError("glClearColor");
glClear(GL_DEPTH_BUFFER_BIT | GL_COLOR_BUFFER_BIT);
checkGlError("glClear");
glClear(GL_DEPTH_BUFFER_BIT | GL_COLOR_BUFFER_BIT);
checkGlError("glClear");
glUseProgram(gProgram);
checkGlError("glUseProgram");
glUseProgram(gProgram);
checkGlError("glUseProgram");
// Load the vertex position
glVertexAttribPointer(gvPositionHandle, 3, GL_FLOAT, GL_FALSE, stride,
vVertices);
// Load the texture coordinate
glVertexAttribPointer(gvTexCoordHandle, 2, GL_FLOAT, GL_FALSE, stride,
&vVertices[3]);
// Load the vertex position
glVertexAttribPointer(gvPositionHandle, 3, GL_FLOAT, GL_FALSE, stride, vVertices);
// Load the texture coordinate
glVertexAttribPointer(gvTexCoordHandle, 2, GL_FLOAT, GL_FALSE, stride, &vVertices[3]);
glEnableVertexAttribArray(gvPositionHandle);
glEnableVertexAttribArray(gvTexCoordHandle);
glEnableVertexAttribArray(gvPositionHandle);
glEnableVertexAttribArray(gvTexCoordHandle);
// Bind the texture
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, textureID);
// Bind the texture
glActiveTexture( GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, textureID);
// Set the sampler texture unit to 0
glUniform1i(gvSamplerHandle, 0);
// Set the sampler texture unit to 0
glUniform1i(gvSamplerHandle, 0);
glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_SHORT, indices);
glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_SHORT, indices);
//checkGlError("glVertexAttribPointer");
//glEnableVertexAttribArray(gvPositionHandle);
//checkGlError("glEnableVertexAttribArray");
//glDrawArrays(GL_TRIANGLES, 0, 3);
//checkGlError("glDrawArrays");
//checkGlError("glVertexAttribPointer");
//glEnableVertexAttribArray(gvPositionHandle);
//checkGlError("glEnableVertexAttribArray");
//glDrawArrays(GL_TRIANGLES, 0, 3);
//checkGlError("glDrawArrays");
}
void glcamera::init(int width, int height) {
newimage = false;
nimg = Mat();
setupGraphics(width, height);
void glcamera::init(int width, int height)
{
newimage = false;
nimg = Mat();
setupGraphics(width, height);
}
void glcamera::step() {
if (newimage && !nimg.empty()) {
void glcamera::step()
{
if (newimage && !nimg.empty())
{
textureID = createSimpleTexture2D(textureID,
nimg.ptr<unsigned char> (0), nimg.rows, nimg.cols,
nimg.channels());
newimage = false;
}
renderFrame();
textureID = createSimpleTexture2D(textureID, nimg.ptr<unsigned char> (0), nimg.rows, nimg.cols, nimg.channels());
newimage = false;
}
renderFrame();
}
#define NEAREST_POW2(x)((int)(0.5 + std::log(x)/0.69315) )
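// ln(2) ~= 0.69315, so NEAREST_POW2(x) rounds log2(x) to the nearest integer -
// the power-of-two exponent, not the power itself. Apparently unused now that
// setTextureImage fixes the texture size at 256.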
void glcamera::setTextureImage(const Mat& img)
{
Size size(256, 256);
resize(img, nimg, size, cv::INTER_NEAREST);
newimage = true;
}
void glcamera::drawMatToGL(int idx, image_pool* pool)
{
Mat img = pool->getImage(idx);
if (img.empty())
return; //no image at input_idx!
setTextureImage(img);
}
void glcamera::setTextureImage(Ptr<Mat> img) {
  //int p2 = (int)(std::log(img->size().width)/0.69315);
  int sz = 256;//std::pow(2,p2);
  Size size(sz, sz);
  resize(*img, nimg, size,cv::INTER_NEAREST);
  newimage = true;
}
void glcamera::drawMatToGL(int idx, image_pool* pool) {
  Ptr<Mat> img = pool->getImage(idx);
  if (img.empty())
    return; //no image at input_idx!
  setTextureImage(img);
}
glcamera::glcamera():newimage(false) {
  LOGI("glcamera constructor");
}
glcamera::~glcamera() {
  LOGI("glcamera destructor");
}
glcamera::glcamera() :
  newimage(false)
{
  LOGI("glcamera constructor");
}
glcamera::~glcamera()
{
  LOGI("glcamera destructor");
}
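A note on the fixed 256x256 texture in setTextureImage: OpenGL ES 2.0 only guarantees non-power-of-two textures with clamp-to-edge wrapping and no mipmaps, and the wrap mode is left at its GL_REPEAT default here, so resizing every frame to a power-of-two size is the portable choice. If the size ever needs to track the input, something like this rounds up to the next power of two (illustrative, not in the diff):

static int next_pow2(int x)
{
  int p = 1;
  while (p < x)
    p <<= 1; // e.g. 300 -> 512, 256 -> 256
  return p;
}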

View File

@@ -6,35 +6,34 @@
#include <GLES2/gl2ext.h>
#include "image_pool.h"
class glcamera {
Mat nimg;
bool newimage;
GLuint textureID;
GLuint gProgram;
GLuint gvPositionHandle;
GLuint gvTexCoordHandle;
GLuint gvSamplerHandle;
class glcamera
{
public:
glcamera();
~glcamera();
void init(int width, int height);
void step();
glcamera();
~glcamera();
void init(int width, int height);
void step();
void drawMatToGL(int idx, image_pool* pool);
void setTextureImage(Ptr<Mat> img);
void drawMatToGL(int idx, image_pool* pool);
void setTextureImage(const cv::Mat& img);
private:
GLuint createSimpleTexture2D(GLuint _textureid, GLubyte* pixels, int width,
int height, int channels);
GLuint loadShader(GLenum shaderType, const char* pSource);
GLuint
createProgram(const char* pVertexSource,
const char* pFragmentSource);
bool setupGraphics(int w, int h);
void renderFrame();
GLuint createSimpleTexture2D(GLuint _textureid, GLubyte* pixels, int width, int height, int channels);
GLuint loadShader(GLenum shaderType, const char* pSource);
GLuint
createProgram(const char* pVertexSource, const char* pFragmentSource);
bool setupGraphics(int w, int h);
void renderFrame();
cv::Mat nimg;
bool newimage;
GLuint textureID;
GLuint gProgram;
GLuint gvPositionHandle;
GLuint gvTexCoordHandle;
GLuint gvSamplerHandle;
};
#endif

View File

@@ -5,92 +5,97 @@
#include <android/log.h>
#include <opencv2/imgproc/imgproc.hpp>
using namespace cv;
#define LOG_TAG "libandroid-opencv"
#define LOGI(...) __android_log_print(ANDROID_LOG_INFO,LOG_TAG,__VA_ARGS__)
#define LOGE(...) __android_log_print(ANDROID_LOG_ERROR,LOG_TAG,__VA_ARGS__)
JNIEXPORT jint JNI_OnLoad(JavaVM* vm, void* reserved)
{
JNIEnv *env;
LOGI("JNI_OnLoad called for opencv");
return JNI_VERSION_1_4;
JNIEnv *env;
LOGI("JNI_OnLoad called for opencv");
return JNI_VERSION_1_4;
}
JNIEXPORT void JNICALL Java_com_opencv_jni_opencvJNI_addYUVtoPool(JNIEnv * env,
    jclass thiz, jlong ppool, jobject _jpool, jbyteArray jbuffer,
    jint jidx, jint jwidth, jint jheight, jboolean jgrey) {
  image_pool *pool = (image_pool *) ppool;
  Ptr<Mat> mat = pool->getYUV(jidx);
  if (mat.empty() || mat->cols != jwidth || mat->rows != jheight * 2) {
    //pool->deleteGrey(jidx);
    mat = new Mat(jheight * 2, jwidth, CV_8UC1);
  }
  jsize sz = env->GetArrayLength(jbuffer);
  uchar* buff = mat->ptr<uchar> (0);
  env->GetByteArrayRegion(jbuffer, 0, sz, (jbyte*) buff);
  pool->addYUVMat(jidx, mat);
  Ptr<Mat> color = pool->getImage(jidx);
  if (color.empty() || color->cols != jwidth || color->rows != jheight) {
    //pool->deleteImage(jidx);
    color = new Mat(jheight, jwidth, CV_8UC3);
  }
  if (!jgrey) {
    //doesn't work unfortunately..
    //cvtColor(*mat,*color, CV_YCrCb2RGB);
    color_convert_common(buff, buff + jwidth * jheight, jwidth, jheight,
        color->ptr<uchar> (0), false);
  }
  if (jgrey) {
    Mat grey;
    pool->getGrey(jidx, grey);
    cvtColor(grey, *color, CV_GRAY2RGB);
  }
  pool->addImage(jidx, color);
}
JNIEXPORT void JNICALL Java_com_opencv_jni_opencvJNI_addYUVtoPool(JNIEnv * env,
    jclass thiz, jlong ppool, jobject _jpool, jbyteArray jbuffer,
    jint jidx, jint jwidth, jint jheight, jboolean jgrey)
{
  int buff_height = jheight + (jheight/2);
  Size buff_size(jwidth,buff_height);
  image_pool *pool = (image_pool *) ppool;
  Mat mat = pool->getYUV(jidx);
  if (mat.empty() || mat.size() != buff_size )
  {
    mat.create(buff_size, CV_8UC1);
  }
  jsize sz = env->GetArrayLength(jbuffer);
  uchar* buff = mat.ptr<uchar> (0);
  env->GetByteArrayRegion(jbuffer, 0, sz, (jbyte*) buff);
  pool->addYUVMat(jidx, mat);
  Mat color = pool->getImage(jidx);
  if (!jgrey)
  {
    if (color.cols != jwidth || color.rows != jheight || color.channels() != 3)
    {
      color.create(jheight, jwidth, CV_8UC3);
    }
    //doesn't work unfortunately..
    //TODO cvtColor(mat,color, CV_YCrCb2RGB);
    color_convert_common(buff, buff + jwidth * jheight, jwidth, jheight,
        color.ptr<uchar> (0), false);
  }
  if (jgrey)
  {
    Mat grey = pool->getGrey(jidx);
    color = grey;
  }
  pool->addImage(jidx, color);
}
}
image_pool::image_pool() {
image_pool::image_pool()
{
}
image_pool::~image_pool() {
__android_log_print(ANDROID_LOG_INFO, "image_pool", "destructor called");
image_pool::~image_pool()
{
__android_log_print(ANDROID_LOG_INFO, "image_pool", "destructor called");
}
cv::Ptr<Mat> image_pool::getImage(int i) {
return imagesmap[i];
Mat image_pool::getImage(int i)
{
return imagesmap[i];
}
void image_pool::getGrey(int i, Mat & grey) {
cv::Ptr<Mat> tm = yuvImagesMap[i];
if (tm.empty())
return;
grey = (*tm)(Range(0, tm->rows / 2), Range::all());
Mat image_pool::getGrey(int i)
{
Mat tm = yuvImagesMap[i];
if (tm.empty())
return tm;
return tm(Range(0, tm.rows * (2.0f/3)), Range::all());
}
cv::Ptr<Mat> image_pool::getYUV(int i) {
return yuvImagesMap[i];
Mat image_pool::getYUV(int i)
{
return yuvImagesMap[i];
}
void image_pool::addYUVMat(int i, cv::Ptr<Mat> mat) {
yuvImagesMap[i] = mat;
void image_pool::addYUVMat(int i, Mat mat)
{
yuvImagesMap[i] = mat;
}
void image_pool::addImage(int i, cv::Ptr<Mat> mat) {
imagesmap[i] = mat;
void image_pool::addImage(int i, Mat mat)
{
imagesmap[i] = mat;
}
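The geometry behind getGrey(): an Android NV21 (YUV420sp) preview buffer for a width x height frame stores height rows of Y followed by height/2 rows of interleaved chroma, so the pooled Mat is height + height/2 rows tall and the Y plane is exactly the top two thirds of it - the Range taken above. For example (illustrative numbers):

// 640x480 preview:
//   Y plane     : 480 rows of 640 bytes
//   chroma plane: 240 rows of 640 bytes (2x2-subsampled, V/U interleaved)
//   pooled Mat  : 720 rows -> grey = rows [0, 720 * 2/3) = rows [0, 480)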

View File

@@ -1,12 +1,14 @@
#ifndef IMAGE_POOL_H
#define IMAGE_POOL_H
#ifndef IMAGE_POOL_H_ANDROID_KDJFKJ
#define IMAGE_POOL_H_ANDROID_KDJFKJ
#include <opencv2/core/core.hpp>
#include <jni.h>
#include <map>
using namespace cv;
#if ANDROID
#include <jni.h>
#ifdef __cplusplus
extern "C" {
extern "C"
{
#endif
JNIEXPORT jint JNI_OnLoad(JavaVM* vm, void* reserved);
@@ -15,48 +17,48 @@ JNIEXPORT jint JNI_OnLoad(JavaVM* vm, void* reserved);
// JNIEnv *jenv, jclass jcls, jlong jarg1, jobject jarg1_);
JNIEXPORT void JNICALL Java_com_opencv_jni_opencvJNI_addYUVtoPool
(JNIEnv *, jclass, jlong, jobject, jbyteArray, jint, jint, jint, jboolean);
JNIEXPORT void JNICALL Java_com_opencv_jni_opencvJNI_addYUVtoPool(JNIEnv *, jclass, jlong, jobject, jbyteArray, jint,
jint, jint, jboolean);
#ifdef __cplusplus
}
#endif
#endif
class image_pool {
  std::map<int, Ptr< Mat> > imagesmap;
  std::map<int, Ptr< Mat> > yuvImagesMap;
  //uchar * mbuffer;
  //int length;
public:
  image_pool();
  ~image_pool();
  cv::Ptr<Mat> getImage(int i);
  void getGrey(int i, Mat & grey);
  cv::Ptr<Mat> getYUV(int i);
  int getCount(){
    return imagesmap.size();
  }
  /** Adds a mat at the given index - will not do a deep copy, just images[i] = mat
   */
  void addImage(int i, Ptr< Mat> mat);
  /** this function stores the given matrix in the yuvImagesMap. Also,
   * after this call getGrey will work, as the grey image is just the top
   * half of the YUV mat.
   * \param i index to store yuv image at
   * \param mat the yuv matrix to store
   */
  void addYUVMat(int i, Ptr< Mat> mat);
  int addYUV(uchar* buffer, int size, int width, int height, bool grey,int idx);
  void getBitmap(int * outintarray, int size, int idx);
};
//bool yuv2mat2(char *data, int size, int width, int height, bool grey, Mat& mat);
class image_pool
{
public:
  image_pool();
  ~image_pool();
  cv::Mat getImage(int i);
  cv::Mat getGrey(int i);
  cv::Mat getYUV(int i);
  int getCount()
  {
    return imagesmap.size();
  }
  /** Adds a mat at the given index - will not do a deep copy, just images[i] = mat
   */
  void addImage(int i, cv::Mat mat);
  /** this function stores the given matrix in the yuvImagesMap. Also,
   * after this call getGrey will work, as the grey image is just the Y (top)
   * plane of the YUV mat.
   * \param i index to store yuv image at
   * \param mat the yuv matrix to store
   */
  void addYUVMat(int i, cv::Mat mat);
  // int addYUV(uchar* buffer, int size, int width, int height, bool grey,int idx);
  //
  // void getBitmap(int * outintarray, int size, int idx);
private:
  std::map<int, cv::Mat> imagesmap;
  std::map<int, cv::Mat> yuvImagesMap;
};
#endif

View File

@@ -46,10 +46,8 @@ public:
~image_pool();
Ptr<Mat> getImage(int i);
void addImage(int i, Ptr< Mat> mat);
Mat getImage(int i);
void addImage(int i, Mat mat);

View File

@@ -1,98 +1,80 @@
#include <string.h>
#include <jni.h>
#include <yuv420sp2rgb.h>
#ifndef max
#define max(a,b) ({typeof(a) _a = (a); typeof(b) _b = (b); _a > _b ? _a : _b; })
#define min(a,b) ({typeof(a) _a = (a); typeof(b) _b = (b); _a < _b ? _a : _b; })
#endif
/*
 YUV 4:2:0 image with a plane of 8 bit Y samples followed by an interleaved
 U/V plane containing 8 bit 2x2 subsampled chroma samples,
 except that the interleave order of U and V is reversed.

                         H V
 Y Sample Period         1 1
 U (Cb) Sample Period    2 2
 V (Cr) Sample Period    2 2
 */
/*
size of a char:
find . -name limits.h -exec grep CHAR_BIT {} \;
*/
#ifndef max
#define max(a,b) ({typeof(a) _a = (a); typeof(b) _b = (b); _a > _b ? _a : _b; })
#define min(a,b) ({typeof(a) _a = (a); typeof(b) _b = (b); _a < _b ? _a : _b; })
#endif
const int bytes_per_pixel = 2;
void color_convert_common(
    unsigned char *pY, unsigned char *pUV,
    int width, int height, unsigned char *buffer,
    int grey)
{
  int i, j;
  int nR, nG, nB;
  int nY, nU, nV;
  unsigned char *out = buffer;
  int offset = 0;
  if(grey){
    for (i = 0; i < height; i++) {
      for (j = 0; j < width; j++) {
        unsigned char nB = *(pY + i * width + j);
        out[offset++] = (unsigned char)nB;
        // out[offset++] = (unsigned char)nB;
        // out[offset++] = (unsigned char)nB;
      }
    }
  }else
    // YUV 4:2:0
    for (i = 0; i < height; i++) {
      for (j = 0; j < width; j++) {
        nY = *(pY + i * width + j);
        nV = *(pUV + (i/2) * width + bytes_per_pixel * (j/2));
        nU = *(pUV + (i/2) * width + bytes_per_pixel * (j/2) + 1);
        // Yuv Convert
        nY -= 16;
        nU -= 128;
        nV -= 128;
        if (nY < 0)
          nY = 0;
        // nR = (int)(1.164 * nY + 2.018 * nU);
        // nG = (int)(1.164 * nY - 0.813 * nV - 0.391 * nU);
        // nB = (int)(1.164 * nY + 1.596 * nV);
        nB = (int)(1192 * nY + 2066 * nU);
        nG = (int)(1192 * nY - 833 * nV - 400 * nU);
        nR = (int)(1192 * nY + 1634 * nV);
        nR = min(262143, max(0, nR));
        nG = min(262143, max(0, nG));
        nB = min(262143, max(0, nB));
        nR >>= 10; nR &= 0xff;
        nG >>= 10; nG &= 0xff;
        nB >>= 10; nB &= 0xff;
        out[offset++] = (unsigned char)nR;
        out[offset++] = (unsigned char)nG;
        out[offset++] = (unsigned char)nB;
        //out[offset++] = 0xff; //set alpha for ARGB 8888 format
      }
      //offset = i * width * 3; //non power of two
      //offset = i * texture_size + j;//power of two
      //offset *= 3; //3 byte per pixel
      //out = buffer + offset;
    }
}
void color_convert_common(unsigned char *pY, unsigned char *pUV, int width, int height, unsigned char *buffer, int grey)
{
  int i, j;
  int nR, nG, nB;
  int nY, nU, nV;
  unsigned char *out = buffer;
  int offset = 0;
  if (grey)
  {
    memcpy(out,pY,width*height*sizeof(unsigned char));
  }
  else
    // YUV 4:2:0
    for (i = 0; i < height; i++)
    {
      for (j = 0; j < width; j++)
      {
        nY = *(pY + i * width + j);
        nV = *(pUV + (i / 2) * width + bytes_per_pixel * (j / 2));
        nU = *(pUV + (i / 2) * width + bytes_per_pixel * (j / 2) + 1);
        // Yuv Convert
        nY -= 16;
        nU -= 128;
        nV -= 128;
        if (nY < 0)
          nY = 0;
        nB = (int)(1192 * nY + 2066 * nU);
        nG = (int)(1192 * nY - 833 * nV - 400 * nU);
        nR = (int)(1192 * nY + 1634 * nV);
        nR = min(262143, max(0, nR));
        nG = min(262143, max(0, nG));
        nB = min(262143, max(0, nB));
        nR >>= 10;
        nR &= 0xff;
        nG >>= 10;
        nG &= 0xff;
        nB >>= 10;
        nB &= 0xff;
        out[offset++] = (unsigned char)nR;
        out[offset++] = (unsigned char)nG;
        out[offset++] = (unsigned char)nB;
      }
    }
}
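The integer coefficients above are the standard BT.601 video-range YUV-to-RGB constants scaled by 1024 (Q10 fixed point): 1.164*1024 ~= 1192, 2.018*1024 ~= 2066, 0.813*1024 ~= 833, 0.391*1024 ~= 400, 1.596*1024 ~= 1634. Clamping to 262143 = 2^18 - 1 bounds each channel to 255 once the >> 10 undoes the scaling. One pixel worked through (illustrative values):

// Y = 120, U = 150, V = 130
// nY = 104, nU = 22, nV = 2 after bias removal
// nR = 1192*104 + 1634*2          = 127236 -> >> 10 = 124
// nG = 1192*104 - 833*2 - 400*22  = 113502 -> >> 10 = 110
// nB = 1192*104 + 2066*22         = 169420 -> >> 10 = 165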

View File

@@ -0,0 +1,11 @@
<?xml version="1.0" encoding="utf-8"?>
<LinearLayout
xmlns:android="http://schemas.android.com/apk/res/android"
android:layout_width="fill_parent"
android:layout_height="fill_parent"
android:orientation="vertical"
android:gravity="center_vertical|center_horizontal">
<TextView android:scrollbars="vertical" android:id="@+id/calibtext" android:text="" android:layout_width="wrap_content"
android:layout_height="wrap_content" android:padding="20dip"/>
</LinearLayout>

View File

@@ -0,0 +1,40 @@
<?xml version="1.0" encoding="utf-8"?>
<LinearLayout
xmlns:android="http://schemas.android.com/apk/res/android"
android:layout_width="fill_parent"
android:layout_height="fill_parent"
android:orientation="vertical"
android:gravity="center_vertical|center_horizontal">
<TextView android:text="@string/settings_text" android:autoLink="web" android:layout_width="wrap_content"
android:layout_height="wrap_content" android:padding="20dip"/>
<LinearLayout android:id="@+id/LinearLayout01"
android:layout_width="wrap_content"
android:layout_height="wrap_content"
android:gravity="center_vertical">
<TextView android:layout_width="wrap_content"
android:layout_height="wrap_content"
android:text="@string/image_size_prompt"/>
<Spinner android:id="@+id/image_size"
android:layout_width="fill_parent"
android:layout_height="wrap_content"
android:saveEnabled="true"
android:prompt="@string/image_size_prompt"
android:entries="@array/image_sizes">
</Spinner>
</LinearLayout>
<LinearLayout android:id="@+id/LinearLayout01"
android:layout_width="wrap_content" android:layout_height="wrap_content"
android:gravity="center_vertical">
<TextView android:layout_width="wrap_content"
android:layout_height="wrap_content" android:text="@string/camera_mode_prompt"/>
<Spinner android:id="@+id/camera_mode"
android:layout_width="fill_parent"
android:layout_height="wrap_content"
android:saveEnabled="true"
android:prompt="@string/camera_mode_prompt"
android:entries="@array/camera_mode">
</Spinner>
</LinearLayout>
</LinearLayout>

View File

@@ -0,0 +1,40 @@
<?xml version="1.0" encoding="utf-8"?>
<LinearLayout
xmlns:android="http://schemas.android.com/apk/res/android"
android:layout_width="fill_parent"
android:layout_height="fill_parent"
android:orientation="vertical"
android:gravity="center_vertical|center_horizontal">
<TextView android:text="@string/patterntext" android:autoLink="web" android:layout_width="wrap_content"
android:layout_height="wrap_content" android:padding="20dip"/>
<LinearLayout android:id="@+id/LinearLayout01"
android:layout_width="wrap_content"
android:layout_height="wrap_content"
android:gravity="center_vertical">
<TextView android:layout_width="wrap_content"
android:layout_height="wrap_content"
android:text="Corners in width direction:"/>
<Spinner android:id="@+id/rows"
android:layout_width="fill_parent"
android:layout_height="wrap_content"
android:saveEnabled="true"
android:prompt="@string/chesspromptx"
android:entries="@array/chesssizes">
</Spinner>
</LinearLayout>
<LinearLayout android:id="@+id/LinearLayout01"
android:layout_width="wrap_content" android:layout_height="wrap_content"
android:gravity="center_vertical">
<TextView android:layout_width="wrap_content"
android:layout_height="wrap_content" android:text="Corners in height direction:"/>
<Spinner android:id="@+id/cols"
android:layout_width="fill_parent"
android:layout_height="wrap_content"
android:saveEnabled="true"
android:prompt="@string/chessprompty"
android:entries="@array/chesssizes">
</Spinner>
</LinearLayout>
</LinearLayout>

View File

@@ -0,0 +1,11 @@
<?xml version="1.0" encoding="utf-8"?>
<resources>
<declare-styleable name="CameraParams">
<attr name="preview_width" format="integer"/>
<attr name="preview_height" format="integer"/>
</declare-styleable>
</resources>

View File

@@ -0,0 +1,20 @@
<?xml version="1.0" encoding="utf-8"?>
<resources>
<string-array name="chesssizes">
<item>3</item>
<item>4</item>
<item>5</item>
<item>6</item>
<item>7</item>
<item>8</item>
<item>9</item>
<item>10</item>
<item>11</item>
<item>12</item>
<item>13</item>
</string-array>
<string name="chesspromptx">
Choose the width:</string>
<string name="chessprompty">
Choose the height:</string>
</resources>

View File

@@ -0,0 +1,20 @@
<?xml version="1.0" encoding="utf-8"?>
<resources>
<string-array name="image_sizes">
<item>320x240</item>
<item>400x300</item>
<item>600x400</item>
<item>800x600</item>
<item>1000x800</item>
</string-array>
<string-array name="camera_mode">
<item>color</item>
<item>BW</item>
</string-array>
<string name="image_size_prompt">
Image Size:\n(may not be exact)
</string>
<string name="camera_mode_prompt">
Camera Mode:
</string>
</resources>

View File

@@ -0,0 +1,19 @@
<?xml version="1.0" encoding="utf-8"?>
<resources>
<string name="app_name">Calibration</string>
<string name="patternsize">Pattern Size</string>
<string name="patterntext">Please choose the width and height (number of inside corners) of the checker
board pattern you will be using for calibration. Default is 6 by 8 corners. You may find a checkerboard pattern at
http://opencv.willowgarage.com/pattern</string>
<string name="patternlink">http://opencv.willowgarage.com/pattern</string>
<string name="camera_settings_label">Camera Settings</string>
<string name="settings_text">Change the camera settings</string>
<string name="calibration_service_started">Calibration calculations have started...</string>
<string name="calibration_service_stopped">Calibration calculations has stopped.</string>
<string name="calibration_service_finished">Calibration finished, you camera is calibrated.</string>
<string name="calibration_service_label">Calibration</string>
<string name="calibration_not_enough">Please capture atleast 10 images of the pattern!</string>
</resources>

View File

@@ -6,3 +6,4 @@ OPENCV_CONFIG=../build/android-opencv.mk
#you can download the ndk from http://www.crystax.net/android/ndk-r4.php
ANDROID_NDK_ROOT=$(HOME)/android-ndk-r4-crystax
ARM_TARGETS=armeabi armeabi-v7a

View File

@@ -0,0 +1,47 @@
package com.opencv.calibration;
import java.io.BufferedReader;
import java.io.FileNotFoundException;
import java.io.FileReader;
import java.io.IOException;
import android.app.Activity;
import android.os.Bundle;
import android.text.method.ScrollingMovementMethod;
import android.util.Log;
import android.widget.TextView;
import com.opencv.R;
public class CalibrationViewer extends Activity {
@Override
protected void onCreate(Bundle savedInstanceState) {
// TODO Auto-generated method stub
super.onCreate(savedInstanceState);
setContentView(R.layout.calibrationviewer);
Bundle extras = getIntent().getExtras();
String filename = extras.getString("calibfile");
if (filename != null) {
TextView text = (TextView) findViewById(R.id.calibtext);
text.setMovementMethod(new ScrollingMovementMethod());
try {
BufferedReader reader = new BufferedReader(new FileReader(
filename));
while (reader.ready()) {
text.append(reader.readLine() +"\n");
}
} catch (FileNotFoundException e) {
Log.e("opencv", "could not open calibration file at:"
+ filename);
} catch (IOException e) {
Log.e("opencv", "error reading file: "
+ filename);
}
}
}
}

View File

@@ -0,0 +1,75 @@
package com.opencv.calibration;
import com.opencv.R;
import com.opencv.jni.Size;
import android.app.Activity;
import android.content.Context;
import android.content.SharedPreferences;
import android.content.SharedPreferences.Editor;
import android.os.Bundle;
import android.view.View;
import android.widget.AdapterView;
import android.widget.AdapterView.OnItemSelectedListener;
import android.widget.Spinner;
public class ChessBoardChooser extends Activity {
public static final String CHESS_SIZE = "chess_size";
public static final int DEFAULT_WIDTH = 6;
public static final int DEFAULT_HEIGHT = 8;
public static final int LOWEST = 3;
class DimChooser implements OnItemSelectedListener {
private String dim;
public DimChooser(String dim) {
this.dim = dim;
}
@Override
public void onItemSelected(AdapterView<?> arg0, View arg1, int pos,
long arg3) {
SharedPreferences settings = getSharedPreferences(CHESS_SIZE, 0);
Editor editor = settings.edit();
editor.putInt(dim, pos + LOWEST);
editor.commit();
}
@Override
public void onNothingSelected(AdapterView<?> arg0) {
}
}
@Override
protected void onCreate(Bundle savedInstanceState) {
// TODO Auto-generated method stub
super.onCreate(savedInstanceState);
setContentView(R.layout.chesssizer);
// Restore preferences
SharedPreferences settings = getSharedPreferences(CHESS_SIZE, 0);
int width = settings.getInt("width", 6);
int height = settings.getInt("height", 8);
Spinner wspin, hspin;
wspin = (Spinner) findViewById(R.id.rows);
hspin = (Spinner) findViewById(R.id.cols);
wspin.setSelection(width - LOWEST);
hspin.setSelection(height - LOWEST);
wspin.setOnItemSelectedListener(new DimChooser("width"));
hspin.setOnItemSelectedListener(new DimChooser("height"));
}
public static Size getPatternSize(Context ctx) {
SharedPreferences settings = ctx.getSharedPreferences(CHESS_SIZE, 0);
int width = settings.getInt("width", 6);
int height = settings.getInt("height", 8);
return new Size(width, height);
}
}

View File

@@ -0,0 +1,166 @@
package com.opencv.calibration.services;
import java.io.File;
import java.io.IOException;
import android.app.Notification;
import android.app.NotificationManager;
import android.app.PendingIntent;
import android.app.Service;
import android.content.Intent;
import android.os.Binder;
import android.os.IBinder;
import android.util.Log;
import android.widget.Toast;
import com.opencv.R;
import com.opencv.calibration.CalibrationViewer;
import com.opencv.calibration.Calibrator;
import com.opencv.calibration.Calibrator.CalibrationCallback;
public class CalibrationService extends Service implements CalibrationCallback {
Class<?> activity;
int icon;
File calibration_file;
public void startCalibrating(Class<?> activitycaller,int icon_id, Calibrator calibrator, File calibration_file)
throws IOException {
activity = activitycaller;
icon = icon_id;
// Display a notification about us starting. We put an icon in the
// status bar.
showNotification();
this.calibration_file = calibration_file;
calibrator.setCallback(this);
calibrator.calibrate(calibration_file);
}
private NotificationManager mNM;
/**
* Class for clients to access. Because we know this service always runs in
* the same process as its clients, we don't need to deal with IPC.
*/
public class CalibrationServiceBinder extends Binder {
public CalibrationService getService() {
return CalibrationService.this;
}
}
@Override
public int onStartCommand(Intent intent, int flags, int startId) {
Log.i("LocalService", "Received start id " + startId + ": " + intent);
// This service only needs to run while a calibration is in progress,
// so don't restart it if it gets killed - return not-sticky.
return START_NOT_STICKY;
}
@Override
public void onCreate() {
mNM = (NotificationManager) getSystemService(NOTIFICATION_SERVICE);
}
@Override
public void onDestroy() {
// Cancel the persistent notification.
// mNM.cancel(R.string.calibration_service_started);
// Tell the user we stopped.
Toast.makeText(this, R.string.calibration_service_finished,
Toast.LENGTH_SHORT).show();
}
private final IBinder mBinder = new CalibrationServiceBinder();
@Override
public IBinder onBind(Intent intent) {
return mBinder;
}
/**
* Show a notification while this service is running.
*/
private void showNotification() {
// In this sample, we'll use the same text for the ticker and the
// expanded notification
CharSequence text = getText(R.string.calibration_service_started);
// Set the icon, scrolling text and timestamp
Notification notification = new Notification(icon, text,
System.currentTimeMillis());
// The PendingIntent to launch our activity if the user selects this
// notification
PendingIntent contentIntent = PendingIntent.getActivity(this, 0,
new Intent(this, activity), 0);
// Set the info for the views that show in the notification panel.
notification.setLatestEventInfo(this,
getText(R.string.calibration_service_label), text,
contentIntent);
notification.defaults |= Notification.DEFAULT_SOUND;
// Send the notification.
// We use a layout id because it is a unique number. We use it later to
// cancel.
mNM.notify(R.string.calibration_service_started, notification);
}
/**
* Show a notification once calibration has finished.
*/
private void doneNotification() {
// In this sample, we'll use the same text for the ticker and the
// expanded notification
CharSequence text = getText(R.string.calibration_service_finished);
// Set the icon, scrolling text and timestamp
Notification notification = new Notification(icon, text,
System.currentTimeMillis());
Intent intent = new Intent(this,CalibrationViewer.class);
intent.putExtra("calibfile", calibration_file.getAbsolutePath());
// The PendingIntent to launch our activity if the user selects this
// notification
PendingIntent contentIntent = PendingIntent.getActivity(this, 0,
intent, 0);
// Set the info for the views that show in the notification panel.
notification.setLatestEventInfo(this,
getText(R.string.calibration_service_label), text,
contentIntent);
notification.defaults |= Notification.DEFAULT_SOUND;
// Send the notification.
// We use a layout id because it is a unique number. We use it later to
// cancel.
mNM.notify(R.string.calibration_service_started, notification);
}
@Override
public void onFoundChessboard(Calibrator calibrator) {
// TODO Auto-generated method stub
}
@Override
public void onDoneCalibration(Calibrator calibration, File calibfile) {
doneNotification();
stopSelf();
}
@Override
public void onFailedChessboard(Calibrator calibrator) {
// TODO Auto-generated method stub
}
}

View File

@@ -0,0 +1,166 @@
package com.opencv.camera;
import com.opencv.R;
import android.app.Activity;
import android.content.Context;
import android.content.SharedPreferences;
import android.content.SharedPreferences.Editor;
import android.os.Bundle;
import android.view.View;
import android.widget.AdapterView;
import android.widget.AdapterView.OnItemSelectedListener;
import android.widget.Spinner;
public class CameraConfig extends Activity {
public static final String CAMERA_SETTINGS = "CAMERA_SETTINGS";
public static final String CAMERA_MODE = "camera_mode";
public static final String IMAGE_WIDTH = "IMAGE_WIDTH";
public static final String IMAGE_HEIGHT = "IMAGE_HEIGHT";
public static final int CAMERA_MODE_BW = 0;
public static final int CAMERA_MODE_COLOR = 1;
public static int readCameraMode(Context ctx) {
// Restore preferences
SharedPreferences settings = ctx.getSharedPreferences(CAMERA_SETTINGS,
0);
int mode = settings.getInt(CAMERA_MODE, CAMERA_MODE_BW);
return mode;
}
static public void setCameraMode(Context context, String mode) {
int m = 0;
if (mode.equals("BW")) {
m = CAMERA_MODE_BW;
} else if (mode.equals("color"))
m = CAMERA_MODE_COLOR;
setCameraMode(context, m);
}
private static String sizeToString(int[] size) {
return size[0] + "x" + size[1];
}
private static void parseStrToSize(String ssize, int[] size) {
String sz[] = ssize.split("x");
size[0] = Integer.valueOf(sz[0]);
size[1] = Integer.valueOf(sz[1]);
}
public static void readImageSize(Context ctx, int[] size) {
// Restore preferences
SharedPreferences settings = ctx.getSharedPreferences(CAMERA_SETTINGS,
0);
size[0] = settings.getInt(IMAGE_WIDTH, 600);
size[1] = settings.getInt(IMAGE_HEIGHT, 600);
}
public static void setCameraMode(Context ctx, int mode) {
// Restore preferences
SharedPreferences settings = ctx.getSharedPreferences(CAMERA_SETTINGS,
0);
Editor editor = settings.edit();
editor.putInt(CAMERA_MODE, mode);
editor.commit();
}
public static void setImageSize(Context ctx, String strsize) {
int size[] = { 0, 0 };
parseStrToSize(strsize, size);
setImageSize(ctx, size[0], size[1]);
}
public static void setImageSize(Context ctx, int width, int height) {
// Restore preferences
SharedPreferences settings = ctx.getSharedPreferences(CAMERA_SETTINGS,
0);
Editor editor = settings.edit();
editor.putInt(IMAGE_WIDTH, width);
editor.putInt(IMAGE_HEIGHT, height);
editor.commit();
}
@Override
protected void onCreate(Bundle savedInstanceState) {
// TODO Auto-generated method stub
super.onCreate(savedInstanceState);
setContentView(R.layout.camerasettings);
int mode = readCameraMode(this);
int size[] = { 0, 0 };
readImageSize(this, size);
final Spinner size_spinner;
final Spinner mode_spinner;
size_spinner = (Spinner) findViewById(R.id.image_size);
mode_spinner = (Spinner) findViewById(R.id.camera_mode);
String strsize = sizeToString(size);
String strmode = modeToString(mode);
		// Select the spinner entries matching the stored preferences, if present.
		String sizes[] = getResources().getStringArray(R.array.image_sizes);
		for (int i = 0; i < sizes.length; i++) {
			if (sizes[i].equals(strsize)) {
				size_spinner.setSelection(i);
				break;
			}
		}
		String modes[] = getResources().getStringArray(R.array.camera_mode);
		for (int i = 0; i < modes.length; i++) {
			if (modes[i].equals(strmode)) {
				mode_spinner.setSelection(i);
				break;
			}
		}
size_spinner.setOnItemSelectedListener(new OnItemSelectedListener() {
@Override
public void onItemSelected(AdapterView<?> arg0, View spinner,
int position, long arg3) {
Object o = size_spinner.getItemAtPosition(position);
if (o != null)
setImageSize(spinner.getContext(), (String) o);
}
@Override
public void onNothingSelected(AdapterView<?> arg0) {
}
});
mode_spinner.setOnItemSelectedListener(new OnItemSelectedListener() {
@Override
public void onItemSelected(AdapterView<?> arg0, View spinner,
int position, long arg3) {
Object o = mode_spinner.getItemAtPosition(position);
if (o != null)
setCameraMode(spinner.getContext(), (String) o);
}
@Override
public void onNothingSelected(AdapterView<?> arg0) {
}
});
}
private String modeToString(int mode) {
switch (mode) {
case CAMERA_MODE_BW:
return "BW";
case CAMERA_MODE_COLOR:
return "color";
default:
return "";
}
}
}
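A quick illustration of how another activity might consume these stored settings - the activity name below is hypothetical and not part of this commit; only the CameraConfig helpers defined above are assumed:

// Hypothetical caller of the CameraConfig helpers.
import android.app.Activity;
import android.os.Bundle;
import android.util.Log;

public class PreviewSettingsReader extends Activity {
	@Override
	protected void onCreate(Bundle savedInstanceState) {
		super.onCreate(savedInstanceState);
		int[] size = { 0, 0 };
		CameraConfig.readImageSize(this, size); // defaults to 600x600
		int mode = CameraConfig.readCameraMode(this); // defaults to CAMERA_MODE_BW
		Log.d("PreviewSettingsReader", "preview " + size[0] + "x" + size[1]
				+ " mode=" + (mode == CameraConfig.CAMERA_MODE_BW ? "BW" : "color"));
	}
}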

View File

@ -22,35 +22,44 @@ import com.opencv.camera.NativeProcessor.PoolCallback;
public class NativePreviewer extends SurfaceView implements
SurfaceHolder.Callback, Camera.PreviewCallback, NativeProcessorCallback {
SurfaceHolder mHolder;
Camera mCamera;
private NativeProcessor processor;
private int preview_width, preview_height;
private int pixelformat;
private PixelFormat pixelinfo;
public NativePreviewer(Context context,AttributeSet attributes){
super(context,attributes);
/** Constructor useful for defining a NativePreviewer in Android layout XML
*
* @param context
* @param attributes
*/
public NativePreviewer(Context context, AttributeSet attributes) {
super(context, attributes);
listAllCameraMethods();
// Install a SurfaceHolder.Callback so we get notified when the
// underlying surface is created and destroyed.
mHolder = getHolder();
mHolder.addCallback(this);
mHolder.setType(SurfaceHolder.SURFACE_TYPE_PUSH_BUFFERS);
this.preview_width = attributes.getAttributeIntValue("opencv", "preview_width", 600);
this.preview_height= attributes.getAttributeIntValue("opencv", "preview_height", 600);
/* TODO get this working! Can't figure out how to define these in xml
*/
preview_width = attributes.getAttributeIntValue("opencv",
"preview_width", 600);
preview_height = attributes.getAttributeIntValue("opencv",
"preview_height", 600);
Log.d("NativePreviewer", "Trying to use preview size of " + preview_width + " " + preview_height);
processor = new NativeProcessor();
setZOrderMediaOverlay(false);
setZOrderMediaOverlay(false);
}
/**
*
* @param context
* @param preview_width the desired camera preview width - will attempt to get as close to this as possible
* @param preview_height the desired camera preview height
*/
public NativePreviewer(Context context, int preview_width,
int preview_height) {
super(context);
listAllCameraMethods();
// Install a SurfaceHolder.Callback so we get notified when the
@ -63,62 +72,38 @@ public class NativePreviewer extends SurfaceView implements
this.preview_height = preview_height;
processor = new NativeProcessor();
setZOrderMediaOverlay(false);
setZOrderMediaOverlay(false);
}
/** Only call in the onCreate function of the instantiating activity
*
* @param width desired width
* @param height desired height
*/
public void setPreviewSize(int width, int height){
preview_width = width;
preview_height = height;
Log.d("NativePreviewer", "Trying to use preview size of " + preview_width + " " + preview_height);
}
Handler camerainiter = new Handler();
void initCamera(SurfaceHolder holder) throws InterruptedException{
if(mCamera == null){
// The Surface has been created, acquire the camera and tell it where
// to draw.
int i = 0;
while(i++ < 5){
try{
mCamera = Camera.open();
break;
}catch(RuntimeException e){
Thread.sleep(200);
}
}
try {
mCamera.setPreviewDisplay(holder);
} catch (IOException exception) {
mCamera.release();
mCamera = null;
}catch(RuntimeException e){
Log.e("camera", "stacktrace", e);
}
}
}
void releaseCamera(){
if(mCamera !=null){
// Surface will be destroyed when we return, so stop the preview.
// Because the CameraDevice object is not a shared resource, it's very
// important to release it when the activity is paused.
mCamera.stopPreview();
mCamera.release();
}
// processor = null;
mCamera = null;
mAcb = null;
mPCWB = null;
public void setParamsFromPrefs(Context ctx){
int size[] ={0,0};
CameraConfig.readImageSize(ctx, size);
int mode = CameraConfig.readCameraMode(ctx);
setPreviewSize(size[0], size[1]);
setGrayscale(mode == CameraConfig.CAMERA_MODE_BW);
}
public void surfaceCreated(SurfaceHolder holder) {
}
public void surfaceDestroyed(SurfaceHolder holder) {
releaseCamera();
}
private boolean hasAutoFocus = false;
public void surfaceChanged(SurfaceHolder holder, int format, int w, int h) {
try {
@ -128,44 +113,48 @@ public class NativePreviewer extends SurfaceView implements
e.printStackTrace();
return;
}
// Now that the size is known, set up the camera parameters and begin
// the preview.
Camera.Parameters parameters = mCamera.getParameters();
List<Camera.Size> pvsizes = mCamera.getParameters().getSupportedPreviewSizes();
List<Camera.Size> pvsizes = mCamera.getParameters()
.getSupportedPreviewSizes();
int best_width = 1000000;
int best_height = 1000000;
for(Size x: pvsizes){
if(x.width - preview_width >= 0 && x.width <= best_width){
int bdist = 100000;
for (Size x : pvsizes) {
if (Math.abs(x.width - preview_width) < bdist) {
bdist = Math.abs(x.width - preview_width);
best_width = x.width;
best_height = x.height;
}
}
preview_width = best_width;
preview_height = best_height;
Log.d("NativePreviewer", "Determined compatible preview size is: (" + preview_width + "," + preview_height+")");
List<String> fmodes = mCamera.getParameters().getSupportedFocusModes();
int idx = fmodes.indexOf(Camera.Parameters.FOCUS_MODE_INFINITY);
if(idx != -1){
if (idx != -1) {
parameters.setFocusMode(Camera.Parameters.FOCUS_MODE_INFINITY);
}else if(fmodes.indexOf(Camera.Parameters.FOCUS_MODE_FIXED) != -1){
} else if (fmodes.indexOf(Camera.Parameters.FOCUS_MODE_FIXED) != -1) {
parameters.setFocusMode(Camera.Parameters.FOCUS_MODE_FIXED);
}
if(fmodes.indexOf(Camera.Parameters.FOCUS_MODE_AUTO) != -1){
hasAutoFocus = true;
if (fmodes.indexOf(Camera.Parameters.FOCUS_MODE_AUTO) != -1) {
hasAutoFocus = true;
}
List<String> scenemodes = mCamera.getParameters().getSupportedSceneModes();
if(scenemodes != null)
if(scenemodes.indexOf(Camera.Parameters.SCENE_MODE_STEADYPHOTO) != -1){
parameters.setSceneMode(Camera.Parameters.SCENE_MODE_STEADYPHOTO);
}
List<String> scenemodes = mCamera.getParameters()
.getSupportedSceneModes();
if (scenemodes != null)
if (scenemodes.indexOf(Camera.Parameters.SCENE_MODE_STEADYPHOTO) != -1) {
parameters
.setSceneMode(Camera.Parameters.SCENE_MODE_STEADYPHOTO);
}
parameters.setPreviewSize(preview_width, preview_height);
@ -194,68 +183,83 @@ public class NativePreviewer extends SurfaceView implements
mCamera.startPreview();
//postautofocus(0);
}
public void postautofocus(int delay) {
if(hasAutoFocus)
if (hasAutoFocus)
handler.postDelayed(autofocusrunner, delay);
}
private Runnable autofocusrunner = new Runnable() {
@Override
public void run() {
mCamera.autoFocus(autocallback);
}
};
Camera.AutoFocusCallback autocallback = new Camera.AutoFocusCallback() {
@Override
public void onAutoFocus(boolean success, Camera camera) {
if(!success)
postautofocus(1000);
}
};
Handler handler = new Handler();
/**
* This method will list all methods of the android.hardware.Camera class,
* even the hidden ones. With the information it provides, you can use the
* same approach I took below to expose methods that were written but hidden
* in Eclair.
*/
private void listAllCameraMethods() {
try {
Class<?> c = Class.forName("android.hardware.Camera");
Method[] m = c.getMethods();
for (int i = 0; i < m.length; i++) {
Log.d("NativePreviewer", " method:" + m[i].toString());
}
} catch (Exception e) {
// TODO Auto-generated catch block
Log.e("NativePreviewer", e.toString());
}
}
/**
* These variables are re-used over and over by addCallbackBuffer
* Demonstration of how to use onPreviewFrame. In this case I'm not
* processing the data, I'm just adding the buffer back to the buffer queue
* for re-use
*/
Method mAcb;
public void onPreviewFrame(byte[] data, Camera camera) {
if (start == null) {
start = new Date();
}
processor.post(data, preview_width, preview_height, pixelformat,
System.nanoTime(), this);
fcount++;
if (fcount % 100 == 0) {
double ms = (new Date()).getTime() - start.getTime();
Log.i("NativePreviewer", "fps:" + fcount / (ms / 1000.0));
start = new Date();
fcount = 0;
}
}
@Override
public void onDoneNativeProcessing(byte[] buffer) {
addCallbackBuffer(buffer);
}
public void addCallbackStack(LinkedList<PoolCallback> callbackstack) {
processor.addCallbackStack(callbackstack);
}
/**
	 * This must be called when the activity pauses, in Activity.onPause. This
	 * has the side effect of clearing the callback stack.
*
*/
public void onPause() {
releaseCamera();
addCallbackStack(null);
processor.stop();
}
public void onResume() {
processor.start();
}
private Method mPCWB;
private void initForPCWB() {
private void initForACB() {
try {
mAcb = Class.forName("android.hardware.Camera").getMethod(
"addCallbackBuffer", byte[].class);
mPCWB = Class.forName("android.hardware.Camera").getMethod(
"setPreviewCallbackWithBuffer", PreviewCallback.class);
} catch (Exception e) {
Log
.e("NativePreviewer",
"Problem setting up for addCallbackBuffer: "
+ e.toString());
Log.e("NativePreviewer",
"Problem setting up for setPreviewCallbackWithBuffer: "
+ e.toString());
}
}
/**
@ -274,27 +278,10 @@ public class NativePreviewer extends SurfaceView implements
try {
mAcb.invoke(mCamera, b);
} catch (Exception e) {
Log.e("NativePreviewer", "invoking addCallbackBuffer failed: "
+ e.toString());
}
}
Method mPCWB;
private void initForPCWB() {
try {
mPCWB = Class.forName("android.hardware.Camera").getMethod(
"setPreviewCallbackWithBuffer", PreviewCallback.class);
} catch (Exception e) {
Log.e("NativePreviewer",
"Problem setting up for setPreviewCallbackWithBuffer: "
+ e.toString());
"invoking addCallbackBuffer failed: " + e.toString());
}
}
/**
@ -321,7 +308,8 @@ public class NativePreviewer extends SurfaceView implements
}
}
protected void clearPreviewCallbackWithBuffer() {
@SuppressWarnings("unused")
private void clearPreviewCallbackWithBuffer() {
// mCamera.setPreviewCallback(this);
// return;
try {
@ -341,69 +329,117 @@ public class NativePreviewer extends SurfaceView implements
}
}
Date start;
int fcount = 0;
boolean processing = false;
/**
* These variables are re-used over and over by addCallbackBuffer
*/
private Method mAcb;
private void initForACB() {
try {
mAcb = Class.forName("android.hardware.Camera").getMethod(
"addCallbackBuffer", byte[].class);
} catch (Exception e) {
Log.e("NativePreviewer",
"Problem setting up for addCallbackBuffer: " + e.toString());
}
}
private Runnable autofocusrunner = new Runnable() {
@Override
public void run() {
mCamera.autoFocus(autocallback);
}
};
private Camera.AutoFocusCallback autocallback = new Camera.AutoFocusCallback() {
@Override
public void onAutoFocus(boolean success, Camera camera) {
if (!success)
postautofocus(1000);
}
};
/**
* Demonstration of how to use onPreviewFrame. In this case I'm not
* processing the data, I'm just adding the buffer back to the buffer queue
* for re-use
* This method will list all methods of the android.hardware.Camera class,
* even the hidden ones. With the information it provides, you can use the
* same approach I took below to expose methods that were written but hidden
* in Eclair.
*/
public void onPreviewFrame(byte[] data, Camera camera) {
private void listAllCameraMethods() {
try {
Class<?> c = Class.forName("android.hardware.Camera");
Method[] m = c.getMethods();
for (int i = 0; i < m.length; i++) {
Log.d("NativePreviewer", " method:" + m[i].toString());
}
} catch (Exception e) {
// TODO Auto-generated catch block
Log.e("NativePreviewer", e.toString());
}
}
if (start == null) {
start = new Date();
private void initCamera(SurfaceHolder holder) throws InterruptedException {
if (mCamera == null) {
// The Surface has been created, acquire the camera and tell it
			// where to draw.
int i = 0;
while (i++ < 5) {
try {
mCamera = Camera.open();
break;
} catch (RuntimeException e) {
Thread.sleep(200);
}
}
try {
mCamera.setPreviewDisplay(holder);
} catch (IOException exception) {
mCamera.release();
mCamera = null;
} catch (RuntimeException e) {
Log.e("camera", "stacktrace", e);
}
}
}
private void releaseCamera() {
if (mCamera != null) {
// Surface will be destroyed when we return, so stop the preview.
// Because the CameraDevice object is not a shared resource, it's
			// very important to release it when the activity is paused.
mCamera.stopPreview();
mCamera.release();
}
processor.post(data, preview_width, preview_height, pixelformat, System.nanoTime(),
this);
fcount++;
if (fcount % 100 == 0) {
double ms = (new Date()).getTime() - start.getTime();
Log.i("NativePreviewer", "fps:" + fcount / (ms / 1000.0));
start = new Date();
fcount = 0;
}
// processor = null;
mCamera = null;
mAcb = null;
mPCWB = null;
}
@Override
public void onDoneNativeProcessing(byte[] buffer) {
addCallbackBuffer(buffer);
}
private Handler handler = new Handler();
public void addCallbackStack(LinkedList<PoolCallback> callbackstack) {
processor.addCallbackStack(callbackstack);
}
private Date start;
private int fcount = 0;
private boolean hasAutoFocus = false;
private SurfaceHolder mHolder;
private Camera mCamera;
/** This must be called when the activity pauses, in Activity.onPause.
* This has the side effect of clearing the callback stack.
*
*/
public void onPause() {
releaseCamera();
addCallbackStack(null);
processor.stop();
}
private NativeProcessor processor;
public void onResume() {
processor.start();
private int preview_width, preview_height;
private int pixelformat;
private PixelFormat pixelinfo;
public void setGrayscale(boolean b) {
processor.setGrayscale(b);
}
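To see how these pieces fit together, here is a rough sketch of a host activity - the class name and chosen resolution are placeholders; only the NativePreviewer methods shown in this file are assumed:

// Hypothetical host activity wiring NativePreviewer to the lifecycle.
import android.app.Activity;
import android.os.Bundle;
import com.opencv.camera.NativePreviewer;

public class CameraActivity extends Activity {
	private NativePreviewer preview;

	@Override
	protected void onCreate(Bundle savedInstanceState) {
		super.onCreate(savedInstanceState);
		preview = new NativePreviewer(this, 640, 480);
		preview.setParamsFromPrefs(this); // apply the stored CameraConfig settings
		setContentView(preview);
	}

	@Override
	protected void onPause() {
		super.onPause();
		preview.onPause(); // releases the camera and stops the processor
	}

	@Override
	protected void onResume() {
		super.onResume();
		preview.onResume(); // restarts the NativeProcessor thread
	}
}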

View File

@ -11,106 +11,38 @@ import android.util.Log;
import com.opencv.jni.image_pool;
import com.opencv.jni.opencv;
/** The NativeProcessor is a native processing stack engine.
 *
 * What this means is that the NativeProcessor handles loading
 * live camera frames into native memory space, i.e. the image_pool,
 * and then calls a stack of PoolCallbacks, passing them the
 * image_pool.
 *
 * The image_pool index 0 is populated with the live video image.
 *
 * Any modifications to the pool are made in place, so you may
 * pass on changes to the pool to the next PoolCallback in the stack.
 *
 */
public class NativeProcessor {
private class ProcessorThread extends Thread {
private void process(NPPostObject pobj) throws Exception {
if (pobj.format == PixelFormat.YCbCr_420_SP) {
// add as color image, because we know how to decode this
opencv.addYUVtoPool(pool, pobj.buffer, 0, pobj.width,
pobj.height, false);
} else if (pobj.format == PixelFormat.YCbCr_422_SP) {
				// add as gray image, because decoding of this format is not
				// implemented... TODO figure out how to decode this format
opencv.addYUVtoPool(pool, pobj.buffer, 0, pobj.width,
pobj.height, true);
} else
throw new Exception("bad pixel format!");
for (PoolCallback x : stack) {
if (interrupted()) {
throw new InterruptedException(
"Native Processor interupted while processing");
}
x.process(0, pool, pobj.timestamp, NativeProcessor.this);
}
pobj.done(); // tell the postobject that we're done doing
// all the processing.
}
@Override
public void run() {
try {
while (true) {
yield();
while(!stacklock.tryLock(5, TimeUnit.MILLISECONDS)){
}
try {
if (nextStack != null) {
stack = nextStack;
nextStack = null;
}
} finally {
stacklock.unlock();
}
NPPostObject pobj = null;
while(!lock.tryLock(5, TimeUnit.MILLISECONDS)){
}
try {
if(postobjects.isEmpty()) continue;
pobj = postobjects.removeLast();
} finally {
lock.unlock();
}
if(interrupted())throw new InterruptedException();
if(stack != null && pobj != null)
process(pobj);
}
} catch (InterruptedException e) {
Log.i("NativeProcessor",
"native processor interupted, ending now");
} catch (Exception e) {
e.printStackTrace();
} finally {
}
}
}
ProcessorThread mthread;
	/** Users that would like access to the live video frames
	 * should implement a PoolCallback.
	 * The idx and pool arguments contain the images; specifically, idx == 0 is
	 * the live video frame.
	 */
static public interface PoolCallback {
void process(int idx, image_pool pool,long timestamp, NativeProcessor nativeProcessor);
void process(int idx, image_pool pool, long timestamp,
NativeProcessor nativeProcessor);
}
Lock stacklock = new ReentrantLock();
LinkedList<PoolCallback> nextStack;
void addCallbackStack(LinkedList<PoolCallback> stack) {
	/** At every frame, each PoolCallback is called in order and is passed
	 * the same pool and index.
	 *
	 * @param stack A list of PoolCallback objects that will be called in order
	 */
public void addCallbackStack(LinkedList<PoolCallback> stack) {
try {
while (!stacklock.tryLock(10, TimeUnit.MILLISECONDS)) {
@ -130,13 +62,43 @@ public class NativeProcessor {
}
/**
* A callback that allows the NativeProcessor to pass back the buffer when
* it has completed processing a frame.
*
* @author ethan
* Create a NativeProcessor. The processor will not start running until
* start is called, at which point it will operate in its own thread and
* sleep until a post is called. The processor should not be started until
* an onSurfaceChange event, and should be shut down when the surface is
* destroyed by calling interrupt.
*
*/
static public interface NativeProcessorCallback {
public NativeProcessor() {
gray_scale_only = false;
}
	/** Grayscale only is much faster because the yuv does not get decoded, and grayscale is only one
	 * byte per pixel - giving fast opengl texture loading.
	 *
	 * You still have access to the whole yuv image, but only grayscale is immediately available
	 * to use without further effort.
	 *
	 * Suggestion - use grayscale only, and save your yuv images to disk if you would like color images.
	 *
	 * Also, in grayscale mode, the images in the pool are only single channel, so please keep this in mind
	 * when accessing the color images - check cv::Mat::channels() or cv::Mat::type() if you're working
	 * with color channels.
	 *
	 * @param grayscale true if you want to only process grayscale images
	 */
public void setGrayscale(boolean grayscale){
gray_scale_only = grayscale;
}
/**
* A callback that allows the NativeProcessor to pass back the buffer when
* it has completed processing a frame.
*/
static protected interface NativeProcessorCallback {
/**
	 * Called after processing, meant to be received by the NativePreviewer,
	 * which reuses the byte buffer for the camera preview...
@ -147,18 +109,22 @@ public class NativeProcessor {
void onDoneNativeProcessing(byte[] buffer);
}
/**
* Create a NativeProcessor. The processor will not start running until
* start is called, at which point it will operate in its own thread and
* sleep until a post is called. The processor should not be started until
* an onSurfaceChange event, and should be shut down when the surface is
* destroyed by calling interrupt.
*
*/
public NativeProcessor() {
protected void stop() {
mthread.interrupt();
try {
mthread.join();
} catch (InterruptedException e) {
Log.w("NativeProcessor",
"interupted while stoping " + e.getMessage());
}
mthread = null;
}
protected void start() {
mthread = new ProcessorThread();
mthread.start();
}
/**
	 * post is used to notify the processor that a preview frame is ready; this
	 * will return almost immediately. If the processor is busy, returns false
@ -177,13 +143,13 @@ public class NativeProcessor {
* the processor is still processing.
*/
public boolean post(byte[] buffer, int width, int height, int format,long timestamp,
NativeProcessorCallback callback) {
protected boolean post(byte[] buffer, int width, int height, int format,
long timestamp, NativeProcessorCallback callback) {
lock.lock();
try {
NPPostObject pobj = new NPPostObject(buffer, width, height,
format,timestamp, callback);
NPPostObject pobj = new NPPostObject(buffer, width, height, format,
timestamp, callback);
postobjects.addFirst(pobj);
} finally {
lock.unlock();
@ -191,10 +157,95 @@ public class NativeProcessor {
return true;
}
private class ProcessorThread extends Thread {
private void process(NPPostObject pobj) throws Exception {
if (pobj.format == PixelFormat.YCbCr_420_SP) {
// add as color image, because we know how to decode this
opencv.addYUVtoPool(pool, pobj.buffer, 0, pobj.width,
pobj.height, gray_scale_only);
} else if (pobj.format == PixelFormat.YCbCr_422_SP) {
				// add as gray image, because decoding of this format is not
				// implemented... TODO figure out how to decode this format
opencv.addYUVtoPool(pool, pobj.buffer, 0, pobj.width,
pobj.height, true);
} else
throw new Exception("bad pixel format!");
for (PoolCallback x : stack) {
if (interrupted()) {
throw new InterruptedException(
"Native Processor interupted while processing");
}
x.process(0, pool, pobj.timestamp, NativeProcessor.this);
}
pobj.done(); // tell the postobject that we're done doing
// all the processing.
}
@Override
public void run() {
try {
while (true) {
yield();
while (!stacklock.tryLock(5, TimeUnit.MILLISECONDS)) {
}
try {
if (nextStack != null) {
stack = nextStack;
nextStack = null;
}
} finally {
stacklock.unlock();
}
NPPostObject pobj = null;
while (!lock.tryLock(5, TimeUnit.MILLISECONDS)) {
}
try {
if (postobjects.isEmpty())
continue;
pobj = postobjects.removeLast();
} finally {
lock.unlock();
}
if (interrupted())
throw new InterruptedException();
if (stack != null && pobj != null)
process(pobj);
}
} catch (InterruptedException e) {
Log.i("NativeProcessor",
"native processor interupted, ending now");
} catch (Exception e) {
e.printStackTrace();
} finally {
}
}
}
static private class NPPostObject {
public NPPostObject(byte[] buffer, int width, int height, int format, long timestamp,
NativeProcessorCallback callback) {
public NPPostObject(byte[] buffer, int width, int height, int format,
long timestamp, NativeProcessorCallback callback) {
this.buffer = buffer;
this.width = width;
this.height = height;
@ -215,6 +266,7 @@ public class NativeProcessor {
NativeProcessorCallback callback;
}
private LinkedList<NPPostObject> postobjects = new LinkedList<NPPostObject>();
private image_pool pool = new image_pool();
@ -222,20 +274,12 @@ public class NativeProcessor {
private final Lock lock = new ReentrantLock();
private LinkedList<PoolCallback> stack = new LinkedList<PoolCallback>();
private boolean gray_scale_only;
private Lock stacklock = new ReentrantLock();
void stop() {
mthread.interrupt();
try {
mthread.join();
} catch (InterruptedException e) {
Log.w("NativeProcessor","interupted while stoping " + e.getMessage());
}
mthread = null;
}
void start() {
mthread = new ProcessorThread();
mthread.start();
}
private LinkedList<PoolCallback> nextStack;
private ProcessorThread mthread;
}
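As a closing sketch, a minimal PoolCallback that could be pushed onto the processor's stack through NativePreviewer.addCallbackStack - it only logs the frame timestamp, since the image_pool accessors live on the native side and are not shown in this diff; the variable names here are hypothetical:

// Hypothetical registration of a no-op PoolCallback.
LinkedList<NativeProcessor.PoolCallback> callbacks = new LinkedList<NativeProcessor.PoolCallback>();
callbacks.add(new NativeProcessor.PoolCallback() {
	public void process(int idx, image_pool pool, long timestamp,
			NativeProcessor nativeProcessor) {
		// idx == 0 holds the live video frame; single channel in grayscale mode.
		Log.d("ExampleCallback", "frame at t=" + timestamp + " ns");
	}
});
previewer.addCallbackStack(callbacks); // previewer is a NativePreviewer instance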