Mirror of https://github.com/opencv/opencv.git

Merge pull request #18 from ozantonkal/implementing_camera: Implementing camera
Commit 52f141a5ba
@@ -93,6 +93,40 @@ namespace cv
            Point pointer;
            unsigned int key_state;
        };

        class CV_EXPORTS Camera
        {
        public:
            Camera(float f_x, float f_y, float c_x, float c_y, const Size &window_size);
            Camera(const Vec2f &fov, const Size &window_size);
            Camera(const cv::Matx33f &K, const Size &window_size);
            Camera(const cv::Matx44f &proj, const Size &window_size);

            inline const Vec2d & getClip() const { return clip_; }
            inline void setClip(const Vec2d &clip) { clip_ = clip; }

            inline const Size & getWindowSize() const { return window_size_; }
            void setWindowSize(const Size &window_size);

            inline const Vec2f & getFov() const { return fov_; }
            inline void setFov(const Vec2f &fov) { fov_ = fov; }

            inline const Vec2f & getPrincipalPoint() const { return principal_point_; }
            inline const Vec2f & getFocalLength() const { return focal_; }

            void computeProjectionMatrix(Matx44f &proj) const;

            static Camera KinectCamera(const Size &window_size);

        private:
            void init(float f_x, float f_y, float c_x, float c_y, const Size &window_size);

            Vec2d clip_;
            Vec2f fov_;
            Size window_size_;
            Vec2f principal_point_;
            Vec2f focal_;
        };

    } /* namespace viz */
} /* namespace cv */
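A brief usage sketch (not part of the commit) of the viz::Camera class declared above; the numeric intrinsics are hypothetical placeholder values, and only the API from the declaration is used.

#include <opencv2/viz/types.hpp>

void camera_sketch()
{
    // Hypothetical intrinsics (fx, fy, cx, cy) for a 640x480 window
    cv::viz::Camera cam(525.f, 525.f, 320.f, 240.f, cv::Size(640, 480));

    cv::Vec2f fov  = cam.getFov();     // field of view derived from the intrinsics (radians)
    cv::Vec2d clip = cam.getClip();    // default near/far clipping distances

    cv::Matx44f proj;
    cam.computeProjectionMatrix(proj); // OpenGL-style projection matrix for rendering
}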
@@ -7,6 +7,7 @@
#include <opencv2/core.hpp>
#include <opencv2/viz/types.hpp>
#include <opencv2/viz/widgets.hpp>
#include <boost/concept_check.hpp>

namespace cv
{
@@ -38,6 +39,17 @@ namespace cv
            void setWidgetPose(const String &id, const Affine3f &pose);
            void updateWidgetPose(const String &id, const Affine3f &pose);
            Affine3f getWidgetPose(const String &id) const;

            void setCamera(const Camera &camera);
            Camera getCamera() const;
            Affine3f getViewerPose();
            void setViewerPose(const Affine3f &pose);

            void convertToWindowCoordinates(const Point3d &pt, Point3d &window_coord);
            void converTo3DRay(const Point3d &window_coord, Point3d &origin, Vec3d &direction);

            Size getWindowSize() const;
            void setWindowSize(const Size &window_size);

            void spin();
            void spinOnce(int time = 1, bool force_redraw = false);
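A hedged sketch of how the new Viz3d camera accessors might be used together; the Viz3d constructor, the include path, and the makeCameraPose helper are assumed from the surrounding module and are not part of this hunk.

#include <opencv2/viz/viz3d.hpp>

void viewer_camera_sketch()
{
    cv::viz::Viz3d viz("camera demo");                       // assumed Viz3d constructor

    // Replace the active camera with one built from a field of view (radians).
    viz.setCamera(cv::viz::Camera(cv::Vec2f(0.84f, 0.63f), cv::Size(640, 480)));

    // Move the viewer: position, point to look at, up direction (assumed helper).
    viz.setViewerPose(cv::viz::makeCameraPose(cv::Vec3f(0.f, 0.f, -3.f),
                                              cv::Vec3f(0.f, 0.f, 0.f),
                                              cv::Vec3f(0.f, 1.f, 0.f)));

    // Project a world point into window coordinates, then recover the viewing ray.
    cv::Point3d window_pt, ray_origin;
    cv::Vec3d ray_dir;
    viz.convertToWindowCoordinates(cv::Point3d(0, 0, 0), window_pt);
    viz.converTo3DRay(window_pt, ray_origin, ray_dir);

    viz.spinOnce(1, true);
}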
@@ -182,14 +182,19 @@ namespace cv
            enum {DISPLAY_FRAMES = 1, DISPLAY_PATH = 2};

            TrajectoryWidget(const std::vector<Affine3f> &path, int display_mode = TrajectoryWidget::DISPLAY_PATH, const Color &color = Color::white(), double scale = 1.0);
            TrajectoryWidget(const std::vector<Affine3f> &path, float line_length, double init_sphere_radius,
                             double sphere_radius, const Color &line_color = Color::white(), const Color &sphere_color = Color::white());
            TrajectoryWidget(const std::vector<Affine3f> &path, const Matx33f &K, double scale = 1.0, const Color &color = Color::white());   // Camera frustums
            TrajectoryWidget(const std::vector<Affine3f> &path, const Vec2f &fov, double scale = 1.0, const Color &color = Color::white());   // Camera frustums

        private:
            struct ApplyPath;
        };

        class CV_EXPORTS SpheresTrajectoryWidget : public Widget3D
        {
        public:
            SpheresTrajectoryWidget(const std::vector<Affine3f> &path, float line_length = 0.05f, double init_sphere_radius = 0.021,
                                    double sphere_radius = 0.007, const Color &line_color = Color::white(), const Color &sphere_color = Color::white());
        };

        class CV_EXPORTS CloudWidget : public Widget3D
        {
@@ -249,6 +254,7 @@ namespace cv
        template<> CV_EXPORTS Image3DWidget Widget::cast<Image3DWidget>();
        template<> CV_EXPORTS CameraPositionWidget Widget::cast<CameraPositionWidget>();
        template<> CV_EXPORTS TrajectoryWidget Widget::cast<TrajectoryWidget>();
        template<> CV_EXPORTS SpheresTrajectoryWidget Widget::cast<SpheresTrajectoryWidget>();
        template<> CV_EXPORTS CloudWidget Widget::cast<CloudWidget>();
        template<> CV_EXPORTS CloudCollectionWidget Widget::cast<CloudCollectionWidget>();
        template<> CV_EXPORTS CloudNormalsWidget Widget::cast<CloudNormalsWidget>();
@@ -263,48 +263,3 @@ int hull_vertex_table[43][7] = {

//    return (fabsf (float (sum * 0.5f)));
//}

/////////////////////////////////////////////////////////////////////////////////////////////
void cv::viz::Camera::computeViewMatrix (Affine3d& view_mat) const
{
    // Constructs the view matrix from the camera position, view-up vector, and the point it is looking at.
    // This code is based on gluLookAt: http://www.opengl.org/wiki/GluLookAt_code

    Vec3d zAxis = normalized(focal - pos);
    Vec3d xAxis = normalized(zAxis.cross(view_up));
    Vec3d yAxis = xAxis.cross (zAxis);

    Matx33d R;
    R(0, 0) = xAxis[0];  R(0, 1) = xAxis[1];  R(0, 2) = xAxis[2];
    R(1, 0) = yAxis[0];  R(1, 1) = yAxis[1];  R(1, 2) = yAxis[2];
    R(2, 0) = -zAxis[0]; R(2, 1) = -zAxis[1]; R(2, 2) = -zAxis[2];

    Vec3d t = R * (-pos);

    view_mat = Affine3d(R, t);
}

///////////////////////////////////////////////////////////////////////
void cv::viz::Camera::computeProjectionMatrix (Matx44d& proj) const
{
    double top    = clip[0] * tan (0.5 * fovy);
    double left   = -(top * window_size[0]) / window_size[1];
    double right  = -left;
    double bottom = -top;

    double temp1 = 2.0 * clip[0];
    double temp2 = 1.0 / (right - left);
    double temp3 = 1.0 / (top - bottom);
    double temp4 = 1.0 / (clip[1] - clip[0]);

    proj = Matx44d::zeros();

    proj(0,0) = temp1 * temp2;
    proj(1,1) = temp1 * temp3;
    proj(0,2) = (right + left) * temp2;
    proj(1,2) = (top + bottom) * temp3;
    proj(2,2) = (-clip[1] - clip[0]) * temp4;
    proj(3,2) = -1.0;
    proj(2,3) = (-temp1 * clip[1]) * temp4;
}
@@ -48,79 +48,6 @@ namespace cv
            SHADING_PHONG
        };

        class CV_EXPORTS Camera
        {
        public:
            /** \brief Focal point (lookAt). The view direction can be obtained as (focal - pos).normalized(). */
            Vec3d focal;

            /** \brief Position of the camera. */
            Vec3d pos;

            /** \brief Up vector of the camera. */
            Vec3d view_up;

            /** \brief Near/far clipping plane depths. */
            Vec2d clip;

            /** \brief Field of view angle in the y direction (radians). */
            double fovy;

            // The following variables are the actual position and size of the window on the screen, NOT the viewport.
            // The viewport is assumed to be centered and of the same size as the window.
            Vec2i window_size;
            Vec2i window_pos;

            /** \brief Computes the view matrix for the camera (based on gluLookAt).
              * \param[out] view_mat the resultant matrix
              */
            void computeViewMatrix(Affine3d& view_mat) const;

            /** \brief Computes the projection matrix for the camera.
              * \param[out] proj the resultant matrix
              */
            void computeProjectionMatrix(Matx44d& proj) const;

            /** \brief Converts a point to window coordinates.
              * \param[in] pt xyz point to be converted
              * \param[out] window_cord vector containing the point's window X, Y, Z and 1
              *
              * This function computes the projection and view matrices every time it is called.
              * It is very inefficient to use it for every point in a point cloud!
              */
            void cvtWindowCoordinates (const cv::Point3f& pt, Vec4d& window_cord) const
            {
                Affine3d view;
                computeViewMatrix (view);

                Matx44d proj;
                computeProjectionMatrix (proj);
                cvtWindowCoordinates (pt, proj * view.matrix, window_cord);
            }

            /** \brief Converts a point to window coordinates.
              * \param[in] pt xyz point to be converted
              * \param[out] window_cord vector containing the point's window X, Y, Z and 1
              * \param[in] composite_mat composite transformation matrix (proj * view)
              *
              * Use this function to compute window coordinates with a precomputed
              * transformation matrix. The typical composite matrix is the projection
              * matrix times the view matrix. However, additional matrices, such as a
              * camera distortion matrix, can also be folded in.
              */
            void cvtWindowCoordinates (const Point3f& pt, const Matx44d& composite_mat, Vec4d& window_cord) const
            {
                Vec4d pte (pt.x, pt.y, pt.z, 1);
                window_cord = composite_mat * pte;
                window_cord = window_cord / window_cord[3];

                window_cord[0] = (window_cord[0] + 1.0) / 2.0 * window_size[0];
                window_cord[1] = (window_cord[1] + 1.0) / 2.0 * window_size[1];
                window_cord[2] = (window_cord[2] + 1.0) / 2.0;
            }
        };

    }

}
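For reference, the viewport mapping applied by cvtWindowCoordinates above is the standard clip-space to window-coordinate transform, written out directly from the code:

\[
(x_n, y_n, z_n) = \left(\frac{x_c}{w_c}, \frac{y_c}{w_c}, \frac{z_c}{w_c}\right), \qquad
x_{win} = \frac{x_n + 1}{2}\,W, \quad
y_{win} = \frac{y_n + 1}{2}\,H, \quad
z_{win} = \frac{z_n + 1}{2},
\]

where \((x_c, y_c, z_c, w_c)^T = \texttt{composite\_mat} \cdot (x, y, z, 1)^T\) and \(W, H\) are the window width and height.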
@@ -1181,87 +1181,6 @@ cv::viz::TrajectoryWidget::TrajectoryWidget(const std::vector<Affine3f> &path, i
    WidgetAccessor::setProp(*this, actor);
}

cv::viz::TrajectoryWidget::TrajectoryWidget(const std::vector<Affine3f> &path, float line_length, double init_sphere_radius,
                                            double sphere_radius, const Color &line_color, const Color &sphere_color)
{
    vtkSmartPointer<vtkAppendPolyData> appendFilter = vtkSmartPointer<vtkAppendPolyData>::New();
    vtkIdType nr_poses = path.size();

    // Create color arrays
    vtkSmartPointer<vtkUnsignedCharArray> line_scalars = vtkSmartPointer<vtkUnsignedCharArray>::New();
    line_scalars->SetNumberOfComponents(3);
    line_scalars->InsertNextTuple3(line_color[2], line_color[1], line_color[0]);

    // Create color array for sphere
    vtkSphereSource * dummy_sphere = vtkSphereSource::New();
    // Create the array for big sphere
    dummy_sphere->SetRadius(init_sphere_radius);
    dummy_sphere->Update();
    vtkIdType nr_points = dummy_sphere->GetOutput()->GetNumberOfCells();
    vtkSmartPointer<vtkUnsignedCharArray> sphere_scalars_init = vtkSmartPointer<vtkUnsignedCharArray>::New();
    sphere_scalars_init->SetNumberOfComponents(3);
    sphere_scalars_init->SetNumberOfTuples(nr_points);
    sphere_scalars_init->FillComponent(0, sphere_color[2]);
    sphere_scalars_init->FillComponent(1, sphere_color[1]);
    sphere_scalars_init->FillComponent(2, sphere_color[0]);
    // Create the array for small sphere
    dummy_sphere->SetRadius(sphere_radius);
    dummy_sphere->Update();
    nr_points = dummy_sphere->GetOutput()->GetNumberOfCells();
    vtkSmartPointer<vtkUnsignedCharArray> sphere_scalars = vtkSmartPointer<vtkUnsignedCharArray>::New();
    sphere_scalars->SetNumberOfComponents(3);
    sphere_scalars->SetNumberOfTuples(nr_points);
    sphere_scalars->FillComponent(0, sphere_color[2]);
    sphere_scalars->FillComponent(1, sphere_color[1]);
    sphere_scalars->FillComponent(2, sphere_color[0]);
    dummy_sphere->Delete();

    for (vtkIdType i = 0; i < nr_poses; ++i)
    {
        Point3f new_pos = path[i].translation();

        vtkSmartPointer<vtkSphereSource> sphere_source = vtkSmartPointer<vtkSphereSource>::New();
        sphere_source->SetCenter (new_pos.x, new_pos.y, new_pos.z);
        if (i == 0)
        {
            sphere_source->SetRadius(init_sphere_radius);
            sphere_source->Update();
            sphere_source->GetOutput()->GetCellData()->SetScalars(sphere_scalars_init);
            appendFilter->AddInputConnection(sphere_source->GetOutputPort());
            continue;
        }
        else
        {
            sphere_source->SetRadius(sphere_radius);
            sphere_source->Update();
            sphere_source->GetOutput()->GetCellData()->SetScalars(sphere_scalars);
            appendFilter->AddInputConnection(sphere_source->GetOutputPort());
        }

        Affine3f relativeAffine = path[i].inv() * path[i-1];
        Vec3f v = path[i].rotation() * relativeAffine.translation();
        v = normalize(v) * line_length;

        vtkSmartPointer<vtkLineSource> line_source = vtkSmartPointer<vtkLineSource>::New();
        line_source->SetPoint1(new_pos.x + v[0], new_pos.y + v[1], new_pos.z + v[2]);
        line_source->SetPoint2(new_pos.x, new_pos.y, new_pos.z);
        line_source->Update();
        line_source->GetOutput()->GetCellData()->SetScalars(line_scalars);

        appendFilter->AddInputConnection(line_source->GetOutputPort());
    }

    vtkSmartPointer<vtkPolyDataMapper> mapper = vtkSmartPointer<vtkPolyDataMapper>::New();
    mapper->SetScalarModeToUseCellData();
    mapper->SetInput(appendFilter->GetOutput());

    vtkSmartPointer<vtkActor> actor = vtkSmartPointer<vtkActor>::New();
    actor->SetMapper(mapper);

    WidgetAccessor::setProp(*this, actor);
}

cv::viz::TrajectoryWidget::TrajectoryWidget(const std::vector<Affine3f> &path, const Matx33f &K, double scale, const Color &color)
{
    vtkSmartPointer<vtkCamera> camera = vtkSmartPointer<vtkCamera>::New();
@@ -1350,4 +1269,94 @@ template<> cv::viz::TrajectoryWidget cv::viz::Widget::cast<cv::viz::TrajectoryWi
{
    Widget3D widget = this->cast<Widget3D>();
    return static_cast<TrajectoryWidget&>(widget);
}

///////////////////////////////////////////////////////////////////////////////////////////////
/// spheres trajectory widget implementation

cv::viz::SpheresTrajectoryWidget::SpheresTrajectoryWidget(const std::vector<Affine3f> &path, float line_length, double init_sphere_radius, double sphere_radius,
                                                          const Color &line_color, const Color &sphere_color)
{
    vtkSmartPointer<vtkAppendPolyData> appendFilter = vtkSmartPointer<vtkAppendPolyData>::New();
    vtkIdType nr_poses = path.size();

    // Create color arrays
    vtkSmartPointer<vtkUnsignedCharArray> line_scalars = vtkSmartPointer<vtkUnsignedCharArray>::New();
    line_scalars->SetNumberOfComponents(3);
    line_scalars->InsertNextTuple3(line_color[2], line_color[1], line_color[0]);

    // Create color array for sphere
    vtkSphereSource * dummy_sphere = vtkSphereSource::New();
    // Create the array for big sphere
    dummy_sphere->SetRadius(init_sphere_radius);
    dummy_sphere->Update();
    vtkIdType nr_points = dummy_sphere->GetOutput()->GetNumberOfCells();
    vtkSmartPointer<vtkUnsignedCharArray> sphere_scalars_init = vtkSmartPointer<vtkUnsignedCharArray>::New();
    sphere_scalars_init->SetNumberOfComponents(3);
    sphere_scalars_init->SetNumberOfTuples(nr_points);
    sphere_scalars_init->FillComponent(0, sphere_color[2]);
    sphere_scalars_init->FillComponent(1, sphere_color[1]);
    sphere_scalars_init->FillComponent(2, sphere_color[0]);
    // Create the array for small sphere
    dummy_sphere->SetRadius(sphere_radius);
    dummy_sphere->Update();
    nr_points = dummy_sphere->GetOutput()->GetNumberOfCells();
    vtkSmartPointer<vtkUnsignedCharArray> sphere_scalars = vtkSmartPointer<vtkUnsignedCharArray>::New();
    sphere_scalars->SetNumberOfComponents(3);
    sphere_scalars->SetNumberOfTuples(nr_points);
    sphere_scalars->FillComponent(0, sphere_color[2]);
    sphere_scalars->FillComponent(1, sphere_color[1]);
    sphere_scalars->FillComponent(2, sphere_color[0]);
    dummy_sphere->Delete();

    for (vtkIdType i = 0; i < nr_poses; ++i)
    {
        Point3f new_pos = path[i].translation();

        vtkSmartPointer<vtkSphereSource> sphere_source = vtkSmartPointer<vtkSphereSource>::New();
        sphere_source->SetCenter (new_pos.x, new_pos.y, new_pos.z);
        if (i == 0)
        {
            sphere_source->SetRadius(init_sphere_radius);
            sphere_source->Update();
            sphere_source->GetOutput()->GetCellData()->SetScalars(sphere_scalars_init);
            appendFilter->AddInputConnection(sphere_source->GetOutputPort());
            continue;
        }
        else
        {
            sphere_source->SetRadius(sphere_radius);
            sphere_source->Update();
            sphere_source->GetOutput()->GetCellData()->SetScalars(sphere_scalars);
            appendFilter->AddInputConnection(sphere_source->GetOutputPort());
        }

        Affine3f relativeAffine = path[i].inv() * path[i-1];
        Vec3f v = path[i].rotation() * relativeAffine.translation();
        v = normalize(v) * line_length;

        vtkSmartPointer<vtkLineSource> line_source = vtkSmartPointer<vtkLineSource>::New();
        line_source->SetPoint1(new_pos.x + v[0], new_pos.y + v[1], new_pos.z + v[2]);
        line_source->SetPoint2(new_pos.x, new_pos.y, new_pos.z);
        line_source->Update();
        line_source->GetOutput()->GetCellData()->SetScalars(line_scalars);

        appendFilter->AddInputConnection(line_source->GetOutputPort());
    }

    vtkSmartPointer<vtkPolyDataMapper> mapper = vtkSmartPointer<vtkPolyDataMapper>::New();
    mapper->SetScalarModeToUseCellData();
    mapper->SetInput(appendFilter->GetOutput());

    vtkSmartPointer<vtkActor> actor = vtkSmartPointer<vtkActor>::New();
    actor->SetMapper(mapper);

    WidgetAccessor::setProp(*this, actor);
}

template<> cv::viz::SpheresTrajectoryWidget cv::viz::Widget::cast<cv::viz::SpheresTrajectoryWidget>()
{
    Widget3D widget = this->cast<Widget3D>();
    return static_cast<SpheresTrajectoryWidget&>(widget);
}
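A small usage sketch (not part of the commit) for the widget implemented above; the toy poses, the Viz3d reference, and the showWidget call are assumptions used only for illustration.

#include <vector>
#include <opencv2/viz/viz3d.hpp>
#include <opencv2/viz/widgets.hpp>

void spheres_trajectory_sketch(cv::viz::Viz3d &viz)
{
    // Build a toy trajectory: identity poses translated along +x.
    std::vector<cv::Affine3f> path;
    for (int i = 0; i < 10; ++i)
        path.push_back(cv::Affine3f().translate(cv::Vec3f(0.1f * i, 0.f, 0.f)));

    // The first pose is drawn with the larger init sphere, the rest with small
    // spheres connected by short direction lines (radii as in the declaration's defaults).
    cv::viz::SpheresTrajectoryWidget widget(path, 0.05f, 0.021, 0.007,
                                            cv::viz::Color::white(), cv::viz::Color::white());
    viz.showWidget("trajectory", widget);   // assumed Viz3d::showWidget
}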
@@ -142,3 +142,126 @@ cv::viz::Mesh3d cv::viz::Mesh3d::loadMesh(const String& file)
{
    return loadMeshImpl::loadMesh(file);
}

////////////////////////////////////////////////////////////////////
/// Camera implementation

cv::viz::Camera::Camera(float f_x, float f_y, float c_x, float c_y, const Size &window_size)
{
    init(f_x, f_y, c_x, c_y, window_size);
}

cv::viz::Camera::Camera(const Vec2f &fov, const Size &window_size)
{
    CV_Assert(window_size.width > 0 && window_size.height > 0);
    setClip(Vec2d(0.01, 1000.01)); // Default clipping
    setFov(fov);
    window_size_ = window_size;
    // Principal point at the center
    principal_point_ = Vec2f(static_cast<float>(window_size.width)*0.5f, static_cast<float>(window_size.height)*0.5f);
    focal_ = Vec2f(principal_point_[0] / tan(fov_[0]*0.5f), principal_point_[1] / tan(fov_[1]*0.5f));
}

cv::viz::Camera::Camera(const cv::Matx33f &K, const Size &window_size)
{
    float f_x = K(0,0);
    float f_y = K(1,1);
    float c_x = K(0,2);
    float c_y = K(1,2);
    init(f_x, f_y, c_x, c_y, window_size);
}

cv::viz::Camera::Camera(const Matx44f &proj, const Size &window_size)
{
    CV_Assert(window_size.width > 0 && window_size.height > 0);

    double near = proj(2,3) / (proj(2,2) - 1.0);
    double far = near * (proj(2,2) - 1.0) / (proj(2,2) + 1.0);
    double left = near * (proj(0,2)-1) / proj(0,0);
    double right = 2.0 * near / proj(0,0) + left;
    double bottom = near * (proj(1,2)-1) / proj(1,1);
    double top = 2.0 * near / proj(1,1) + bottom;

    if (fabs(left-right) < std::numeric_limits<double>::epsilon()) principal_point_[0] = static_cast<float>(window_size.width) * 0.5f;
    else principal_point_[0] = (left * static_cast<float>(window_size.width)) / (left - right);
    focal_[0] = -near * principal_point_[0] / left;

    if (fabs(top-bottom) < std::numeric_limits<double>::epsilon()) principal_point_[1] = static_cast<float>(window_size.height) * 0.5f;
    else principal_point_[1] = (top * static_cast<float>(window_size.height)) / (top - bottom);
    focal_[1] = near * principal_point_[1] / top;

    setClip(Vec2d(near, far));
    fov_[0] = (atan2(principal_point_[0],focal_[0]) + atan2(window_size.width-principal_point_[0],focal_[0]));
    fov_[1] = (atan2(principal_point_[1],focal_[1]) + atan2(window_size.height-principal_point_[1],focal_[1]));

    window_size_ = window_size;
}

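A hedged consistency sketch of the constructors added above: a Camera built from hypothetical intrinsics, projected to a matrix, and rebuilt from that matrix should report approximately the same principal point and focal length (up to float rounding). The tolerance and intrinsic values are assumptions for illustration only.

#include <opencv2/core.hpp>
#include <opencv2/viz/types.hpp>

void camera_roundtrip_sketch()
{
    cv::Size window(640, 480);
    cv::viz::Camera original(525.f, 525.f, 320.f, 240.f, window);   // hypothetical intrinsics

    cv::Matx44f proj;
    original.computeProjectionMatrix(proj);

    // The Matx44f constructor above recovers near/far and the frustum bounds
    // from the projection matrix, then derives the intrinsics back from them.
    cv::viz::Camera rebuilt(proj, window);

    CV_Assert(cv::norm(original.getFocalLength()    - rebuilt.getFocalLength())    < 1e-1);
    CV_Assert(cv::norm(original.getPrincipalPoint() - rebuilt.getPrincipalPoint()) < 1e-1);
}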
void cv::viz::Camera::init(float f_x, float f_y, float c_x, float c_y, const Size &window_size)
{
    CV_Assert(window_size.width > 0 && window_size.height > 0);
    setClip(Vec2d(0.01, 1000.01)); // Default clipping

    fov_[0] = (atan2(c_x,f_x) + atan2(window_size.width-c_x,f_x));
    fov_[1] = (atan2(c_y,f_y) + atan2(window_size.height-c_y,f_y));

    principal_point_[0] = c_x;
    principal_point_[1] = c_y;

    focal_[0] = f_x;
    focal_[1] = f_y;

    window_size_ = window_size;
}

void cv::viz::Camera::setWindowSize(const Size &window_size)
{
    CV_Assert(window_size.width > 0 && window_size.height > 0);

    // Get the scale factor and update the principal points
    float scalex = static_cast<float>(window_size.width) / static_cast<float>(window_size_.width);
    float scaley = static_cast<float>(window_size.height) / static_cast<float>(window_size_.height);

    principal_point_[0] *= scalex;
    principal_point_[1] *= scaley;
    focal_ *= scaley;
    // Vertical field of view is fixed! Update horizontal field of view
    fov_[0] = (atan2(principal_point_[0],focal_[0]) + atan2(window_size.width-principal_point_[0],focal_[0]));

    window_size_ = window_size;
}

void cv::viz::Camera::computeProjectionMatrix(Matx44f &proj) const
{
    double top    = clip_[0] * principal_point_[1] / focal_[1];
    double left   = -clip_[0] * principal_point_[0] / focal_[0];
    double right  = clip_[0] * (window_size_.width - principal_point_[0]) / focal_[0];
    double bottom = -clip_[0] * (window_size_.height - principal_point_[1]) / focal_[1];

    double temp1 = 2.0 * clip_[0];
    double temp2 = 1.0 / (right - left);
    double temp3 = 1.0 / (top - bottom);
    double temp4 = 1.0 / (clip_[0] - clip_[1]);

    proj = Matx44d::zeros();
    proj(0,0) = temp1 * temp2;
    proj(1,1) = temp1 * temp3;
    proj(0,2) = (right + left) * temp2;
    proj(1,2) = (top + bottom) * temp3;
    proj(2,2) = (clip_[1]+clip_[0]) * temp4;
    proj(3,2) = -1.0;
    proj(2,3) = (temp1 * clip_[1]) * temp4;
}

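The matrix assembled above is the standard OpenGL-style perspective frustum, with the frustum bounds derived from the intrinsics; writing n = clip_[0] and f = clip_[1]:

\[
P =
\begin{pmatrix}
\frac{2n}{r-l} & 0 & \frac{r+l}{r-l} & 0 \\
0 & \frac{2n}{t-b} & \frac{t+b}{t-b} & 0 \\
0 & 0 & -\frac{f+n}{f-n} & -\frac{2fn}{f-n} \\
0 & 0 & -1 & 0
\end{pmatrix},
\qquad
\begin{aligned}
t &= n\,c_y / f_y, & b &= -n\,(h - c_y)/f_y, \\
r &= n\,(w - c_x)/f_x, & l &= -n\,c_x / f_x,
\end{aligned}
\]

where \(w \times h\) is the window size and \((c_x, c_y)\), \((f_x, f_y)\) are the principal point and focal lengths in pixels.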
cv::viz::Camera cv::viz::Camera::KinectCamera(const Size &window_size)
{
    // Without distortion, RGB camera
    // Received from http://nicolas.burrus.name/index.php/Research/KinectCalibration
    Matx33f K = Matx33f::zeros();
    K(0,0) = 5.2921508098293293e+02;
    K(0,2) = 3.2894272028759258e+02;
    K(1,1) = 5.2556393630057437e+02;
    K(1,2) = 2.6748068171871557e+02;
    K(2,2) = 1.0f;
    return Camera(K, window_size);
}
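As a rough sanity check on the defaults used elsewhere in this commit, the vertical field of view implied by these Kinect intrinsics at a 480-pixel-high window is approximately

\[
\mathrm{fov}_y = \operatorname{atan2}(c_y, f_y) + \operatorname{atan2}(480 - c_y, f_y)
\approx 0.471 + 0.384 \approx 0.855\ \text{rad} \;(\approx 49^\circ),
\]

which is close to the 0.8575 rad default vertical field of view used in initCameraParameters below.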
@@ -45,3 +45,14 @@ cv::viz::Widget cv::viz::Viz3d::getWidget(const String &id) const { return impl_
void cv::viz::Viz3d::setWidgetPose(const String &id, const Affine3f &pose) { impl_->setWidgetPose(id, pose); }
void cv::viz::Viz3d::updateWidgetPose(const String &id, const Affine3f &pose) { impl_->updateWidgetPose(id, pose); }
cv::Affine3f cv::viz::Viz3d::getWidgetPose(const String &id) const { return impl_->getWidgetPose(id); }

void cv::viz::Viz3d::setCamera(const Camera &camera) { impl_->setCamera(camera); }
cv::viz::Camera cv::viz::Viz3d::getCamera() const { return impl_->getCamera(); }
void cv::viz::Viz3d::setViewerPose(const Affine3f &pose) { impl_->setViewerPose(pose); }
cv::Affine3f cv::viz::Viz3d::getViewerPose() { return impl_->getViewerPose(); }

void cv::viz::Viz3d::convertToWindowCoordinates(const Point3d &pt, Point3d &window_coord) { impl_->convertToWindowCoordinates(pt, window_coord); }
void cv::viz::Viz3d::converTo3DRay(const Point3d &window_coord, Point3d &origin, Vec3d &direction) { impl_->converTo3DRay(window_coord, origin, direction); }

cv::Size cv::viz::Viz3d::getWindowSize() const { return impl_->getWindowSize(); }
void cv::viz::Viz3d::setWindowSize(const Size &window_size) { impl_->setWindowSize(window_size.width, window_size.height); }
@@ -542,25 +542,12 @@ bool cv::viz::Viz3d::VizImpl::setShapeRenderingProperties (int property, double
/////////////////////////////////////////////////////////////////////////////////////////////
void cv::viz::Viz3d::VizImpl::initCameraParameters ()
{
    Camera camera_temp;
    // Set default camera parameters to something meaningful
    camera_temp.clip = Vec2d(0.01, 1000.01);

    // Look straight along the z-axis
    camera_temp.focal = Vec3d(0.0, 0.0, 1.0);

    // Position the camera at the origin
    camera_temp.pos = Vec3d(0.0, 0.0, 0.0);

    // Set the up-vector of the camera to be the y-axis
    camera_temp.view_up = Vec3d(0.0, 1.0, 0.0);

    // Set the default vertical field of view (radians)
    camera_temp.fovy = 0.8575;
    camera_temp.window_size = Vec2i(window_->GetScreenSize()) / 2;
    camera_temp.window_pos = Vec2i(0, 0);

    setCameraParameters (camera_temp);
    Vec2i window_size(window_->GetScreenSize());
    window_size /= 2;

    Camera camera_temp(Vec2f(0.0,0.8575), Size(window_size[0], window_size[1]));
    setCamera(camera_temp);
    setViewerPose(makeCameraPose(Vec3f(0.0f,0.0f,0.0f), Vec3f(0.0f, 0.0f, 1.0f), Vec3f(0.0f, 1.0f, 0.0f)));
}

/////////////////////////////////////////////////////////////////////////////////////////////
@@ -577,18 +564,59 @@ void cv::viz::Viz3d::VizImpl::updateCamera ()
}

/////////////////////////////////////////////////////////////////////////////////////////////
void cv::viz::Viz3d::VizImpl::getCameras (cv::viz::Camera& camera)
void cv::viz::Viz3d::VizImpl::setCamera(const Camera &camera)
{
    vtkCamera* active_camera = renderer_->GetActiveCamera ();
    vtkCamera& active_camera = *renderer_->GetActiveCamera();

    // Set the intrinsic parameters of the camera
    window_->SetSize (camera.getWindowSize().width, camera.getWindowSize().height);
    double aspect_ratio = static_cast<double>(camera.getWindowSize().width)/static_cast<double>(camera.getWindowSize().height);

    Matx44f proj_mat;
    camera.computeProjectionMatrix(proj_mat);
    // Use the intrinsic parameters of the camera to simulate the view more realistically
    Matx44f old_proj_mat = convertToMatx(active_camera.GetProjectionTransformMatrix(aspect_ratio, -1.0, 1.0));
    vtkTransform *transform = vtkTransform::New();
    // This is a workaround for not being able to set the projection matrix on a vtkCamera directly
    transform->SetMatrix(convertToVtkMatrix(proj_mat * old_proj_mat.inv()));
    active_camera.SetUserTransform(transform);
    transform->Delete();
}

    camera.pos = cv::Vec3d(active_camera->GetPosition());
    camera.focal = cv::Vec3d(active_camera->GetFocalPoint());
    camera.clip = cv::Vec2d(active_camera->GetClippingRange());
    camera.view_up = cv::Vec3d(active_camera->GetViewUp());
/////////////////////////////////////////////////////////////////////////////////////////////
cv::viz::Camera cv::viz::Viz3d::VizImpl::getCamera() const
{
    vtkCamera& active_camera = *renderer_->GetActiveCamera();

    Size window_size(renderer_->GetRenderWindow()->GetSize()[0],
                     renderer_->GetRenderWindow()->GetSize()[1]);
    double aspect_ratio = static_cast<double>(window_size.width) / static_cast<double>(window_size.height);

    Matx44f proj_matrix = convertToMatx(active_camera.GetProjectionTransformMatrix(aspect_ratio, -1.0f, 1.0f));
    Camera camera(proj_matrix, window_size);
    return camera;
}

    camera.fovy = active_camera->GetViewAngle() / 180.0 * CV_PI;
    camera.window_size = cv::Vec2i(renderer_->GetRenderWindow()->GetSize());
    camera.window_pos = cv::Vec2d::all(0);
/////////////////////////////////////////////////////////////////////////////////////////////
void cv::viz::Viz3d::VizImpl::setViewerPose(const Affine3f &pose)
{
    vtkCamera& camera = *renderer_->GetActiveCamera ();

    // Position = extrinsic translation
    cv::Vec3f pos_vec = pose.translation();

    // Rotate the view vector
    cv::Matx33f rotation = pose.rotation();
    cv::Vec3f y_axis (0.f, 1.f, 0.f);
    cv::Vec3f up_vec (rotation * y_axis);

    // Compute the new focal point
    cv::Vec3f z_axis (0.f, 0.f, 1.f);
    cv::Vec3f focal_vec = pos_vec + rotation * z_axis;

    camera.SetPosition(pos_vec[0], pos_vec[1], pos_vec[2]);
    camera.SetFocalPoint(focal_vec[0], focal_vec[1], focal_vec[2]);
    camera.SetViewUp(up_vec[0], up_vec[1], up_vec[2]);
}

/////////////////////////////////////////////////////////////////////////////////////////////
@@ -620,111 +648,37 @@ cv::Affine3f cv::viz::Viz3d::VizImpl::getViewerPose ()
    return cv::Affine3f(R, pos);
}

/////////////////////////////////////////////////////////////////////////////////////////////
void cv::viz::Viz3d::VizImpl::convertToWindowCoordinates(const Point3d &pt, Point3d &window_coord)
{
    Vec3d window_pt;
    vtkInteractorObserver::ComputeWorldToDisplay(renderer_, pt.x, pt.y, pt.z, window_pt.val);
    window_coord = window_pt;
}

/////////////////////////////////////////////////////////////////////////////////////////////
void cv::viz::Viz3d::VizImpl::converTo3DRay(const Point3d &window_coord, Point3d &origin, Vec3d &direction)
{
    Vec4d world_pt;
    vtkInteractorObserver::ComputeDisplayToWorld(renderer_, window_coord.x, window_coord.y, window_coord.z, world_pt.val);

    vtkCamera &active_camera = *renderer_->GetActiveCamera();
    Vec3d cam_pos;
    active_camera.GetPosition(cam_pos.val);
    origin = cam_pos;
    direction = normalize(Vec3d(world_pt.val) - cam_pos);
}

/////////////////////////////////////////////////////////////////////////////////////////////
void cv::viz::Viz3d::VizImpl::resetCamera ()
{
    renderer_->ResetCamera ();
}

/////////////////////////////////////////////////////////////////////////////////////////////
void cv::viz::Viz3d::VizImpl::setCameraPosition (const cv::Vec3d& pos, const cv::Vec3d& view, const cv::Vec3d& up)
{
    vtkSmartPointer<vtkCamera> cam = renderer_->GetActiveCamera ();
    cam->SetPosition (pos[0], pos[1], pos[2]);
    cam->SetFocalPoint (view[0], view[1], view[2]);
    cam->SetViewUp (up[0], up[1], up[2]);
    renderer_->Render ();
}

/////////////////////////////////////////////////////////////////////////////////////////////
void cv::viz::Viz3d::VizImpl::setCameraPosition (double pos_x, double pos_y, double pos_z, double up_x, double up_y, double up_z)
{
    //rens_->InitTraversal ();
    vtkSmartPointer<vtkCamera> cam = renderer_->GetActiveCamera ();
    cam->SetPosition (pos_x, pos_y, pos_z);
    cam->SetViewUp (up_x, up_y, up_z);
    renderer_->Render ();
}

/////////////////////////////////////////////////////////////////////////////////////////////
void cv::viz::Viz3d::VizImpl::setCameraParameters (const cv::Matx33f& intrinsics, const cv::Affine3f& extrinsics)
{
    // Position = extrinsic translation
    cv::Vec3f pos_vec = extrinsics.translation();

    // Rotate the view vector
    cv::Matx33f rotation = extrinsics.rotation();
    cv::Vec3f y_axis (0.f, 1.f, 0.f);
    cv::Vec3f up_vec (rotation * y_axis);

    // Compute the new focal point
    cv::Vec3f z_axis (0.f, 0.f, 1.f);
    cv::Vec3f focal_vec = pos_vec + rotation * z_axis;

    // Get the width and height of the image - assume the calibrated centers are at the center of the image
    Eigen::Vector2i window_size;
    window_size[0] = static_cast<int> (intrinsics(0, 2));
    window_size[1] = static_cast<int> (intrinsics(1, 2));

    // Compute the vertical field of view based on the focal length and image height
    double fovy = 2 * atan (window_size[1] / (2. * intrinsics (1, 1))) * 180.0 / M_PI;

    //rens_->InitTraversal ();

    vtkSmartPointer<vtkCamera> cam = renderer_->GetActiveCamera ();
    cam->SetPosition (pos_vec[0], pos_vec[1], pos_vec[2]);
    cam->SetFocalPoint (focal_vec[0], focal_vec[1], focal_vec[2]);
    cam->SetViewUp (up_vec[0], up_vec[1], up_vec[2]);
    cam->SetUseHorizontalViewAngle (0);
    cam->SetViewAngle (fovy);
    cam->SetClippingRange (0.01, 1000.01);
    window_->SetSize (window_size[0], window_size[1]);

    renderer_->Render ();
}

/////////////////////////////////////////////////////////////////////////////////////////////
void cv::viz::Viz3d::VizImpl::setCameraParameters (const cv::viz::Camera &camera)
{
    //rens_->InitTraversal ();

    vtkSmartPointer<vtkCamera> cam = renderer_->GetActiveCamera ();
    cam->SetPosition (camera.pos[0], camera.pos[1], camera.pos[2]);
    cam->SetFocalPoint (camera.focal[0], camera.focal[1], camera.focal[2]);
    cam->SetViewUp (camera.view_up[0], camera.view_up[1], camera.view_up[2]);
    cam->SetClippingRange (camera.clip.val);
    cam->SetUseHorizontalViewAngle (0);
    cam->SetViewAngle (camera.fovy * 180.0 / M_PI);

    window_->SetSize (static_cast<int> (camera.window_size[0]), static_cast<int> (camera.window_size[1]));
}

/////////////////////////////////////////////////////////////////////////////////////////////
void cv::viz::Viz3d::VizImpl::setCameraClipDistances (double near, double far)
{
    //rens_->InitTraversal ();
    vtkSmartPointer<vtkCamera> cam = renderer_->GetActiveCamera ();
    cam->SetClippingRange (near, far);
}

/////////////////////////////////////////////////////////////////////////////////////////////
void cv::viz::Viz3d::VizImpl::setCameraFieldOfView (double fovy)
{
    //rens_->InitTraversal ();
    vtkSmartPointer<vtkCamera> cam = renderer_->GetActiveCamera ();
    cam->SetUseHorizontalViewAngle (0);
    cam->SetViewAngle (fovy * 180.0 / M_PI);
}

/////////////////////////////////////////////////////////////////////////////////////////////
void cv::viz::Viz3d::VizImpl::resetCameraViewpoint (const std::string &id)
{
    // TODO Cloud actor is not used
    vtkSmartPointer<vtkMatrix4x4> camera_pose;
    static CloudActorMap::iterator it = cloud_actor_map_->find (id);
    if (it != cloud_actor_map_->end ())
@@ -1016,6 +970,7 @@ void cv::viz::Viz3d::VizImpl::setWindowName (const std::string &name)

void cv::viz::Viz3d::VizImpl::setWindowPosition (int x, int y) { window_->SetPosition (x, y); }
void cv::viz::Viz3d::VizImpl::setWindowSize (int xw, int yw) { window_->SetSize (xw, yw); }
cv::Size cv::viz::Viz3d::VizImpl::getWindowSize() const { return Size(window_->GetSize()[0], window_->GetSize()[1]); }

bool cv::viz::Viz3d::VizImpl::addPolygonMesh (const Mesh3d& /*mesh*/, const Mat& /*mask*/, const std::string &/*id*/)
{
@@ -109,6 +109,9 @@ public:
    // ////////////////////////////////////////////////////////////////////////////////////
    // All camera methods to be refactored into set/getViewerPose, setCamera(),
    // and the 'Camera' class itself with its various constructors/fields

    void setCamera(const Camera &camera);
    Camera getCamera() const;

    void initCameraParameters (); /** \brief Initialize camera parameters with some default values. */
    bool cameraParamsSet () const; /** \brief Checks whether the camera parameters were manually loaded from file. */
@@ -118,33 +121,13 @@ public:
    /** \brief Reset the camera direction from {0, 0, 0} to the center_{x, y, z} of a given dataset.
      * \param[in] id the point cloud object id (default: cloud) */
    void resetCameraViewpoint (const String& id = "cloud");
    /** \brief Set the camera pose given by position, viewpoint and up vector
      * \param[in] pos camera location
      * \param[in] view the view point of the camera
      * \param[in] up the view up direction of the camera */
    void setCameraPosition (const cv::Vec3d& pos, const cv::Vec3d& view, const cv::Vec3d& up);

    /** \brief Set the camera location and view up direction according to the given arguments
      * \param[in] pos_x,y,z the x,y,z coordinates of the camera location
      * \param[in] up_x,y,z the x,y,z components of the view up direction of the camera */
    void setCameraPosition (double pos_x, double pos_y, double pos_z, double up_x, double up_y, double up_z);

    /** \brief Set the camera parameters via an intrinsics and an extrinsics matrix
      * \note This assumes that the pixels are square and that the center of the image is at the center of the sensor.
      * \param[in] intrinsics the intrinsics that will be used to compute the VTK camera parameters
      * \param[in] extrinsics the extrinsics that will be used to compute the VTK camera parameters */
    void setCameraParameters (const cv::Matx33f& intrinsics, const Affine3f& extrinsics);
    void setCameraParameters (const Camera &camera);
    void setCameraClipDistances (double near, double far);
    void setCameraFieldOfView (double fovy);
    void getCameras (Camera& camera);

    // to implement Viz3d set/getViewerPose()
    void setViewerPose(const Affine3f &pose);
    Affine3f getViewerPose();

    void convertToWindowCoordinates(const Point3d &pt, Point3d &window_coord);
    void converTo3DRay(const Point3d &window_coord, Point3d &origin, Vec3d &direction);

@@ -155,6 +138,7 @@ public:
    // to implement in Viz3d
    void saveScreenshot (const String &file);
    void setWindowPosition (int x, int y);
    Size getWindowSize() const;
    void setWindowSize (int xw, int yw);
    void setFullScreen (bool mode);
    void setWindowName (const String &name);