mirror of https://github.com/opencv/opencv.git
makeCameraPose implementation
parent 250dac5b71
commit 42266b04a5
@@ -64,7 +64,7 @@ namespace cv
         CV_EXPORTS Affine3f makeTransformToGlobal(const Vec3f& axis_x, const Vec3f& axis_y, const Vec3f& axis_z, const Vec3f& origin = Vec3f::all(0));
 
         //! constructs camera pose from position, focal_point and up_vector (see gluLookAt() for more information)
-        CV_EXPORTS Affine3f makeCameraPose(const Vec3f& position, const Vec3f& focal_point, const Vec3f& up_vector);
+        CV_EXPORTS Affine3f makeCameraPose(const Vec3f& position, const Vec3f& focal_point, const Vec3f& y_dir);
 
         //! checks float value for Nan
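
For context, a minimal usage sketch of the function declared above (an illustration only: it assumes the module's umbrella header opencv2/viz.hpp and present-day OpenCV core types; the numeric values are arbitrary):

    #include <opencv2/viz.hpp>
    #include <iostream>

    int main()
    {
        // Camera 5 units along +Z, looking back at the origin, with +Y as the up hint (illustrative values).
        cv::Vec3f position(0.0f, 0.0f, 5.0f);
        cv::Vec3f focal_point(0.0f, 0.0f, 0.0f);
        cv::Vec3f y_dir(0.0f, 1.0f, 0.0f);

        cv::Affine3f pose = cv::viz::makeCameraPose(position, focal_point, y_dir);

        // The rotation columns are the camera's x/y/z axes in world coordinates;
        // the translation is the camera position.
        std::cout << cv::Mat(pose.matrix) << std::endl;
        return 0;
    }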
@@ -170,7 +170,6 @@ namespace cv
    {
    public:
        CameraPositionWidget(double scale = 1.0);
-       CameraPositionWidget(const Vec3f &position, const Vec3f &look_at, const Vec3f &up_vector, double scale = 1.0);
        CameraPositionWidget(const Matx33f &K, double scale = 1.0, const Color &color = Color::white());
        CameraPositionWidget(const Vec2f &fov, double scale = 1.0, const Color &color = Color::white());
        CameraPositionWidget(const Matx33f &K, const Mat &img, double scale = 1.0, const Color &color = Color::white());
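
For reference, a hedged sketch of constructing one of the overloads kept above from pinhole intrinsics (the K values are arbitrary, and the umbrella header opencv2/viz.hpp is assumed; displaying the widget would additionally require a viz window, which is not shown):

    #include <opencv2/viz.hpp>

    int main()
    {
        // Illustrative pinhole intrinsics (fx, fy, cx, cy).
        cv::Matx33f K(525.0f,   0.0f, 320.0f,
                        0.0f, 525.0f, 240.0f,
                        0.0f,   0.0f,   1.0f);

        // Widget built from the intrinsics, scale 0.5, white color.
        cv::viz::CameraPositionWidget frustum(K, 0.5, cv::viz::Color::white());
        (void)frustum; // in a full program this would be handed to a viz window
        return 0;
    }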
@@ -883,74 +883,6 @@ cv::viz::CameraPositionWidget::CameraPositionWidget(double scale)
     WidgetAccessor::setProp(*this, actor);
 }
 
-cv::viz::CameraPositionWidget::CameraPositionWidget(const Vec3f &position, const Vec3f &look_at, const Vec3f &up_vector, double scale)
-{
-    vtkSmartPointer<vtkAxes> axes = vtkSmartPointer<vtkAxes>::New ();
-    axes->SetOrigin (0, 0, 0);
-    axes->SetScaleFactor (scale);
-
-    // Compute the transformation matrix for drawing the camera frame in a scene
-    Vec3f u,v,n;
-    n = normalize(look_at - position);
-    u = normalize(up_vector.cross(n));
-    v = n.cross(u);
-
-    vtkSmartPointer<vtkMatrix4x4> mat_trans = vtkSmartPointer<vtkMatrix4x4>::New();
-    mat_trans->SetElement(0,0,u[0]);
-    mat_trans->SetElement(0,1,u[1]);
-    mat_trans->SetElement(0,2,u[2]);
-    mat_trans->SetElement(1,0,v[0]);
-    mat_trans->SetElement(1,1,v[1]);
-    mat_trans->SetElement(1,2,v[2]);
-    mat_trans->SetElement(2,0,n[0]);
-    mat_trans->SetElement(2,1,n[1]);
-    mat_trans->SetElement(2,2,n[2]);
-    // Inverse rotation (orthogonal, so just take transpose)
-    mat_trans->Transpose();
-    // Then translate the coordinate frame to camera position
-    mat_trans->SetElement(0,3,position[0]);
-    mat_trans->SetElement(1,3,position[1]);
-    mat_trans->SetElement(2,3,position[2]);
-    mat_trans->SetElement(3,3,1);
-
-    vtkSmartPointer<vtkFloatArray> axes_colors = vtkSmartPointer<vtkFloatArray>::New ();
-    axes_colors->Allocate (6);
-    axes_colors->InsertNextValue (0.0);
-    axes_colors->InsertNextValue (0.0);
-    axes_colors->InsertNextValue (0.5);
-    axes_colors->InsertNextValue (0.5);
-    axes_colors->InsertNextValue (1.0);
-    axes_colors->InsertNextValue (1.0);
-
-    vtkSmartPointer<vtkPolyData> axes_data = axes->GetOutput ();
-    axes_data->Update ();
-    axes_data->GetPointData ()->SetScalars (axes_colors);
-
-    // Transform the default coordinate frame
-    vtkSmartPointer<vtkTransform> transform = vtkSmartPointer<vtkTransform>::New();
-    transform->PreMultiply();
-    transform->SetMatrix(mat_trans);
-
-    vtkSmartPointer<vtkTransformPolyDataFilter> filter = vtkSmartPointer<vtkTransformPolyDataFilter>::New();
-    filter->SetInput(axes_data);
-    filter->SetTransform(transform);
-    filter->Update();
-
-    vtkSmartPointer<vtkTubeFilter> axes_tubes = vtkSmartPointer<vtkTubeFilter>::New ();
-    axes_tubes->SetInput (filter->GetOutput());
-    axes_tubes->SetRadius (axes->GetScaleFactor () / 50.0);
-    axes_tubes->SetNumberOfSides (6);
-
-    vtkSmartPointer<vtkDataSetMapper> mapper = vtkSmartPointer<vtkDataSetMapper>::New ();
-    mapper->SetScalarModeToUsePointData ();
-    mapper->SetInput(axes_tubes->GetOutput ());
-
-    vtkSmartPointer<vtkLODActor> actor = vtkSmartPointer<vtkLODActor>::New();
-    actor->SetMapper(mapper);
-
-    WidgetAccessor::setProp(*this, actor);
-}
-
 cv::viz::CameraPositionWidget::CameraPositionWidget(const Matx33f &K, double scale, const Color &color)
 {
     vtkSmartPointer<vtkCamera> camera = vtkSmartPointer<vtkCamera>::New();
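
The constructor removed above and the new makeCameraPose (next hunk) share the same look-at basis construction. A small standalone sketch of just that step (OpenCV core only, assuming a current header layout; the values are illustrative), useful for seeing that u, v, n form a right-handed orthonormal frame:

    #include <opencv2/core.hpp>
    #include <iostream>

    int main()
    {
        cv::Vec3f position(2.0f, 0.0f, 2.0f), look_at(0.0f, 0.0f, 0.0f), up_vector(0.0f, 1.0f, 0.0f);

        // Viewing direction, right vector, and recomputed (true) up vector.
        cv::Vec3f n = cv::normalize(look_at - position);
        cv::Vec3f u = cv::normalize(up_vector.cross(n));
        cv::Vec3f v = n.cross(u);

        // All pairwise dot products should be ~0 and each vector unit length.
        // Note: if up_vector is (anti)parallel to n, u degenerates to the zero vector.
        std::cout << u.dot(v) << " " << u.dot(n) << " " << v.dot(n) << std::endl;
        std::cout << cv::norm(u) << " " << cv::norm(v) << " " << cv::norm(n) << std::endl;
        return 0;
    }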
@@ -19,6 +19,32 @@ cv::Affine3f cv::viz::makeTransformToGlobal(const Vec3f& axis_x, const Vec3f& ax
     return Affine3f(R, origin);
 }
 
+cv::Affine3f cv::viz::makeCameraPose(const Vec3f& position, const Vec3f& focal_point, const Vec3f& y_dir)
+{
+    // Compute the transformation matrix for drawing the camera frame in a scene
+    Vec3f u,v,n;
+    n = normalize(focal_point - position);
+    u = normalize(y_dir.cross(n));
+    v = n.cross(u);
+
+    Matx44f pose_mat;
+    pose_mat.zeros();
+    pose_mat(0,0) = u[0];
+    pose_mat(0,1) = u[1];
+    pose_mat(0,2) = u[2];
+    pose_mat(1,0) = v[0];
+    pose_mat(1,1) = v[1];
+    pose_mat(1,2) = v[2];
+    pose_mat(2,0) = n[0];
+    pose_mat(2,1) = n[1];
+    pose_mat(2,2) = n[2];
+    pose_mat(3,0) = position[0];
+    pose_mat(3,1) = position[1];
+    pose_mat(3,2) = position[2];
+    pose_mat(3,3) = 1.0f;
+    pose_mat = pose_mat.t();
+    return pose_mat;
+}
+
 vtkSmartPointer<vtkMatrix4x4> cv::viz::convertToVtkMatrix (const cv::Matx44f &m)
 {
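
makeTransformToGlobal (whose tail is visible at the top of this hunk) appears to place the supplied axes in the rotation and the origin in the translation, so the new function should agree with building the basis explicitly and passing it through makeTransformToGlobal. A hedged cross-check sketch, valid only under that column-layout assumption (umbrella header opencv2/viz.hpp assumed, values arbitrary):

    #include <opencv2/viz.hpp>
    #include <iostream>

    int main()
    {
        cv::Vec3f position(1.0f, 2.0f, 3.0f), focal_point(0.0f, 0.0f, 0.0f), y_dir(0.0f, 1.0f, 0.0f);

        // Same basis as computed inside makeCameraPose.
        cv::Vec3f n = cv::normalize(focal_point - position);
        cv::Vec3f u = cv::normalize(y_dir.cross(n));
        cv::Vec3f v = n.cross(u);

        cv::Affine3f a = cv::viz::makeCameraPose(position, focal_point, y_dir);
        cv::Affine3f b = cv::viz::makeTransformToGlobal(u, v, n, position);

        // If the assumption about makeTransformToGlobal's layout holds,
        // the two 4x4 matrices should be identical.
        std::cout << cv::Mat(a.matrix) << "\n" << cv::Mat(b.matrix) << std::endl;
        return 0;
    }

One side note on the added implementation: Matx44f::zeros() is a static factory that returns a new zero matrix rather than zeroing pose_mat in place, so its result is discarded here; the default Matx constructor already fills the elements with zeros, which is why the unset entries still end up zero.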