I implemented FixedAspectRatioFrameLayout, so I can reuse it and have any hosted view be with fixed aspect ratio:
/**
 * A FrameLayout that forces its measured size to a fixed width:height aspect
 * ratio, shrinking whichever dimension is too large so the ratio always holds.
 * The ratio is read from the custom XML attributes
 * {@code aspectRatioWidth}/{@code aspectRatioHeight} (default 4:3).
 */
public class FixedAspectRatioFrameLayout extends FrameLayout
{
// Default to 4:3. Initializing the fields here (and not only in init()) fixes
// a divide-by-zero in onMeasure(): the (Context)-only constructor never calls
// init(), so both fields used to stay 0 when the view was created in code.
private int mAspectRatioWidth = 4;
private int mAspectRatioHeight = 3;
public FixedAspectRatioFrameLayout(Context context)
{
super(context);
}
public FixedAspectRatioFrameLayout(Context context, AttributeSet attrs)
{
super(context, attrs);
init(context, attrs);
}
public FixedAspectRatioFrameLayout(Context context, AttributeSet attrs, int defStyle)
{
super(context, attrs, defStyle);
init(context, attrs);
}
/** Reads the aspect-ratio attributes from XML, keeping 4:3 when absent. */
private void init(Context context, AttributeSet attrs)
{
TypedArray a = context.obtainStyledAttributes(attrs, R.styleable.FixedAspectRatioFrameLayout);
mAspectRatioWidth = a.getInt(R.styleable.FixedAspectRatioFrameLayout_aspectRatioWidth, 4);
mAspectRatioHeight = a.getInt(R.styleable.FixedAspectRatioFrameLayout_aspectRatioHeight, 3);
a.recycle();
}
// **overrides**
@Override protected void onMeasure (int widthMeasureSpec, int heightMeasureSpec)
{
int originalWidth = MeasureSpec.getSize(widthMeasureSpec);
int originalHeight = MeasureSpec.getSize(heightMeasureSpec);
// Height implied by the available width at the requested ratio.
int calculatedHeight = originalWidth * mAspectRatioHeight / mAspectRatioWidth;
int finalWidth, finalHeight;
if (calculatedHeight > originalHeight)
{
// Not enough vertical room: keep the height, derive the width instead.
finalWidth = originalHeight * mAspectRatioWidth / mAspectRatioHeight;
finalHeight = originalHeight;
}
else
{
finalWidth = originalWidth;
finalHeight = calculatedHeight;
}
// Remeasure children with EXACTLY so the fixed ratio is enforced.
super.onMeasure(
MeasureSpec.makeMeasureSpec(finalWidth, MeasureSpec.EXACTLY),
MeasureSpec.makeMeasureSpec(finalHeight, MeasureSpec.EXACTLY));
}
}
What you want to achieve is explained in the tutorial: Demo 3: Homography from the camera displacement.
You have the current camera pose (rotation + translation), you can compute the desired camera pose that allows to view the chessboard from a bird eye view.
As the chessboard frame is different from the camera frame (see here for the camera frame), the desired rotation for the camera pose that allows a bird eye view is:
Just follow the tutorial and you should get a homography matrix similar to:
H:
[0.935, -0.337, 40.383;
-0.116, 0.729, 64.381;
0.000408, -0.001299, 1]
With warpPerspective:
Another example:
As the chessboard is flat (for a generic scene a homography is valid only for a pure rotational camera movement), you can also play with the translation:
Edit: the code derived from the tutorial
#include <iostream>

#include <opencv2/aruco.hpp>
#include <opencv2/opencv.hpp>
using namespace std;
using namespace cv;
namespace
{
enum Pattern { CHESSBOARD, CIRCLES_GRID, ASYMMETRIC_CIRCLES_GRID };
void calcChessboardCorners(Size boardSize, float squareSize, vector<Point3f>& corners, Pattern patternType = CHESSBOARD)
{
corners.resize(0);
switch (patternType)
{
case CHESSBOARD:
case CIRCLES_GRID:
//! [compute-chessboard-object-points]
for( int i = 0; i < boardSize.height; i++ )
for( int j = 0; j < boardSize.width; j++ )
//To try to center the chessboard frame, we substract the image size
corners.push_back(Point3f(float((j-boardSize.width/2)*squareSize),
float((i-boardSize.height/2)*squareSize), 0));
//! [compute-chessboard-object-points]
break;
case ASYMMETRIC_CIRCLES_GRID:
for( int i = 0; i < boardSize.height; i++ )
for( int j = 0; j < boardSize.width; j++ )
corners.push_back(Point3f(float((2*j + i % 2)*squareSize),
float(i*squareSize), 0));
break;
default:
CV_Error(Error::StsBadArg, "Unknown pattern type\n");
}
}
void computeC2MC1(const Mat &R1, const Mat &tvec1, const Mat &R2, const Mat &tvec2,
Mat &R_1to2, Mat &tvec_1to2)
{
//c2Mc1 = c2Mo * oMc1 = c2Mo * c1Mo.inv()
R_1to2 = R2 * R1.t();
tvec_1to2 = R2 * (-R1.t()*tvec1) + tvec2;
}
} //namespace
int main()
{
Mat img = imread("left02.jpg");
Mat img_corners = img.clone(), img_pose = img.clone(), img_bird_eye_view = img.clone();
vector<Point2f> corners;
Size patternSize(9,6);
bool found = findChessboardCorners(img, patternSize, corners);
drawChessboardCorners(img_corners, patternSize, corners, found);
imshow("Chessboard corners detection", img_corners);
vector<Point3f> objectPoints;
float squareSize = 2.5e-2;
calcChessboardCorners(patternSize, squareSize, objectPoints);
FileStorage fs("left_intrinsics.yml", FileStorage::READ);
Mat cameraMatrix, distCoeffs;
fs["camera_matrix"] >> cameraMatrix;
fs["distortion_coefficients"] >> distCoeffs;
Mat rvec, tvec;
solvePnP(objectPoints, corners, cameraMatrix, distCoeffs, rvec, tvec);
aruco::drawAxis(img_pose, cameraMatrix, distCoeffs, rvec, tvec, 2*squareSize);
imshow("Pose", img_pose);
Mat R_desired = (Mat_<double>(3,3) <<
0, 1, 0,
-1, 0, 0,
0, 0, 1);
Mat R;
Rodrigues(rvec, R);
Mat normal = (Mat_<double>(3,1) << 0, 0, 1);
Mat normal1 = R*normal;
Mat origin(3, 1, CV_64F, Scalar(0));
Mat origin1 = R*origin + tvec;
double d_inv1 = 1.0 / normal1.dot(origin1);
Mat R_1to2, tvec_1to2;
Mat tvec_desired = tvec.clone();
computeC2MC1(R, tvec, R_desired, tvec_desired, R_1to2, tvec_1to2);
Mat H = R_1to2 + d_inv1 * tvec_1to2*normal1.t();
H = cameraMatrix * H * cameraMatrix.inv();
H = H/H.at<double>(2,2);
std::cout << "H:\n" << H << std::endl;
warpPerspective(img_pose, img_bird_eye_view, H, img.size());
Mat compare;
hconcat(img_pose, img_bird_eye_view, compare);
imshow("Bird eye view", compare);
waitKey();
return 0;
}
Best Answer
The projection matrix and the view matrix describe completely different transformations. While the projection matrix describes the mapping from 3D points of a scene to 2D points of the viewport, the view matrix describes the direction and position from which the scene is looked at. The view matrix is defined by the camera position, the direction to the target of view, and the up vector of the camera.
(see Transform the modelMatrix)
This means it is not possible to get the view matrix from the projection matrix. But the camera defines a view matrix.
If the projection is perspective, then it will be possible to get the field of view angle and the aspect ratio from the projection matrix.
The Perspective Projection Matrix looks like this:
it follows:
The field of view angle along the Y-axis in degrees:
The aspect ratio:
See further the answers to the following question:
How to render depth linearly in modern OpenGL with gl_FragCoord.z in fragment shader?
How to recover view space position given view space depth value and ndc xy