I have a set of 2D-3D correspondences. The 2D points come from a corner detection algorithm (not written by me, and verified to be correct); for the 3D points, I arbitrarily set the initial reference point to (5, 0, 20). I chose those integers as a balance between numbers that are easy to work with and a geometrically reasonable location.
I calculate the rest of the 3D points with a little bit of math/geometry, all derived from that initial anchor point of (5, 0, 20).
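To give a concrete picture, the construction is roughly like this (a minimal sketch only; makeObjectPoints, the planar grid layout, and the spacing are placeholders standing in for my actual target geometry):

#include <vector>
#include <Eigen/Core>

// Hypothetical illustration: build the remaining 3D points as offsets
// from the anchor. My real offsets come from the target's geometry.
std::vector<Eigen::Vector3d> makeObjectPoints(const Eigen::Vector3d& anchor,
                                              int rows, int cols, double spacing) {
  std::vector<Eigen::Vector3d> points;
  for (int r = 0; r < rows; ++r)
    for (int c = 0; c < cols; ++c)
      // Planar grid in the world XY plane, anchored at the reference point.
      points.push_back(anchor + Eigen::Vector3d(c * spacing, r * spacing, 0.0));
  return points;
}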
However, I'm getting a very significant reprojection error, and when I visually reproject the points they look very wrong as well. Does the choice of the initial reference 3D point matter, as long as the rest of the points are derived from it consistently math/geometry-wise?
Here’s a snippet of my ceres solver code:
#include <ceres/ceres.h>
#include <ceres/rotation.h>
#include <Eigen/Core>

struct ReprojectionError {
  ReprojectionError(const Eigen::Vector2d& observed, const Eigen::Vector3d& point3D,
                    double focal_length, const Eigen::Vector2d& camera_center)
      : observed_(observed), point3D_(point3D),
        focal_length_(focal_length), camera_center_(camera_center) {}

  template <typename T>
  bool operator()(const T* const camera_rotation, const T* const camera_translation,
                  T* residuals) const {
    // Convert the rotation from angle-axis to a rotation matrix.
    // Note: ceres::AngleAxisToRotationMatrix writes R in COLUMN-major order,
    // so R[0], R[3], R[6] form the first row.
    T R[9];
    ceres::AngleAxisToRotationMatrix(camera_rotation, R);
    Eigen::Matrix<T, 3, 3> rotation_matrix;
    rotation_matrix << R[0], R[3], R[6],
                       R[1], R[4], R[7],
                       R[2], R[5], R[8];
    Eigen::Matrix<T, 3, 1> translation_vector(camera_translation[0],
                                              camera_translation[1],
                                              camera_translation[2]);
    // Apply the extrinsic transformation (rotation and translation).
    Eigen::Matrix<T, 3, 1> p = rotation_matrix * point3D_.cast<T>() + translation_vector;
    // Apply the intrinsic transformation (pinhole projection).
    T xp = focal_length_ * p[0] / p[2] + camera_center_[0];
    T yp = focal_length_ * p[1] / p[2] + camera_center_[1];
    // Compute the residuals.
    residuals[0] = T(observed_(0)) - xp;
    residuals[1] = T(observed_(1)) - yp;
    return true;
  }

  static ceres::CostFunction* Create(const Eigen::Vector2d& observed,
                                     const Eigen::Vector3d& point3D,
                                     double focal_length,
                                     const Eigen::Vector2d& camera_center) {
    // 2 residuals; 3 rotation parameters (angle-axis); 3 translation parameters.
    return new ceres::AutoDiffCostFunction<ReprojectionError, 2, 3, 3>(
        new ReprojectionError(observed, point3D, focal_length, camera_center));
  }

  Eigen::Vector2d observed_;
  Eigen::Vector3d point3D_;
  double focal_length_;
  Eigen::Vector2d camera_center_;
};
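Before solving, I also sanity-check the correspondences by computing the initial RMS reprojection error with the same pinhole model (a minimal sketch; computeRmsError is just a name I'm using here, and it assumes the Correspondence struct with point2D/point3D fields used below):

#include <cmath>

// Hypothetical helper: RMS reprojection error for a given pose, using the
// same projection model as the cost functor above.
double computeRmsError(const std::vector<Correspondence>& correspondences,
                       const double rotation[3], const double translation[3],
                       double focal_length, const Eigen::Vector2d& camera_center) {
  double sum_sq = 0.0;
  for (const auto& corr : correspondences) {
    const double p_in[3] = {corr.point3D.x(), corr.point3D.y(), corr.point3D.z()};
    double p_cam[3];
    ceres::AngleAxisRotatePoint(rotation, p_in, p_cam);
    for (int i = 0; i < 3; ++i) p_cam[i] += translation[i];
    const double xp = focal_length * p_cam[0] / p_cam[2] + camera_center[0];
    const double yp = focal_length * p_cam[1] / p_cam[2] + camera_center[1];
    sum_sq += (corr.point2D - Eigen::Vector2d(xp, yp)).squaredNorm();
  }
  return std::sqrt(sum_sq / correspondences.size());
}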
...
void calibrateSolver(double focal_length, Eigen::Vector2d camera_center,
                     double camera_rotation[3], double camera_translation[3],
                     std::vector<Correspondence> correspondences,
                     std::string camera_name, const cv::Mat& image) {
  // Build the problem: one residual block per 2D-3D correspondence.
  ceres::Problem problem;
  for (const auto& corr : correspondences) {
    ceres::CostFunction* cost_function =
        ReprojectionError::Create(corr.point2D, corr.point3D, focal_length, camera_center);
    problem.AddResidualBlock(cost_function, nullptr, camera_rotation, camera_translation);
  }
  // Fix the translation vector; only the rotation is optimized.
  problem.SetParameterBlockConstant(camera_translation);
  // Configure the solver.
  ceres::Solver::Options options;
  options.linear_solver_type = ceres::DENSE_QR;
  options.max_num_iterations = 100;
  options.minimizer_progress_to_stdout = true;
  ceres::Solver::Summary summary;
  // Solve the problem.
  ceres::Solve(options, &problem, &summary);
  // Output the results.
  std::cout << summary.FullReport() << "\n";
  std::cout << "Estimated rotation (angle-axis) for " << camera_name << " : "
            << camera_rotation[0] << ", " << camera_rotation[1] << ", "
            << camera_rotation[2] << "\n";
  std::cout << "Estimated translation: "
            << camera_translation[0] << ", " << camera_translation[1] << ", "
            << camera_translation[2] << "\n";
}
int main() {
...
For the purposes of my problem, I'm only solving for/correcting the rotation; all the other camera parameters (translation, intrinsics) are assumed to be correct.
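For reference, the call site inside main looks roughly like this (the initial values here are placeholders, not my real calibration; the translation is the one I'm treating as correct and holding constant):

double camera_rotation[3] = {0.0, 0.0, 0.0};     // initial guess: identity rotation
double camera_translation[3] = {0.0, 0.0, 0.0};  // assumed correct, held constant
calibrateSolver(focal_length, camera_center, camera_rotation, camera_translation,
                correspondences, "cam0", image);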