@@ -0,0 +1,619 @@
+#include "camera_calibrator_impl.h"
+#include "core/imgui_utility.hpp"
+#include "third_party/scope_guard.hpp"
+#include "render/render_utility.h"
+
+#include <boost/asio/post.hpp>
+
+#include <opencv2/calib3d.hpp>
+#include <opencv2/imgproc.hpp>
+
+#include <glm/ext/quaternion_geometric.hpp>
+#include <glm/gtc/matrix_access.hpp>
+
+#define NANOVG_GL3_IMPLEMENTATION
+
+#include <nanovg_gl.h>
+
+#include <fstream>
+#include <ranges>
+
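+// Hand-eye calibration of a tracked camera: chessboard images are collected
+// together with the pose reported by an external tracker, corners are detected
+// per image on a thread pool, and hand_eye_calib::calc() then estimates the
+// camera intrinsics, the per-view board poses, and finally the
+// camera-to-reference transform via robot-world/hand-eye calibration.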
+namespace camera_calibrator_impl {
+
+    cv::Mat to_cv_mat(const glm::mat3 &mat) {
+        auto ret = cv::Mat(3, 3, CV_32FC1);
+        for (auto j = 0; j < 3; ++j)
+            for (auto i = 0; i < 3; ++i)
+                ret.at<float>(j, i) = mat[i][j];
+        return ret;
+    }
+
+    cv::Mat to_cv_mat(const glm::vec3 &vec) {
+        auto ret = cv::Mat(3, 1, CV_32FC1);
+        for (auto j = 0; j < 3; ++j)
+            ret.at<float>(j, 0) = vec[j];
+        return ret;
+    }
+
+    glm::mat3 rodrigues_to_mat(const cv::Mat &vec) {
+        assert(vec.size().area() == 3);
+        auto rot_mat = cv::Mat();
+        cv::Rodrigues(vec, rot_mat);
+        return to_mat3(rot_mat);
+    }
+
+    cv::Mat to_cv_rodigues(const glm::mat3 &mat) {
+        auto ret = cv::Mat();
+        cv::Rodrigues(to_cv_mat(mat), ret);
+        return ret;
+    }
+
+    glm::mat3 to_mat3(const cv::Mat &mat) {
+        assert(mat.size() == cv::Size(3, 3));
+        assert(mat.type() == CV_32FC1
+               || mat.type() == CV_64FC1);
+        auto ret = glm::mat3();
+        for (auto j = 0; j < 3; ++j)
+            for (auto i = 0; i < 3; ++i) {
+                switch (mat.type()) {
+                    // @formatter:off
+                    case CV_32FC1: { ret[i][j] = mat.at<float>(j, i); break; }
+                    case CV_64FC1: { ret[i][j] = mat.at<double>(j, i); break; }
+                    // @formatter:on
+                    default: {
+                        RET_ERROR_E;
+                    }
+                }
+            }
+        return ret;
+    }
+
+    glm::vec3 to_vec3(const cv::Mat &mat) {
+        assert(mat.size() == cv::Size(1, 3));
+        assert(mat.type() == CV_32FC1
+               || mat.type() == CV_64FC1);
+        auto ret = glm::vec3();
+        for (auto j = 0; j < 3; ++j) {
+            switch (mat.type()) {
+                // @formatter:off
+                case CV_32FC1: { ret[j] = mat.at<float>(j, 0); break; }
+                case CV_64FC1: { ret[j] = mat.at<double>(j, 0); break; }
+                // @formatter:on
+                default: {
+                    RET_ERROR_E;
+                }
+            }
+        }
+        return ret;
+    }
+
+    template<typename Tp>
+    glm::vec3 to_vec3(const cv::Vec<Tp, 3> &vec) {
+        auto ret = glm::vec3();
+        for (auto j = 0; j < 3; ++j)
+            ret[j] = vec[j];
+        return ret;
+    }
+
+    glm::vec3 to_translation(const glm::mat4 &mat) {
+        return glm::column(mat, 3);
+    }
+
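+    // Builds the planar board model used as the calibration object points:
+    // one entry per inner corner, laid out row by row with spacing `dis`;
+    // the two-argument constructor of the point type zero-fills z, so all
+    // points lie in the z = 0 board plane.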
+    void hand_eye_calib::set_object_points(cv::Size size, float dis) {
+        obj_ps.clear();
+        for (auto iy = 0; iy < size.height; ++iy)
+            for (auto ix = 0; ix < size.width; ++ix) {
+                obj_ps.emplace_back(iy * dis, ix * dis);
+            }
+    }
+
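+    // Interpolates the tracked pose at timestamp t between the two bracketing
+    // samples: translation with a linear mix, rotation with quaternion slerp.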
+    glm::mat4 hand_eye_calib::calc_track_mat(timestamp_type t) {
+        auto iter = track_pool.lower_bound(t);
+        assert(iter != track_pool.end());
+        if (iter == track_pool.begin()
+            || iter->first == t) { // no interpolation needed
+            return iter->second;
+        }
+
+        auto next = iter, prev = --iter;
+        assert(next != track_pool.begin());
+        auto next_ts = next->first, prev_ts = prev->first;
+        auto interp_t = 1.0f * (t - prev_ts) / (next_ts - prev_ts);
+
+        auto next_mat = next->second, prev_mat = prev->second;
+        auto next_t = to_translation(next_mat);
+        auto prev_t = to_translation(prev_mat);
+        auto cur_t = glm::mix(prev_t, next_t, interp_t);
+
+        auto next_quat = glm::quat_cast(glm::mat3(next_mat));
+        auto prev_quat = glm::quat_cast(glm::mat3(prev_mat));
+        auto cur_quat = glm::slerp(prev_quat, next_quat, interp_t);
+
+        return to_transform_mat(cur_t, glm::mat3_cast(cur_quat));
+    }
+
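+    // Applies the OpenCV rational distortion model to a normalized image point:
+    //   k  = (1 + k1*r^2 + k2*r^4 + k3*r^6) / (1 + k4*r^2 + k5*r^4 + k6*r^6)
+    //   x' = x*k + 2*p1*x*y + p2*(r^2 + 2*x^2)
+    //   y' = y*k + p1*(r^2 + 2*y^2) + 2*p2*x*y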
+    glm::vec2 hand_eye_calib::distort_point(glm::vec2 p) {
+        auto &dc = dist_coeffs;
+        assert(dc.size() == 8);
+        auto k1 = dc[0], k2 = dc[1], p1 = dc[2], p2 = dc[3];
+        auto k3 = dc[4], k4 = dc[5], k5 = dc[6], k6 = dc[7];
+
+        auto r2 = p.x * p.x + p.y * p.y;
+        auto r4 = r2 * r2, r6 = r4 * r2;
+        auto k = (1.f + k1 * r2 + k2 * r4 + k3 * r6)
+                 / (1.f + k4 * r2 + k5 * r4 + k6 * r6);
+        auto tx = p.x * k
+                  + 2 * p1 * p.x * p.y
+                  + p2 * (r2 + 2 * p.x * p.x);
+        auto ty = p.y * k
+                  + p1 * (r2 + 2 * p.y * p.y)
+                  + 2 * p2 * p.x * p.y;
+        return glm::vec2(tx, ty);
+    }
+
+    glm::vec2 hand_eye_calib::project_point(glm::vec3 p) {
+        auto np = distort_point(from_homo(p));
+        return transform_p(intrinsic_mat, np);
+    }
+
+    float hand_eye_calib::corner_distance(const corner_type &c1,
+                                          const corner_type &c2) {
+        assert(c1.size() == c2.size());
+        auto corner_num = c1.size();
+        float dis_sum = 0.f;
+        for (auto k = 0; k < corner_num; ++k) {
+            auto p1 = c1[k], p2 = c2[k];
+            dis_sum += sqrt(pow(p1.x - p2.x, 2.f)
+                            + pow(p1.y - p2.y, 2.f));
+        }
+        return dis_sum / corner_num;
+    }
+
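+    // Full calibration pass over the collected samples:
+    //   1. estimate intrinsics and distortion with cv::calibrateCamera on a
+    //      subset of sufficiently different views,
+    //   2. recover the board pose of every view with cv::solvePnP,
+    //   3. solve the robot-world/hand-eye problem to get the camera pose in
+    //      the tracker reference frame,
+    //   4. report the 3D and 2D residual errors.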
+    void hand_eye_calib::calc() {
+        auto sample_num = img_pool.size();
+        auto corner_num = img_pool.begin()->corner.size();
+
+        // select a subset of sufficiently different views for calibrateCamera:
+        // a candidate is kept only if its corners are far enough from every
+        // view already selected
+        static constexpr auto pix_dis_threshold = 128.0f;
+        auto img_ps_in = std::vector<corner_type>();
+        auto img_select_op = [&](corner_type &c_cand) {
+            return std::ranges::all_of(img_ps_in, [&](corner_type &c_exist) {
+                return corner_distance(c_cand, c_exist) > pix_dis_threshold;
+            });
+        };
+
+        auto img_ps_all = std::vector<corner_type>();
+        std::ranges::transform(img_pool, std::back_inserter(img_ps_all),
+                               [](const auto &item) { return item.corner; });
+        std::ranges::copy_if(img_ps_all, std::back_inserter(img_ps_in), img_select_op);
+
+        auto select_num = img_ps_in.size();
+        auto obj_ps_in =
+                std::vector<object_points_type>(select_num, obj_ps);
+
+        cv::Mat r_vec_ignore, t_vec_ignore;
+        auto intrinsic_cv_mat = cv::Mat();
+        auto dist_coeffs_mat = cv::Mat();
+        auto err = cv::calibrateCamera(
+                obj_ps_in, img_ps_in, img_size,
+                intrinsic_cv_mat, dist_coeffs_mat,
+                r_vec_ignore, t_vec_ignore,
+                cv::CALIB_RATIONAL_MODEL); // use k4, k5, k6
+        SPDLOG_INFO("Overall camera calibration error is {:.2f}pix", err);
+        intrinsic_mat = to_mat3(intrinsic_cv_mat);
+
+        assert(dist_coeffs_mat.size().height == 1);
+        auto dist_coeffs_num = dist_coeffs_mat.size().width;
+        assert(dist_coeffs_num >= 8); // k1-3, p1-2, k4-6
+        dist_coeffs.reserve(dist_coeffs_num);
+        assert(dist_coeffs_mat.type() == CV_64FC1);
+        for (auto k = 0; k < 8; ++k) {
+            dist_coeffs.push_back(dist_coeffs_mat.at<double>(0, k));
+        }
+
+        // solve PnP for each image
+        auto cam_r_vec = std::vector<cv::Mat>(); // chess board in camera
+        auto cam_t_vec = std::vector<cv::Mat>();
+        auto obj_ps_pnp = std::vector<cv::Point3d>();
+        std::ranges::transform(obj_ps, std::back_inserter(obj_ps_pnp), [](cv::Vec3f &p) {
+            return cv::Point3d(p[0], p[1], p[2]);
+        });
+        std::optional<glm::quat> last_rot_quat;
+        static constexpr auto quat_threshold = 0.5f;
+        for (auto &corner: img_ps_all) {
+            cv::Mat chess_r, chess_t; // chess board in camera
+
+            for (;;) {
+                bool ok = cv::solvePnP(obj_ps_pnp, corner,
+                                       intrinsic_cv_mat, dist_coeffs_mat,
+                                       chess_r, chess_t);
+                assert(ok);
+
+                // rotate corners if needed
+                auto rot_mat = rodrigues_to_mat(chess_r);
+                auto rot_quat = glm::quat_cast(rot_mat);
+                break; // TODO: test the commented-out flip handling below
+
+//                if (!last_rot_quat.has_value()
+//                    // https://math.stackexchange.com/questions/90081/quaternion-distance
+//                    || 1 - glm::dot(rot_quat, *last_rot_quat) < quat_threshold) {
+//                    last_rot_quat = rot_quat;
+//                    break;
+//                } else {
+//                    std::ranges::reverse(corner);
+//                    SPDLOG_DEBUG("Flipped.");
+//                }
+            }
+
+            auto ret_mat = to_transform_mat(to_vec3(chess_t),
+                                            rodrigues_to_mat(chess_r));
+            auto cam_r = to_cv_mat(glm::mat3(ret_mat)); // rotation part
+            auto cam_t = to_cv_mat(to_translation(ret_mat)); // translation part
+            cam_r_vec.push_back(cam_r);
+            cam_t_vec.push_back(cam_t);
+        }
+
+        static constexpr auto ts_offset = -80000; // us
+
+        // build hand-eye calibration problem
+        auto ref_r_vec = std::vector<cv::Mat>(); // tracker in reference
+        auto ref_t_vec = std::vector<cv::Mat>();
+        for (auto &info: img_pool) {
+            auto ref_mat = calc_track_mat(info.sample_ts + ts_offset);
+            ref_mat = glm::inverse(ref_mat);
+            auto r_mat = glm::mat3(ref_mat);
+            auto t_vec = glm::vec3(to_translation(ref_mat)); // fourth column of matrix
+            ref_r_vec.push_back(to_cv_mat(r_mat));
+            ref_t_vec.push_back(to_cv_mat(t_vec));
+        }
+
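+        // cv::calibrateRobotWorldHandEye solves the robot-world/hand-eye
+        // problem AX = ZB: from the per-view board-to-camera poses and tracker
+        // poses it recovers the two constant transforms (board<->tracker and
+        // camera<->reference) in one shot; the frame conventions assumed here
+        // follow the inline comments below.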
+        cv::Mat ret_r, ret_t;
+        cv::Mat aux_r, aux_t;
+        cv::calibrateRobotWorldHandEye(
+                cam_r_vec, cam_t_vec, // chess board in camera
+                ref_r_vec, ref_t_vec, // tracker in reference
+                aux_r, aux_t, // tracker in chess board
+                ret_r, ret_t); // reference in camera
+        auto ret_mat = to_transform_mat(to_vec3(ret_t), to_mat3(ret_r));
+        result_mat = glm::inverse(ret_mat); // camera in reference
+
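+        // consistency check: chain the estimated transforms back into the
+        // board frame and measure, per corner, the 3D round-trip error (mm)
+        // and the 2D reprojection error against the detected corners (pixels)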
+        // projection error
+        auto aux_mat = to_transform_mat(to_vec3(aux_t), to_mat3(aux_r));
+        auto chess_mat = glm::inverse(aux_mat); // chess board in tracker
+        auto err_mat = cv::Mat(sample_num, corner_num, CV_32FC2);
+        auto obj_ps_err = std::vector<cv::Point3f>();
+        std::ranges::transform(obj_ps, std::back_inserter(obj_ps_err), [](cv::Vec3f &p) {
+            return cv::Point3f(p[0], p[1], p[2]);
+        });
+        for (auto j = 0; j < sample_num; ++j) {
+            auto &item = img_pool[j];
+            auto proj_mat = ret_mat // reference in camera
+                            * glm::inverse(calc_track_mat(item.sample_ts + ts_offset)) // tracker in reference
+                            * chess_mat; // chess board in tracker
+            auto cam_mat = to_transform_mat( // chess board in camera
+                    to_vec3(cam_t_vec[j]), to_mat3(cam_r_vec[j]));
+            auto loop_mat = glm::inverse(proj_mat) // camera in chess board
+                            * cam_mat; // chess board in camera
+            for (auto i = 0; i < corner_num; ++i) {
+                auto p_gold = to_vec3(obj_ps[i]);
+                auto p_pred = transform_p(loop_mat, p_gold);
+                auto err_3d = glm::distance(p_gold, p_pred);
+                err_mat.at<cv::Vec2f>(j, i)[0] = err_3d;
+            }
+
+            auto proj_r = to_cv_rodigues(glm::mat3(proj_mat));
+            auto proj_t = to_cv_mat(to_translation(proj_mat));
+            auto c_pred = std::vector<cv::Point2f>();
+            cv::projectPoints(obj_ps_err, proj_r, proj_t,
+                              intrinsic_cv_mat, dist_coeffs_mat,
+                              c_pred);
+            assert(c_pred.size() == corner_num);
+            for (auto i = 0; i < corner_num; ++i) {
+                auto c_gold = to_vec2(item.corner[i]);
+                auto err_2d = glm::distance(c_gold, to_vec2(c_pred[i]));
+                err_mat.at<cv::Vec2f>(j, i)[1] = err_2d;
+            }
+        }
+
+        auto err_val = cv::mean(err_mat);
+        obj_reproj_err = err_val[0];
+        SPDLOG_INFO("Average 3D projection error is {:.2f}mm", obj_reproj_err);
+        img_reproj_err = err_val[1];
+        SPDLOG_INFO("Average 2D projection error is {:.2f}pix", img_reproj_err);
+    }
+
+}
+
+camera_calibrator::impl::impl(const create_config &_conf) {
+    conf = _conf;
+    vg = nvgCreateGL3(NVG_ANTIALIAS | NVG_STENCIL_STROKES);
+}
+
+camera_calibrator::impl::~impl() {
+    if (tp != nullptr) {
+        tp->purge();
+        stop();
+    }
+}
+
+void camera_calibrator::impl::start() {
+    // clear data from the previous run
+    last_finish.store(nullptr);
+    img_pool.clear();
+    track_pool.clear();
+
+    assert(tp == nullptr);
+    tp = std::make_unique<BS::thread_pool>(conf.num_threads);
+    img_conn = OBJ_SIG(conf.img_name)->connect(
+            [this](auto name) { img_callback(name); });
+}
+
+void camera_calibrator::impl::process() {
+    calib = std::make_unique<hand_eye_calib>();
+    calib->img_size = img_size;
+    calib->set_object_points(conf.pattern_size, conf.corner_distance);
+
+    auto valid_imgs =
+            img_pool | std::views::filter([](const auto &item) {
+                assert(item.process_finished);
+                return item.corners_detected;
+            }) | std::views::transform([](const auto &item) {
+                assert(!item.corners.empty());
+                return hand_eye_calib::image_store_type(
+                        item.corners, item.sample_ts);
+            });
+    std::ranges::copy(valid_imgs, std::back_inserter(calib->img_pool));
+
+    // TODO: C++23 could do this with std::ranges::to
+    for (auto &item: track_pool) {
+        calib->track_pool.emplace(item.sample_ts, item.ref_mat);
+    }
+
+    calib->calc();
+
+    if (conf.sophiar_conn != nullptr) {
+        auto ret = Eigen::Isometry3d();
+        to_eigen_transform(calib->result_mat, ret);
+        conf.sophiar_conn->update_transform_variable(
+                conf.result_transform_var, ret);
+    }
+}
+
+void camera_calibrator::impl::simulate_process(const simulate_info_type &info) {
+    load_track_data(info.data_path);
+    img_size = info.img_size;
+    process();
+}
+
+void camera_calibrator::impl::stop() {
+    assert(img_conn.connected());
+    img_conn.disconnect();
+    tp = nullptr; // implicit wait()
+    save_track_data();
+    process();
+}
+
+void camera_calibrator::impl::save_track_data() {
+    auto fout = std::ofstream("calib_data.txt");
+
+    // tracker data
+    fout << track_pool.size() << std::endl;
+    for (auto &item: track_pool) {
+        fout << item.sample_ts << " ";
+
+        using mat_type = decltype(item.ref_mat);
+        using value_type = mat_type::value_type;
+        static constexpr auto len = sizeof(mat_type) / sizeof(value_type);
+
+        auto ptr = (value_type *) &item.ref_mat;
+        for (auto i = 0; i < len; ++i) {
+            fout << ptr[i] << " ";
+        }
+        fout << std::endl;
+    }
+    fout << std::endl;
+
+    // camera data
+    auto sample_num = std::ranges::count_if(
+            img_pool, [](auto &item) { return item.corners_detected; });
+    fout << sample_num << std::endl;
+    for (auto &item: img_pool) {
+        if (!item.corners_detected) continue;
+        fout << item.sample_ts << " ";
+        for (auto &point: item.corners) {
+            fout << point.x << " " << point.y << " ";
+        }
+        fout << std::endl;
+    }
+    fout << std::endl;
+}
+
+void camera_calibrator::impl::load_track_data(const std::string &path) {
+    auto fin = std::ifstream(path);
+
+    // tracker data
+    assert(track_pool.empty());
+    size_t sample_num;
+    fin >> sample_num;
+    for (auto k = 0; k < sample_num; ++k) {
+        auto &item = track_pool.emplace_back();
+        fin >> item.sample_ts;
+
+        using mat_type = decltype(item.ref_mat);
+        using value_type = mat_type::value_type;
+        static constexpr auto len = sizeof(mat_type) / sizeof(value_type);
+
+        auto ptr = (value_type *) &item.ref_mat;
+        for (auto i = 0; i < len; ++i) {
+            fin >> ptr[i];
+        }
+    }
+
+    // camera data
+    fin >> sample_num;
+    for (auto k = 0; k < sample_num; ++k) {
+        auto &item = img_pool.emplace_back();
+        fin >> item.sample_ts;
+        assert(item.sample_ts != 0);
+        auto corner_num = conf.pattern_size.area();
+        for (auto i = 0; i < corner_num; ++i) {
+            auto &p = item.corners.emplace_back();
+            fin >> p.x >> p.y;
+        }
+        item.corners_detected = true;
+        item.process_finished = true;
+    }
+}
+
+void camera_calibrator::impl::load_fake_data() {
+    auto dist_coeffs = cv::Mat::zeros(1, 8, CV_64FC1);
+    // @formatter:off
+    cv::Mat intrinsic_mat = (cv::Mat_<double>(3, 3) <<
+            1000.f, 0.f, 500.f,
+            0.f, 1000.f, 500.f,
+            0.f, 0.f, 1.f);
+    // @formatter:on
+}
+
+void camera_calibrator::impl::per_image_process(img_store_type *st) {
+    auto closer = sg::make_scope_guard([&] {
+        st->process_finished = true;
+        // release image to save memory
+        st->img_mat = cv::Mat();
+        st->img = nullptr;
+    });
+
+    // convert to grayscale
+    cv::Mat img_gray;
+    cv::cvtColor(st->img_mat, img_gray, cv::COLOR_BGR2GRAY);
+
+    // detect chessboard corners
+    assert(conf.board_type == CALIB_CHESS);
+    auto corner_flag = cv::CALIB_CB_ADAPTIVE_THRESH
+                       | cv::CALIB_CB_NORMALIZE_IMAGE
+                       | cv::CALIB_CB_FAST_CHECK;
+    corner_type img_corners;
+    bool ok = cv::findChessboardCorners(img_gray, conf.pattern_size,
+                                        img_corners, corner_flag);
+    if (!ok) {
+        last_finish.store(nullptr, std::memory_order::relaxed);
+        return;
+    }
+    st->corners_detected = true;
+
+    // refine corner locations to sub-pixel accuracy
+    // https://docs.opencv.org/4.9.0/d2/d1d/samples_2cpp_2lkdemo_8cpp-example.html
+    auto win_size = cv::Size(10, 10);
+    auto zero_zone = cv::Size(-1, -1);
+    auto term_crit = cv::TermCriteria(cv::TermCriteria::COUNT | cv::TermCriteria::EPS, 20, 0.03);
+    cv::cornerSubPix(img_gray, img_corners, win_size, zero_zone, term_crit);
+    st->corners = std::move(img_corners);
+    ++img_ok_cnt;
+}
+
+void camera_calibrator::impl::img_callback(obj_name_type name) {
+    assert(name == conf.img_name);
+    auto img = to_image(name);
+    if (img == nullptr) return;
+
+    if (img_size.empty()) {
+        img_size = img->size();
+    } else {
+        assert(img_size == img->size());
+    }
+
+    // record the tracker pose for this frame
+    auto cur_ts = OBJ_TS(conf.img_name);
+    auto ref_t = conf.sophiar_conn
+            ->query_transform_variable(conf.ref_transform_var);
+    if (!ref_t.has_value()) {
+        return;
+    } else {
+        auto &item = track_pool.emplace_back();
+        item.ref_mat = to_mat4(*ref_t);
+        item.sample_ts = cur_ts;
+    }
+
+    // throttle: skip the frame if the worker pool is already saturated
+    if (tp->get_tasks_queued() > conf.num_threads - 1) {
+        return;
+    }
+
+    auto &st = img_pool.emplace_back();
+    st.img = img;
+    st.img_mat = img->cv_mat(conf.stream);
+    st.sample_ts = cur_ts;
+
+    // post the corner-detection task to the thread pool
+    tp->detach_task([this, p_st = &st] {
+        per_image_process(p_st);
+        last_finish.store(p_st, std::memory_order::release);
+    });
+}
+
+void camera_calibrator::impl::render() {
+    if (tp == nullptr) return; // not running
+    auto st = last_finish.load(std::memory_order::acquire);
+    if (st == nullptr) return;
+    assert(st->process_finished);
+    if (!st->corners_detected) return;
+
+    auto size = query_viewport_size();
+    nvgBeginFrame(vg, size.width, size.height, 1.0f);
+    auto closer = sg::make_scope_guard([this] { nvgEndFrame(vg); });
+
+    nvgBeginPath(vg);
+    static constexpr auto r = 5.0f;
+    for (auto &p: st->corners) {
+        nvgCircle(vg, p.x, size.height - p.y, r); // flip Y
+    }
+    nvgPathWinding(vg, NVG_HOLE);
+    nvgFillColor(vg, nvgRGB(255, 0, 0));
+    nvgFill(vg);
+}
+
+void camera_calibrator::impl::show() {
+    auto ctx = conf.ctx;
+    bool is_running = (tp != nullptr);
+
+    ImGui::SeparatorText("Actions");
+    if (!is_running) {
+        if (ImGui::Button("Start")) {
+            post(*ctx, [this] { start(); });
+        }
+    } else {
+        if (ImGui::Button("Close")) {
+            post(*ctx, [this] { stop(); });
+        }
+    }
+
+    // Info
+    if (is_running) {
+        ImGui::SeparatorText("Info");
+        assert(tp != nullptr);
+        ImGui::Text("Pending Images: %ld", tp->get_tasks_total());
+        ImGui::Text("Track Count: %ld", track_pool.size());
+        ImGui::Text("Image Count: %d / %ld", (int) img_ok_cnt, img_pool.size());
+    }
+}
+
+camera_calibrator::camera_calibrator(const create_config &conf)
+        : pimpl(std::make_unique<impl>(conf)) {
+}
+
+camera_calibrator::~camera_calibrator() = default;
+
+void camera_calibrator::show() {
+    pimpl->show();
+}
+
+void camera_calibrator::render() {
+    pimpl->render();
+}
+
+void camera_calibrator::simulate_process(const simulate_info_type &info) {
+    pimpl->simulate_process(info);
+}
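+
+// Rough offline-replay sketch (hypothetical wiring: only pattern_size,
+// corner_distance, data_path and img_size are taken from the code above;
+// the remaining create_config fields depend on the host application):
+//
+//     camera_calibrator::create_config conf;
+//     conf.pattern_size = cv::Size(11, 8);   // inner corners per row / column
+//     conf.corner_distance = 15.f;           // corner spacing on the board
+//     camera_calibrator calibrator(conf);
+//
+//     camera_calibrator::simulate_info_type sim;
+//     sim.data_path = "calib_data.txt";      // written by save_track_data()
+//     sim.img_size = cv::Size(1920, 1080);
+//     calibrator.simulate_process(sim);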