#include "camera_calibrator_impl.h" #include "core/imgui_utility.hpp" #include "third_party/scope_guard.hpp" #include "render/render_utility.h" #include "render/render_texture.h" #include #include #include #include #include #include #define NANOVG_GL3_IMPLEMENTATION #include #include #include #include namespace camera_calibrator_impl { cv::Mat to_cv_mat(const glm::mat3 &mat) { auto ret = cv::Mat(3, 3, CV_32FC1); for (auto j = 0; j < 3; ++j) for (auto i = 0; i < 3; ++i) ret.at(j, i) = mat[i][j]; return ret; } cv::Mat to_cv_mat(const glm::vec3 &vec) { auto ret = cv::Mat(3, 1, CV_32FC1); for (auto j = 0; j < 3; ++j) ret.at(j, 0) = vec[j]; return ret; } glm::mat3 rodrigues_to_mat(const cv::Mat &vec) { assert(vec.size().area() == 3); auto rot_mat = cv::Mat(); cv::Rodrigues(vec, rot_mat); return to_mat3(rot_mat); } cv::Mat to_cv_rodigues(const glm::mat3 &mat) { auto ret = cv::Mat(); cv::Rodrigues(to_cv_mat(mat), ret); return ret; } glm::mat3 to_mat3(const cv::Mat &mat) { assert(mat.size() == cv::Size(3, 3)); assert(mat.type() == CV_32FC1 || mat.type() == CV_64FC1); auto ret = glm::mat3(); for (auto j = 0; j < 3; ++j) for (auto i = 0; i < 3; ++i) { switch (mat.type()) { // @formatter:off case CV_32FC1: { ret[i][j] = mat.at(j, i); break; } case CV_64FC1: { ret[i][j] = mat.at(j, i); break; } // @formatter:on default: { RET_ERROR_E; } } } return ret; } glm::vec3 to_vec3(const cv::Mat &mat) { assert(mat.size() == cv::Size(1, 3)); assert(mat.type() == CV_32FC1 || mat.type() == CV_64FC1); auto ret = glm::vec3(); for (auto j = 0; j < 3; ++j) { switch (mat.type()) { // @formatter:off case CV_32FC1: { ret[j] = mat.at(j, 0); break; } case CV_64FC1: { ret[j] = mat.at(j, 0); break; } // @formatter:on default: { RET_ERROR_E; } } } return ret; } template glm::vec3 to_vec3(const cv::Vec &vec) { auto ret = glm::vec3(); for (auto j = 0; j < 3; ++j) ret[j] = vec[j]; return ret; } void hand_eye_calib::set_object_points(cv::Size size, float dis) { obj_ps.clear(); for (auto iy = 0; iy < size.height; ++iy) for (auto ix = 0; ix < size.width; ++ix) { obj_ps.emplace_back(iy * dis, ix * dis); } } glm::mat4 hand_eye_calib::calc_track_mat(timestamp_type t) { return track_pool.query(t); } glm::vec2 hand_eye_calib::distort_point(glm::vec2 p) { auto &dc = dist_coeffs; assert(dc.size() == 8); auto k1 = dc[0], k2 = dc[1], p1 = dc[2], p2 = dc[3]; auto k3 = dc[4], k4 = dc[5], k5 = dc[6], k6 = dc[7]; auto r2 = p.x * p.x + p.y * p.y; auto r4 = r2 * r2, r6 = r4 * r2; auto k = (1.f + k1 * r2 + k2 * r4 + k3 * r6) / (1.f + k4 * r2 + k5 * r4 + k6 * r6); auto tx = p.x * k + 2 * p1 * p.x * p.y + p2 * (r2 + 2 * p.x * p.x); auto ty = p.y * k + p1 * (r2 + 2 * p.y * p.y) + 2 * p2 * p.x * p.y; return glm::vec2(tx, ty); } glm::vec2 hand_eye_calib::project_point(glm::vec3 p) { auto np = distort_point(from_homo(p)); return transform_p(intrinsic_mat, np); } float hand_eye_calib::corner_distance(const corner_type &c1, const corner_type &c2) { assert(c1.size() == c2.size()); auto corner_num = c1.size(); float dis_sum = 0.f; for (auto k = 0; k < corner_num; ++k) { auto p1 = c1[k], p2 = c2[k]; dis_sum += sqrt(pow(p1.x - p2.x, 2.f) + pow(p1.y - p2.y, 2.f)); } return dis_sum / corner_num; } glm::mat4 hand_eye_calib::calib_hand_eye() { // build hand-eye calibration problem auto ref_r_vec = std::vector(); // tracker in reference auto ref_t_vec = std::vector(); for (auto &info: img_pool) { auto ref_mat = calc_track_mat(info.sample_ts + ts_offset); ref_mat = glm::inverse(ref_mat); auto r_mat = glm::mat3(ref_mat); auto t_vec = 
glm::mat4 hand_eye_calib::calib_hand_eye() {
    // build hand-eye calibration problem
    auto ref_r_vec = std::vector<cv::Mat>(); // tracker in reference
    auto ref_t_vec = std::vector<cv::Mat>();
    for (auto &info: img_pool) {
        auto ref_mat = calc_track_mat(info.sample_ts + ts_offset);
        ref_mat = glm::inverse(ref_mat);
        auto r_mat = glm::mat3(ref_mat);
        auto t_vec = glm::vec3(to_translation(ref_mat)); // fourth column of matrix
        ref_r_vec.push_back(to_cv_mat(r_mat));
        ref_t_vec.push_back(to_cv_mat(t_vec));
    }

    // if (simulate_conventional) {
    //     cv::Mat ret_r, ret_t;
    //     cv::calibrateRobotWorldHandEye(
    //             cam_r_vec, cam_t_vec, // chess board in camera
    //             ref_r_vec, ref_t_vec, // tracker in reference
    //             aux_r, aux_t, // tracker in chess board
    //             ret_r, ret_t); // reference in camera
    ////     SPDLOG_DEBUG("OpenCV: {}", glm::to_string(to_transform_mat(to_vec3(aux_t), to_mat3(aux_r))));
    //     return to_transform_mat(to_vec3(ret_t), to_mat3(ret_r));
    // }

    auto inv_op = [](const cv::Mat &r, const cv::Mat &t) {
        return glm::inverse(to_transform_mat(to_vec3(t), to_mat3(r)));
    };
    auto mat_A = std::vector<glm::mat4>(); // reference in tracker
    auto mat_B = std::vector<glm::mat4>(); // camera in chess board
    std::ranges::transform(ref_r_vec, ref_t_vec, std::back_inserter(mat_A), inv_op);
    std::ranges::transform(cam_r_vec, cam_t_vec, std::back_inserter(mat_B), inv_op);

    // weighted shah algorithm
    using namespace Eigen;
    using mat_T_type = Matrix<float, 9, 9>;
    auto mat_T = mat_T_type();
    mat_T.setZero();
    for (auto k = 0; k < sample_num; ++k) {
        auto Ra = glm::mat3(mat_A[k]), Rb = glm::mat3(mat_B[k]);
        mat_T += sample_w[k] * kron_product(to_eigen(Ra), to_eigen(Rb));
    }
    JacobiSVD<mat_T_type> svdT(mat_T, ComputeFullU | ComputeFullV);
    auto x = svdT.matrixV().col(0), y = svdT.matrixU().col(0);
    auto shah_solve_op = [](Matrix3f x) -> Matrix3f {
        // rescale to unit determinant, then project onto SO(3) via SVD
        auto x_det = x.determinant();
        x *= (x_det >= 0 ? 1.f : -1.f) / std::pow(std::abs(x_det), 1.f / 3.f);
        JacobiSVD<Matrix3f> svdX(x, ComputeFullU | ComputeFullV);
        x = svdX.matrixU() * svdX.matrixV().transpose();
        return x.transpose();
    };
    auto Rx = shah_solve_op(Map<const Matrix3f>(x.data()));
    auto Ry = shah_solve_op(Map<const Matrix3f>(y.data()));

    using mat_A_type = Matrix<float, Dynamic, 6>;
    mat_A_type A = MatrixXf::Zero(3 * sample_num, 6);
    VectorXf b = VectorXf::Zero(3 * sample_num);
    for (int k = 0; k < sample_num; ++k) {
        auto ta = to_eigen(to_translation(mat_A[k]));
        auto tb = to_eigen(to_translation(mat_B[k]));
        A.block<3, 3>(3 * k, 0) = sample_w[k] * -to_eigen(glm::mat3(mat_A[k]));
        A.block<3, 3>(3 * k, 3) = sample_w[k] * Matrix3f::Identity();
        b.segment<3>(3 * k) = sample_w[k] * (ta - Ry * tb);
    }
    using vec_t_type = Vector<float, 6>;
    vec_t_type t = A.colPivHouseholderQr().solve(b);

    Matrix4f X, Y;
    X << Rx, t.segment<3>(0), 0, 0, 0, 1; // camera in reference
    Y << Ry, t.segment<3>(3), 0, 0, 0, 1; // chess board in tracker
    auto aux_mat = glm::inverse(to_mat4(Y));
    aux_r = to_cv_mat(glm::mat3(aux_mat));
    aux_t = to_cv_mat(to_translation(aux_mat));
    // SPDLOG_DEBUG("Shah: {}", glm::to_string(aux_mat));
    return glm::inverse(to_mat4(X));
}
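// Evaluation of a hand-eye result: for every sample, the chess-board pose
// predicted through the tracking chain (camera <- reference <- tracker <- board)
// is compared against the solvePnP pose applied to each corner (channel 0,
// weighted mean 3D error in mm) and against the detected corners after
// reprojection (channel 1, weighted mean 2D error in pixels).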
cv::Scalar hand_eye_calib::evaluate_hand_eye(const glm::mat4 &ret_mat) {
    auto aux_mat = to_transform_mat(to_vec3(aux_t), to_mat3(aux_r));
    auto chess_mat = glm::inverse(aux_mat); // chess board in tracker
    auto err_mat = cv::Mat(sample_num, corner_num, CV_32FC2);
    auto obj_ps_err = std::vector<cv::Point3f>();
    std::ranges::transform(obj_ps, std::back_inserter(obj_ps_err),
                           [](cv::Vec3f &p) { return cv::Point3f(p[0], p[1], p[2]); });
    for (auto j = 0; j < sample_num; ++j) {
        auto &item = img_pool[j];
        auto proj_mat = ret_mat // reference in camera
                        * glm::inverse(calc_track_mat(item.sample_ts + ts_offset)) // tracker in reference
                        * chess_mat; // chess board in tracker
        auto cam_mat = to_transform_mat( // chess board in camera
                to_vec3(cam_t_vec[j]), to_mat3(cam_r_vec[j]));
        auto loop_mat = glm::inverse(proj_mat) // camera in chess board
                        * cam_mat; // chess board in camera
        for (auto i = 0; i < corner_num; ++i) {
            auto p_gold = to_vec3(obj_ps[i]);
            auto p_pred = transform_p(loop_mat, p_gold);
            auto err_3d = glm::distance(p_gold, p_pred);
            err_mat.at<cv::Vec2f>(j, i)[0] = err_3d;
        }
        auto proj_r = to_cv_rodigues(glm::mat3(proj_mat));
        auto proj_t = to_cv_mat(to_translation(proj_mat));
        auto c_pred = std::vector<cv::Point2f>();
        cv::projectPoints(obj_ps_err, proj_r, proj_t,
                          cam_cv.intrinsic, cam_cv.dist_coeffs, c_pred);
        assert(c_pred.size() == corner_num);
        for (auto i = 0; i < corner_num; ++i) {
            auto c_gold = to_vec2(item.corner[i]);
            auto err_2d = glm::distance(c_gold, to_vec2(c_pred[i]));
            err_mat.at<cv::Vec2f>(j, i)[1] = err_2d;
        }
    }
    // calculate weighted average error
    cv::Scalar err_sum = {};
    float w_sum = 0.f;
    for (auto j = 0; j < sample_num; ++j) {
        for (auto i = 0; i < corner_num; ++i) {
            auto err = sample_w[j] * err_mat.at<cv::Vec2f>(j, i);
            err_sum[0] += err[0];
            err_sum[1] += err[1];
        }
        w_sum += sample_w[j] * corner_num;
    }
    return err_sum / w_sum;
}

hand_eye_calib::camera_calib_result hand_eye_calib::camera_calib(const img_index_list_type &index) {
    auto img_ps_in = std::vector<corner_type>();
    std::ranges::transform(index, std::back_inserter(img_ps_in),
                           [this](const auto &ind) { return img_pool[ind].corner; });
    auto select_num = img_ps_in.size();
    auto obj_ps_in = std::vector(select_num, obj_ps);
    auto ret = camera_calib_result();
    cv::Mat r_vec_ignore, t_vec_ignore;
    auto err = cv::calibrateCamera(
            obj_ps_in, img_ps_in, img_size,
            ret.intrinsic, ret.dist_coeffs,
            r_vec_ignore, t_vec_ignore,
            cv::CALIB_RATIONAL_MODEL); // use k4, k5, k6
    assert(!std::isnan(err));
    return ret;
}

float hand_eye_calib::reproject_error(const camera_calib_result &cam, const corner_type &img_ps) {
    cv::Mat r_vec, t_vec;
    auto ok = cv::solvePnP(obj_ps, img_ps, cam.intrinsic, cam.dist_coeffs, r_vec, t_vec);
    assert(ok);
    corner_type proj_ps;
    cv::projectPoints(obj_ps, r_vec, t_vec, cam.intrinsic, cam.dist_coeffs, proj_ps);
    return corner_distance(img_ps, proj_ps);
}

hand_eye_calib::camera_calib_result hand_eye_calib::camera_calib_ransac() {
    static constexpr auto max_iter = 32; // TODO: make configurable
    static constexpr auto calib_img_num = 10;
    static constexpr auto accept_threshold = 1.0f; // pix
    static constexpr auto valid_ratio = 0.8f; // percentage
    auto rd = std::random_device();
    auto gen = std::mt19937(rd());
    auto sampler = std::uniform_int_distribution(0, sample_num - 1);
    auto err_vec = std::vector<float>(sample_num);
    auto ret = camera_calib_result();
    auto ret_err = std::numeric_limits<float>::max();
    auto index_list = img_index_list_type();
    for (auto k = 0; k < max_iter; ++k) {
        index_list.resize(calib_img_num);
        std::ranges::generate(index_list, [&] { return sampler(gen); });
        auto cur = camera_calib(index_list);
        // evaluate current result
        err_vec.clear();
        std::ranges::transform(img_pool, std::back_inserter(err_vec),
                               [=, this](const auto &img) { return reproject_error(cur, img.corner); });
        auto err_mean = std::accumulate(err_vec.begin(), err_vec.end(), 0.f) / sample_num;
        SPDLOG_INFO("Iter {}, error = {:.2f}pix.", k, err_mean);
        if (save_calib_error) {
            auto fout = std::ofstream(fmt::format("calib_{}.txt", k));
            for (auto val: err_vec) { fout << val << std::endl; }
        }
        auto ratio = 1.f * std::ranges::count_if(
                err_vec, [=](auto err) { return err <= accept_threshold; }) / sample_num;
        if (ratio < valid_ratio) continue;
        if (err_mean < ret_err) {
            ret = cur;
            ret_err = err_mean;
        }
    }
    assert(!ret.intrinsic.empty());
    SPDLOG_INFO("Camera calibration error is {:.2f}pix", ret_err);
    // remove outlier samples
    auto end_iter = std::ranges::remove_if(img_pool, [=, this](const auto &img) {
        return reproject_error(ret, img.corner) > accept_threshold;
    });
    img_pool.erase(end_iter.begin(), end_iter.end());
    sample_num = img_pool.size();
    return ret;
}
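// Per-sample weights: each view gets weight 1 / (number of camera positions,
// itself included, within sample_neighbor_radius), so clusters of nearly
// identical poses do not dominate the weighted hand-eye solve.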
void hand_eye_calib::calc_sample_weights() {
    auto cam_p = std::vector<glm::vec3>();
    auto cam_p_op = [](const cv::Mat &r, const cv::Mat &t) {
        auto t_mat = to_transform_mat(to_vec3(t), to_mat3(r));
        return to_translation(t_mat);
    };
    std::ranges::transform(cam_r_vec, cam_t_vec, std::back_inserter(cam_p), cam_p_op);
    sample_w.clear();
    auto w_op = [&](glm::vec3 x) {
        return 1.f / std::ranges::count_if(cam_p, [=](glm::vec3 y) {
            return glm::distance(x, y) <= sample_neighbor_radius;
        });
    };
    std::ranges::transform(cam_p, std::back_inserter(sample_w), w_op);
}

void hand_eye_calib::calc() {
    sample_num = img_pool.size();
    corner_num = img_pool.begin()->corner.size();
    if (sub_sample_count != 0) {
        assert(sample_num >= sub_sample_count);
        std::random_device rd;
        std::mt19937 g(rd());
        std::shuffle(img_pool.begin(), img_pool.end(), g);
        img_pool.erase(img_pool.begin() + sub_sample_count, img_pool.end());
        sample_num = img_pool.size();
    }

    auto ts = current_timestamp();
    cam_cv = camera_calib_ransac();
    SPDLOG_INFO("Used time: {}ms", (current_timestamp() - ts) / 1000);
    intrinsic_mat = to_mat3(cam_cv.intrinsic);
    assert(cam_cv.dist_coeffs.size().height == 1);
    auto dist_coeffs_num = cam_cv.dist_coeffs.size().width;
    assert(dist_coeffs_num >= 8); // k1-3, p1-2, k4-6
    dist_coeffs.reserve(dist_coeffs_num);
    assert(cam_cv.dist_coeffs.type() == CV_64FC1);
    for (auto k = 0; k < 8; ++k) {
        dist_coeffs.push_back(cam_cv.dist_coeffs.at<double>(0, k));
    }

    // solve PnP for each image
    cam_r_vec = std::vector<cv::Mat>(); // chess board in camera
    cam_t_vec = std::vector<cv::Mat>();
    auto obj_ps_pnp = std::vector<cv::Point3d>();
    std::ranges::transform(obj_ps, std::back_inserter(obj_ps_pnp),
                           [](cv::Vec3f &p) { return cv::Point3d(p[0], p[1], p[2]); });
    std::optional<glm::quat> last_rot_quat;
    static constexpr auto quat_threshold = 0.5f;
    for (auto &img: img_pool) {
        auto &corner = img.corner;
        cv::Mat chess_r, chess_t; // chess board in camera
        for (;;) {
            bool ok = cv::solvePnP(obj_ps_pnp, corner,
                                   cam_cv.intrinsic, cam_cv.dist_coeffs, chess_r, chess_t);
            assert(ok);
            // rotate corners if needed
            auto rot_mat = rodrigues_to_mat(chess_r);
            auto rot_quat = glm::quat_cast(rot_mat);
            if (!last_rot_quat.has_value()
                // https://math.stackexchange.com/questions/90081/quaternion-distance
                || 1 - glm::dot(rot_quat, *last_rot_quat) < quat_threshold) {
                last_rot_quat = rot_quat;
                break;
            } else {
                std::ranges::reverse(corner);
            }
        }
        auto ret_mat = to_transform_mat(to_vec3(chess_t), rodrigues_to_mat(chess_r));
        auto cam_r = to_cv_mat(glm::mat3(ret_mat)); // rotation part
        auto cam_t = to_cv_mat(to_translation(ret_mat)); // translation part
        cam_r_vec.push_back(cam_r);
        cam_t_vec.push_back(cam_t);
    }

    calc_sample_weights();
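    // Temporal offset search: offsets below are in microseconds, covering [-128 ms, 0];
    // each probe re-runs the weighted hand-eye solve, and the trichotomy (ternary)
    // search assumes the combined 3D/2D error is roughly unimodal in the offset.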
    // find minimal error using trichotomy algorithm
    auto offset_l = -128000; // -128ms
    auto offset_r = 0; // 0ms
    static constexpr auto eps = 1000; // 1ms
    auto eval_func = [this](int offset) {
        ts_offset = offset;
        auto ret_mat = calib_hand_eye();
        return evaluate_hand_eye(ret_mat);
    };
    auto weight_op = [](const cv::Scalar &w) {
        // return w[1];
        return w[0] * 10 + w[1]; // w[0]: 3d, w[1]: 2d
    };
    auto better_op = [=](const cv::Scalar &lhs, // is lhs better than rhs ?
                         const cv::Scalar &rhs) {
        return weight_op(lhs) < weight_op(rhs);
    };
    while (offset_r - offset_l > eps) {
        auto l_sec = (2 * offset_l + offset_r) / 3;
        auto r_sec = (offset_l + 2 * offset_r) / 3;
        auto l_err = eval_func(l_sec);
        auto r_err = eval_func(r_sec);
        if (better_op(l_err, r_err)) {
            offset_r = r_sec;
        } else {
            offset_l = l_sec;
        }
    }
    result_ts_offset = (offset_l + offset_r) / 2;

    if (simulate_conventional) {
        static constexpr auto select_num = 4;
        auto step = (float) sample_num / (select_num + 1);
        auto select_ind = img_index_list_type(select_num);
        for (auto k = 1; k < select_num; ++k) {
            select_ind[k] = sample_num - step * k;
        }
        cam_cv = camera_calib(select_ind);
        intrinsic_mat = to_mat3(cam_cv.intrinsic);
        dist_coeffs.clear();
        for (auto k = 0; k < 8; ++k) {
            dist_coeffs.push_back(cam_cv.dist_coeffs.at<double>(0, k));
        }
        auto img_pool_new = image_pool_type();
        std::ranges::transform(select_ind, std::back_inserter(img_pool_new),
                               [this](auto k) { return img_pool[k]; });
        img_pool = img_pool_new;
        sample_num = select_num;
        // solve PnP for each image
        cam_r_vec = std::vector<cv::Mat>(); // chess board in camera
        cam_t_vec = std::vector<cv::Mat>();
        for (auto &img: img_pool) {
            auto &corner = img.corner;
            cv::Mat chess_r, chess_t; // chess board in camera
            bool ok = cv::solvePnP(obj_ps_pnp, corner,
                                   cam_cv.intrinsic, cam_cv.dist_coeffs, chess_r, chess_t);
            auto ret_mat = to_transform_mat(to_vec3(chess_t), rodrigues_to_mat(chess_r));
            auto cam_r = to_cv_mat(glm::mat3(ret_mat)); // rotation part
            auto cam_t = to_cv_mat(to_translation(ret_mat)); // translation part
            cam_r_vec.push_back(cam_r);
            cam_t_vec.push_back(cam_t);
        }
        // reset weights
        sample_w = std::vector<float>(sample_num, 1.f);
    }

    if (disable_temporl_offset) {
        result_ts_offset = 0;
    }
    ts_offset = result_ts_offset;
    result_mat = glm::inverse(calib_hand_eye());
    SPDLOG_INFO("Estimated temporal offset is {}ms", result_ts_offset / 1000);
    auto err_val = evaluate_hand_eye(glm::inverse(result_mat));
    obj_reproj_err = err_val[0];
    SPDLOG_INFO("Average 3D projection error is {:.2f}mm", obj_reproj_err);
    img_reproj_err = err_val[1];
    SPDLOG_INFO("Average 2D projection error is {:.2f}pix", img_reproj_err);
}

camera_intrinsic_v0 camera_calibrator_impl::hand_eye_calib::intrinsic_v0() {
    auto ret = camera_intrinsic_v0();
    ret.fx = intrinsic_mat[0][0];
    ret.fy = intrinsic_mat[1][1];
    ret.cx = intrinsic_mat[2][0];
    ret.cy = intrinsic_mat[2][1];
    std::ranges::copy(dist_coeffs, std::back_inserter(ret.dist));
    ret.width = img_size.width;
    ret.height = img_size.height;
    return ret;
}

} // namespace camera_calibrator_impl

camera_calibrator::impl::impl(const create_config &_conf) {
    conf = _conf;
    vg = nvgCreateGL3(NVG_ANTIALIAS | NVG_STENCIL_STROKES);
}

camera_calibrator::impl::~impl() {
    if (tp != nullptr) {
        tp->purge();
        stop(true); // on_exit = true, force exit
    }
}

void camera_calibrator::impl::start() {
    // clear last data
    last_finish.store(nullptr);
    img_pool.clear();
    track_pool.clear();
    img_ok_cnt = 0;
    coverage_fbo = std::make_unique<smart_frame_buffer>();
    assert(tp == nullptr);
    // thread-pool type assumed from the detach_task / purge / get_tasks_* API used below
    tp = std::make_unique<BS::thread_pool>(conf.num_threads);
    img_conn = OBJ_SIG(conf.img_name)->connect(
            [this](auto name) { img_callback(name); });
}
void camera_calibrator::impl::process() {
    calib = std::make_unique<hand_eye_calib>();
    calib->img_size = img_size;
    calib->set_object_points(conf.pattern_size, conf.corner_distance);
    auto valid_imgs = img_pool
                      | std::views::filter([](const auto &item) {
                            assert(item.process_finished);
                            if (!item.corners_detected) return false;
                            if (item.corner_sharpness > sharpness_threshold) return false;
                            return true;
                        })
                      | std::views::transform([](const auto &item) {
                            assert(!item.corners.empty());
                            return hand_eye_calib::image_store_type(item.corners, item.sample_ts);
                        });
    std::ranges::copy(valid_imgs, std::back_inserter(calib->img_pool));
    for (auto &item: track_pool) {
        calib->track_pool.add(item.ref_mat, item.sample_ts);
    }
    calib->calc();
    if (conf.sophiar_conn != nullptr) {
        auto ret = Eigen::Isometry3d();
        to_eigen_transform(calib->result_mat, ret);
        conf.sophiar_conn->update_transform_variable(conf.result_transform_var, ret);
    }
    if (conf.cb_func) {
        conf.cb_func(result_type{
                .cam_in = calib->intrinsic_v0(),
                .transform = calib->result_mat,
                .time_offset = calib->result_ts_offset,
        });
    }
}

void camera_calibrator::impl::simulate_process(const simulate_info_type &info) {
    load_track_data(info.data_path);
    img_size = info.img_size;
    process();
}

void camera_calibrator::impl::stop(bool on_exit) {
    assert(img_conn.connected());
    img_conn.disconnect();
    tp = nullptr; // implicit wait()
    coverage_fbo = nullptr;
    if (!on_exit) {
        save_track_data();
        process();
    }
}

void camera_calibrator::impl::save_track_data() {
    auto fout = std::ofstream("calib_data.txt");
    // tracker data
    fout << track_pool.size() << std::endl;
    for (auto &item: track_pool) {
        fout << item.sample_ts << " ";
        using mat_type = decltype(item.ref_mat);
        using value_type = mat_type::value_type;
        static constexpr auto len = sizeof(mat_type) / sizeof(value_type);
        auto ptr = (value_type *) &item.ref_mat;
        for (auto i = 0; i < len; ++i) { fout << ptr[i] << " "; }
        fout << std::endl;
    }
    fout << std::endl;
    // camera data
    auto sample_num = std::ranges::count_if(
            img_pool, [](auto &item) { return item.corners_detected; });
    fout << sample_num << std::endl;
    for (auto &item: img_pool) {
        if (!item.corners_detected) continue;
        fout << item.sample_ts << " ";
        for (auto &point: item.corners) {
            fout << point.x << " " << point.y << " ";
        }
        fout << item.corner_sharpness;
        fout << std::endl;
    }
    fout << std::endl;
}

void camera_calibrator::impl::load_track_data(const std::string &path) {
    auto fin = std::ifstream(path);
    // tracker data
    assert(track_pool.empty());
    size_t sample_num;
    fin >> sample_num;
    for (auto k = 0; k < sample_num; ++k) {
        auto &item = track_pool.emplace_back();
        fin >> item.sample_ts;
        using mat_type = decltype(item.ref_mat);
        using value_type = mat_type::value_type;
        static constexpr auto len = sizeof(mat_type) / sizeof(value_type);
        auto ptr = (value_type *) &item.ref_mat;
        for (auto i = 0; i < len; ++i) { fin >> ptr[i]; }
    }
    // camera data
    fin >> sample_num;
    for (auto k = 0; k < sample_num; ++k) {
        auto &item = img_pool.emplace_back();
        fin >> item.sample_ts;
        assert(item.sample_ts != 0);
        auto corner_num = conf.pattern_size.area();
        for (auto i = 0; i < corner_num; ++i) {
            auto &p = item.corners.emplace_back();
            fin >> p.x >> p.y;
        }
        fin >> item.corner_sharpness;
        item.corners_detected = true;
        item.process_finished = true;
    }
}
void camera_calibrator::impl::per_image_process(img_store_type *st) {
    auto closer = sg::make_scope_guard([&] {
        st->process_finished = true;
        // release image to save memory
        st->img_mat = cv::Mat();
        st->img = nullptr;
    });
    // color to gray
    cv::Mat img_gray;
    cv::cvtColor(st->img_mat, img_gray, cv::COLOR_BGR2GRAY);
    // find control points
    assert(conf.board_type == CALIB_CHESS);
    auto corner_flag = cv::CALIB_CB_ADAPTIVE_THRESH
                       | cv::CALIB_CB_NORMALIZE_IMAGE
                       | cv::CALIB_CB_FAST_CHECK;
    corner_type img_corners;
    bool ok = cv::findChessboardCorners(img_gray, conf.pattern_size, img_corners, corner_flag);
    if (!ok) {
        // last_finish.store(nullptr, std::memory_order::relaxed);
        return;
    }
    st->corners_detected = true;
    // refine corner locations
    // https://docs.opencv.org/4.9.0/d2/d1d/samples_2cpp_2lkdemo_8cpp-example.html
    auto win_size = cv::Size(10, 10);
    auto zero_zone = cv::Size(-1, -1);
    auto term_crit = cv::TermCriteria(cv::TermCriteria::COUNT | cv::TermCriteria::EPS, 20, 0.03);
    cv::cornerSubPix(img_gray, img_corners, win_size, zero_zone, term_crit);
    st->corners = img_corners;
    // evaluate sharpness
    auto sharpness = cv::estimateChessboardSharpness(img_gray, conf.pattern_size, img_corners);
    st->corner_sharpness = sharpness[0];
    if (st->corner_sharpness > sharpness_threshold) return;
    ++img_ok_cnt;
}

void camera_calibrator::impl::img_callback(obj_name_type name) {
    assert(name == conf.img_name);
    auto img = to_image(name);
    if (img == nullptr) return;
    if (img_size.empty()) {
        img_size = img->size();
    } else {
        assert(img_size == img->size());
    }
    // tracking information
    auto cur_ts = OBJ_TS(conf.img_name);
    auto ref_t = conf.sophiar_conn->query_transform_variable(conf.ref_transform_var);
    if (!ref_t.has_value()) return;
    auto &item = track_pool.emplace_back();
    item.ref_mat = to_mat4(*ref_t);
    item.sample_ts = cur_ts;
    auto err_t = conf.sophiar_conn->query_double_variable(conf.ref_error_var);
    if (!err_t.has_value()) return;
    // assert(err_t.has_value());
    item.track_error = *err_t;
    last_track_err = item.track_error;
    if (item.track_error > track_err_threshold) return;
    // throttle
    if (tp->get_tasks_queued() > conf.num_threads - 1) { return; }
    auto &st = img_pool.emplace_back();
    st.img = img;
    st.img_mat = img->cv_mat(conf.stream);
    st.sample_ts = cur_ts;
    // post task
    tp->detach_task([this, p_st = &st] {
        per_image_process(p_st);
        last_finish.store(p_st, std::memory_order::release);
    });
}

void camera_calibrator::impl::render_corners(const corner_type &corner, NVGcolor color) {
    auto size = query_viewport_size();
    nvgBeginFrame(vg, size.width, size.height, 1.0f);
    auto closer = sg::make_scope_guard([this] {
        nvgEndFrame(vg);
        // TODO: ugly hack, NanoVG sets ColSrc to One
        glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
    });
    nvgBeginPath(vg);
    static constexpr auto r = 5.0f;
    for (auto &p: corner) {
        nvgCircle(vg, p.x, size.height - p.y, r); // flip Y
    }
    nvgPathWinding(vg, NVG_HOLE);
    nvgFillColor(vg, color);
    nvgFill(vg);
}

void camera_calibrator::impl::render() {
    if (tp == nullptr) return; // not running
    // render coverage
    assert(coverage_fbo != nullptr);
    if (coverage_fbo->id == 0) [[unlikely]] {
        // lazy creation for img_size
        auto fbo_conf = smart_frame_buffer::create_config{
                .size = img_size,
                .depth_fmt = GL_DEPTH24_STENCIL8,
        };
        coverage_fbo->create(fbo_conf);
    } else {
        auto tex_conf = tex_render_info{
                .mode = TEX_COLOR_ONLY,
        };
        tex_conf.color.fmt = COLOR_RGB;
        tex_conf.color.id = coverage_fbo->color_tex[0].id;
        tex_conf.color.alpha = 0.5f;
        render_texture(tex_conf);
    }
    auto st = last_finish.load(std::memory_order::acquire);
    if (st == nullptr) return;
    assert(st->process_finished);
    if (!st->corners_detected) return;
    render_corners(st->corners, nvgRGB(255, 0, 0));
    coverage_fbo->bind();
    render_corners(st->corners, nvgRGB(0, 255, 0));
    coverage_fbo->unbind();
}
void camera_calibrator::impl::show() {
    auto ctx = conf.ctx;
    bool is_running = (tp != nullptr);
    ImGui::SeparatorText("Actions");
    if (!is_running) {
        if (ImGui::Button("Start")) {
            post(*ctx, [this] { start(); });
        }
    } else {
        if (ImGui::Button("Close")) {
            post(*ctx, [this] { stop(); });
        }
    }
    // Info
    if (is_running) {
        ImGui::SeparatorText("Info");
        assert(tp != nullptr);
        ImGui::Text("Pending Images: %ld", tp->get_tasks_total());
        ImGui::Text("Track Count: %ld", track_pool.size());
        ImGui::Text("Image Count: %d / %ld", (int) img_ok_cnt, img_pool.size());
        { // track error
            auto guard = imgui_valid_style_guard(last_track_err <= track_err_threshold);
            ImGui::Text("Last Track Error: %.2f", last_track_err);
        }
        auto last_st = last_finish.load(std::memory_order::consume);
        if (last_st != nullptr && last_st->corners_detected) {
            auto guard = imgui_valid_style_guard(last_st->corner_sharpness <= sharpness_threshold);
            ImGui::Text("Last Sharpness: %.2f", last_st->corner_sharpness);
        }
    }
}

camera_calibrator::camera_calibrator(const create_config &conf)
        : pimpl(std::make_unique<impl>(conf)) {
}

camera_calibrator::~camera_calibrator() = default;

void camera_calibrator::show() { pimpl->show(); }

void camera_calibrator::render() { pimpl->render(); }

void camera_calibrator::simulate_process(const simulate_info_type &info) {
    pimpl->simulate_process(info);
}