
Add functions for evaluating calibration accuracy.

jcsyshc 1 year ago
parent
commit 8708f07c77

+ 31 - 1
data/config_debug.yaml

@@ -1 +1,31 @@
-app_name: debug
+app_name: debug
+
+eval_points:
+  - name: P1
+    value: [ -42.65, 73.45, -438.96 ]
+  - name: P2
+    value: [ 47.69, 79.50, -439.40 ]
+  - name: P3
+    value: [ 2.82, 69.34, -467.07 ]
+  - name: P4
+    value: [ -57.70, 82.30, -473.82 ]
+  - name: P5
+    value: [ 57.04, 88.72, -478.34 ]
+  - name: P6
+    value: [ -47.50, 80.30, -503.17 ]
+  - name: P7
+    value: [ -28.70, 71.69, -493.38 ]
+  - name: P8
+    value: [ 31.60, 75.59, -494.99 ]
+  - name: P9
+    value: [ 50.47, 86.65, -502.33 ]
+  - name: E1
+    value: [ -30.57, 82.33, -535.06 ]
+  - name: E2
+    value: [ -18.16, 69.89, -535.18 ]
+  - name: E3
+    value: [ -0.20, 66.85, -539.44 ]
+  - name: E4
+    value: [ 16.79, 71.42, -540.98 ]
+  - name: E5
+    value: [ 26.50, 82.57, -533.76 ]

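Note: the eval_points above are the 3D coordinates (presumably millimeters, in the evaluation model's frame) of the landmarks that calib_eval projects and compares against hand-labeled pixels further down in this commit. A minimal standalone sketch of loading such a list with yaml-cpp, which the project already uses through YAML::Node; the eval_point struct and the main() harness are illustrative, not project code:

```cpp
// Sketch only: load eval_points into (name, vec3) pairs, mirroring
// simulate_info_type::fill_from_yaml later in this commit.
#include <yaml-cpp/yaml.h>
#include <glm/glm.hpp>
#include <cstdio>
#include <string>
#include <vector>

struct eval_point {
    std::string name;
    glm::vec3 value;
};

int main() {
    auto root = YAML::LoadFile("data/config_debug.yaml");
    std::vector<eval_point> points;
    for (const auto &p : root["eval_points"]) {
        const auto &v = p["value"];
        points.push_back({p["name"].as<std::string>(),
                          glm::vec3(v[0].as<float>(), v[1].as<float>(), v[2].as<float>())});
    }
    std::printf("loaded %zu points, first: %s\n", points.size(), points.front().name.c_str());
    return 0;
}
```
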
+ 5 - 1
data/config_endo_guide_oldhead.yaml

@@ -78,4 +78,8 @@ acl_guide:
   tibia:
     collect_obj: point_picker_in_tibia_ref
     collect_var: picked_point_in_tibia_ref
-    transform_var: tibia_ref_in_tracker
+    transform_var: tibia_ref_in_tracker
+  drill_transform_var: not_implemented
+
+output_width: 1920
+output_height: 1080

+ 86 - 16
src/image_process/impl/camera_calibrator.cpp

@@ -160,6 +160,17 @@ namespace camera_calibrator_impl {
             ref_t_vec.push_back(to_cv_mat(t_vec));
         }
 
+//        if (simulate_conventional) {
+//            cv::Mat ret_r, ret_t;
+//            cv::calibrateRobotWorldHandEye(
+//                    cam_r_vec, cam_t_vec, // chess board in camera
+//                    ref_r_vec, ref_t_vec, // tracker in reference
+//                    aux_r, aux_t, // tracker in chess board
+//                    ret_r, ret_t); // reference in camera
+////        SPDLOG_DEBUG("OpenCV: {}", glm::to_string(to_transform_mat(to_vec3(aux_t), to_mat3(aux_r))));
+//            return to_transform_mat(to_vec3(ret_t), to_mat3(ret_r));
+//        }
+
         auto inv_op = [](const cv::Mat &r, const cv::Mat &t) {
             return glm::inverse(to_transform_mat(to_vec3(t), to_mat3(r)));
         };
@@ -212,15 +223,6 @@ namespace camera_calibrator_impl {
         aux_t = to_cv_mat(to_translation(aux_mat));
 //        SPDLOG_DEBUG("Shah: {}", glm::to_string(aux_mat));
         return glm::inverse(to_mat4(X));
-
-//        cv::Mat ret_r, ret_t;
-//        cv::calibrateRobotWorldHandEye(
-//                cam_r_vec, cam_t_vec, // chess board in camera
-//                ref_r_vec, ref_t_vec, // tracker in reference
-//                aux_r, aux_t, // tracker in chess board
-//                ret_r, ret_t); // reference in camera
-//        SPDLOG_DEBUG("OpenCV: {}", glm::to_string(to_transform_mat(to_vec3(aux_t), to_mat3(aux_r))));
-//        return to_transform_mat(to_vec3(ret_t), to_mat3(ret_r));
     }
 
     cv::Scalar hand_eye_calib::evaluate_hand_eye(const glm::mat4 &ret_mat) {
@@ -310,8 +312,8 @@ namespace camera_calibrator_impl {
     hand_eye_calib::camera_calib_result
     hand_eye_calib::camera_calib_ransac() {
         static constexpr auto max_iter = 32; // TODO: make configurable
-        static constexpr auto calib_img_num = 5;
-        static constexpr auto accept_threshold = 0.7f; // pix
+        static constexpr auto calib_img_num = 10;
+        static constexpr auto accept_threshold = 0.4f; // pix
         static constexpr auto valid_ratio = 0.8f; // percentage
 
         auto rd = std::random_device();
@@ -331,7 +333,15 @@ namespace camera_calibrator_impl {
             std::ranges::transform(img_pool, std::back_inserter(err_vec),
                                    [=, this](const auto &img) { return reproject_error(cur, img.corner); });
             auto err_mean = std::accumulate(err_vec.begin(), err_vec.end(), 0.f) / sample_num;
-            SPDLOG_DEBUG("Iter {}, error = {:.2f}pix.", k, err_mean);
+            SPDLOG_INFO("Iter {}, error = {:.2f}pix.", k, err_mean);
+
+            if (save_calib_error) {
+                auto fout = std::ofstream(fmt::format("calib_{}.txt", k));
+                for (auto val: err_vec) {
+                    fout << val << std::endl;
+                }
+            }
+
             auto ratio = 1.f * std::ranges::count_if(
                     err_vec, [=](auto err) { return err <= accept_threshold; }) / sample_num;
             if (ratio < valid_ratio) continue;
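
Note: for context on the retuned constants (calib_img_num 5 -> 10, accept_threshold 0.7 -> 0.4 px), the loop above is a RANSAC-style intrinsic calibration: calibrate on a random subset of views, score the candidate by per-view mean reprojection error over the whole pool, and accept it only if enough views fall under the threshold. A standalone sketch of that scheme under assumed types (View and CalibCandidate are illustrative; the project's reproject_error and image pool types differ):

```cpp
// Sketch only: RANSAC-style camera calibration. Calibrate on a random subset,
// score every view in the pool by mean reprojection error, keep the best
// candidate whose inlier ratio is high enough. Assumes pool.size() >= subset.
#include <opencv2/calib3d.hpp>
#include <algorithm>
#include <cmath>
#include <limits>
#include <numeric>
#include <random>
#include <vector>

struct View { std::vector<cv::Point2f> corners; };   // detected chessboard corners

struct CalibCandidate {
    cv::Mat intrinsic, dist;
    float mean_err = std::numeric_limits<float>::max();
};

CalibCandidate calib_ransac(const std::vector<View> &pool,
                            const std::vector<cv::Point3f> &obj_ps,   // board model points
                            cv::Size img_size) {
    constexpr int max_iter = 32, subset = 10;   // cf. calib_img_num above
    constexpr float accept_threshold = 0.4f;    // pixels
    constexpr float valid_ratio = 0.8f;
    std::mt19937 gen{std::random_device{}()};
    std::vector<size_t> idx(pool.size());
    std::iota(idx.begin(), idx.end(), 0);

    CalibCandidate best;
    for (int it = 0; it < max_iter; ++it) {
        std::shuffle(idx.begin(), idx.end(), gen);
        std::vector<std::vector<cv::Point3f>> obj(subset, obj_ps);
        std::vector<std::vector<cv::Point2f>> img;
        for (int k = 0; k < subset; ++k) img.push_back(pool[idx[k]].corners);

        CalibCandidate cur;
        std::vector<cv::Mat> rv, tv;
        cv::calibrateCamera(obj, img, img_size, cur.intrinsic, cur.dist, rv, tv);

        // per-view mean reprojection error over the *whole* pool
        std::vector<float> errs;
        for (const auto &v : pool) {
            cv::Mat r, t;
            cv::solvePnP(obj_ps, v.corners, cur.intrinsic, cur.dist, r, t);
            std::vector<cv::Point2f> proj;
            cv::projectPoints(obj_ps, r, t, cur.intrinsic, cur.dist, proj);
            float e = 0;
            for (size_t i = 0; i < proj.size(); ++i)
                e += std::hypot(proj[i].x - v.corners[i].x, proj[i].y - v.corners[i].y);
            errs.push_back(e / (float) proj.size());
        }
        cur.mean_err = std::accumulate(errs.begin(), errs.end(), 0.f) / (float) errs.size();

        auto inliers = std::count_if(errs.begin(), errs.end(),
                                     [&](float e) { return e <= accept_threshold; });
        if ((float) inliers / (float) errs.size() < valid_ratio) continue;
        if (cur.mean_err < best.mean_err) best = cur;
    }
    return best;   // intrinsic/dist stay empty if no candidate was accepted
}
```
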
@@ -375,7 +385,18 @@ namespace camera_calibrator_impl {
         sample_num = img_pool.size();
         corner_num = img_pool.begin()->corner.size();
 
+        if (sub_sample_count != 0) {
+            assert(sample_num >= sub_sample_count);
+            std::random_device rd;
+            std::mt19937 g(rd());
+            std::shuffle(img_pool.begin(), img_pool.end(), g);
+            img_pool.erase(img_pool.begin() + sub_sample_count, img_pool.end());
+            sample_num = img_pool.size();
+        }
+
+        auto ts = current_timestamp();
         cam_cv = camera_calib_ransac();
+        SPDLOG_INFO("Used time: {}ms", (current_timestamp() - ts) / 1000);
         intrinsic_mat = to_mat3(cam_cv.intrinsic);
 
         assert(cam_cv.dist_coeffs.size().height == 1);
@@ -437,9 +458,13 @@ namespace camera_calibrator_impl {
             auto ret_mat = calib_hand_eye();
             return evaluate_hand_eye(ret_mat);
         };
-        auto better_op = [](const cv::Scalar &lhs, // is lhs better than rhs ?
-                            const cv::Scalar &rhs) {
-            return lhs[1] < rhs[1]; // use 2d error
+        auto weight_op = [](const cv::Scalar &w) {
+//            return w[1];
+            return w[0] * 10 + w[1]; // w[0]: 3d, w[1]: 2d
+        };
+        auto better_op = [=](const cv::Scalar &lhs, // is lhs better than rhs ?
+                             const cv::Scalar &rhs) {
+            return weight_op(lhs) < weight_op(rhs);
         };
         for (; offset_r - offset_l > eps;) {
             auto l_sec = (2 * offset_l + offset_r) / 3;
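
Note: the change above swaps the pure 2D-error comparison for a weighted 3D + 2D cost, which the surrounding loop minimizes by ternary search over the temporal offset. A self-contained illustration of that search pattern; the quadratic toy cost and the millisecond range are stand-ins for the real evaluate_hand_eye scoring, and the 10x weight on the 3D term mirrors weight_op above:

```cpp
// Sketch only: ternary search for the minimum of an (assumed unimodal) cost
// over a scalar temporal offset, as in the loop above.
#include <cstdio>
#include <functional>

double ternary_min(double lo, double hi, double eps,
                   const std::function<double(double)> &cost) {
    while (hi - lo > eps) {
        const double l_sec = (2 * lo + hi) / 3;
        const double r_sec = (lo + 2 * hi) / 3;
        if (cost(l_sec) < cost(r_sec))
            hi = r_sec;   // minimum cannot lie right of r_sec
        else
            lo = l_sec;   // minimum cannot lie left of l_sec
    }
    return (lo + hi) / 2;
}

int main() {
    // toy weighted cost with its minimum at offset = 12.5 (e.g. milliseconds)
    auto cost = [](double off) {
        double err_3d = 0.1 * (off - 12.5) * (off - 12.5);
        double err_2d = 0.5 * (off - 12.5) * (off - 12.5);
        return err_3d * 10 + err_2d;   // cf. weight_op: w[0] * 10 + w[1]
    };
    std::printf("estimated offset = %.3f\n", ternary_min(-50.0, 50.0, 1e-3, cost));
    return 0;
}
```
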
@@ -454,6 +479,51 @@ namespace camera_calibrator_impl {
         }
         result_ts_offset = (offset_l + offset_r) / 2;
 
+        if (simulate_conventional) {
+            static constexpr auto select_num = 4;
+            auto step = (float) sample_num / (select_num + 1);
+            auto select_ind = img_index_list_type(select_num);
+            for (auto k = 1; k < select_num; ++k) {
+                select_ind[k] = sample_num - step * k;
+            }
+
+            cam_cv = camera_calib(select_ind);
+            intrinsic_mat = to_mat3(cam_cv.intrinsic);
+            dist_coeffs.clear();
+            for (auto k = 0; k < 8; ++k) {
+                dist_coeffs.push_back(cam_cv.dist_coeffs.at<double>(0, k));
+            }
+
+            auto img_pool_new = image_pool_type();
+            std::ranges::transform(select_ind, std::back_inserter(img_pool_new),
+                                   [this](auto k) { return img_pool[k]; });
+            img_pool = img_pool_new;
+            sample_num = select_num;
+
+            // solve PnP for each image
+            cam_r_vec = std::vector<cv::Mat>(); // chess board in camera
+            cam_t_vec = std::vector<cv::Mat>();
+            for (auto &img: img_pool) {
+                auto &corner = img.corner;
+                cv::Mat chess_r, chess_t; // chess board in camera
+                bool ok = cv::solvePnP(obj_ps_pnp, corner,
+                                       cam_cv.intrinsic, cam_cv.dist_coeffs,
+                                       chess_r, chess_t);
+                auto ret_mat = to_transform_mat(to_vec3(chess_t),
+                                                rodrigues_to_mat(chess_r));
+                auto cam_r = to_cv_mat(glm::mat3(ret_mat)); // rotation part
+                auto cam_t = to_cv_mat(to_translation(ret_mat)); // translation part
+                cam_r_vec.push_back(cam_r);
+                cam_t_vec.push_back(cam_t);
+            }
+
+            // reset weights
+            sample_w = std::vector<float>(sample_num, 1.f);
+        }
+
+        if (disable_temporal_offset) {
+            result_ts_offset = 0;
+        }
         ts_offset = result_ts_offset;
         result_mat = glm::inverse(calib_hand_eye());
         SPDLOG_INFO("Estimated temporal offset is {}ms", result_ts_offset / 1000);
@@ -481,7 +551,7 @@ namespace camera_calibrator_impl {
 
 camera_calibrator::impl::impl(const create_config &_conf) {
     conf = _conf;
-    vg = nvgCreateGL3(NVG_ANTIALIAS | NVG_STENCIL_STROKES);
+//    vg = nvgCreateGL3(NVG_ANTIALIAS | NVG_STENCIL_STROKES);
 }
 
 camera_calibrator::impl::~impl() {

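Note: the block commented out (and moved to the top of calib_hand_eye) is the conventional one-shot baseline: OpenCV's robot-world/hand-eye solver fed with per-view chessboard-in-camera and tracker-in-reference poses. A hedged sketch of that call with the same variable roles as the comments in the diff; the wrapper function itself is illustrative, and building the rotation/translation vectors is assumed to have happened as in calib_hand_eye:

```cpp
// Sketch only: the conventional baseline via cv::calibrateRobotWorldHandEye
// (available in OpenCV 4.5+, header <opencv2/calib3d.hpp>).
#include <opencv2/calib3d.hpp>
#include <vector>

void conventional_hand_eye(const std::vector<cv::Mat> &cam_r_vec,  // chess board in camera (rotations)
                           const std::vector<cv::Mat> &cam_t_vec,  // chess board in camera (translations)
                           const std::vector<cv::Mat> &ref_r_vec,  // tracker in reference (rotations)
                           const std::vector<cv::Mat> &ref_t_vec,  // tracker in reference (translations)
                           cv::Mat &aux_r, cv::Mat &aux_t,         // out: tracker in chess board
                           cv::Mat &ret_r, cv::Mat &ret_t) {       // out: reference in camera
    cv::calibrateRobotWorldHandEye(cam_r_vec, cam_t_vec,
                                   ref_r_vec, ref_t_vec,
                                   aux_r, aux_t,
                                   ret_r, ret_t,
                                   cv::CALIB_ROBOT_WORLD_HAND_EYE_SHAH);  // Shah's method (the default)
}
```

In the commit this call stays commented behind the simulate_conventional flag, while the default path returns the project's own estimate and only kept the Shah result for debug comparison.
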
+ 6 - 0
src/image_process/impl/camera_calibrator_impl.h

@@ -22,6 +22,12 @@ namespace camera_calibrator_impl {
     static constexpr auto track_err_threshold = 0.25f;
     static constexpr auto sample_neighbor_radius = 5.f;
 
+    // for evaluation
+    static constexpr bool disable_temporal_offset = false;
+    static constexpr bool simulate_conventional = false;
+    static constexpr bool save_calib_error = false;
+    static constexpr auto sub_sample_count = 0;
+
     using corner_type = std::vector<cv::Point2f>;
 
     cv::Mat to_cv_mat(const glm::mat3 &mat);

+ 29 - 1
src/impl/apps/debug/app_debug.cpp

@@ -52,17 +52,45 @@
 //}
 
 #include "image_process/camera_calibrator.h"
+#include "module/experiment/calib_eval.h"
 
 app_debug::app_debug(const create_config &conf) {
+    auto cam_info = camera_calibrator::result_type();
     auto calib_conf = camera_calibrator::create_config{
             .pattern_size = cv::Size(11, 8),
             .corner_distance = 5,
+            .cb_func = [&](auto info) { cam_info = info; },
     };
     auto calib = std::make_unique<camera_calibrator>(calib_conf);
 
     auto sim_info = camera_calibrator::simulate_info_type{
-            .data_path = "/home/tpx/project/DepthGuide/cmake-build-debug/exp-20240605/calib/calib_data_c5.txt",
+            .data_path = "/home/tpx/project/DepthGuide/cmake-build-debug/exp-20240605/calib/calib_data_c1.txt",
             .img_size = cv::Size(1920, 1080),
     };
     calib->simulate_process(sim_info);
+
+    std::string eval_path_list[] = {
+            "/home/tpx/project/DepthGuide/cmake-build-debug/exp-20240605_bak/videos/Stable_Pose_1_1717595907048131",
+            "/home/tpx/project/DepthGuide/cmake-build-debug/exp-20240605_bak/videos/Stable_Pose_2_1717596106615177",
+            "/home/tpx/project/DepthGuide/cmake-build-debug/exp-20240605_bak/videos/Stable_Pose_3_1717596157508744",
+            "/home/tpx/project/DepthGuide/cmake-build-debug/exp-20240605_bak/videos/Stable_Pose_4_1717596214643474",
+            "/home/tpx/project/DepthGuide/cmake-build-debug/exp-20240605_bak/videos/Stable_Pose_5_1717596236889051",
+            "/home/tpx/project/DepthGuide/cmake-build-debug/exp-20240605_bak/videos/Stable_Pose_6_1717596274604857",
+            "/home/tpx/project/DepthGuide/cmake-build-debug/exp-20240605_bak/videos/Stable_Pose_7_1717596308077328",
+            "/home/tpx/project/DepthGuide/cmake-build-debug/exp-20240605_bak/videos/Stable_Pose_8_1717596347701421",
+            "/home/tpx/project/DepthGuide/cmake-build-debug/exp-20240605_bak/videos/Dynamic_Pose_1_1717596522706273"
+    };
+
+    auto eval = std::make_unique<calib_eval>(calib_eval::record_config{});
+
+    auto eval_info = calib_eval::simulate_info_type::from_yaml(
+            conf.ext_conf["eval_points"]);
+    eval_info.cam = cam_info;
+
+    for (auto &path: eval_path_list) {
+        eval_info.save_folder = path;
+        eval->simulate_eval(eval_info);
+    }
+
+    glfwSetWindowShouldClose(glfwGetCurrentContext(), true);
 }

+ 22 - 0
src/module/experiment/calib_eval.h

@@ -2,7 +2,11 @@
 #define DEPTHGUIDE_CALIB_EVAL_H
 
 #include "core/object_manager.h"
+#include "core/yaml_utility.hpp"
 #include "core/local_connection.h" // sophiar
+#include "image_process/camera_calibrator.h"
+
+#include <glm/glm.hpp>
 
 #include <memory>
 
@@ -22,6 +26,24 @@ public:
 
     void show();
 
+    struct simulate_info_type {
+        struct point_type {
+            std::string name;
+            glm::vec3 value;
+        };
+        using cam_type = camera_calibrator::result_type;
+
+        std::string save_folder; // previously saved folder
+        std::vector<point_type> points;
+        cam_type cam;
+
+        void fill_from_yaml(const YAML::Node &conf);
+
+        FROM_YAML_IMPL(simulate_info_type);
+    };
+
+    void simulate_eval(const simulate_info_type &info);
+
 private:
     struct impl;
     std::unique_ptr<impl> pimpl;

+ 125 - 0
src/module/experiment/impl/calib_eval.cpp

@@ -1,10 +1,16 @@
 #include "calib_eval_impl.h"
 #include "core/math_helper.hpp"
 #include "core/imgui_utility.hpp"
+#include "image_process/impl/camera_calibrator_impl.h"
 
+#include <nlohmann/json.hpp>
+
+#include <opencv2/calib3d.hpp>
 #include <opencv2/imgcodecs.hpp>
 
+#include <filesystem>
 #include <fstream>
+#include <regex>
 
 calib_eval::impl::~impl() {
     if (img_conn.connected()) {
@@ -87,6 +93,109 @@ void calib_eval::impl::show() {
     }
 }
 
+void calib_eval::impl::simulate_eval(const simulate_info_type &info) {
+    namespace fs = std::filesystem;
+    auto tb = transform_buffer();
+    auto track_file = fs::path(info.save_folder) / "track_data.txt";
+    assert(fs::is_regular_file(track_file));
+    auto fin = std::ifstream(track_file);
+    int sample_num = 0;
+    fin >> sample_num;
+    for (auto k = 0; k < sample_num; ++k) {
+        using mat_type = glm::mat4;
+        using value_type = mat_type::value_type;
+        static constexpr auto len = sizeof(mat_type) / sizeof(value_type);
+        timestamp_type ts;
+        glm::mat4 mat;
+        auto ptr = (value_type *) &mat;
+        fin >> ts;
+        for (auto i = 0; i < len; ++i) {
+            fin >> ptr[i];
+        }
+        tb.add(mat, ts);
+    }
+
+    auto obj_ps = std::vector<cv::Point3f>();
+    auto img_ps = std::vector<cv::Point2f>();
+    std::ranges::transform(info.points, std::back_inserter(obj_ps), [](auto point) {
+        auto p = point.value;
+        return cv::Point3f(p.x, p.y, p.z);
+    });
+
+    using namespace nlohmann;
+    auto label_filepath = fs::path(info.save_folder) / "annotate.json";
+    auto label_file = std::ifstream(label_filepath);
+    auto label = json::parse(label_file);
+
+    auto &cam_in = info.cam.cam_in;
+    auto pixel_to_3d = [&](glm::vec2 p, float d) {
+        auto img_ps = std::vector<cv::Point2f>({cv::Point2f(p.x, p.y)});
+        auto norm_ps = std::vector<cv::Point2f>();
+        cv::undistortPoints(img_ps, norm_ps,
+                            cam_in.intrinsic_cv_mat(), cam_in.dist_cv_mat());
+        assert(norm_ps.size() == 1);
+        auto np = norm_ps[0];
+        return glm::vec3(np.x * d, np.y * d, d);
+    };
+
+    using namespace camera_calibrator_impl;
+    auto ret = json::array();
+    auto num_re = std::regex(R"(img_(\d+)\.jpg)");
+    assert(fs::is_directory(info.save_folder));
+    for (const auto &item: fs::directory_iterator(info.save_folder)) {
+        auto filename = item.path().filename().string();
+        auto match = std::smatch();
+        if (!std::regex_search(filename, match, num_re)
+            || match.size() <= 1) {
+            continue;
+        }
+
+        auto ts = std::stoll(match.str(1));
+        auto ref_mat = tb.query(ts + info.cam.time_offset); // model in reference
+        auto t_mat = glm::inverse(info.cam.transform) * ref_mat; // model in camera
+        auto r_vec = to_cv_rodigues(glm::mat3(t_mat));
+        auto t_vec = to_cv_mat(to_translation(t_mat));
+        img_ps.clear();
+        cv::projectPoints(obj_ps, r_vec, t_vec,
+                          cam_in.intrinsic_cv_mat(), cam_in.dist_cv_mat(),
+                          img_ps);
+        assert(img_ps.size() == info.points.size());
+
+        assert(label.contains(filename));
+        auto img_label = label[filename];
+        auto sub = json::array();
+        for (auto i = 0; i < info.points.size(); ++i) {
+            auto p_name = info.points[i].name;
+            if (!img_label.contains(p_name)) continue;
+            auto p_gold_j = img_label[p_name];
+            auto p_gold = glm::vec2(
+                    p_gold_j[0].get<float>(), p_gold_j[1].get<float>());
+
+            auto p_pred = to_vec2(img_ps[i]);
+            auto p_info = json();
+            p_info["name"] = info.points[i].name;
+            p_info["pred"] = json::array({p_pred.x, p_pred.y});
+            p_info["gold"] = json::array({p_gold.x, p_gold.y});
+            p_info["error_2d"] = glm::distance(p_pred, p_gold);
+            auto depth = transform_p(t_mat, to_vec3(obj_ps[i])).z;
+            assert(depth > 0);
+            p_info["error_3d"] = glm::distance(pixel_to_3d(p_pred, depth),
+                                               pixel_to_3d(p_gold, depth));
+//            auto p_proj = transform_p(t_mat, info.points[i].value);
+//            p_info["value_raw"] = json::array({p_proj.x, p_proj.y, p_proj.z});
+            sub.push_back(p_info);
+        }
+        auto i_info = json();
+        i_info["ts"] = ts;
+        i_info["result"] = sub;
+        ret.push_back(i_info);
+    }
+
+    auto result_file = fs::path(info.save_folder) / "result.json";
+    auto fout = std::ofstream(result_file);
+    fout << ret.dump(2);
+}
+
 calib_eval::calib_eval(const record_config &rec)
         : pimpl(std::make_unique<impl>()) {
     pimpl->rec_conf = rec;
@@ -96,4 +205,20 @@ calib_eval::~calib_eval() = default;
 
 void calib_eval::show() {
     pimpl->show();
+}
+
+void calib_eval::simulate_info_type::fill_from_yaml(const YAML::Node &conf) {
+    auto load_vec3 = [](const YAML::Node &conf) {
+        return glm::vec3(conf[0].as<float>(), conf[1].as<float>(), conf[2].as<float>());
+    };
+    auto load_point = [=](const YAML::Node &conf) {
+        return point_type(LOAD_STR("name"), load_vec3(conf["value"]));
+    };
+    for (auto p_conf: conf) {
+        points.push_back(load_point(p_conf));
+    }
+}
+
+void calib_eval::simulate_eval(const simulate_info_type &info) {
+    pimpl->simulate_eval(info);
 }

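Note: simulate_eval writes one result.json per recorded folder, an array of per-image entries whose result lists hold pred/gold pixels plus error_2d and error_3d per labeled point. A hedged sketch of aggregating those files into overall statistics with nlohmann::json; the folder scan, units, and output format are assumptions, and the project may post-process the files differently:

```cpp
// Sketch only: collect error_2d / error_3d from every result.json found one
// level below the current directory and print their means.
#include <nlohmann/json.hpp>
#include <filesystem>
#include <fstream>
#include <iostream>
#include <vector>

int main() {
    namespace fs = std::filesystem;
    using nlohmann::json;

    std::vector<double> err_2d, err_3d;
    for (const auto &entry : fs::directory_iterator(".")) {   // e.g. the Stable_Pose_* folders
        auto file = entry.path() / "result.json";
        if (!fs::is_regular_file(file)) continue;
        std::ifstream fin(file);
        auto data = json::parse(fin);
        for (const auto &frame : data)                // one entry per image (ts, result)
            for (const auto &p : frame["result"]) {   // one entry per labeled point
                err_2d.push_back(p["error_2d"].get<double>());
                err_3d.push_back(p["error_3d"].get<double>());
            }
    }

    auto mean = [](const std::vector<double> &v) {
        double s = 0;
        for (double x : v) s += x;
        return v.empty() ? 0.0 : s / (double) v.size();
    };
    std::cout << "samples: " << err_2d.size()
              << ", mean 2D error: " << mean(err_2d) << " px"
              << ", mean 3D error: " << mean(err_3d) << " (model units, likely mm)\n";
    return 0;
}
```
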
+ 2 - 0
src/module/experiment/impl/calib_eval_impl.h

@@ -36,6 +36,8 @@ struct calib_eval::impl {
 
     void show();
 
+    void simulate_eval(const simulate_info_type &info);
+
 };
 
 #endif //DEPTHGUIDE_CALIB_EVAL_IMPL_H

+ 1 - 0
src/render/impl/render_utility.cpp

@@ -365,6 +365,7 @@ smart_frame_buffer::~smart_frame_buffer() {
 }
 
 void smart_frame_buffer::deallocate() {
+    if (id == 0) return;
     glDeleteFramebuffers(1, &id);
 }