Browse Source

Implemented hand eye calibration.

jcsyshc 1 year ago
parent
commit
c90a9b585b

+ 12 - 0
CMakeLists.txt

@@ -6,6 +6,7 @@ set(CMAKE_CXX_STANDARD 20)
 add_executable(${PROJECT_NAME} src/main.cpp
         src/ai/impl/fast_sam.cpp
         src/ai/impl/zmq_client.cpp
+        src/image_process/impl/camera_calibrator.cpp
         src/image_process/impl/image_process_ui.cpp
         src/image_process/impl/process_funcs.cpp
         src/image_process/impl/versatile_convertor.cpp
@@ -40,11 +41,13 @@ add_executable(${PROJECT_NAME} src/main.cpp
         src/module/impl/camera_augment_helper_v2.cpp
         src/module/impl/depth_guide_controller.cpp
         src/module/impl/image_augment_helper.cpp
+        src/module/impl/image_augment_helper_v2.cpp
         src/module/impl/image_player.cpp
         src/module/impl/image_saver.cpp
         src/module/impl/image_viewer.cpp
         src/module/impl/image_streamer.cpp
         src/module/impl/pc_viewer.cpp
+        src/module/impl/sophiar_monitor.cpp
         src/network_v3/sender_tcp.cpp
         src/network_v3/sender_udp_fec.cpp
         #        src/network_v3/receiver_tcp.cpp
@@ -161,6 +164,11 @@ target_sources(${PROJECT_NAME} PRIVATE
 target_compile_definitions(${PROJECT_NAME} PRIVATE
         IMGUIZMO_IMGUI_FOLDER=${IMGUI_DIR}/)
 
+# NanoVG config
+set(NANOVG_DIR /home/tpx/src/nanovg)
+target_include_directories(${PROJECT_NAME} PRIVATE ${NANOVG_DIR}/src)
+target_sources(${PROJECT_NAME} PRIVATE ${NANOVG_DIR}/src/nanovg.c)
+
 # yaml-cpp
 find_package(yaml-cpp REQUIRED)
 target_include_directories(${PROJECT_NAME} PRIVATE ${YAML_CPP_INCLUDE_DIR})
@@ -280,6 +288,10 @@ target_link_libraries(${PROJECT_NAME} assimp::assimp)
 find_package(azmq REQUIRED)
 target_link_libraries(${PROJECT_NAME} Azmq::azmq)
 
+# BS::thread_pool config
+set(BSTP_DIR /home/tpx/src/thread-pool-4.1.0)
+target_include_directories(${PROJECT_NAME} PRIVATE ${BSTP_DIR}/include)
+
 # Sophiar2 config
 if (WIN32)
     set(Sophiar2DIR D:/Program/Robot/Sophiar2)

+ 9 - 0
data/config_depth_guide_v2.yaml

@@ -9,6 +9,15 @@ augment_list:
   - name: Femur
     transform_var: femur_in_tracker_denoised
     model: /home/tpx/project/DepthGuide/data/model/Femur_3.stl
+  - name: FemurPlaning
+    transform_var: femur_in_tracker_denoised
+    model: /home/tpx/project/DepthGuide/data/model/JIN MING HAO_Plane 1_001.stl
+  - name: FemurAfterCutLeft
+    transform_var: femur_in_tracker_denoised
+    model: /home/tpx/project/DepthGuide/data/model/JIN MING HAO_Part_2_of_Femur_3_001.stl
+  - name: FemurAfterCutRight
+    transform_var: femur_in_tracker_denoised
+    model: /home/tpx/project/DepthGuide/data/model/JIN MING HAO_Part_3_of_Femur_3_001.stl
   - name: Tibia
     transform_var: tibia_in_tracker_denoised
     model: /home/tpx/project/DepthGuide/data/model/Tibia_3.stl

+ 15 - 1
data/config_endo_guide.yaml

@@ -1 +1,15 @@
-app_name: endo_guide
+app_name: endo_guide
+
+sophiar_config: /home/tpx/project/DepthGuide/data/sophiar_config_endo_guide.json
+sophiar_start_var: tracker_all
+
+camera_ref_transform_var: camera_ref_in_tracker
+camera_transform_var: camera_in_tracker
+
+monitor:
+  - name: Camera
+    var: camera_ref_in_tracker
+  - name: Femur
+    var: femur_ref_in_tracker
+  - name: Tibia
+    var: tibia_ref_in_tracker

+ 405 - 0
data/sophiar_config_endo_guide.json

@@ -0,0 +1,405 @@
+{
+  "variable_list": [
+    {
+      "name": "probe_ref_in_tracker",
+      "type": "transform_obj"
+    },
+    {
+      "name": "probe_in_tracker",
+      "type": "transform_obj"
+    },
+    {
+      "name": "femur_ref_in_tracker",
+      "type": "transform_obj"
+    },
+    {
+      "name": "femur_in_femur_ref",
+      "type": "transform_obj"
+    },
+    {
+      "name": "tibia_ref_in_tracker",
+      "type": "transform_obj"
+    },
+    {
+      "name": "tibia_in_tibia_ref",
+      "type": "transform_obj"
+    },
+    {
+      "name": "camera_ref_in_tracker",
+      "type": "transform_obj"
+    },
+    {
+      "name": "camera_in_tracker",
+      "type": "transform_obj"
+    },
+    {
+      "name": "femur_in_tracker",
+      "type": "transform_obj"
+    },
+    {
+      "name": "tibia_in_tracker",
+      "type": "transform_obj"
+    },
+    {
+      "name": "probe_in_femur_ref",
+      "type": "transform_obj"
+    },
+    {
+      "name": "probe_in_tibia_ref",
+      "type": "transform_obj"
+    },
+    {
+      "name": "probe_in_femur",
+      "type": "transform_obj"
+    },
+    {
+      "name": "probe_in_tibia",
+      "type": "transform_obj"
+    },
+    {
+      "name": "probe_tip_in_femur_ref",
+      "type": "scalarxyz_obj"
+    },
+    {
+      "name": "probe_tip_in_tibia_ref",
+      "type": "scalarxyz_obj"
+    },
+    {
+      "name": "picked_point_in_femur_ref",
+      "type": "scalarxyz_obj"
+    },
+    {
+      "name": "picked_point_in_tibia_ref",
+      "type": "scalarxyz_obj"
+    },
+    {
+      "name": "probe_tip_in_femur",
+      "type": "scalarxyz_obj"
+    },
+    {
+      "name": "probe_tip_in_tibia",
+      "type": "scalarxyz_obj"
+    },
+    {
+      "name": "picked_point_in_femur",
+      "type": "scalarxyz_obj"
+    },
+    {
+      "name": "picked_point_in_tibia",
+      "type": "scalarxyz_obj"
+    },
+    {
+      "name": "femur_in_tracker_denoised",
+      "type": "transform_obj"
+    },
+    {
+      "name": "tibia_in_tracker_denoised",
+      "type": "transform_obj"
+    }
+  ],
+  "object_list": [
+    {
+      "type": "transform_tree",
+      "name": "transform_tree",
+      "init_config": {
+        "node_list": [
+          {
+            "name": "tracker"
+          },
+          {
+            "name": "probe_ref",
+            "parent": "tracker",
+            "transform_var_name": "probe_ref_in_tracker"
+          },
+          {
+            "name": "probe",
+            "parent": "probe_ref",
+            "transform": [
+              -0.23,
+              -13.98,
+              -119.65,
+              1,
+              0,
+              0,
+              0
+            ]
+          },
+          {
+            "name": "femur_ref",
+            "parent": "tracker",
+            "transform_var_name": "femur_ref_in_tracker"
+          },
+          {
+            "name": "femur",
+            "parent": "femur_ref",
+            "transform_var_name": "femur_in_femur_ref"
+          },
+          {
+            "name": "tibia_ref",
+            "parent": "tracker",
+            "transform_var_name": "tibia_ref_in_tracker"
+          },
+          {
+            "name": "tibia",
+            "parent": "tibia_ref",
+            "transform_var_name": "tibia_in_tibia_ref"
+          },
+          {
+            "name": "camera_ref",
+            "parent": "tracker",
+            "transform_var_name": "camera_ref_in_tracker"
+          },
+          {
+            "name": "camera",
+            "parent": "camera_ref",
+            "transform": [
+              19.3286001048916,
+              37.0595264763240,
+              -55.5602916285344,
+              0.505328802080145,
+              -0.493396136841779,
+              0.500875581191629,
+              0.500326599436610
+            ]
+          }
+        ]
+      },
+      "start_config": {
+        "watch_list": [
+          {
+            "target": "camera",
+            "observer": "tracker",
+            "transform_var_name": "camera_in_tracker"
+          },
+          {
+            "target": "femur",
+            "observer": "tracker",
+            "transform_var_name": "femur_in_tracker"
+          },
+          {
+            "target": "tibia",
+            "observer": "tracker",
+            "transform_var_name": "tibia_in_tracker"
+          },
+          {
+            "target": "probe",
+            "observer": "tracker",
+            "transform_var_name": "probe_in_tracker"
+          },
+          {
+            "target": "probe",
+            "observer": "femur_ref",
+            "transform_var_name": "probe_in_femur_ref"
+          },
+          {
+            "target": "probe",
+            "observer": "tibia_ref",
+            "transform_var_name": "probe_in_tibia_ref"
+          },
+          {
+            "target": "probe",
+            "observer": "femur",
+            "transform_var_name": "probe_in_femur"
+          },
+          {
+            "target": "probe",
+            "observer": "tibia",
+            "transform_var_name": "probe_in_tibia"
+          }
+        ]
+      }
+    },
+    {
+      "type": "ndi_interface",
+      "name": "ndi",
+      "init_config": {
+        "address_type": "ethernet",
+        "ip": "10.0.0.5",
+        "tcp_port": 8765,
+        "com_port": "/dev/ttyUSB0",
+        "tool_list": [
+          {
+            "rom_path": "/home/tpx/data/roms/GlassProbe_4Ball_4.rom",
+            "serial_number": "3DD50000",
+            "outputs": {
+              "transform": "probe_ref_in_tracker"
+            }
+          },
+          {
+            "rom_path": "/home/tpx/data/roms/Glass_4Ball_2.rom",
+            "outputs": {
+              "transform": "camera_ref_in_tracker"
+            }
+          },
+          {
+            "rom_path": "/home/tpx/data/roms/Glass_3Ball_5.rom",
+            "serial_number": "39B33001",
+            "outputs": {
+              "transform": "femur_ref_in_tracker"
+            }
+          },
+          {
+            "rom_path": "/home/tpx/data/roms/Glass_3Ball_6.rom",
+            "outputs": {
+              "transform": "tibia_ref_in_tracker"
+            }
+          }
+        ]
+      },
+      "start_config": {
+        "allow_unreliable": true,
+        "prefer_stream_tracking": false
+      }
+    },
+    {
+      "type": "variable_validity_watcher",
+      "name": "probe_visibility_watcher",
+      "start_config": {
+        "variable_name": "probe_in_tracker"
+      }
+    },
+    {
+      "type": "transform_obj_validity_watcher",
+      "name": "camera_visibility_watcher",
+      "start_config": {
+        "variable_name": "camera_ref_in_tracker"
+      }
+    },
+    {
+      "type": "transform_obj_validity_watcher",
+      "name": "femur_visibility_watcher",
+      "start_config": {
+        "variable_name": "femur_ref_in_tracker"
+      }
+    },
+    {
+      "type": "transform_obj_validity_watcher",
+      "name": "tibia_visibility_watcher",
+      "start_config": {
+        "variable_name": "tibia_ref_in_tracker"
+      }
+    },
+    {
+      "type": "scalarxyz_transformer",
+      "name": "probe_tip_in_femur_ref_transformer",
+      "start_config": {
+        "transform_type": "point",
+        "transform_var_name": "probe_in_femur_ref",
+        "target_value": [
+          0,
+          0,
+          0
+        ],
+        "output_var_name": "probe_tip_in_femur_ref"
+      },
+      "dependencies": [
+        "ndi",
+        "transform_tree"
+      ]
+    },
+    {
+      "type": "scalarxyz_transformer",
+      "name": "probe_tip_in_tibia_ref_transformer",
+      "start_config": {
+        "transform_type": "point",
+        "transform_var_name": "probe_in_tibia_ref",
+        "target_value": [
+          0,
+          0,
+          0
+        ],
+        "output_var_name": "probe_tip_in_tibia_ref"
+      },
+      "dependencies": [
+        "ndi",
+        "transform_tree"
+      ]
+    },
+    {
+      "type": "transform_stabilizer",
+      "name": "point_picker_in_femur_ref",
+      "start_config": {
+        "stable_type": "point",
+        "input_var_name": "probe_tip_in_femur_ref",
+        "output_var_name": "picked_point_in_femur_ref",
+        "linear_tolerance_mm": 0.05,
+        "temporal_interval_s": 3
+      },
+      "dependencies": [
+        "probe_tip_in_femur_ref_transformer"
+      ]
+    },
+    {
+      "type": "transform_stabilizer",
+      "name": "point_picker_in_tibia_ref",
+      "start_config": {
+        "stable_type": "point",
+        "input_var_name": "probe_tip_in_tibia_ref",
+        "output_var_name": "picked_point_in_tibia_ref",
+        "linear_tolerance_mm": 0.05,
+        "temporal_interval_s": 3
+      },
+      "dependencies": [
+        "probe_tip_in_tibia_ref_transformer"
+      ]
+    },
+    {
+      "type": "scalarxyz_obj_watcher",
+      "name": "picked_point_watcher_for_femur_ref",
+      "start_config": {
+        "variable_name": "picked_point_in_femur_ref"
+      }
+    },
+    {
+      "type": "scalarxyz_obj_watcher",
+      "name": "picked_point_watcher_for_tibia_ref",
+      "start_config": {
+        "variable_name": "picked_point_in_tibia_ref"
+      }
+    },
+    {
+      "type": "transform_obj_watcher",
+      "name": "femur_registration_result_watcher",
+      "start_config": {
+        "variable_name": "femur_in_femur_ref"
+      }
+    },
+    {
+      "type": "transform_obj_watcher",
+      "name": "tibia_registration_result_watcher",
+      "start_config": {
+        "variable_name": "tibia_in_tibia_ref"
+      }
+    },
+    {
+      "type": "kalman_denoiser",
+      "name": "femur_denoiser",
+      "init_config": {
+        "variable_in": "femur_in_tracker",
+        "variable_out": "femur_in_tracker_denoised"
+      }
+    },
+    {
+      "type": "kalman_denoiser",
+      "name": "tibia_denoiser",
+      "init_config": {
+        "variable_in": "tibia_in_tracker",
+        "variable_out": "tibia_in_tracker_denoised"
+      }
+    },
+    {
+      "type": "empty_object",
+      "name": "tracker_all",
+      "dependencies": [
+        "transform_tree",
+        "ndi",
+        "probe_visibility_watcher",
+        "camera_visibility_watcher",
+        "femur_visibility_watcher",
+        "tibia_visibility_watcher",
+        "femur_denoiser",
+        "tibia_denoiser"
+      ]
+    }
+  ]
+}

+ 9 - 2
src/ai/impl/zmq_client.cpp

@@ -104,6 +104,10 @@ namespace zmq_client_impl {
 
             auto img_shape = img_info["shape"];
             assert(img_shape.is_array());
+            if (img_shape.size() == 1) { // no object found
+                assert(img_shape[0].get<int>() == 0);
+                return {};
+            }
             assert(img_shape.size() == 2
                    || img_shape.size() == 3);
             auto img_height = img_shape[0].get<int>();
@@ -168,7 +172,8 @@ void zmq_client::impl::aux_thread_work(const create_config &conf) {
 }
 
 void zmq_client::impl::aux_on_request(const request_type &req) {
-    assert(!socket_busy.test());
+//    assert(!socket_busy.test()); // TODO: fix socket_busy
+    if (socket_busy.test()) return;
     socket_busy.test_and_set();
 
     // send request
@@ -194,7 +199,9 @@ void zmq_client::impl::try_recv_rep() {
 
 void zmq_client::impl::on_reply(const data_type &data) {
     auto rep = decode_reply(data);
-    reply_cb(rep);
+    if (!rep.img_list.empty()) {
+        reply_cb(rep);
+    }
     socket_busy.clear();
 }
 

+ 35 - 0
src/core/math_helper.hpp

@@ -19,6 +19,12 @@ inline glm::mat4 to_transform_mat(glm::vec3 t, glm::vec3 r) {
     return offset * glm::mat4_cast(rot);
 }
 
+inline glm::mat4 to_transform_mat(glm::vec3 t, glm::mat3 r) {
+    auto ret = glm::mat4(r);
+    ret[3] = glm::vec4(t, 1.f);
+    return ret;
+}
+
 template<typename Scalar, int Mode>
 inline glm::mat4 to_mat4(const Eigen::Transform<Scalar, 3, Mode> &m) {
     auto ret = glm::mat4();
@@ -30,6 +36,23 @@ inline glm::mat4 to_mat4(const Eigen::Transform<Scalar, 3, Mode> &m) {
     return ret;
 }
 
+template<typename Scalar, int Mode>
+inline auto to_eigen_transform(const glm::mat4 &m) {
+    auto ret = Eigen::Transform<Scalar, 3, Mode>();
+    auto &mat = ret.matrix();
+    for (auto j = 0; j < 4; ++j)
+        for (auto i = 0; i < 4; ++i) {
+            mat(i, j) = m[j][i];
+        }
+    return ret;
+}
+
+template<typename Scalar, int Mode>
+inline void to_eigen_transform(const glm::mat4 &m,
+                               Eigen::Transform<Scalar, 3, Mode> &ret) {
+    ret = to_eigen_transform<Scalar, Mode>(m);
+}
+
 template<typename T>
 concept Vec2Type = requires(T t) {
     { t.x } -> std::convertible_to<float>;
@@ -57,15 +80,27 @@ inline auto to_homo(const glm::vec3 &v) {
     return glm::vec4(v, 1.f);
 }
 
+inline auto to_homo(const glm::vec2 &v) {
+    return glm::vec3(v, 1.f);
+}
+
 inline auto from_homo(const glm::vec4 &v) {
     return glm::vec3(v) / v.w;
 }
 
+inline auto from_homo(const glm::vec3 &v) {
+    return glm::vec2(v) / v.z;
+}
+
 // transform point
 inline glm::vec3 transform_p(const glm::mat4 &mat, const glm::vec3 &point) {
     return from_homo(mat * to_homo(point));
 }
 
+inline glm::vec2 transform_p(const glm::mat3 &mat, const glm::vec2 &point) {
+    return from_homo(mat * to_homo(point));
+}
+
 // transform vector
 inline glm::vec3 transform_v(const glm::mat4 &mat, const glm::vec3 &vec) {
     return glm::mat3(mat) * vec;

+ 61 - 0
src/image_process/camera_calibrator.h

@@ -0,0 +1,61 @@
+#ifndef DEPTHGUIDE_CAMERA_CALIBRATOR_H
+#define DEPTHGUIDE_CAMERA_CALIBRATOR_H
+
+#include "core/cuda_helper.hpp"
+#include "core/object_manager.h"
+#include "core/local_connection.h" // sophiar
+
+#include <opencv2/core/types.hpp>
+
+#include <memory>
+
+class camera_calibrator {
+public:
+
+    enum calib_board_enum {
+        CALIB_CHESS,
+        CALIB_CIRCLE,
+    };
+
+    struct create_config {
+        obj_name_type img_name;
+        calib_board_enum board_type;
+        cv::Size pattern_size;
+        float corner_distance; // in mm
+
+        uint8_t num_threads = 2;
+        timestamp_type min_interval = 1e5; // 100ms, 10fps
+        smart_cuda_stream *stream = nullptr;
+        using io_context = boost::asio::io_context;
+        io_context *ctx;
+
+        // for sophiar
+        sophiar_conn_type *sophiar_conn = nullptr;
+        std::string ref_transform_var;
+        std::string result_transform_var;
+    };
+
+    explicit camera_calibrator(const create_config &conf);
+
+    ~camera_calibrator();
+
+    void show();
+
+    // connect to post render of image augment helper
+    void render();
+
+    // for debug
+
+    struct simulate_info_type {
+        std::string data_path; // previously saved track data
+        cv::Size img_size;
+    };
+
+    void simulate_process(const simulate_info_type &info);
+
+private:
+    struct impl;
+    std::unique_ptr<impl> pimpl;
+};
+
+#endif //DEPTHGUIDE_CAMERA_CALIBRATOR_H

+ 619 - 0
src/image_process/impl/camera_calibrator.cpp

@@ -0,0 +1,619 @@
+#include "camera_calibrator_impl.h"
+#include "core/imgui_utility.hpp"
+#include "third_party/scope_guard.hpp"
+#include "render/render_utility.h"
+
+#include <boost/asio/post.hpp>
+
+#include <opencv2/calib3d.hpp>
+#include <opencv2/imgproc.hpp>
+
+#include <glm/ext/quaternion_geometric.hpp>
+#include <glm/gtc/matrix_access.hpp>
+
+#define NANOVG_GL3_IMPLEMENTATION
+
+#include <nanovg_gl.h>
+
+#include <fstream>
+#include <ranges>
+
+namespace camera_calibrator_impl {
+
+    cv::Mat to_cv_mat(const glm::mat3 &mat) {
+        auto ret = cv::Mat(3, 3, CV_32FC1);
+        for (auto j = 0; j < 3; ++j)
+            for (auto i = 0; i < 3; ++i)
+                ret.at<float>(j, i) = mat[i][j];
+        return ret;
+    }
+
+    cv::Mat to_cv_mat(const glm::vec3 &vec) {
+        auto ret = cv::Mat(3, 1, CV_32FC1);
+        for (auto j = 0; j < 3; ++j)
+            ret.at<float>(j, 0) = vec[j];
+        return ret;
+    }
+
+    glm::mat3 rodrigues_to_mat(const cv::Mat &vec) {
+        assert(vec.size().area() == 3);
+        auto rot_mat = cv::Mat();
+        cv::Rodrigues(vec, rot_mat);
+        return to_mat3(rot_mat);
+    }
+
+    cv::Mat to_cv_rodigues(const glm::mat3 &mat) {
+        auto ret = cv::Mat();
+        cv::Rodrigues(to_cv_mat(mat), ret);
+        return ret;
+    }
+
+    glm::mat3 to_mat3(const cv::Mat &mat) {
+        assert(mat.size() == cv::Size(3, 3));
+        assert(mat.type() == CV_32FC1
+               || mat.type() == CV_64FC1);
+        auto ret = glm::mat3();
+        for (auto j = 0; j < 3; ++j)
+            for (auto i = 0; i < 3; ++i) {
+                switch (mat.type()) {
+                    // @formatter:off
+                    case CV_32FC1: { ret[i][j] = mat.at<float>(j, i); break; }
+                    case CV_64FC1: { ret[i][j] = mat.at<double>(j, i); break; }
+                    // @formatter:on
+                    default: {
+                        RET_ERROR_E;
+                    }
+                }
+            }
+        return ret;
+    }
+
+    glm::vec3 to_vec3(const cv::Mat &mat) {
+        assert(mat.size() == cv::Size(1, 3));
+        assert(mat.type() == CV_32FC1
+               || mat.type() == CV_64FC1);
+        auto ret = glm::vec3();
+        for (auto j = 0; j < 3; ++j) {
+            switch (mat.type()) {
+                // @formatter:off
+                case CV_32FC1: { ret[j] = mat.at<float>(j, 0); break; }
+                case CV_64FC1: { ret[j] = mat.at<double>(j, 0); break; }
+                    // @formatter:on
+                default: {
+                    RET_ERROR_E;
+                }
+            }
+        }
+        return ret;
+    }
+
+    template<typename Tp>
+    glm::vec3 to_vec3(const cv::Vec<Tp, 3> &vec) {
+        auto ret = glm::vec3();
+        for (auto j = 0; j < 3; ++j)
+            ret[j] = vec[j];
+        return ret;
+    }
+
+    glm::vec3 to_translation(const glm::mat4 &mat) {
+        return glm::column(mat, 3);
+    }
+
+    void hand_eye_calib::set_object_points(cv::Size size, float dis) {
+        obj_ps.clear();
+        for (auto iy = 0; iy < size.height; ++iy)
+            for (auto ix = 0; ix < size.width; ++ix) {
+                obj_ps.emplace_back(iy * dis, ix * dis);
+            }
+    }
+
+    glm::mat4 hand_eye_calib::calc_track_mat(timestamp_type t) {
+        auto iter = track_pool.lower_bound(t);
+        assert(iter != track_pool.end());
+        if (iter == track_pool.begin()
+            || iter->first == t) { // do not need interpolation
+            return iter->second;
+        }
+
+        auto next = iter, prev = --iter;
+        assert(next != track_pool.begin());
+        auto next_ts = next->first, prev_ts = prev->first;
+        auto interp_t = 1.0f * (t - prev_ts) / (next_ts - prev_ts);
+
+        auto next_mat = next->second, prev_mat = prev->second;
+        auto next_t = to_translation(next_mat);
+        auto prev_t = to_translation(prev_mat);
+        auto cur_t = glm::mix(prev_t, next_t, interp_t);
+
+        auto next_quat = glm::quat_cast(glm::mat3(next_mat));
+        auto prev_quat = glm::quat_cast(glm::mat3(prev_mat));
+        auto cur_quat = glm::slerp(prev_quat, next_quat, interp_t);
+
+        return to_transform_mat(cur_t, glm::mat3_cast(cur_quat));
+    }
+
+    glm::vec2 hand_eye_calib::distort_point(glm::vec2 p) {
+        auto &dc = dist_coeffs;
+        assert(dc.size() == 8);
+        auto k1 = dc[0], k2 = dc[1], p1 = dc[2], p2 = dc[3];
+        auto k3 = dc[4], k4 = dc[5], k5 = dc[6], k6 = dc[7];
+
+        auto r2 = p.x * p.x + p.y * p.y;
+        auto r4 = r2 * r2, r6 = r4 * r2;
+        auto k = (1.f + k1 * r2 + k2 * r4 + k3 * r6)
+                 / (1.f + k4 * r2 + k5 * r4 + k6 * r6);
+        auto tx = p.x * k
+                  + 2 * p1 * p.x * p.y
+                  + p2 * (r2 + 2 * p.x * p.x);
+        auto ty = p.y * k
+                  + p1 * (r2 + 2 * p.y * p.y)
+                  + 2 * p2 * p.x * p.y;
+        return glm::vec2(tx, ty);
+    }
+
+    glm::vec2 hand_eye_calib::project_point(glm::vec3 p) {
+        auto np = distort_point(from_homo(p));
+        return transform_p(intrinsic_mat, np);
+    }
+
+    float hand_eye_calib::corner_distance(const corner_type &c1,
+                                          const corner_type &c2) {
+        assert(c1.size() == c2.size());
+        auto corner_num = c1.size();
+        float dis_sum = 0.f;
+        for (auto k = 0; k < corner_num; ++k) {
+            auto p1 = c1[k], p2 = c2[k];
+            dis_sum += sqrt(pow(p1.x - p2.x, 2.f)
+                            + pow(p1.y - p2.y, 2.f));
+        }
+        return dis_sum / corner_num;
+    }
+
+    void hand_eye_calib::calc() {
+        auto sample_num = img_pool.size();
+        auto corner_num = img_pool.begin()->corner.size();
+
+        // select some images for calibrateCamera
+        static constexpr auto pix_dis_threshold = 128.0f;
+        auto img_ps_in = std::vector<corner_type>();
+        auto img_select_op = [&](corner_type &c_cand) {
+            return std::ranges::all_of(img_ps_in, [&](corner_type &c_exist) {
+                return corner_distance(c_cand, c_exist) > pix_dis_threshold;
+            });
+        };
+
+        auto img_ps_all = std::vector<corner_type>();
+        std::ranges::transform(img_pool, std::back_inserter(img_ps_all),
+                               [](const auto &item) { return item.corner; });
+        std::ranges::copy_if(img_ps_all, std::back_inserter(img_ps_in), img_select_op);
+
+        auto select_num = img_ps_in.size();
+        auto obj_ps_in =
+                std::vector<object_points_type>(select_num, obj_ps);
+
+        cv::Mat r_vec_ignore, t_vec_ignore;
+        auto intrinsic_cv_mat = cv::Mat();
+        auto dist_coeffs_mat = cv::Mat();
+        auto err = cv::calibrateCamera(
+                obj_ps_in, img_ps_in, img_size,
+                intrinsic_cv_mat, dist_coeffs_mat,
+                r_vec_ignore, t_vec_ignore,
+                cv::CALIB_RATIONAL_MODEL); // use k4, k5, k6
+        SPDLOG_INFO("Overall camera calibration error is {:.2f}pix", err);
+        intrinsic_mat = to_mat3(intrinsic_cv_mat);
+
+        assert(dist_coeffs_mat.size().height == 1);
+        auto dist_coeffs_num = dist_coeffs_mat.size().width;
+        assert(dist_coeffs_num >= 8); // k1-3, p1-2, k4-6
+        dist_coeffs.reserve(dist_coeffs_num);
+        assert(dist_coeffs_mat.type() == CV_64FC1);
+        for (auto k = 0; k < 8; ++k) {
+            dist_coeffs.push_back(dist_coeffs_mat.at<double>(0, k));
+        }
+
+        // solve PnP for each image
+        auto cam_r_vec = std::vector<cv::Mat>(); // chess board in camera
+        auto cam_t_vec = std::vector<cv::Mat>();
+        auto obj_ps_pnp = std::vector<cv::Point3d>();
+        std::ranges::transform(obj_ps, std::back_inserter(obj_ps_pnp), [](cv::Vec3f &p) {
+            return cv::Point3d(p[0], p[1], p[2]);
+        });
+        std::optional<glm::quat> last_rot_quat;
+        static constexpr auto quat_threshold = 0.5f;
+        for (auto &corner: img_ps_all) {
+            cv::Mat chess_r, chess_t; // chess board in camera
+
+            for (;;) {
+                bool ok = cv::solvePnP(obj_ps_pnp, corner,
+                                       intrinsic_cv_mat, dist_coeffs_mat,
+                                       chess_r, chess_t);
+                assert(ok);
+
+                // rotate corners if needed
+                auto rot_mat = rodrigues_to_mat(chess_r);
+                auto rot_quat = glm::quat_cast(rot_mat);
+                break; // TODO: test the commented code
+
+//                if (!last_rot_quat.has_value()
+//                    // https://math.stackexchange.com/questions/90081/quaternion-distance
+//                    || 1 - glm::dot(rot_quat, *last_rot_quat) < quat_threshold) {
+//                    last_rot_quat = rot_quat;
+//                    break;
+//                } else {
+//                    std::ranges::reverse(corner);
+//                    SPDLOG_DEBUG("Flipped.");
+//                }
+            }
+
+            auto ret_mat = to_transform_mat(to_vec3(chess_t),
+                                            rodrigues_to_mat(chess_r));
+            auto cam_r = to_cv_mat(glm::mat3(ret_mat)); // rotation part
+            auto cam_t = to_cv_mat(to_translation(ret_mat)); // translation part
+            cam_r_vec.push_back(cam_r);
+            cam_t_vec.push_back(cam_t);
+        }
+
+        static constexpr auto ts_offset = -80000; // us
+
+        // build hand-eye calibration problem
+        auto ref_r_vec = std::vector<cv::Mat>(); // tracker in reference
+        auto ref_t_vec = std::vector<cv::Mat>();
+        for (auto &info: img_pool) {
+            auto ref_mat = calc_track_mat(info.sample_ts + ts_offset);
+            ref_mat = glm::inverse(ref_mat);
+            auto r_mat = glm::mat3(ref_mat);
+            auto t_vec = glm::vec3(to_translation(ref_mat)); // fourth column of matrix
+            ref_r_vec.push_back(to_cv_mat(r_mat));
+            ref_t_vec.push_back(to_cv_mat(t_vec));
+        }
+
+        cv::Mat ret_r, ret_t;
+        cv::Mat aux_r, aux_t;
+        cv::calibrateRobotWorldHandEye(
+                cam_r_vec, cam_t_vec, // chess board in camera
+                ref_r_vec, ref_t_vec, // tracker in reference
+                aux_r, aux_t, // tracker in chess board
+                ret_r, ret_t); // reference in camera
+        auto ret_mat = to_transform_mat(to_vec3(ret_t), to_mat3(ret_r));
+        result_mat = glm::inverse(ret_mat); // camera in reference
+
+        // projection error
+        auto aux_mat = to_transform_mat(to_vec3(aux_t), to_mat3(aux_r));
+        auto chess_mat = glm::inverse(aux_mat);  // chess board in tracker
+        auto err_mat = cv::Mat(sample_num, corner_num, CV_32FC2);
+        auto obj_ps_err = std::vector<cv::Point3f>();
+        std::ranges::transform(obj_ps, std::back_inserter(obj_ps_err), [](cv::Vec3f &p) {
+            return cv::Point3f(p[0], p[1], p[2]);
+        });
+        for (auto j = 0; j < sample_num; ++j) {
+            auto &item = img_pool[j];
+            auto proj_mat = ret_mat // reference in camera
+                            * glm::inverse(calc_track_mat(item.sample_ts + ts_offset)) // tracker in reference
+                            * chess_mat; // chess board in tracker
+            auto cam_mat = to_transform_mat( // chess board in camera
+                    to_vec3(cam_t_vec[j]), to_mat3(cam_r_vec[j]));
+            auto loop_mat = glm::inverse(proj_mat) // camera in chess board
+                            * cam_mat; // chess board in camera
+            for (auto i = 0; i < corner_num; ++i) {
+                auto p_gold = to_vec3(obj_ps[i]);
+                auto p_pred = transform_p(loop_mat, p_gold);
+                auto err_3d = glm::distance(p_gold, p_pred);
+                err_mat.at<cv::Vec2f>(j, i)[0] = err_3d;
+            }
+
+            auto proj_r = to_cv_rodigues(glm::mat3(proj_mat));
+            auto proj_t = to_cv_mat(to_translation(proj_mat));
+            auto c_pred = std::vector<cv::Point2f>();
+            cv::projectPoints(obj_ps_err, proj_r, proj_t,
+                              intrinsic_cv_mat, dist_coeffs_mat,
+                              c_pred);
+            assert(c_pred.size() == corner_num);
+            for (auto i = 0; i < corner_num; ++i) {
+                auto c_gold = to_vec2(item.corner[i]);
+                auto err_2d = glm::distance(c_gold, to_vec2(c_pred[i]));
+                err_mat.at<cv::Vec2f>(j, i)[1] = err_2d;
+            }
+        }
+
+        auto err_val = cv::mean(err_mat);
+        obj_reproj_err = err_val[0];
+        SPDLOG_INFO("Average 3D projection error is {:.2f}mm", obj_reproj_err);
+        img_reproj_err = err_val[1];
+        SPDLOG_INFO("Average 2D projection error is {:.2f}pix", img_reproj_err);
+    }
+
+}
+
+camera_calibrator::impl::impl(const create_config &_conf) {
+    conf = _conf;
+    vg = nvgCreateGL3(NVG_ANTIALIAS | NVG_STENCIL_STROKES);
+}
+
+camera_calibrator::impl::~impl() {
+    if (tp != nullptr) {
+        tp->purge();
+        stop();
+    }
+}
+
+void camera_calibrator::impl::start() {
+    // clear last data
+    last_finish.store(nullptr);
+    img_pool.clear();
+    track_pool.clear();
+
+    assert(tp == nullptr);
+    tp = std::make_unique<BS::thread_pool>(conf.num_threads);
+    img_conn = OBJ_SIG(conf.img_name)->connect(
+            [this](auto name) { img_callback(name); });
+}
+
+// Run the hand-eye calibration over the collected samples and, when a
+// sophiar connection is configured, publish the resulting transform
+// under conf.result_transform_var.
+void camera_calibrator::impl::process() {
+    calib = std::make_unique<hand_eye_calib>();
+    calib->img_size = img_size;
+    calib->set_object_points(conf.pattern_size, conf.corner_distance);
+
+    // keep only frames whose chessboard corners were successfully detected
+    auto valid_imgs =
+            img_pool | std::views::filter([](const auto &item) {
+                assert(item.process_finished);
+                return item.corners_detected;
+            }) | std::views::transform([](const auto &item) {
+                assert(!item.corners.empty());
+                return hand_eye_calib::image_store_type(
+                        item.corners, item.sample_ts);
+            });
+    std::ranges::copy(valid_imgs, std::back_inserter(calib->img_pool));
+
+    // TODO: with C++23 this can be done via std::ranges::to
+    for (auto &item: track_pool) {
+        calib->track_pool.emplace(item.sample_ts, item.ref_mat);
+    }
+
+    calib->calc();
+
+    // publish the calibration result to sophiar, if connected
+    if (conf.sophiar_conn != nullptr) {
+        auto ret = Eigen::Isometry3d();
+        to_eigen_transform(calib->result_mat, ret);
+        conf.sophiar_conn->update_transform_variable(
+                conf.result_transform_var, ret);
+    }
+}
+
+// Offline entry point: replay previously saved track/corner samples from
+// disk and run the same calibration pipeline as a live session.
+void camera_calibrator::impl::simulate_process(const simulate_info_type &info) {
+    load_track_data(info.data_path);
+    img_size = info.img_size;
+    process();
+}
+
+// End the capture session: detach from the image signal, wait for all
+// pending worker tasks, persist the raw samples, then run calibration.
+void camera_calibrator::impl::stop() {
+    assert(img_conn.connected());
+    img_conn.disconnect();
+    tp = nullptr; // implicit wait()
+    save_track_data();
+    process();
+}
+
+// Dump the raw calibration samples to "calib_data.txt" in the working
+// directory: first the tracker poses, then the detected image corners.
+// The same layout is read back by load_track_data().
+void camera_calibrator::impl::save_track_data() {
+    auto fout = std::ofstream("calib_data.txt");
+
+    // tracker data: count, then one line per sample (timestamp + matrix)
+    fout << track_pool.size() << std::endl;
+    for (auto &item: track_pool) {
+        fout << item.sample_ts << " ";
+
+        // NOTE(review): typeof is a GCC extension; decltype (with
+        // reference stripped) would be the portable spelling.
+        using mat_type = typeof(item.ref_mat);
+        using value_type = mat_type::value_type;
+        static constexpr auto len = sizeof(mat_type) / sizeof(value_type);
+
+        // write the matrix as a flat list of scalars
+        auto ptr = (value_type *) &item.ref_mat;
+        for (auto i = 0; i < len; ++i) {
+            fout << ptr[i] << " ";
+        }
+        fout << std::endl;
+    }
+    fout << std::endl;
+
+    // camera data: only frames with detected corners are written
+    auto sample_num = std::ranges::count_if(
+            img_pool, [](auto &item) { return item.corners_detected; });
+    fout << sample_num << std::endl;
+    for (auto &item: img_pool) {
+        if (!item.corners_detected) continue;
+        fout << item.sample_ts << " ";
+        for (auto &point: item.corners) {
+            fout << point.x << " " << point.y << " ";
+        }
+        fout << std::endl;
+    }
+    fout << std::endl;
+}
+
+// Load tracker poses and corner samples written by save_track_data().
+// The corner count per frame is derived from conf.pattern_size, so the
+// configuration must match the one used when the file was written.
+void camera_calibrator::impl::load_track_data(const std::string &path) {
+    auto fin = std::ifstream(path);
+
+    // tracker data
+    assert(track_pool.empty());
+    size_t sample_num;
+    fin >> sample_num;
+    for (auto k = 0; k < sample_num; ++k) {
+        auto &item = track_pool.emplace_back();
+        fin >> item.sample_ts;
+
+        // NOTE(review): typeof is a GCC extension; decltype (with
+        // reference stripped) would be the portable spelling.
+        using mat_type = typeof(item.ref_mat);
+        using value_type = mat_type::value_type;
+        static constexpr auto len = sizeof(mat_type) / sizeof(value_type);
+
+        // read the matrix back as a flat list of scalars
+        auto ptr = (value_type *) &item.ref_mat;
+        for (auto i = 0; i < len; ++i) {
+            fin >> ptr[i];
+        }
+    }
+
+    // camera data: mark each loaded frame as fully processed so that
+    // process() accepts it
+    fin >> sample_num;
+    for (auto k = 0; k < sample_num; ++k) {
+        auto &item = img_pool.emplace_back();
+        fin >> item.sample_ts;
+        assert(item.sample_ts != 0);
+        auto corner_num = conf.pattern_size.area();
+        for (auto i = 0; i < corner_num; ++i) {
+            auto &p = item.corners.emplace_back();
+            fin >> p.x >> p.y;
+        }
+        item.corners_detected = true;
+        item.process_finished = true;
+    }
+}
+
+// Generate synthetic calibration data for debugging.
+// NOTE(review): unfinished stub — it only sets up an intrinsic matrix and
+// zero distortion coefficients, then does nothing with them.
+void camera_calibrator::impl::load_fake_data() {
+    auto dist_coeffs = cv::Mat::zeros(1, 8, CV_64FC1);
+    // @formatter:off
+    cv::Mat intrinsic_mat = (cv::Mat_<double>(3, 3) <<
+            1000.f, 0.f, 500.f,
+            0.f, 1000.f, 500.f,
+            0.f, 0.f, 1.f);
+    // @formatter:on
+
+
+
+
+}
+
+// Worker-thread job: detect and refine chessboard corners in one captured
+// frame. On every exit path the entry is marked finished and its image
+// data is released to save memory.
+void camera_calibrator::impl::per_image_process(img_store_type *st) {
+    auto closer = sg::make_scope_guard([&] {
+        st->process_finished = true;
+        // release image to save memory
+        st->img_mat = cv::Mat();
+        st->img = nullptr;
+    });
+
+    // color to gray
+    cv::Mat img_gray;
+    cv::cvtColor(st->img_mat, img_gray, cv::COLOR_BGR2GRAY);
+
+    // find control points (only the chessboard pattern is implemented)
+    assert(conf.board_type == CALIB_CHESS);
+    auto corner_flag = cv::CALIB_CB_ADAPTIVE_THRESH
+                       | cv::CALIB_CB_NORMALIZE_IMAGE
+                       | cv::CALIB_CB_FAST_CHECK;
+    corner_type img_corners;
+    bool ok = cv::findChessboardCorners(img_gray, conf.pattern_size,
+                                        img_corners, corner_flag);
+    if (!ok) {
+        // nothing to overlay for this frame
+        last_finish.store(nullptr, std::memory_order::relaxed);
+        return;
+    }
+    st->corners_detected = true;
+
+    // refine corners to sub-pixel accuracy // https://docs.opencv.org/4.9.0/d2/d1d/samples_2cpp_2lkdemo_8cpp-example.html
+    auto win_size = cv::Size(10, 10);
+    auto zero_zone = cv::Size(-1, -1);
+    auto term_crit = cv::TermCriteria(cv::TermCriteria::COUNT | cv::TermCriteria::EPS, 20, 0.03);
+    cv::cornerSubPix(img_gray, img_corners, win_size, zero_zone, term_crit);
+    st->corners = std::move(img_corners);
+    ++img_ok_cnt;
+}
+
+// Image-signal handler: records the tracker pose for every frame, then
+// (subject to throttling) stores the frame and posts a corner-detection
+// task to the thread pool.
+void camera_calibrator::impl::img_callback(obj_name_type name) {
+    assert(name == conf.img_name);
+    auto img = to_image(name);
+    if (img == nullptr) return;
+
+    // latch the image size on the first frame; it must stay constant
+    if (img_size.empty()) {
+        img_size = img->size();
+    } else {
+        assert(img_size == img->size());
+    }
+
+    // tracking information: drop the frame when no pose is available
+    auto cur_ts = OBJ_TS(conf.img_name);
+    auto ref_t = conf.sophiar_conn
+            ->query_transform_variable(conf.ref_transform_var);
+    if (!ref_t.has_value()) {
+        return;
+    } else {
+        auto &item = track_pool.emplace_back();
+        item.ref_mat = to_mat4(*ref_t);
+        item.sample_ts = cur_ts;
+    }
+
+    // throttle: skip frames while the workers are saturated
+    if (tp->get_tasks_queued() > conf.num_threads - 1) {
+        return;
+    }
+
+    auto &st = img_pool.emplace_back();
+    st.img = img;
+    st.img_mat = img->cv_mat(conf.stream);
+    st.sample_ts = cur_ts;
+
+    // post task; &st stays valid because img_pool is a std::list
+    // (elements are never relocated on growth)
+    tp->detach_task([this, p_st = &st] {
+        per_image_process(p_st);
+        last_finish.store(p_st, std::memory_order::release);
+    });
+}
+
+// Draw the most recently detected corner set as red dots with NanoVG,
+// on top of the current viewport. No-op while not running or when the
+// latest processed frame had no corners.
+void camera_calibrator::impl::render() {
+    if (tp == nullptr) return; // not running
+    auto st = last_finish.load(std::memory_order::acquire);
+    if (st == nullptr) return;
+    assert(st->process_finished);
+    if (!st->corners_detected) return;
+
+    auto size = query_viewport_size();
+    nvgBeginFrame(vg, size.width, size.height, 1.0f);
+    auto closer = sg::make_scope_guard([this] { nvgEndFrame(vg); });
+
+    nvgBeginPath(vg);
+    static constexpr auto r = 5.0f; // dot radius in pixels
+    for (auto &p: st->corners) {
+        nvgCircle(vg, p.x, size.height - p.y, r); // flip Y
+    }
+    nvgPathWinding(vg, NVG_HOLE);
+    nvgFillColor(vg, nvgRGB(255, 0, 0));
+    nvgFill(vg);
+}
+
+// ImGui panel: a Start/Close button plus live counters while running.
+// start()/stop() are posted to the io context so state is not mutated
+// in the middle of a UI frame.
+void camera_calibrator::impl::show() {
+    auto ctx = conf.ctx;
+    bool is_running = (tp != nullptr);
+
+    ImGui::SeparatorText("Actions");
+    if (!is_running) {
+        if (ImGui::Button("Start")) {
+            post(*ctx, [this] { start(); });
+        }
+    } else {
+        if (ImGui::Button("Close")) {
+            post(*ctx, [this] { stop(); });
+        }
+    }
+
+    // Info
+    if (is_running) {
+        ImGui::SeparatorText("Info");
+        assert(tp != nullptr);
+        ImGui::Text("Pending Images: %ld", tp->get_tasks_total());
+        ImGui::Text("Track Count: %ld", track_pool.size());
+        ImGui::Text("Image Count: %d / %ld", (int) img_ok_cnt, img_pool.size());
+    }
+}
+
+// --- public camera_calibrator interface: thin pimpl forwarders ---
+
+camera_calibrator::camera_calibrator(const create_config &conf)
+        : pimpl(std::make_unique<impl>(conf)) {
+}
+
+camera_calibrator::~camera_calibrator() = default;
+
+void camera_calibrator::show() {
+    pimpl->show();
+}
+
+void camera_calibrator::render() {
+    pimpl->render();
+}
+
+void camera_calibrator::simulate_process(const simulate_info_type &info) {
+    pimpl->simulate_process(info);
+}

+ 163 - 0
src/image_process/impl/camera_calibrator_impl.h

@@ -0,0 +1,163 @@
+#ifndef DEPTHGUIDE_CAMERA_CALIBRATOR_IMPL_H
+#define DEPTHGUIDE_CAMERA_CALIBRATOR_IMPL_H
+
+#include "image_process/camera_calibrator.h"
+#include "core/image_utility_v2.h"
+#include "core/math_helper.hpp"
+
+#include <opencv2/core/mat.hpp>
+
+#include <nanovg.h>
+
+#include <BS_thread_pool.hpp>
+
+#include <atomic>
+#include <list>
+
+namespace camera_calibrator_impl {
+
+    // Corner list as produced by cv::findChessboardCorners.
+    using corner_type = std::vector<cv::Point2f>;
+
+    // glm <-> OpenCV conversion helpers (definitions live in the .cpp).
+    cv::Mat to_cv_mat(const glm::mat3 &mat);
+
+    cv::Mat to_cv_mat(const glm::vec3 &vec);
+
+    // Rodrigues rotation vector -> 3x3 rotation matrix.
+    glm::mat3 rodrigues_to_mat(const cv::Mat &vec);
+
+    // 3x3 rotation matrix -> Rodrigues vector.
+    // NOTE(review): "rodigues" is a misspelling of "Rodrigues".
+    cv::Mat to_cv_rodigues(const glm::mat3 &mat);
+
+    glm::mat3 to_mat3(const cv::Mat &mat);
+
+    glm::vec3 to_vec3(const cv::Mat &mat);
+
+    // Extract the translation part of a 4x4 homogeneous transform.
+    glm::vec3 to_translation(const glm::mat4 &mat);
+
+    // Hand-eye calibration engine: consumes timestamped tracker poses and
+    // detected chessboard corners, produces camera intrinsics, distortion
+    // coefficients and the hand-eye transform, plus reprojection errors.
+    struct hand_eye_calib {
+
+        // fill before use
+
+        // tracker poses keyed by sample timestamp (sorted for interpolation)
+        using track_pool_type =
+                std::map<timestamp_type, glm::mat4>;
+        track_pool_type track_pool;
+
+        struct image_store_type {
+            corner_type corner;
+            timestamp_type sample_ts;
+        };
+
+        using image_pool_type =
+                std::vector<image_store_type>;
+        image_pool_type img_pool;
+
+        cv::Size img_size;
+
+        // fill by member functions
+
+        // 3D chessboard corner positions generated by set_object_points()
+        using object_points_type =
+                std::vector<cv::Vec3f>;
+        object_points_type obj_ps;
+
+        // result
+        glm::mat3 intrinsic_mat; // intrinsic matrix
+        using dist_coeffs_type = std::vector<float>;
+        dist_coeffs_type dist_coeffs; // distortion coefficients
+        glm::mat4 result_mat; // result matrix, camera in camera reference
+
+        // result error
+        float obj_reproj_err; // average error of spacial corners after hand-eye calibration, in mm
+        float img_reproj_err; // average error of image corners after hand-eye calibration, in pixel
+
+        void set_object_points(cv::Size size, float dis); // dis: in mm
+
+        // run the calibration; fills the result members above
+        void calc();
+
+    private:
+
+        // interpolate track matrix
+        glm::mat4 calc_track_mat(timestamp_type t);
+
+        // p: in the normalized plane
+        glm::vec2 distort_point(glm::vec2 p);
+
+        // p: in camera coordinate
+        // result: in pixel
+        // duplicate cv::projectPoints
+        glm::vec2 project_point(glm::vec3 p);
+
+        // average distance between two sets of corner points
+        static float corner_distance(const corner_type &c1,
+                                     const corner_type &c2);
+
+    };
+
+}
+
+using namespace camera_calibrator_impl;
+
+// Private implementation of camera_calibrator: collects frames and tracker
+// poses while running, detects corners on a worker pool, and hands the
+// samples to hand_eye_calib when the session stops.
+struct camera_calibrator::impl {
+
+    create_config conf;
+
+    using conn_type = boost::signals2::connection;
+    conn_type img_conn; // subscription to the input image signal
+
+    // per-frame sample; written by the UI thread, processed by a worker
+    struct img_store_type {
+        image_ptr img;
+        cv::Mat img_mat;
+        timestamp_type sample_ts;
+        corner_type corners;
+
+        bool process_finished: 1 = false;
+        bool corners_detected: 1 = false;
+    };
+
+    // std::list keeps element addresses stable while workers hold pointers
+    using img_pool_type = std::list<img_store_type>;
+    img_pool_type img_pool;
+    std::atomic_int img_ok_cnt = 0; // frames with detected corners
+
+    // tracker pose recorded per incoming frame
+    struct track_store_type {
+        glm::mat4 ref_mat;
+        timestamp_type sample_ts;
+    };
+
+    using track_pool_type = std::vector<track_store_type>;
+    track_pool_type track_pool;
+
+    // most recently processed frame, published by workers for render()
+    std::atomic<img_store_type *> last_finish = nullptr;
+    NVGcontext *vg = nullptr; // NanoVG context for the corner overlay
+
+    std::unique_ptr<BS::thread_pool> tp; // thread pool
+
+    cv::Size img_size;
+    std::unique_ptr<hand_eye_calib> calib;
+
+    explicit impl(const create_config &conf);
+
+    ~impl();
+
+    void per_image_process(img_store_type *st);
+
+    void img_callback(obj_name_type name);
+
+    void save_track_data();
+
+    void load_track_data(const std::string &path);
+
+    // generate sample data for debug
+    void load_fake_data();
+
+    void process(); // do calibration
+
+    void simulate_process(const simulate_info_type &info);
+
+    void start();
+
+    void stop();
+
+    void render();
+
+    void show();
+
+};
+
+#endif //DEPTHGUIDE_CAMERA_CALIBRATOR_IMPL_H

+ 4 - 1
src/image_process/impl/process_funcs.cpp

@@ -142,6 +142,7 @@ namespace shrink_pc_zero_depth {
         size_t out_num = 0;
         pc_filter::shrink_zero_depth(in_info, pc_out->cuda<PointT>(stream),
                                      &out_num, stream->cuda);
+        if (out_num == 0) return nullptr;
         pc_out->shrink(out_num);
         pc_out->cuda_modified(stream);
         return pc_out;
@@ -175,7 +176,9 @@ namespace shrink_pc_zero_depth {
         }
 
         // copy meta info
-        pc_out->merge_from(conf.in.get());
+        if (pc_out != nullptr) {
+            pc_out->merge_from(conf.in.get());
+        }
 
         *conf.out = pc_out;
     }

+ 57 - 41
src/impl/apps/debug/app_debug.cpp

@@ -5,48 +5,64 @@
 #include <opencv2/cudastereo.hpp>
 #include <opencv2/cudaimgproc.hpp>
 
-app_debug::app_debug(const create_config &conf) {
-
-    auto left_path = "/home/tpx/project/DepthGuide/cmake-build-debug/Left.png";
-    auto right_path = "/home/tpx/project/DepthGuide/cmake-build-debug/Right.png";
-
-    auto left_img = cv::imread(left_path);
-    auto right_img = cv::imread(right_path);
-
-    auto left_img_cuda = cv::cuda::GpuMat();
-    auto right_img_cuda = cv::cuda::GpuMat();
-
-    left_img_cuda.upload(left_img);
-    right_img_cuda.upload(right_img);
+//app_debug::app_debug(const create_config &conf) {
+//
+//    auto left_path = "/home/tpx/project/DepthGuide/cmake-build-debug/Left.png";
+//    auto right_path = "/home/tpx/project/DepthGuide/cmake-build-debug/Right.png";
+//
+//    auto left_img = cv::imread(left_path);
+//    auto right_img = cv::imread(right_path);
+//
+//    auto left_img_cuda = cv::cuda::GpuMat();
+//    auto right_img_cuda = cv::cuda::GpuMat();
+//
+//    left_img_cuda.upload(left_img);
+//    right_img_cuda.upload(right_img);
+//
+//    // convert to gray
+//    cv::cuda::cvtColor(left_img_cuda, left_img_cuda, cv::COLOR_RGB2GRAY);
+//    cv::cuda::cvtColor(right_img_cuda, right_img_cuda, cv::COLOR_RGB2GRAY);
+//
+//    auto disparity_cuda = cv::cuda::GpuMat();
+//    auto stereo = cv::cuda::createStereoSGM();
+//    stereo->setNumDisparities(128);
+////    stereo->setMode(cv::StereoSGBM::MODE_HH);
+//    stereo->compute(left_img_cuda, right_img_cuda, disparity_cuda);
+//
+//    auto filter = cv::cuda::createDisparityBilateralFilter();
+//    filter->setNumDisparities(128);
+//    filter->setRadius(5);
+//    filter->setNumIters(3);
+//    filter->apply(disparity_cuda, left_img_cuda, disparity_cuda);
+//
+//    auto disparity = cv::Mat();
+//    disparity_cuda.download(disparity);
+//
+//    double min_val, max_val;
+//    cv::minMaxLoc(disparity, &min_val, &max_val);
+//    SPDLOG_INFO("Min: {}, Max: {}", min_val, max_val);
+//
+//    auto tmp = cv::Mat();
+//    disparity.convertTo(tmp, CV_32FC1);
+//    tmp = (tmp - min_val) / (max_val - min_val) * 255.f;
+//    tmp.convertTo(disparity, CV_8UC1);
+//    cv::imwrite("disparity.png", disparity);
+//
+//    glfwSetWindowShouldClose(glfwGetCurrentContext(), true);
+//}
 
-    // covert to gray
-    cv::cuda::cvtColor(left_img_cuda, left_img_cuda, cv::COLOR_RGB2GRAY);
-    cv::cuda::cvtColor(right_img_cuda, right_img_cuda, cv::COLOR_RGB2GRAY);
+#include "image_process/camera_calibrator.h"
 
-    auto disparity_cuda = cv::cuda::GpuMat();
-    auto stereo = cv::cuda::createStereoSGM();
-    stereo->setNumDisparities(128);
-//    stereo->setMode(cv::StereoSGBM::MODE_HH);
-    stereo->compute(left_img_cuda, right_img_cuda, disparity_cuda);
-
-    auto filter = cv::cuda::createDisparityBilateralFilter();
-    filter->setNumDisparities(128);
-    filter->setRadius(5);
-    filter->setNumIters(3);
-    filter->apply(disparity_cuda, left_img_cuda, disparity_cuda);
-
-    auto disparity = cv::Mat();
-    disparity_cuda.download(disparity);
-
-    double min_val, max_val;
-    cv::minMaxLoc(disparity, &min_val, &max_val);
-    SPDLOG_INFO("Min: {}, Max: {}", min_val, max_val);
-
-    auto tmp = cv::Mat();
-    disparity.convertTo(tmp, CV_32FC1);
-    tmp = (tmp - min_val) / (max_val - min_val) * 255.f;
-    tmp.convertTo(disparity, CV_8UC1);
-    cv::imwrite("disparity.png", disparity);
+app_debug::app_debug(const create_config &conf) {
+    auto calib_conf = camera_calibrator::create_config{
+            .pattern_size = cv::Size(11, 8),
+            .corner_distance = 5,
+    };
+    auto calib = std::make_unique<camera_calibrator>(calib_conf);
 
-    glfwSetWindowShouldClose(glfwGetCurrentContext(), true);
+    auto sim_info = camera_calibrator::simulate_info_type{
+            .data_path = "/home/tpx/project/DepthGuide/cmake-build-debug/calib_data.txt",
+            .img_size = cv::Size(1920, 1080),
+    };
+    calib->simulate_process(sim_info);
 }

+ 1 - 1
src/impl/apps/depth_guide/depth_guide.cpp

@@ -15,7 +15,7 @@ app_depth_guide::app_depth_guide(const create_config &_conf) {
     OBJ_SAVE(img_depth_fake, image_u8c3());
     auto fake_info = fake_color_config{.mode = FAKE_800P, .lower = 200, .upper = 1000};
     OBJ_SAVE(img_depth_fake_info, versatile_convertor_impl::encode_config(fake_info));
-    OBJ_SAVE(img_out, image_u8c4());
+    OBJ_SAVE(img_out, image_ptr());
     OBJ_SAVE(img_obj_mask, image_ptr());
     OBJ_SAVE(pc_raw, pc_ptr());
     OBJ_SAVE(pc_obj_raw, pc_ptr());

+ 77 - 3
src/impl/apps/endo_guide/endo_guide.cpp

@@ -2,14 +2,23 @@
 #include "core/imgui_utility.hpp"
 
 app_endo_guide::app_endo_guide(const create_config &_conf) {
-    conf = _conf;
+    main_conf = _conf;
+    auto conf = main_conf.ext_conf;
 
     // initialize object manager
     OBJ_SAVE(img_color, image_u8c3());
+    OBJ_SAVE(img_out, image_ptr());
+
+    // initialize sophiar
+    sophiar_thread = std::make_unique<std::thread>([conf_path = LOAD_STR("sophiar_config")] {
+        sophiar::run_sophiar(conf_path);
+    });
+    sophiar_conn = std::make_unique<sophiar_conn_type>();
+    sophiar_start_var = LOAD_STR("sophiar_start_var");
 
     // initialize modules
     auto uvc_cam_conf = uvc_camera_ui::create_config{
-            .img_name = img_color, .ctx = conf.asio_ctx,
+            .img_name = img_color, .ctx = main_conf.asio_ctx,
     };
     uvc_cam = std::make_unique<uvc_camera_ui>(uvc_cam_conf);
 
@@ -19,8 +28,37 @@ app_endo_guide::app_endo_guide(const create_config &_conf) {
     };
     auto &bg_extra_conf = bg_viewer_conf.extra.color;
     bg_extra_conf.fmt = COLOR_RGB;
-    bg_extra_conf.name = img_color;
+    bg_extra_conf.name = img_out;
     bg_viewer = std::make_unique<image_viewer>(bg_viewer_conf);
+
+    auto saver_conf = image_saver::create_config{.ctx = main_conf.asio_ctx};
+    saver_conf.img_list.emplace_back("Image", img_color);
+    out_saver = std::make_unique<image_saver>(saver_conf);
+
+    auto calib_conf = camera_calibrator::create_config{
+            .img_name = img_color,
+            .board_type = camera_calibrator::CALIB_CHESS,
+            .pattern_size = cv::Size(11, 8), .corner_distance = 5,
+            .stream = default_cuda_stream, .ctx = main_conf.asio_ctx,
+            .sophiar_conn = sophiar_conn.get(),
+            .ref_transform_var = LOAD_STR("camera_ref_transform_var"),
+    };
+    cam_calib = std::make_unique<camera_calibrator>(calib_conf);
+
+    auto helper_conf = image_augment_helper_v2::create_config{
+            .img_name = img_color, .remap_name = invalid_obj_name,
+            .out_name = img_out, .img_flip_y = false,
+            .manager = nullptr, .stream = default_cuda_stream,
+            .ctx = main_conf.asio_ctx, .sophiar_conn = nullptr,
+    };
+    aug_helper = std::make_unique<image_augment_helper_v2>(helper_conf);
+    aug_helper->post_render_sig.connect([this] { cam_calib->render(); });
+
+    auto monitor_conf = sophiar_monitor::create_config{
+            .item_list = sophiar_monitor::item_list_from_yaml(LOAD_LIST("monitor")),
+            .sophiar_conn = sophiar_conn.get(),
+    };
+    monitor = std::make_unique<sophiar_monitor>(monitor_conf);
 }
 
 void app_endo_guide::show_ui() {
@@ -31,10 +69,46 @@ void app_endo_guide::show_ui() {
             auto id_guard = imgui_id_guard("camera");
             uvc_cam->show();
         }
+
+        if (ImGui::CollapsingHeader("Tracker")) {
+            if (ImGui::Button("Start")) {
+                start_tracking();
+            }
+            monitor->show();
+        }
+
+        if (ImGui::CollapsingHeader("Calibration")) {
+            auto id_guard = imgui_id_guard("calibration");
+            cam_calib->show();
+        }
+
+        if (ImGui::CollapsingHeader("Debug")) {
+            if (ImGui::TreeNode("Image Saver")) {
+                out_saver->show();
+                ImGui::TreePop();
+            }
+
+        }
     }
     ImGui::End();
 }
 
 void app_endo_guide::render_background() {
     bg_viewer->render();
+}
+
+// Start the configured sophiar tracking object without blocking the UI.
+void app_endo_guide::start_tracking() {
+    // sophiar_conn->start_object(sophiar_start_var);
+    // work in another thread to prevent blocking
+    // NOTE(review): the thread is detached and captures `this` (via the
+    // member string); confirm it cannot outlive the app object.
+    auto t = std::thread([this] {
+        auto conn = sophiar::local_connection();
+        conn.start_object(sophiar_start_var);
+    });
+    t.detach();
+}
+
+// Shut down the embedded sophiar runtime and join the thread that runs it.
+app_endo_guide::~app_endo_guide() {
+    // sophiar
+    sophiar::stop_sophiar();
+    sophiar_thread->join();
+}

+ 20 - 2
src/impl/apps/endo_guide/endo_guide.h

@@ -3,15 +3,19 @@
 
 #include "core/object_manager.h"
 #include "device/uvc_camera_ui.h"
+#include "module/image_augment_helper_v2.h"
+#include "module/image_saver.h"
 #include "module/image_viewer.h"
+#include "module/sophiar_monitor.h"
 #include "impl/app_base.h"
+#include "image_process/camera_calibrator.h"
 
 class app_endo_guide : public app_base {
 public:
 
     explicit app_endo_guide(const create_config &conf);
 
-    ~app_endo_guide() override = default;
+    ~app_endo_guide() override;
 
     const char *window_name() override { return "EndoGuide V1.-1"; }
 
@@ -21,17 +25,31 @@ public:
 
 private:
 
+    void start_tracking();
+
     enum obj_names : object_manager::name_type {
 
         // images from device
         img_color,
 
+        // image output
+        img_out
     };
 
-    create_config conf;
+    create_config main_conf;
+
+    // sophiar
+    std::unique_ptr<sophiar_conn_type> sophiar_conn;
+    std::unique_ptr<std::thread> sophiar_thread;
+    std::string sophiar_start_var;
 
+    // modules
     std::unique_ptr<uvc_camera_ui> uvc_cam;
+    std::unique_ptr<camera_calibrator> cam_calib;
+    std::unique_ptr<image_augment_helper_v2> aug_helper;
     std::unique_ptr<image_viewer> bg_viewer;
+    std::unique_ptr<image_saver> out_saver;
+    std::unique_ptr<sophiar_monitor> monitor;
 };
 
 #endif //DEPTHGUIDE_ENDO_GUIDE_H

+ 40 - 0
src/module/image_augment_helper_v2.h

@@ -0,0 +1,40 @@
+#ifndef DEPTHGUIDE_IMAGE_AUGMENT_HELPER_V2_H
+#define DEPTHGUIDE_IMAGE_AUGMENT_HELPER_V2_H
+
+#include "core/object_manager.h"
+#include "module/augment_manager_v2.h"
+
+#include <memory>
+
+// Renders an incoming image object into an offscreen buffer, lets
+// subscribers of post_render_sig draw overlays on top, and republishes
+// the composed image under a new object name.
+class image_augment_helper_v2 {
+public:
+
+    struct create_config {
+        obj_name_type img_name;
+        obj_name_type remap_name; // CV_32FC2, pixel -> undistorted normalized plane
+        obj_name_type out_name; // image output, image_ptr, CV_8UC3
+        bool img_flip_y = true;
+        augment_manager_v2 *manager;
+        smart_cuda_stream *stream;
+
+        // for camera augment helper
+        using io_context = boost::asio::io_context;
+        io_context *ctx = nullptr;
+        sophiar_conn_type *sophiar_conn = nullptr;
+    };
+
+    explicit image_augment_helper_v2(create_config conf);
+
+    ~image_augment_helper_v2();
+
+    // draw the most recently composed output image to the current viewport
+    void render();
+
+    // fired after the base image is rendered, before the FBO is downloaded;
+    // connect here to draw overlays into the output image
+    using render_sig_type = boost::signals2::signal<void()>;
+    render_sig_type post_render_sig;
+
+private:
+    struct impl;
+    std::unique_ptr<impl> pimpl;
+};
+
+#endif //DEPTHGUIDE_IMAGE_AUGMENT_HELPER_V2_H

+ 3 - 1
src/module/impl/image_augment_helper.cpp

@@ -86,7 +86,9 @@ stereo_augment_helper::impl::impl(const create_config &_conf) {
 }
 
 void stereo_augment_helper::impl::process() {
-    auto img_size = to_image(conf.left_name)->size();
+    auto img = to_image(conf.left_name);
+    if (img == nullptr) return;
+    auto img_size = img->size();
     if (img_size.empty()) return;
     if (ui.follow_image_size) {
         auto fbo_size = cv::Size(img_size.width * 2, img_size.height);

+ 58 - 0
src/module/impl/image_augment_helper_v2.cpp

@@ -0,0 +1,58 @@
+#include "image_augment_helper_v2_impl.h"
+#include "third_party/scope_guard.hpp"
+
+// Store the config, prepare an FBO config with a combined depth+stencil
+// attachment, create the viewport downloader and subscribe to the input
+// image signal.
+image_augment_helper_v2::impl::impl(create_config _conf) {
+    conf = _conf;
+    fbo_conf = fbo_conf_type{
+            .depth_fmt = GL_DEPTH24_STENCIL8,
+    };
+    img_down = std::make_unique<viewport_downloader>(
+            viewport_downloader::create_config{.stream = conf.stream});
+    img_conn = OBJ_SIG(conf.img_name)->connect(
+            [this](auto name) { img_callback(name); });
+}
+
+// Unsubscribe from the image signal so no callback fires during teardown.
+image_augment_helper_v2::impl::~impl() {
+    img_conn.disconnect();
+}
+
+// Image-signal handler: render the incoming frame into the offscreen FBO,
+// fire post_render_sig so overlays can draw on top, then download the
+// composed result and publish it under conf.out_name.
+void image_augment_helper_v2::impl::img_callback(obj_name_type name) {
+    assert(name == conf.img_name);
+
+    auto img = to_image(name);
+    if (img == nullptr) return;
+    // (re)create the FBO to match the incoming image size
+    fbo_conf.size = img->size();
+    fbo.create(fbo_conf);
+    fbo.bind();
+    auto closer = sg::make_scope_guard([this] { fbo.unbind(); });
+
+    auto img_conf = color_image_render::config_type{
+            .flip_y = conf.img_flip_y, .stream = conf.stream,
+    };
+    img_ren.render_v2(img, img_conf);
+
+    assert(conf.manager == nullptr); // not implemented
+
+    q_this->post_render_sig();
+    output_img = img_down->download_v2(COLOR_RGB);
+    OBJ_SAVE(conf.out_name, output_img);
+}
+
+// Draw the last composed output image (if any) to the current viewport.
+void image_augment_helper_v2::impl::render() {
+    if (output_img == nullptr) return;
+    auto img_conf = color_image_render::config_type{
+            .flip_y = false, .stream = conf.stream,
+    };
+    out_ren.render_v2(output_img, img_conf);
+}
+
+// Public interface: construct the pimpl and give it a back-pointer so the
+// impl can fire post_render_sig on the public object.
+image_augment_helper_v2::image_augment_helper_v2(create_config conf)
+        : pimpl(std::make_unique<impl>(conf)) {
+    pimpl->q_this = this;
+}
+
+image_augment_helper_v2::~image_augment_helper_v2() = default;
+
+void image_augment_helper_v2::render() {
+    pimpl->render();
+}

+ 34 - 0
src/module/impl/image_augment_helper_v2_impl.h

@@ -0,0 +1,34 @@
+#ifndef DEPTHGUIDE_IMAGE_AUGMENT_HELPER_V2_IMPL_H
+#define DEPTHGUIDE_IMAGE_AUGMENT_HELPER_V2_IMPL_H
+
+#include "module/image_augment_helper_v2.h"
+#include "module/viewport_downloader.hpp"
+#include "render/render_tools.h"
+
+// Private implementation state of image_augment_helper_v2.
+struct image_augment_helper_v2::impl {
+
+    // back-pointer to the public object (set right after construction);
+    // needed to fire post_render_sig
+    image_augment_helper_v2 *q_this = nullptr;
+    create_config conf;
+    obj_conn_type img_conn; // subscription to the input image signal
+
+    // offscreen framebuffer used to compose image + overlays
+    using fbo_conf_type = smart_frame_buffer::create_config;
+    fbo_conf_type fbo_conf;
+    smart_frame_buffer fbo;
+
+    color_image_render img_ren; // renders the input image into the FBO
+    std::unique_ptr<viewport_downloader> img_down;
+
+    image_ptr output_img; // last composed output frame
+    color_image_render out_ren; // renders the output to the viewport
+
+    explicit impl(create_config conf);
+
+    ~impl();
+
+    void img_callback(obj_name_type name);
+
+    void render();
+
+};
+
+#endif //DEPTHGUIDE_IMAGE_AUGMENT_HELPER_V2_IMPL_H

+ 59 - 0
src/module/impl/sophiar_monitor.cpp

@@ -0,0 +1,59 @@
+#include "module/sophiar_monitor.h"
+#include "core/imgui_utility.hpp"
+
+// Private implementation: renders one colored checkbox per monitored
+// sophiar transform variable.
+struct sophiar_monitor::impl {
+
+    create_config conf;
+
+    explicit impl(const create_config &_conf) {
+        conf = _conf;
+    }
+
+    // Show one item: green when the variable currently resolves, red
+    // otherwise. The checkbox is bound to a local bool, so it is
+    // display-only; items share a line except after the last entry.
+    void show_item(const std::string &var_name,
+                   const std::string &show_name,
+                   bool last = false) {
+        auto var = conf.sophiar_conn->query_transform_variable(var_name);
+        bool var_ok = var.has_value();
+        if (var_ok) {
+            ImGui::PushStyleColor(ImGuiCol_Text, (ImVec4) ImColor(0, 255, 0));
+        } else {
+            ImGui::PushStyleColor(ImGuiCol_Text, (ImVec4) ImColor(255, 0, 0));
+        }
+        ImGui::Checkbox(show_name.c_str(), &var_ok);
+        ImGui::PopStyleColor();
+        if (!last) {
+            ImGui::SameLine();
+        }
+    }
+
+    // Render all configured items in order.
+    void show() {
+        for (auto k = 0; k < conf.item_list.size(); ++k) {
+            auto &item = conf.item_list[k];
+            show_item(item.var_name, item.disp_name,
+                      (k + 1) == conf.item_list.size());
+        }
+    }
+
+};
+
+// Public interface: construct/destroy the pimpl.
+sophiar_monitor::sophiar_monitor(const create_config &conf)
+        : pimpl(std::make_unique<impl>(conf)) {
+}
+
+sophiar_monitor::~sophiar_monitor() = default;
+
+// Build an item list from a YAML sequence; each entry supplies "name"
+// (display label) and "var" (sophiar variable name).
+// NOTE(review): the per-entry local is named `conf` — presumably the
+// LOAD_STR macro reads from a variable of that name; confirm.
+sophiar_monitor::item_list_type
+sophiar_monitor::item_list_from_yaml(const YAML::Node &_conf) {
+    auto item_num = _conf.size();
+    auto ret = item_list_type(item_num);
+    for (auto k = 0; k < item_num; ++k) {
+        auto conf = _conf[k];
+        ret[k].disp_name = LOAD_STR("name");
+        ret[k].var_name = LOAD_STR("var");
+    }
+    return ret;
+}
+
+// Render the monitor UI (forwards to the pimpl).
+void sophiar_monitor::show() {
+    pimpl->show();
+}

+ 38 - 0
src/module/sophiar_monitor.h

@@ -0,0 +1,38 @@
+#ifndef DEPTHGUIDE_SOPHIAR_MONITOR_H
+#define DEPTHGUIDE_SOPHIAR_MONITOR_H
+
+#include "core/yaml_utility.hpp"
+#include "core/local_connection.h" // sophiar
+
+#include <memory>
+#include <vector>
+
+// Small ImGui widget that shows, for a configured list of sophiar
+// transform variables, whether each one currently resolves.
+class sophiar_monitor {
+public:
+
+    struct create_config {
+        struct item_type {
+            std::string disp_name; // label shown in the UI
+            std::string var_name; // sophiar transform variable to query
+        };
+        using item_list_type = std::vector<item_type>;
+        item_list_type item_list;
+        sophiar_conn_type *sophiar_conn;
+    };
+
+    using item_list_type = create_config::item_list_type;
+
+    // parse an item list from a YAML sequence of {name, var} entries
+    static item_list_type item_list_from_yaml(const YAML::Node &conf);
+
+    explicit sophiar_monitor(const create_config &conf);
+
+    ~sophiar_monitor();
+
+    // render the checkbox row; call once per UI frame
+    void show();
+
+private:
+    struct impl;
+    std::unique_ptr<impl> pimpl;
+};
+
+#endif //DEPTHGUIDE_SOPHIAR_MONITOR_H

+ 8 - 3
src/render/impl/render_utility.cpp

@@ -370,12 +370,17 @@ void smart_frame_buffer::create_impl(const create_config &conf) {
     if (conf.depth_fmt != GL_NONE) {
         depth_tex.create(conf.depth_fmt, size);
         glBindTexture(GL_TEXTURE_2D, depth_tex.id);
-        glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, depth_tex.id, 0);
-        clear_flag |= GL_DEPTH_BUFFER_BIT;
+        if (conf.depth_fmt == GL_DEPTH24_STENCIL8 || conf.depth_fmt == GL_DEPTH32F_STENCIL8) {
+            glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_TEXTURE_2D, depth_tex.id, 0);
+            clear_flag |= GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT;
+        } else {
+            glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, depth_tex.id, 0);
+            clear_flag |= GL_DEPTH_BUFFER_BIT;
+        }
     }
 
     // stencil texture
-    if (conf.stencil_fmt != GL_NONE) {
+    if (conf.stencil_fmt != GL_NONE) { // TODO: find out the problem with GL_STENCIL_INDEX8
         stencil_tex.create(conf.stencil_fmt, size);
         glBindTexture(GL_TEXTURE_2D, stencil_tex.id);
         glFramebufferTexture2D(GL_FRAMEBUFFER, GL_STENCIL_ATTACHMENT, GL_TEXTURE_2D, stencil_tex.id, 0);