diff --git a/README.md b/README.md index 859930cc..60a0d4eb 100644 --- a/README.md +++ b/README.md @@ -69,6 +69,9 @@ The LibMultiSense C++ and Python library has been tested with the following oper - [IMU Data Streaming](#imu-data-streaming) - [Python](#python-9) - [C++](#c-9) +- [Query Camera Calibration](#query-camera-calibration) + - [Python](#python-10) + - [C++](#c-10) ## Client Networking Prerequisite @@ -1064,3 +1067,104 @@ int main(int argc, char** argv) return 0; } ``` + +--- + +## Query Camera Calibration + +The camera's internal stereo calibration can be queried from the MultiSense. This calibration corresponds to the +full-resolution operating mode of the camera and can be used to rectify raw images or project 3D points. + +### Python + +```python +import libmultisense as lms + +def main(): + channel_config = lms.ChannelConfig() + channel_config.ip_address = "10.66.171.21" + + with lms.Channel.create(channel_config) as channel: + if not channel: + print("Invalid channel") + exit(1) + + # Query the camera calibration. NOTE this is for the full resolution operating mode.
Each frame + # also contains a scaled calibration which can be easier to handle depending on the application + calibration = channel.get_calibration() + + # Print the intrinsic matrix (K) for the left camera + print("Left Camera Intrinsic Matrix (K):") + print(calibration.left.K) + + # Print the rectified projection matrix (P) for the left camera + print("Left Camera Rectified Projection Matrix (P):") + print(calibration.left.P) + + # Print the distortion coefficients (D) for the left camera + print("Left Camera Distortion Coefficients (D):") + print(calibration.left.D) + + # Access aux camera calibration if present + if calibration.aux is not None: + print("Aux Camera Intrinsic Matrix (K):") + print(calibration.aux.K) + + # Create a Q matrix to convert disparity pixels to 3D point clouds + Q = lms.QMatrix(calibration.left, calibration.right) + print(Q.matrix()) + +if __name__ == "__main__": + main() +``` + +### C++ + +```c++ +#include <MultiSense/MultiSenseChannel.hh> +#include <MultiSense/MultiSenseUtilities.hh> +#include <iostream> + +namespace lms = multisense; + +int main(int argc, char** argv) +{ + const auto channel = lms::Channel::create(lms::Channel::Config{"10.66.171.21"}); + if (!channel) + { + std::cerr << "Failed to create channel" << std::endl; + return 1; + } + + // Query the camera calibration. NOTE this is for the full resolution operating mode.
Each frame also contains + // a scaled calibration which can be easier to handle depending on the application + const auto calibration = channel->get_calibration(); + + // Access intrinsic matrix (K) for the left camera + std::cout << "Left Camera Intrinsic Matrix (K):" << std::endl; + for (const auto& row : calibration.left.K) + { + for (float val : row) + { + std::cout << val << " "; + } + std::cout << std::endl; + } + + // Access rectified projection matrix (P) for the left camera + std::cout << "Left Camera Rectified Projection Matrix (P):" << std::endl; + for (const auto& row : calibration.left.P) + { + for (float val : row) + { + std::cout << val << " "; + } + std::cout << std::endl; + } + + // Generate a Q matrix to convert disparity points to 3D point clouds + const auto Q = lms::QMatrix(calibration.left, calibration.right); + + return 0; +} +``` diff --git a/python/bindings.cc b/python/bindings.cc index 9998eaa9..16a0c000 100644 --- a/python/bindings.cc +++ b/python/bindings.cc @@ -777,7 +777,8 @@ PYBIND11_MODULE(_libmultisense, m) { // Utilities py::class_<multisense::QMatrix>(m, "QMatrix") .def(py::init<const multisense::CameraCalibration&, const multisense::CameraCalibration&>()) - .def("reproject", &multisense::QMatrix::reproject); + .def("reproject", &multisense::QMatrix::reproject) + .def("matrix", &multisense::QMatrix::matrix); py::class_(m, "Pixel") .def(py::init<>()) @@ -1073,4 +1074,6 @@ PYBIND11_MODULE(_libmultisense, m) { m.def("create_bgr_from_ycbcr420", &multisense::create_bgr_from_ycbcr420, py::call_guard<py::gil_scoped_release>()); m.def("create_bgr_image", &multisense::create_bgr_image, py::call_guard<py::gil_scoped_release>()); + + m.def("scale_calibration", &multisense::scale_calibration, py::call_guard<py::gil_scoped_release>()); } diff --git a/source/LibMultiSense/details/utilities.cc b/source/LibMultiSense/details/utilities.cc index 7ca321c3..d084b33b 100644 --- a/source/LibMultiSense/details/utilities.cc +++ b/source/LibMultiSense/details/utilities.cc @@ -496,6 +496,23 @@ std::optional<Image> create_bgr_image(const ImageFrame &frame, const DataSource return std::nullopt; } +CameraCalibration
scale_calibration(CameraCalibration calibration, double scale) +{ + calibration.K[0][0] *= scale; + calibration.K[0][2] *= scale; + calibration.K[1][1] *= scale; + calibration.K[1][2] *= scale; + + calibration.P[0][0] *= scale; + calibration.P[0][2] *= scale; + calibration.P[0][3] *= scale; + calibration.P[1][1] *= scale; + calibration.P[1][2] *= scale; + calibration.P[1][3] *= scale; + + return calibration; +} + std::optional<PointCloud<void>> create_pointcloud(const ImageFrame &frame, double max_range, const DataSource &disparity_source) diff --git a/source/LibMultiSense/include/MultiSense/MultiSenseUtilities.hh b/source/LibMultiSense/include/MultiSense/MultiSenseUtilities.hh index f2200822..8198600c 100644 --- a/source/LibMultiSense/include/MultiSense/MultiSenseUtilities.hh +++ b/source/LibMultiSense/include/MultiSense/MultiSenseUtilities.hh @@ -112,14 +112,14 @@ public: fy_(reference_cal.P[1][1]), cx_(reference_cal.P[0][2]), cy_(reference_cal.P[1][2]), - tx_(matching_cal.P[0][3] / matching_cal.P[0][0]), + tx_(static_cast<double>(matching_cal.P[0][3]) / static_cast<double>(matching_cal.P[0][0])), cx_prime_(matching_cal.P[0][2]), fytx_(fy_ * tx_), fxtx_(fx_ * tx_), fycxtx_(fy_ * cx_ * tx_), fxcytx_(fx_ * cy_ * tx_), fxfytx_(fx_ * fy_ * tx_), - fycxcxprime_(fy_ * (cx_ - cx_prime_)) + fycxcxprime_(fy_ * (static_cast<double>(cx_) - static_cast<double>(cx_prime_))) { } + + std::array<std::array<double, 4>, 4> matrix() const + { + // + // See https://docs.carnegierobotics.com/cookbook/overview.html#reproject-disparity-images-to-3d-point-clouds + // + return std::array<std::array<double, 4>, 4>{{{fytx_, 0.0, 0.0, -fycxtx_}, + {0.0, fxtx_, 0.0, -fxcytx_}, + {0.0, 0.0, 0.0, fxfytx_}, + {0.0, 0.0, -fy_, fycxcxprime_}}}; + } + private: double fx_ = 0.0; double fy_ = 0.0; @@ -223,6 +235,16 @@ MULTISENSE_API std::optional<Image> create_bgr_from_ycbcr420(const Image &luma, MULTISENSE_API std::optional<Image> create_bgr_image(const ImageFrame &frame, const DataSource &output_source); +///
+/// @brief Scale a calibration to align with a different operating resolution +/// +/// @param calibration The input calibration to scale +/// @param scale The scale factor to apply to the calibration +/// +/// @return A scaled calibration +/// +MULTISENSE_API CameraCalibration scale_calibration(CameraCalibration calibration, double scale); + /// /// @brief Create a point cloud from a image frame and a color source. /// diff --git a/source/LibMultiSense/test/multisense_utilities_test.cc b/source/LibMultiSense/test/multisense_utilities_test.cc index efa5ac0c..3ab451e2 100644 --- a/source/LibMultiSense/test/multisense_utilities_test.cc +++ b/source/LibMultiSense/test/multisense_utilities_test.cc @@ -147,6 +147,103 @@ TEST(QMatrix, reproject) EXPECT_NEAR(z, repojected_pt.z, epsilon); } +TEST(QMatrix, matrix) +{ + const float fx = 1500.0; + const float fy = 1000.0; + const float cx = 960.0; + const float cy = 600.0; + const double tx = -0.27; + + // Use double for the product to ensure we get an exact -405.0f + const float fxtx = static_cast<float>(static_cast<double>(fx) * tx); + + CameraCalibration left_calibration{ + {{{fx, 0.0, cx}, {0.0, fy, cy}, {0.0, 0.0, 1.0}}}, + {{{1.0, 0.0, 0.0}, {0.0, 1.0, 0.0}, {0.0, 0.0, 1.0}}}, + {{{fx, 0.0, cx, 0.0}, {0.0, fy, cy, 0.0}, {0.0, 0.0, 1.0, 0.0}}}, + CameraCalibration::DistortionType::NONE, + {}}; + + CameraCalibration right_calibration{ + {{{fx, 0.0, cx}, {0.0, fy, cy}, {0.0, 0.0, 1.0}}}, + {{{1.0, 0.0, 0.0}, {0.0, 1.0, 0.0}, {0.0, 0.0, 1.0}}}, + {{{fx, 0.0, cx, fxtx}, {0.0, fy, cy, 0.0}, {0.0, 0.0, 1.0, 0.0}}}, + CameraCalibration::DistortionType::NONE, + {}}; + + QMatrix q{left_calibration, right_calibration}; + + const auto matrix = q.matrix(); + + const double expected_fytx = static_cast<double>(fy) * tx; + const double expected_fxtx = static_cast<double>(fx) * tx; + const double expected_fycxtx = static_cast<double>(fy) * static_cast<double>(cx) * tx; + const double expected_fxcytx = static_cast<double>(fx) * static_cast<double>(cy) * tx; + const double expected_fxfytx =
static_cast<double>(fx) * static_cast<double>(fy) * tx; + const double expected_fycxcxprime = 0.0; + + const double epsilon = 1e-6; + + EXPECT_NEAR(matrix[0][0], expected_fytx, epsilon); + EXPECT_NEAR(matrix[0][1], 0.0, epsilon); + EXPECT_NEAR(matrix[0][2], 0.0, epsilon); + EXPECT_NEAR(matrix[0][3], -expected_fycxtx, epsilon); + + EXPECT_NEAR(matrix[1][0], 0.0, epsilon); + EXPECT_NEAR(matrix[1][1], expected_fxtx, epsilon); + EXPECT_NEAR(matrix[1][2], 0.0, epsilon); + EXPECT_NEAR(matrix[1][3], -expected_fxcytx, epsilon); + + EXPECT_NEAR(matrix[2][0], 0.0, epsilon); + EXPECT_NEAR(matrix[2][1], 0.0, epsilon); + EXPECT_NEAR(matrix[2][2], 0.0, epsilon); + EXPECT_NEAR(matrix[2][3], expected_fxfytx, epsilon); + + EXPECT_NEAR(matrix[3][0], 0.0, epsilon); + EXPECT_NEAR(matrix[3][1], 0.0, epsilon); + EXPECT_NEAR(matrix[3][2], -static_cast<double>(fy), epsilon); + EXPECT_NEAR(matrix[3][3], expected_fycxcxprime, epsilon); +} + +TEST(scale_calibration, basic) +{ + const float fx = 1500.0; + const float fy = 1000.0; + const float cx = 960.0; + const float cy = 600.0; + const float tx = -0.27; + + CameraCalibration cal{ + {{{fx, 0.0, cx}, {0.0, fy, cy}, {0.0, 0.0, 1.0}}}, + {{{1.0, 0.0, 0.0}, {0.0, 1.0, 0.0}, {0.0, 0.0, 1.0}}}, + {{{fx, 0.0, cx, fx * tx}, {0.0, fy, cy, 0.0}, {0.0, 0.0, 1.0, 0.0}}}, + CameraCalibration::DistortionType::NONE, + {}}; + + const double scale = 0.5; + const auto scaled_cal = scale_calibration(cal, scale); + + EXPECT_FLOAT_EQ(scaled_cal.K[0][0], fx * scale); + EXPECT_FLOAT_EQ(scaled_cal.K[0][2], cx * scale); + EXPECT_FLOAT_EQ(scaled_cal.K[1][1], fy * scale); + EXPECT_FLOAT_EQ(scaled_cal.K[1][2], cy * scale); + + EXPECT_FLOAT_EQ(scaled_cal.P[0][0], fx * scale); + EXPECT_FLOAT_EQ(scaled_cal.P[0][2], cx * scale); + EXPECT_FLOAT_EQ(scaled_cal.P[0][3], fx * tx * scale); + EXPECT_FLOAT_EQ(scaled_cal.P[1][1], fy * scale); + EXPECT_FLOAT_EQ(scaled_cal.P[1][2], cy * scale); + EXPECT_FLOAT_EQ(scaled_cal.P[1][3], 0.0); + + // Ensure other values are unchanged +
EXPECT_FLOAT_EQ(scaled_cal.K[0][1], cal.K[0][1]); + EXPECT_FLOAT_EQ(scaled_cal.K[1][0], cal.K[1][0]); + EXPECT_FLOAT_EQ(scaled_cal.K[2][0], cal.K[2][0]); + EXPECT_FLOAT_EQ(scaled_cal.K[2][1], cal.K[2][1]); + EXPECT_FLOAT_EQ(scaled_cal.K[2][2], cal.K[2][2]); +} + TEST(create_depth_image, mono_and_float) { const float fx = 1000.0;