From fda36ba5012b44eebb621da58e679eaa31315247 Mon Sep 17 00:00:00 2001
From: Matt Alvarado
Date: Tue, 28 Apr 2026 14:02:55 -0400
Subject: [PATCH 1/5] Add in camera calibration example to the README

---
 README.md | 232 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 232 insertions(+)

diff --git a/README.md b/README.md
index 859930cc..3f3533ad 100644
--- a/README.md
+++ b/README.md
@@ -1064,3 +1064,235 @@ int main(int argc, char** argv)
     return 0;
 }
 ```
+<<<<<<< Updated upstream
+=======
+
+---
+
+## Query Camera Calibration
+
+The camera's internal stereo calibration can be queried from the MultiSense. This calibration corresponds to the
+full-resolution operating mode of the camera and can be used to rectify raw images or project 3D points.
+
+### Python
+
+```python
+import libmultisense as lms
+
+def main():
+    channel_config = lms.ChannelConfig()
+    channel_config.ip_address = "10.66.171.21"
+
+    with lms.Channel.create(channel_config) as channel:
+        if not channel:
+            print("Invalid channel")
+            exit(1)
+
+        # Query the camera calibration
+        calibration = channel.get_calibration()
+
+        # Print the intrinsic matrix (K) for the left camera
+        print("Left Camera Intrinsic Matrix (K):")
+        print(calibration.left.K)
+
+        # Print the rectified projection matrix (P) for the left camera
+        print("Left Camera Rectified Projection Matrix (P):")
+        print(calibration.left.P)
+
+        # Print the distortion coefficients (D) for the left camera
+        print("Left Camera Distortion Coefficients (D):")
+        print(calibration.left.D)
+
+        # Access aux camera calibration if present
+        if calibration.aux is not None:
+            print("Aux Camera Intrinsic Matrix (K):")
+            print(calibration.aux.K)
+
+        # Create a Q matrix to convert disparity pixels to 3D point clouds
+        Q = lms.QMatrix(calibration.left, calibration.right)
+
+if __name__ == "__main__":
+    main()
+```
+
+### C++
+
+```c++
+#include <iostream>
+
+#include <MultiSense/MultiSenseChannel.hh>
+#include <MultiSense/MultiSenseUtilities.hh>
+
+namespace lms = multisense;
+
+int main(int argc, char** argv)
+{
+    const auto channel = lms::Channel::create(lms::Channel::Config{"10.66.171.21"});
+    if (!channel)
+    {
+        std::cerr << "Failed to create channel" << std::endl;
+        return 1;
+    }
+
+    // Query the camera calibration
+    const auto calibration = channel->get_calibration();
+
+    // Access intrinsic matrix (K) for the left camera
+    std::cout << "Left Camera Intrinsic Matrix (K):" << std::endl;
+    for (const auto& row : calibration.left.K)
+    {
+        for (float val : row)
+        {
+            std::cout << val << " ";
+        }
+        std::cout << std::endl;
+    }
+
+    // Access rectified projection matrix (P) for the left camera
+    std::cout << "Left Camera Rectified Projection Matrix (P):" << std::endl;
+    for (const auto& row : calibration.left.P)
+    {
+        for (float val : row)
+        {
+            std::cout << val << " ";
+        }
+        std::cout << std::endl;
+    }
+
+    // Generate a Q matrix to convert disparity points to 3D point clouds
+    const auto Q = lms::QMatrix(calibration.left, calibration.right);
+
+    return 0;
+}
+```
+
+---
+
+## Feature Rendering
+
+LibMultiSense supports retrieving image features computed on-camera. These are synchronized with the
+corresponding image frames.
+
+The following example demonstrates how to retrieve and render features on a rectified image.
+
+> [!NOTE]
+> MultiSense firmware version v7.34 or newer is required to use the onboard feature detector
+
+### Python
+
+```python
+import libmultisense as lms
+import cv2
+
+def main():
+    channel_config = lms.ChannelConfig()
+    channel_config.ip_address = "10.66.171.21"
+
+    with lms.Channel.create(channel_config) as channel:
+        if not channel:
+            print("Invalid channel")
+            exit(1)
+
+        # Set the feature detector config to enable the feature detector
+        config = channel.get_config()
+        config.feature_detector_config = lms.FeatureDetectorConfig()
+        config.feature_detector_config.number_of_features = 1500
+        config.feature_detector_config.grouping_enabled = True
+        channel.set_config(config)
+
+        # Start both the rectified image and the corresponding feature stream
+        sources = [lms.DataSource.LEFT_MONO_RAW, lms.DataSource.LEFT_ORB_FEATURES]
+        if channel.start_streams(sources) != lms.Status.OK:
+            print("Unable to start streams")
+            exit(1)
+
+        while True:
+            frame = channel.get_next_image_frame()
+            if frame and frame.has_image(lms.DataSource.LEFT_MONO_RAW):
+                img = frame.get_image(lms.DataSource.LEFT_MONO_RAW).as_array
+
+                # Convert grayscale to BGR for color rendering
+                display_img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
+
+                if frame.has_feature(lms.DataSource.LEFT_ORB_FEATURES):
+                    features = frame.get_feature(lms.DataSource.LEFT_ORB_FEATURES)
+                    print(f"Frame {frame.frame_id}: Received {len(features.keypoints)} features")
+
+                    for kp in features.keypoints:
+                        cv2.circle(display_img, (int(kp.x), int(kp.y)), 3, (0, 255, 0), -1)
+
+                cv2.imshow("MultiSense Features", display_img)
+                if cv2.waitKey(1) & 0xFF == ord('q'):
+                    break
+
+if __name__ == "__main__":
+    main()
+```
+
+### C++
+
+```c++
+#include <iostream>
+#include <opencv2/opencv.hpp>
+
+#define HAVE_OPENCV 1
+#include <MultiSense/MultiSenseChannel.hh>
+#include <MultiSense/MultiSenseUtilities.hh>
+
+namespace lms = multisense;
+
+int main(int argc, char** argv)
+{
+    const auto channel = lms::Channel::create(lms::Channel::Config{"10.66.171.21"});
+    if (!channel)
+    {
+        std::cerr << "Failed to create channel" << std::endl;
+        return 1;
+    }
+
+    //
+    // Set the feature detector config to enable the feature detector
+    //
+    auto config = channel->get_config();
+    config.feature_detector_config = lms::MultiSenseConfig::FeatureDetectorConfig{1500, true, 1};
+    channel->set_config(config);
+
+    // Start both the rectified image and the corresponding feature stream
+    const std::vector<lms::DataSource> sources = {
+        lms::DataSource::LEFT_MONO_RAW,
+        lms::DataSource::LEFT_ORB_FEATURES
+    };
+
+    if (const auto status = channel->start_streams(sources); status != lms::Status::OK)
+    {
+        std::cerr << "Cannot start streams: " << lms::to_string(status) << std::endl;
+        return 1;
+    }
+
+    while(true)
+    {
+        if (const auto frame = channel->get_next_image_frame(); frame)
+        {
+            if (frame->has_image(lms::DataSource::LEFT_MONO_RAW))
+            {
+                cv::Mat img = frame->get_image(lms::DataSource::LEFT_MONO_RAW).cv_mat();
+                cv::Mat display_img;
+                cv::cvtColor(img, display_img, cv::COLOR_GRAY2BGR);
+
+                if (frame->has_feature(lms::DataSource::LEFT_ORB_FEATURES))
+                {
+                    const auto& features = frame->get_feature(lms::DataSource::LEFT_ORB_FEATURES);
+
+                    // Use the native OpenCV utility to convert keypoints and draw them
+                    cv::drawKeypoints(display_img, features.cv_keypoints(), display_img, cv::Scalar(0, 255, 0));
+                }
+
+                cv::imshow("MultiSense Features", display_img);
+                if (cv::waitKey(1) == 'q') break;
+            }
+        }
+    }
+
+    return 0;
+}
+```
+>>>>>>> Stashed changes

From 6e9a12dc36fe53681e37b78f0f9ce54847e06b60 Mon Sep 17 00:00:00 2001
From: Matt Alvarado
Date: Tue, 28 Apr 2026 14:04:55 -0400
Subject: [PATCH 2/5] Fix bad merge

---
 README.md | 137 ++----------------------------------------------------
 1 file changed, 3 insertions(+), 134 deletions(-)

diff --git a/README.md b/README.md
index 3f3533ad..b35b9467 100644
--- a/README.md
+++ b/README.md
@@ -69,6 +69,9 @@ The LibMultiSense C++ and Python library has been tested with the following oper
 - [IMU Data Streaming](#imu-data-streaming)
   - [Python](#python-9)
   - [C++](#c-9)
+- [Query Camera Calibration](#query-camera-calibration)
+  - [Python](#python-10)
+  - [C++](#c-10)
 
 ## Client Networking Prerequisite
 
@@ -1064,8 +1067,6 @@ int main(int argc, char** argv)
     return 0;
 }
 ```
-<<<<<<< Updated upstream
-=======
 
 ---
 
@@ -1164,135 +1165,3 @@ int main(int argc, char** argv)
     return 0;
 }
 ```
-
----
-
-## Feature Rendering
-
-LibMultiSense supports retrieving image features computed on-camera. These are synchronized with the
-corresponding image frames.
-
-The following example demonstrates how to retrieve and render features on a rectified image.
-
-> [!NOTE]
-> MultiSense firmware version v7.34 or newer is required to use the onboard feature detector
-
-### Python
-
-```python
-import libmultisense as lms
-import cv2
-
-def main():
-    channel_config = lms.ChannelConfig()
-    channel_config.ip_address = "10.66.171.21"
-
-    with lms.Channel.create(channel_config) as channel:
-        if not channel:
-            print("Invalid channel")
-            exit(1)
-
-        # Set the feature detector config to enable the feature detector
-        config = channel.get_config()
-        config.feature_detector_config = lms.FeatureDetectorConfig()
-        config.feature_detector_config.number_of_features = 1500
-        config.feature_detector_config.grouping_enabled = True
-        channel.set_config(config)
-
-        # Start both the rectified image and the corresponding feature stream
-        sources = [lms.DataSource.LEFT_MONO_RAW, lms.DataSource.LEFT_ORB_FEATURES]
-        if channel.start_streams(sources) != lms.Status.OK:
-            print("Unable to start streams")
-            exit(1)
-
-        while True:
-            frame = channel.get_next_image_frame()
-            if frame and frame.has_image(lms.DataSource.LEFT_MONO_RAW):
-                img = frame.get_image(lms.DataSource.LEFT_MONO_RAW).as_array
-
-                # Convert grayscale to BGR for color rendering
-                display_img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
-
-                if frame.has_feature(lms.DataSource.LEFT_ORB_FEATURES):
-                    features = frame.get_feature(lms.DataSource.LEFT_ORB_FEATURES)
-                    print(f"Frame {frame.frame_id}: Received {len(features.keypoints)} features")
-
-                    for kp in features.keypoints:
-                        cv2.circle(display_img, (int(kp.x), int(kp.y)), 3, (0, 255, 0), -1)
-
-                cv2.imshow("MultiSense Features", display_img)
-                if cv2.waitKey(1) & 0xFF == ord('q'):
-                    break
-
-if __name__ == "__main__":
-    main()
-```
-
-### C++
-
-```c++
-#include <iostream>
-#include <opencv2/opencv.hpp>
-
-#define HAVE_OPENCV 1
-#include <MultiSense/MultiSenseChannel.hh>
-#include <MultiSense/MultiSenseUtilities.hh>
-
-namespace lms = multisense;
-
-int main(int argc, char** argv)
-{
-    const auto channel = lms::Channel::create(lms::Channel::Config{"10.66.171.21"});
-    if (!channel)
-    {
-        std::cerr << "Failed to create channel" << std::endl;
-        return 1;
-    }
-
-    //
-    // Set the feature detector config to enable the feature detector
-    //
-    auto config = channel->get_config();
-    config.feature_detector_config = lms::MultiSenseConfig::FeatureDetectorConfig{1500, true, 1};
-    channel->set_config(config);
-
-    // Start both the rectified image and the corresponding feature stream
-    const std::vector<lms::DataSource> sources = {
-        lms::DataSource::LEFT_MONO_RAW,
-        lms::DataSource::LEFT_ORB_FEATURES
-    };
-
-    if (const auto status = channel->start_streams(sources); status != lms::Status::OK)
-    {
-        std::cerr << "Cannot start streams: " << lms::to_string(status) << std::endl;
-        return 1;
-    }
-
-    while(true)
-    {
-        if (const auto frame = channel->get_next_image_frame(); frame)
-        {
-            if (frame->has_image(lms::DataSource::LEFT_MONO_RAW))
-            {
-                cv::Mat img = frame->get_image(lms::DataSource::LEFT_MONO_RAW).cv_mat();
-                cv::Mat display_img;
-                cv::cvtColor(img, display_img, cv::COLOR_GRAY2BGR);
-
-                if (frame->has_feature(lms::DataSource::LEFT_ORB_FEATURES))
-                {
-                    const auto& features = frame->get_feature(lms::DataSource::LEFT_ORB_FEATURES);
-
-                    // Use the native OpenCV utility to convert keypoints and draw them
-                    cv::drawKeypoints(display_img, features.cv_keypoints(), display_img, cv::Scalar(0, 255, 0));
-                }
-
-                cv::imshow("MultiSense Features", display_img);
-                if (cv::waitKey(1) == 'q') break;
-            }
-        }
-    }
-
-    return 0;
-}
-```
->>>>>>> Stashed changes

From d790e0629dde453994e43a73428359b9e3799b62 Mon Sep 17 00:00:00 2001
From: Matt Alvarado
Date: Tue, 28 Apr 2026 14:21:28 -0400
Subject: [PATCH 3/5] Add utility function to create a Q matrix

---
 README.md                                     |  7 +++++--
 python/bindings.cc                            |  3 ++-
 .../include/MultiSense/MultiSenseUtilities.hh | 12 ++++++++++++
 3 files changed, 19 insertions(+), 3 deletions(-)

diff --git a/README.md b/README.md
index b35b9467..60a0d4eb 100644
--- a/README.md
+++ b/README.md
@@ -1089,7 +1089,8 @@ def main():
             print("Invalid channel")
             exit(1)
 
-        # Query the camera calibration
+        # Query the camera calibration. NOTE this is for the full resolution operating mode. Each frame
+        # also contains a scaled calibration which can be easier to handle depending on the application
         calibration = channel.get_calibration()
 
         # Print the intrinsic matrix (K) for the left camera
@@ -1111,6 +1112,7 @@ def main():
 
         # Create a Q matrix to convert disparity pixels to 3D point clouds
         Q = lms.QMatrix(calibration.left, calibration.right)
+        print(Q.matrix())
 
 if __name__ == "__main__":
     main()
@@ -1134,7 +1136,8 @@ int main(int argc, char** argv)
         return 1;
     }
 
-    // Query the camera calibration
+    // Query the camera calibration. NOTE this is for the full resolution operating mode. Each frame also contains
+    // a scaled calibration which can be easier to handle depending on the application
     const auto calibration = channel->get_calibration();
 
     // Access intrinsic matrix (K) for the left camera
diff --git a/python/bindings.cc b/python/bindings.cc
index 9998eaa9..a9db9ede 100644
--- a/python/bindings.cc
+++ b/python/bindings.cc
@@ -777,7 +777,8 @@ PYBIND11_MODULE(_libmultisense, m) {
     // Utilities
     py::class_<multisense::QMatrix>(m, "QMatrix")
         .def(py::init<const multisense::CameraCalibration&, const multisense::CameraCalibration&>())
-        .def("reproject", &multisense::QMatrix::reproject);
+        .def("reproject", &multisense::QMatrix::reproject)
+        .def("matrix", &multisense::QMatrix::matrix);
 
     py::class_<multisense::Pixel>(m, "Pixel")
         .def(py::init<>())
diff --git a/source/LibMultiSense/include/MultiSense/MultiSenseUtilities.hh b/source/LibMultiSense/include/MultiSense/MultiSenseUtilities.hh
index f2200822..45e046ad 100644
--- a/source/LibMultiSense/include/MultiSense/MultiSenseUtilities.hh
+++ b/source/LibMultiSense/include/MultiSense/MultiSenseUtilities.hh
@@ -133,6 +133,18 @@ public:
         return Point<void>{static_cast<float>(x), static_cast<float>(y), static_cast<float>(z)};
     }
+
+    std::array<std::array<double, 4>, 4> matrix() const
+    {
+        //
+        // See https://docs.carnegierobotics.com/cookbook/overview.html#reproject-disparity-images-to-3d-point-clouds
+        //
+        return std::array<std::array<double, 4>, 4>{{{fytx_, 0.0, 0.0, -fycxtx_},
+                                                     {0.0, fxtx_, 0.0, -fxcytx_},
+                                                     {0.0, 0.0, 0.0, fxfytx_},
+                                                     {0.0, 0.0, -fy_, fycxcxprime_}}};
+    }
+
 private:
     double fx_ = 0.0;
     double fy_ = 0.0;

From 9fda9b4f63d7cf9b3afcc0e29748cd3bdc770d91 Mon Sep 17 00:00:00 2001
From: Matt Alvarado
Date: Tue, 28 Apr 2026 16:36:42 -0400
Subject: [PATCH 4/5] Add in calibration scaling function

---
 python/bindings.cc                            |  2 ++
 source/LibMultiSense/details/utilities.cc     | 17 +++++++++++++++++
 .../include/MultiSense/MultiSenseUtilities.hh | 10 ++++++++++
 3 files changed, 29 insertions(+)

diff --git a/python/bindings.cc b/python/bindings.cc
index a9db9ede..16a0c000 100644
--- a/python/bindings.cc
+++ b/python/bindings.cc
@@ -1074,4 +1074,6 @@ PYBIND11_MODULE(_libmultisense, m) {
     m.def("create_bgr_from_ycbcr420", &multisense::create_bgr_from_ycbcr420, py::call_guard<py::gil_scoped_release>());
 
     m.def("create_bgr_image", &multisense::create_bgr_image, py::call_guard<py::gil_scoped_release>());
+
+    m.def("scale_calibration", &multisense::scale_calibration, py::call_guard<py::gil_scoped_release>());
 }
diff --git a/source/LibMultiSense/details/utilities.cc b/source/LibMultiSense/details/utilities.cc
index 7ca321c3..d084b33b 100644
--- a/source/LibMultiSense/details/utilities.cc
+++ b/source/LibMultiSense/details/utilities.cc
@@ -496,6 +496,23 @@ std::optional<Image> create_bgr_image(const ImageFrame &frame, const DataSource
     return std::nullopt;
 }
 
+CameraCalibration scale_calibration(CameraCalibration calibration, double scale)
+{
+    calibration.K[0][0] *= scale;
+    calibration.K[0][2] *= scale;
+    calibration.K[1][1] *= scale;
+    calibration.K[1][2] *= scale;
+
+    calibration.P[0][0] *= scale;
+    calibration.P[0][2] *= scale;
+    calibration.P[0][3] *= scale;
+    calibration.P[1][1] *= scale;
+    calibration.P[1][2] *= scale;
+    calibration.P[1][3] *= scale;
+
+    return calibration;
+}
+
 std::optional<PointCloud<void>> create_pointcloud(const ImageFrame &frame,
                                                   double max_range,
                                                   const DataSource &disparity_source)
diff --git a/source/LibMultiSense/include/MultiSense/MultiSenseUtilities.hh b/source/LibMultiSense/include/MultiSense/MultiSenseUtilities.hh
index 45e046ad..130da117 100644
--- a/source/LibMultiSense/include/MultiSense/MultiSenseUtilities.hh
+++ b/source/LibMultiSense/include/MultiSense/MultiSenseUtilities.hh
@@ -235,6 +235,16 @@ MULTISENSE_API std::optional<Image> create_bgr_from_ycbcr420(const Image &luma,
 MULTISENSE_API std::optional<Image> create_bgr_image(const ImageFrame &frame, const DataSource &output_source);
 
+///
+/// @brief Scale a calibration to align with a different operating resolution
+///
+/// @param calibration The input calibration to scale
+/// @param scale The scale factor to apply to the calibration
+///
+/// @return A scaled calibration
+///
+MULTISENSE_API CameraCalibration scale_calibration(CameraCalibration calibration, double scale);
+
 ///
 /// @brief Create a point cloud from a image frame and a color source.
 ///

From 3de921effcbcfceb1c5f78e8d2b1b5f74b365f7a Mon Sep 17 00:00:00 2001
From: Matt Alvarado
Date: Tue, 28 Apr 2026 21:44:21 -0400
Subject: [PATCH 5/5] Add scaled calibration

---
 .../include/MultiSense/MultiSenseUtilities.hh |  4 +-
 .../test/multisense_utilities_test.cc         | 97 +++++++++++++++++++
 2 files changed, 99 insertions(+), 2 deletions(-)

diff --git a/source/LibMultiSense/include/MultiSense/MultiSenseUtilities.hh b/source/LibMultiSense/include/MultiSense/MultiSenseUtilities.hh
index 130da117..8198600c 100644
--- a/source/LibMultiSense/include/MultiSense/MultiSenseUtilities.hh
+++ b/source/LibMultiSense/include/MultiSense/MultiSenseUtilities.hh
@@ -112,14 +112,14 @@ public:
         fy_(reference_cal.P[1][1]),
         cx_(reference_cal.P[0][2]),
         cy_(reference_cal.P[1][2]),
-        tx_(matching_cal.P[0][3] / matching_cal.P[0][0]),
+        tx_(static_cast<double>(matching_cal.P[0][3]) / static_cast<double>(matching_cal.P[0][0])),
         cx_prime_(matching_cal.P[0][2]),
         fytx_(fy_ * tx_),
         fxtx_(fx_ * tx_),
         fycxtx_(fy_ * cx_ * tx_),
         fxcytx_(fx_ * cy_ * tx_),
         fxfytx_(fx_ * fy_ * tx_),
-        fycxcxprime_(fy_ * (cx_ - cx_prime_))
+        fycxcxprime_(fy_ * (static_cast<double>(cx_) - static_cast<double>(cx_prime_)))
     {
     }
diff --git a/source/LibMultiSense/test/multisense_utilities_test.cc b/source/LibMultiSense/test/multisense_utilities_test.cc
index efa5ac0c..3ab451e2 100644
--- a/source/LibMultiSense/test/multisense_utilities_test.cc
+++ b/source/LibMultiSense/test/multisense_utilities_test.cc
@@ -147,6 +147,103 @@ TEST(QMatrix, reproject)
     EXPECT_NEAR(z, repojected_pt.z, epsilon);
 }
 
+TEST(QMatrix, matrix)
+{
+    const float fx = 1500.0;
+    const float fy = 1000.0;
+    const float cx = 960.0;
+    const float cy = 600.0;
+    const double tx = -0.27;
+
+    // Use double for the product to ensure we get an exact -405.0f
+    const float fxtx = static_cast<float>(static_cast<double>(fx) * tx);
+
+    CameraCalibration left_calibration{
+        {{{fx, 0.0, cx}, {0.0, fy, cy}, {0.0, 0.0, 1.0}}},
+        {{{1.0, 0.0, 0.0}, {0.0, 1.0, 0.0}, {0.0, 0.0, 1.0}}},
+        {{{fx, 0.0, cx, 0.0}, {0.0, fy, cy, 0.0}, {0.0, 0.0, 1.0, 0.0}}},
+        CameraCalibration::DistortionType::NONE,
+        {}};
+
+    CameraCalibration right_calibration{
+        {{{fx, 0.0, cx}, {0.0, fy, cy}, {0.0, 0.0, 1.0}}},
+        {{{1.0, 0.0, 0.0}, {0.0, 1.0, 0.0}, {0.0, 0.0, 1.0}}},
+        {{{fx, 0.0, cx, fxtx}, {0.0, fy, cy, 0.0}, {0.0, 0.0, 1.0, 0.0}}},
+        CameraCalibration::DistortionType::NONE,
+        {}};
+
+    QMatrix q{left_calibration, right_calibration};
+
+    const auto matrix = q.matrix();
+
+    const double expected_fytx = static_cast<double>(fy) * tx;
+    const double expected_fxtx = static_cast<double>(fx) * tx;
+    const double expected_fycxtx = static_cast<double>(fy) * static_cast<double>(cx) * tx;
+    const double expected_fxcytx = static_cast<double>(fx) * static_cast<double>(cy) * tx;
+    const double expected_fxfytx = static_cast<double>(fx) * static_cast<double>(fy) * tx;
+    const double expected_fycxcxprime = 0.0;
+
+    const double epsilon = 1e-6;
+
+    EXPECT_NEAR(matrix[0][0], expected_fytx, epsilon);
+    EXPECT_NEAR(matrix[0][1], 0.0, epsilon);
+    EXPECT_NEAR(matrix[0][2], 0.0, epsilon);
+    EXPECT_NEAR(matrix[0][3], -expected_fycxtx, epsilon);
+
+    EXPECT_NEAR(matrix[1][0], 0.0, epsilon);
+    EXPECT_NEAR(matrix[1][1], expected_fxtx, epsilon);
+    EXPECT_NEAR(matrix[1][2], 0.0, epsilon);
+    EXPECT_NEAR(matrix[1][3], -expected_fxcytx, epsilon);
+
+    EXPECT_NEAR(matrix[2][0], 0.0, epsilon);
+    EXPECT_NEAR(matrix[2][1], 0.0, epsilon);
+    EXPECT_NEAR(matrix[2][2], 0.0, epsilon);
+    EXPECT_NEAR(matrix[2][3], expected_fxfytx, epsilon);
+
+    EXPECT_NEAR(matrix[3][0], 0.0, epsilon);
+    EXPECT_NEAR(matrix[3][1], 0.0, epsilon);
+    EXPECT_NEAR(matrix[3][2], -static_cast<double>(fy), epsilon);
+    EXPECT_NEAR(matrix[3][3], expected_fycxcxprime, epsilon);
+}
+
+TEST(scale_calibration, basic)
+{
+    const float fx = 1500.0;
+    const float fy = 1000.0;
+    const float cx = 960.0;
+    const float cy = 600.0;
+    const float tx = -0.27;
+
+    CameraCalibration cal{
+        {{{fx, 0.0, cx}, {0.0, fy, cy}, {0.0, 0.0, 1.0}}},
+        {{{1.0, 0.0, 0.0}, {0.0, 1.0, 0.0}, {0.0, 0.0, 1.0}}},
+        {{{fx, 0.0, cx, fx * tx}, {0.0, fy, cy, 0.0}, {0.0, 0.0, 1.0, 0.0}}},
+        CameraCalibration::DistortionType::NONE,
+        {}};
+
+    const double scale = 0.5;
+    const auto scaled_cal = scale_calibration(cal, scale);
+
+    EXPECT_FLOAT_EQ(scaled_cal.K[0][0], fx * scale);
+    EXPECT_FLOAT_EQ(scaled_cal.K[0][2], cx * scale);
+    EXPECT_FLOAT_EQ(scaled_cal.K[1][1], fy * scale);
+    EXPECT_FLOAT_EQ(scaled_cal.K[1][2], cy * scale);
+
+    EXPECT_FLOAT_EQ(scaled_cal.P[0][0], fx * scale);
+    EXPECT_FLOAT_EQ(scaled_cal.P[0][2], cx * scale);
+    EXPECT_FLOAT_EQ(scaled_cal.P[0][3], fx * tx * scale);
+    EXPECT_FLOAT_EQ(scaled_cal.P[1][1], fy * scale);
+    EXPECT_FLOAT_EQ(scaled_cal.P[1][2], cy * scale);
+    EXPECT_FLOAT_EQ(scaled_cal.P[1][3], 0.0);
+
+    // Ensure other values are unchanged
+    EXPECT_FLOAT_EQ(scaled_cal.K[0][1], cal.K[0][1]);
+    EXPECT_FLOAT_EQ(scaled_cal.K[1][0], cal.K[1][0]);
+    EXPECT_FLOAT_EQ(scaled_cal.K[2][0], cal.K[2][0]);
+    EXPECT_FLOAT_EQ(scaled_cal.K[2][1], cal.K[2][1]);
+    EXPECT_FLOAT_EQ(scaled_cal.K[2][2], cal.K[2][2]);
+}
+
 TEST(create_depth_image, mono_and_float)
 {
     const float fx = 1000.0;