Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
104 changes: 104 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -69,6 +69,9 @@ The LibMultiSense C++ and Python library has been tested with the following oper
- [IMU Data Streaming](#imu-data-streaming)
- [Python](#python-9)
- [C++](#c-9)
- [Query Camera Calibration](#query-camera-calibration)
- [Python](#python-10)
- [C++](#c-10)

## Client Networking Prerequisite

Expand Down Expand Up @@ -1064,3 +1067,104 @@ int main(int argc, char** argv)
return 0;
}
```

---

## Query Camera Calibration

The camera's internal stereo calibration can be queried from the MultiSense. This calibration corresponds to the
full-resolution operating mode of the camera and can be used to rectify raw images or project 3D points.

### Python

```python
import libmultisense as lms

def main():
channel_config = lms.ChannelConfig()
channel_config.ip_address = "10.66.171.21"

with lms.Channel.create(channel_config) as channel:
if not channel:
print("Invalid channel")
exit(1)

# Query the camera calibration. NOTE this is for the full resolution operating mode. Each frame
# also contains a scaled calibration which can be easier to handle depending on the application
calibration = channel.get_calibration()

# Print the intrinsic matrix (K) for the left camera
print("Left Camera Intrinsic Matrix (K):")
print(calibration.left.K)

# Print the rectified projection matrix (P) for the left camera
print("Left Camera Rectified Projection Matrix (P):")
print(calibration.left.P)

# Print the distortion coefficients (D) for the left camera
print("Left Camera Distortion Coefficients (D):")
print(calibration.left.D)

# Access aux camera calibration if present
if calibration.aux is not None:
print("Aux Camera Intrinsic Matrix (K):")
print(calibration.aux.K)

# Create a Q matrix to convert disparity pixels to 3D point clouds
Q = lms.QMatrix(calibration.left, calibration.right)
print(Q.matrix())

if __name__ == "__main__":
main()
```

### C++

```c++
#include <iostream>
#include <MultiSense/MultiSenseChannel.hh>
#include <MultiSense/MultiSenseUtilities.hh>

namespace lms = multisense;

int main(int argc, char** argv)
{
const auto channel = lms::Channel::create(lms::Channel::Config{"10.66.171.21"});
if (!channel)
{
std::cerr << "Failed to create channel" << std::endl;
return 1;
}

// Query the camera calibration. NOTE this is for the full resolution operating mode. Each frame also contains
// a scaled calibration which can be easier to handle depending on the application
const auto calibration = channel->get_calibration();

// Access intrinsic matrix (K) for the left camera
std::cout << "Left Camera Intrinsic Matrix (K):" << std::endl;
for (const auto& row : calibration.left.K)
{
for (float val : row)
{
std::cout << val << " ";
}
std::cout << std::endl;
}

// Access rectified projection matrix (P) for the left camera
std::cout << "Left Camera Rectified Projection Matrix (P):" << std::endl;
for (const auto& row : calibration.left.P)
{
for (float val : row)
{
std::cout << val << " ";
}
std::cout << std::endl;
}

// Generate a Q matrix to convert disparity points to 3D point clouds
const auto Q = lms::QMatrix(calibration.left, calibration.right);

// Print the 4x4 Q matrix
for (const auto& row : Q.matrix())
{
for (double val : row)
{
std::cout << val << " ";
}
std::cout << std::endl;
}

return 0;
}
```
5 changes: 4 additions & 1 deletion python/bindings.cc
Original file line number Diff line number Diff line change
Expand Up @@ -777,7 +777,8 @@ PYBIND11_MODULE(_libmultisense, m) {
// Utilities
py::class_<multisense::QMatrix>(m, "QMatrix")
.def(py::init<const multisense::CameraCalibration &, const multisense::CameraCalibration &>())
.def("reproject", &multisense::QMatrix::reproject);
.def("reproject", &multisense::QMatrix::reproject)
.def("matrix", &multisense::QMatrix::matrix);

py::class_<multisense::Pixel>(m, "Pixel")
.def(py::init<>())
Expand Down Expand Up @@ -1073,4 +1074,6 @@ PYBIND11_MODULE(_libmultisense, m) {
m.def("create_bgr_from_ycbcr420", &multisense::create_bgr_from_ycbcr420, py::call_guard<py::gil_scoped_release>());

m.def("create_bgr_image", &multisense::create_bgr_image, py::call_guard<py::gil_scoped_release>());

m.def("scale_calibration", &multisense::scale_calibration, py::call_guard<py::gil_scoped_release>());
}
17 changes: 17 additions & 0 deletions source/LibMultiSense/details/utilities.cc
Original file line number Diff line number Diff line change
Expand Up @@ -496,6 +496,23 @@ std::optional<Image> create_bgr_image(const ImageFrame &frame, const DataSource
return std::nullopt;
}

CameraCalibration scale_calibration(CameraCalibration calibration, double scale)
{
    //
    // Scale the focal lengths (fx, fy) and principal point (cx, cy) in both the unrectified
    // intrinsics K and the rectified projection P. P[row][3] carries the baseline term
    // (e.g. fx * tx), which also scales with the focal length. The skew entries, the
    // homogeneous rows, and the remaining calibration members are intentionally left
    // untouched (distortion coefficients are presumably applied in normalized image
    // coordinates and are resolution independent -- NOTE(review): confirm for all
    // supported distortion models)
    //
    for (std::size_t row = 0; row < 2; ++row)
    {
        calibration.K[row][row] *= scale;
        calibration.K[row][2] *= scale;

        calibration.P[row][row] *= scale;
        calibration.P[row][2] *= scale;
        calibration.P[row][3] *= scale;
    }

    return calibration;
}

std::optional<PointCloud<void>> create_pointcloud(const ImageFrame &frame,
double max_range,
const DataSource &disparity_source)
Expand Down
26 changes: 24 additions & 2 deletions source/LibMultiSense/include/MultiSense/MultiSenseUtilities.hh
Original file line number Diff line number Diff line change
Expand Up @@ -112,14 +112,14 @@ public:
fy_(reference_cal.P[1][1]),
cx_(reference_cal.P[0][2]),
cy_(reference_cal.P[1][2]),
tx_(matching_cal.P[0][3] / matching_cal.P[0][0]),
tx_(static_cast<double>(matching_cal.P[0][3]) / static_cast<double>(matching_cal.P[0][0])),
cx_prime_(matching_cal.P[0][2]),
fytx_(fy_ * tx_),
fxtx_(fx_ * tx_),
fycxtx_(fy_ * cx_ * tx_),
fxcytx_(fx_ * cy_ * tx_),
fxfytx_(fx_ * fy_ * tx_),
fycxcxprime_(fy_ * (cx_ - cx_prime_))
fycxcxprime_(fy_ * (static_cast<double>(cx_) - static_cast<double>(cx_prime_)))
{
}

Expand All @@ -133,6 +133,18 @@ public:
return Point<void>{static_cast<float>(x), static_cast<float>(y), static_cast<float>(z)};
}


std::array<std::array<double, 4>, 4> matrix() const
{
//
// See https://docs.carnegierobotics.com/cookbook/overview.html#reproject-disparity-images-to-3d-point-clouds
//
return std::array<std::array<double, 4>, 4>{{{fytx_, 0.0, 0.0, -fycxtx_},
{0.0, fxtx_, 0.0, -fxcytx_},
{0.0, 0.0, 0.0, fxfytx_},
{0.0, 0.0, -fy_, fycxcxprime_}}};
}

private:
double fx_ = 0.0;
double fy_ = 0.0;
Expand Down Expand Up @@ -223,6 +235,16 @@ MULTISENSE_API std::optional<Image> create_bgr_from_ycbcr420(const Image &luma,
MULTISENSE_API std::optional<Image> create_bgr_image(const ImageFrame &frame,
const DataSource &output_source);

///
/// @brief Scale a calibration to align with a different operating resolution
///
/// @param calibration The input calibration to scale
/// @param scale The scale factor to apply to the calibration
///
/// @return A scaled calibration
///
MULTISENSE_API CameraCalibration scale_calibration(CameraCalibration calibration, double scale);

///
/// @brief Create a point cloud from a image frame and a color source.
///
Expand Down
97 changes: 97 additions & 0 deletions source/LibMultiSense/test/multisense_utilities_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -147,6 +147,103 @@ TEST(QMatrix, reproject)
EXPECT_NEAR(z, repojected_pt.z, epsilon);
}

TEST(QMatrix, matrix)
{
    const float fx = 1500.0;
    const float fy = 1000.0;
    const float cx = 960.0;
    const float cy = 600.0;
    const double tx = -0.27;

    // Compute the product in double so the float result is an exact -405.0f
    const float fxtx = static_cast<float>(static_cast<double>(fx) * tx);

    const CameraCalibration left_calibration{
        {{{fx, 0.0, cx}, {0.0, fy, cy}, {0.0, 0.0, 1.0}}},
        {{{1.0, 0.0, 0.0}, {0.0, 1.0, 0.0}, {0.0, 0.0, 1.0}}},
        {{{fx, 0.0, cx, 0.0}, {0.0, fy, cy, 0.0}, {0.0, 0.0, 1.0, 0.0}}},
        CameraCalibration::DistortionType::NONE,
        {}};

    const CameraCalibration right_calibration{
        {{{fx, 0.0, cx}, {0.0, fy, cy}, {0.0, 0.0, 1.0}}},
        {{{1.0, 0.0, 0.0}, {0.0, 1.0, 0.0}, {0.0, 0.0, 1.0}}},
        {{{fx, 0.0, cx, fxtx}, {0.0, fy, cy, 0.0}, {0.0, 0.0, 1.0, 0.0}}},
        CameraCalibration::DistortionType::NONE,
        {}};

    const QMatrix q{left_calibration, right_calibration};

    const auto q_matrix = q.matrix();

    //
    // Build the expected Q matrix directly from the calibration terms. Since the left and
    // right principal points match, the bottom-right entry (fy * (cx - cx')) is zero
    //
    const double dfx = static_cast<double>(fx);
    const double dfy = static_cast<double>(fy);
    const double dcx = static_cast<double>(cx);
    const double dcy = static_cast<double>(cy);

    const std::array<std::array<double, 4>, 4> expected{{
        {dfy * tx, 0.0, 0.0, -(dfy * dcx * tx)},
        {0.0, dfx * tx, 0.0, -(dfx * dcy * tx)},
        {0.0, 0.0, 0.0, dfx * dfy * tx},
        {0.0, 0.0, -dfy, 0.0}}};

    const double epsilon = 1e-6;

    for (std::size_t row = 0; row < 4; ++row)
    {
        for (std::size_t col = 0; col < 4; ++col)
        {
            EXPECT_NEAR(q_matrix[row][col], expected[row][col], epsilon);
        }
    }
}

TEST(scale_calibration, basic)
{
    const float fx = 1500.0;
    const float fy = 1000.0;
    const float cx = 960.0;
    const float cy = 600.0;
    const float tx = -0.27;

    CameraCalibration cal{
        {{{fx, 0.0, cx}, {0.0, fy, cy}, {0.0, 0.0, 1.0}}},
        {{{1.0, 0.0, 0.0}, {0.0, 1.0, 0.0}, {0.0, 0.0, 1.0}}},
        {{{fx, 0.0, cx, fx * tx}, {0.0, fy, cy, 0.0}, {0.0, 0.0, 1.0, 0.0}}},
        CameraCalibration::DistortionType::NONE,
        {}};

    const double scale = 0.5;
    const auto scaled_cal = scale_calibration(cal, scale);

    // Focal lengths and principal point in K scale with resolution
    EXPECT_FLOAT_EQ(scaled_cal.K[0][0], fx * scale);
    EXPECT_FLOAT_EQ(scaled_cal.K[0][2], cx * scale);
    EXPECT_FLOAT_EQ(scaled_cal.K[1][1], fy * scale);
    EXPECT_FLOAT_EQ(scaled_cal.K[1][2], cy * scale);

    // Focal lengths, principal point, and the baseline term (fx * tx) in P scale as well
    EXPECT_FLOAT_EQ(scaled_cal.P[0][0], fx * scale);
    EXPECT_FLOAT_EQ(scaled_cal.P[0][2], cx * scale);
    EXPECT_FLOAT_EQ(scaled_cal.P[0][3], fx * tx * scale);
    EXPECT_FLOAT_EQ(scaled_cal.P[1][1], fy * scale);
    EXPECT_FLOAT_EQ(scaled_cal.P[1][2], cy * scale);
    EXPECT_FLOAT_EQ(scaled_cal.P[1][3], 0.0);

    // Ensure the skew and off-diagonal K values are unchanged
    EXPECT_FLOAT_EQ(scaled_cal.K[0][1], cal.K[0][1]);
    EXPECT_FLOAT_EQ(scaled_cal.K[1][0], cal.K[1][0]);
    EXPECT_FLOAT_EQ(scaled_cal.K[2][0], cal.K[2][0]);
    EXPECT_FLOAT_EQ(scaled_cal.K[2][1], cal.K[2][1]);
    EXPECT_FLOAT_EQ(scaled_cal.K[2][2], cal.K[2][2]);

    // Ensure P's off-diagonal entries and homogeneous third row are also unchanged.
    // The original test never verified these, so a scale_calibration implementation that
    // scaled the whole matrix would have passed
    EXPECT_FLOAT_EQ(scaled_cal.P[0][1], cal.P[0][1]);
    EXPECT_FLOAT_EQ(scaled_cal.P[1][0], cal.P[1][0]);
    EXPECT_FLOAT_EQ(scaled_cal.P[2][0], cal.P[2][0]);
    EXPECT_FLOAT_EQ(scaled_cal.P[2][1], cal.P[2][1]);
    EXPECT_FLOAT_EQ(scaled_cal.P[2][2], cal.P[2][2]);
    EXPECT_FLOAT_EQ(scaled_cal.P[2][3], cal.P[2][3]);
}

TEST(create_depth_image, mono_and_float)
{
const float fx = 1000.0;
Expand Down
Loading