Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
25 commits
Select commit Hold shift + click to select a range
f752e1d
Natively support feature streams from the MultiSense
mattalvarado Mar 18, 2026
c24662c
Add unit tests
mattalvarado Mar 18, 2026
f241ae6
Support configuration
mattalvarado Mar 19, 2026
127bd86
Fix build
mattalvarado Mar 19, 2026
0c2693a
Serialization updates
mattalvarado Mar 19, 2026
09a4f78
Merge branch 'master' of github.com:carnegierobotics/LibMultiSense in…
mattalvarado Mar 24, 2026
c4e01ac
Better formatting
mattalvarado Mar 24, 2026
180ce60
Stop secondary stream sources too
mattalvarado Mar 24, 2026
b397548
Use convert functions
mattalvarado Mar 24, 2026
382dc41
Move the feature detector config as part of the MultiSenseConfig
mattalvarado Mar 24, 2026
1501458
Refined support including app registration
mattalvarado Mar 26, 2026
4d5c168
Add feature detector utility
mattalvarado Mar 26, 2026
eec3c3e
In process debug
mattalvarado Mar 26, 2026
62197ae
Solve memory issues
mattalvarado Mar 26, 2026
fac8168
Logic fixes
mattalvarado Mar 26, 2026
3208889
Updates to README
mattalvarado Mar 26, 2026
f587261
Cleanup our feature metadata cache
mattalvarado Mar 27, 2026
ea49f4c
Let users know they need new firmware for the feature detector
mattalvarado Mar 27, 2026
2d5f580
Get rid of debug prints
mattalvarado Mar 31, 2026
2b18756
Merge branch 'master' into malvarado/natively_support_feature_streams
mattalvarado Mar 31, 2026
3c496a1
Merge branch 'master' into malvarado/natively_support_feature_streams
mattalvarado Apr 7, 2026
3fc9d10
Fix bad cmake install
mattalvarado Apr 7, 2026
97bf035
Fix utility and install
mattalvarado Apr 7, 2026
b27023c
Merge branch 'master' into malvarado/natively_support_feature_streams
mattalvarado Apr 27, 2026
891e7de
Merge branch 'master' into malvarado/natively_support_feature_streams
mattalvarado Apr 27, 2026
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 2 additions & 1 deletion CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -61,7 +61,8 @@ endif()

#
# Default to an optimized Release build when the user has not chosen a build
# type. NOTE: a previous revision forced "DebugWithRelInfo", which is not a
# valid CMake build type (the standard name is "RelWithDebInfo") and
# contradicted the status message below — that leftover debug override has
# been removed.
#
if (NOT CMAKE_BUILD_TYPE AND NOT CMAKE_CONFIGURATION_TYPES)
    message(STATUS "Selecting default build type: Release")
    set(CMAKE_BUILD_TYPE "Release" CACHE STRING "Build type (default: Release)" FORCE)
endif ()

#
Expand Down
135 changes: 135 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -69,6 +69,9 @@ The LibMultiSense C++ and Python library has been tested with the following oper
- [IMU Data Streaming](#imu-data-streaming)
- [Python](#python-9)
- [C++](#c-9)
- [Feature Rendering](#feature-rendering)
- [Python](#python-10)
- [C++](#c-10)

## Client Networking Prerequisite

Expand Down Expand Up @@ -900,6 +903,7 @@ and ranges.
### Python

```python

import libmultisense as lms

def main():
Expand Down Expand Up @@ -1064,3 +1068,134 @@ int main(int argc, char** argv)
return 0;
}
```

---

## Feature Rendering

LibMultiSense supports retrieving image features computed on-camera. These are synchronized with the
corresponding image frames.

The following example demonstrates how to retrieve and render features on a rectified image.

> [!NOTE]
> MultiSense firmware version v7.34 or newer is required to use the onboard feature detector

### Python

```python
import libmultisense as lms
import cv2

def main():
    """Stream left mono images and on-camera ORB features, rendering the
    feature keypoints on the live image until 'q' is pressed.

    Requires MultiSense firmware v7.34+ for the onboard feature detector.
    """
    channel_config = lms.ChannelConfig()
    channel_config.ip_address = "10.66.171.21"

    with lms.Channel.create(channel_config) as channel:
        if not channel:
            print("Invalid channel")
            exit(1)

        # Set the feature detector config to enable the feature detector
        config = channel.get_config()
        config.feature_detector_config = lms.FeatureDetectorConfig()
        config.feature_detector_config.number_of_features = 1500
        config.feature_detector_config.grouping_enabled = True

        # Check the status so a failure to enable the detector (e.g. old
        # firmware) is reported instead of silently streaming no features
        status = channel.set_config(config)
        if status != lms.Status.OK:
            print("Cannot enable feature detector:", lms.to_string(status))
            exit(1)

        # Start the left mono image stream and the corresponding feature stream
        sources = [lms.DataSource.LEFT_MONO_RAW, lms.DataSource.LEFT_ORB_FEATURES]
        if channel.start_streams(sources) != lms.Status.OK:
            print("Unable to start streams")
            exit(1)

        while True:
            frame = channel.get_next_image_frame()
            if frame and frame.has_image(lms.DataSource.LEFT_MONO_RAW):
                img = frame.get_image(lms.DataSource.LEFT_MONO_RAW).as_array

                # Convert grayscale to BGR for color rendering
                display_img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)

                if frame.has_feature(lms.DataSource.LEFT_ORB_FEATURES):
                    features = frame.get_feature(lms.DataSource.LEFT_ORB_FEATURES)
                    print(f"Frame {frame.frame_id}: Received {len(features.keypoints)} features")

                    # Draw each keypoint as a small filled green circle
                    for kp in features.keypoints:
                        cv2.circle(display_img, (int(kp.x), int(kp.y)), 3, (0, 255, 0), -1)

                cv2.imshow("MultiSense Features", display_img)
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break

if __name__ == "__main__":
    main()
```

### C++

```c++
#include <iostream>
#include <opencv2/opencv.hpp>

#define HAVE_OPENCV 1
#include <MultiSense/MultiSenseChannel.hh>
#include <MultiSense/MultiSenseUtilities.hh>

namespace lms = multisense;

///
/// @brief Stream left mono images and on-camera ORB features, rendering the
///        feature keypoints on the live image until 'q' is pressed.
///
/// Requires MultiSense firmware v7.34+ for the onboard feature detector.
///
int main(int argc, char** argv)
{
    (void)argc;
    (void)argv;

    const auto channel = lms::Channel::create(lms::Channel::Config{"10.66.171.21"});
    if (!channel)
    {
        std::cerr << "Failed to create channel" << std::endl;
        return 1;
    }

    //
    // Set the feature detector config to enable the feature detector.
    // FeatureDetectorConfig{number_of_features, grouping_enabled, motion_octave}
    // (the previous revision referenced an undeclared `number_of_features`)
    //
    constexpr uint32_t number_of_features = 1500;
    auto config = channel->get_config();
    config.feature_detector_config = lms::MultiSenseConfig::FeatureDetectorConfig{number_of_features, true, 1};
    channel->set_config(config);

    //
    // Start the left mono image stream and the corresponding feature stream
    //
    const std::vector<lms::DataSource> sources = {
        lms::DataSource::LEFT_MONO_RAW,
        lms::DataSource::LEFT_ORB_FEATURES
    };

    if (const auto status = channel->start_streams(sources); status != lms::Status::OK)
    {
        std::cerr << "Cannot start streams: " << lms::to_string(status) << std::endl;
        return 1;
    }

    while (true)
    {
        if (const auto frame = channel->get_next_image_frame(); frame)
        {
            if (frame->has_image(lms::DataSource::LEFT_MONO_RAW))
            {
                //
                // Convert grayscale to BGR for color rendering
                //
                cv::Mat img = frame->get_image(lms::DataSource::LEFT_MONO_RAW).cv_mat();
                cv::Mat display_img;
                cv::cvtColor(img, display_img, cv::COLOR_GRAY2BGR);

                if (frame->has_feature(lms::DataSource::LEFT_ORB_FEATURES))
                {
                    const auto& features = frame->get_feature(lms::DataSource::LEFT_ORB_FEATURES);

                    //
                    // Use the native OpenCV utility to convert keypoints and draw them
                    //
                    cv::drawKeypoints(display_img, features.cv_keypoints(), display_img, cv::Scalar(0, 255, 0));
                }

                cv::imshow("MultiSense Features", display_img);
                if (cv::waitKey(1) == 'q') break;
            }
        }
    }

    return 0;
}
```
1 change: 1 addition & 0 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -30,3 +30,4 @@ multisense_ptp_utility = "libmultisense.utilities.ptp_utility:main"
multisense_rectified_focal_length_utility = "libmultisense.utilities.rectified_focal_length_utility:main"
multisense_save_image_utility = "libmultisense.utilities.save_image_utility:main"
multisense_version_info_utility = "libmultisense.utilities.version_info_utility:main"
multisense_feature_detector_utility = "libmultisense.utilities.feature_detector_utility:main"
54 changes: 53 additions & 1 deletion python/bindings.cc
Original file line number Diff line number Diff line change
Expand Up @@ -119,6 +119,12 @@ PYBIND11_MODULE(_libmultisense, m) {
.value("AUX_RAW", multisense::DataSource::AUX_RAW)
.value("AUX_RECTIFIED_RAW", multisense::DataSource::AUX_RECTIFIED_RAW)
.value("COST_RAW", multisense::DataSource::COST_RAW)
.value("LEFT_ORB_FEATURES", multisense::DataSource::LEFT_ORB_FEATURES)
.value("RIGHT_ORB_FEATURES", multisense::DataSource::RIGHT_ORB_FEATURES)
.value("AUX_ORB_FEATURES", multisense::DataSource::AUX_ORB_FEATURES)
.value("LEFT_RECTIFIED_ORB_FEATURES", multisense::DataSource::LEFT_RECTIFIED_ORB_FEATURES)
.value("RIGHT_RECTIFIED_ORB_FEATURES", multisense::DataSource::RIGHT_RECTIFIED_ORB_FEATURES)
.value("AUX_RECTIFIED_ORB_FEATURES", multisense::DataSource::AUX_RECTIFIED_ORB_FEATURES)
.value("IMU", multisense::DataSource::IMU)
.def("__str__", py::overload_cast<const multisense::DataSource&>(&multisense::to_string), py::prepend())
.def("__repr__", py::overload_cast<const multisense::DataSource&>(&multisense::to_string), py::prepend());
Expand Down Expand Up @@ -162,6 +168,40 @@ PYBIND11_MODULE(_libmultisense, m) {
.value("JPEG", multisense::Image::PixelFormat::JPEG)
.value("H264", multisense::Image::PixelFormat::H264);

// FeatureKeyPoint
py::class_<multisense::FeatureKeyPoint>(m, "FeatureKeyPoint")
.def(py::init<>())
.def_readwrite("x", &multisense::FeatureKeyPoint::x)
.def_readwrite("y", &multisense::FeatureKeyPoint::y)
.def_readwrite("angle", &multisense::FeatureKeyPoint::angle)
.def_readwrite("response", &multisense::FeatureKeyPoint::response)
.def_readwrite("octave", &multisense::FeatureKeyPoint::octave)
.def_readwrite("class_id", &multisense::FeatureKeyPoint::class_id);

// FeatureDescriptorType
py::enum_<multisense::FeatureDescriptorType>(m, "FeatureDescriptorType")
.value("UNKNOWN", multisense::FeatureDescriptorType::UNKNOWN)
.value("ORB", multisense::FeatureDescriptorType::ORB);

// FeatureMessage
py::class_<multisense::FeatureMessage>(m, "FeatureMessage")
.def(py::init<>())
.def_readonly("source", &multisense::FeatureMessage::source)
.def_readonly("descriptor_type", &multisense::FeatureMessage::descriptor_type)
.def_readonly("keypoints", &multisense::FeatureMessage::keypoints)
.def_property_readonly("descriptors", [](const multisense::FeatureMessage& msg)
{
const SSizeVector shape = {static_cast<py::ssize_t>(msg.descriptors.size())};
const SSizeVector strides = {static_cast<py::ssize_t>(sizeof(uint8_t))};
return py::array(py::buffer_info(
const_cast<uint8_t*>(msg.descriptors.data()),
static_cast<py::ssize_t>(sizeof(uint8_t)),
py::format_descriptor<uint8_t>::format(),
1,
shape,
strides));
});

// Image
py::class_<multisense::Image>(m, "Image")
.def(py::init<>())
Expand Down Expand Up @@ -274,8 +314,11 @@ PYBIND11_MODULE(_libmultisense, m) {
.def("add_image", &multisense::ImageFrame::add_image)
.def("get_image", &multisense::ImageFrame::get_image)
.def("has_image", &multisense::ImageFrame::has_image)
.def("get_feature", &multisense::ImageFrame::get_feature)
.def("has_feature", &multisense::ImageFrame::has_feature)
.def_readonly("frame_id", &multisense::ImageFrame::frame_id)
.def_readonly("images", &multisense::ImageFrame::images)
.def_readonly("features", &multisense::ImageFrame::features)
.def_readonly("calibration", &multisense::ImageFrame::calibration)
.def_readonly("frame_time", &multisense::ImageFrame::frame_time)
.def_readonly("ptp_frame_time", &multisense::ImageFrame::ptp_frame_time)
Expand Down Expand Up @@ -473,6 +516,14 @@ PYBIND11_MODULE(_libmultisense, m) {
.def_readwrite("internal", &multisense::MultiSenseConfig::LightingConfig::internal)
.def_readwrite("external", &multisense::MultiSenseConfig::LightingConfig::external);

// FeatureDetectorConfig
py::class_<multisense::MultiSenseConfig::FeatureDetectorConfig>(m, "FeatureDetectorConfig")
.def(py::init<>())
PYBIND11_JSON_SUPPORT(multisense::MultiSenseConfig::FeatureDetectorConfig)
.def_readwrite("number_of_features", &multisense::MultiSenseConfig::FeatureDetectorConfig::number_of_features)
.def_readwrite("grouping_enabled", &multisense::MultiSenseConfig::FeatureDetectorConfig::grouping_enabled)
.def_readwrite("motion_octave", &multisense::MultiSenseConfig::FeatureDetectorConfig::motion_octave);

// MultiSenseConfig
py::class_<multisense::MultiSenseConfig>(m, "MultiSenseConfig")
.def(py::init<>())
Expand All @@ -488,7 +539,8 @@ PYBIND11_MODULE(_libmultisense, m) {
.def_readwrite("time_config", &multisense::MultiSenseConfig::time_config)
.def_readwrite("network_config", &multisense::MultiSenseConfig::network_config)
.def_readwrite("imu_config", &multisense::MultiSenseConfig::imu_config)
.def_readwrite("lighting_config", &multisense::MultiSenseConfig::lighting_config);
.def_readwrite("lighting_config", &multisense::MultiSenseConfig::lighting_config)
.def_readwrite("feature_detector_config", &multisense::MultiSenseConfig::feature_detector_config);

// MultiSenseStatus::PtpStatus
py::class_<multisense::MultiSenseStatus::PtpStatus>(m, "PtpStatus")
Expand Down
102 changes: 102 additions & 0 deletions python/libmultisense/utilities/feature_detector_utility.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,102 @@
#!/usr/bin/env python
#
# @file feature_detector_utility.cc
#
# Copyright 2013-2026
# Carnegie Robotics, LLC
# 4501 Hatfield Street, Pittsburgh, PA 15201
# http://www.carnegierobotics.com
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Carnegie Robotics, LLC nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL CARNEGIE ROBOTICS, LLC BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Significant history (date, user, job code, action):
# 2026-03-26, malvarado@carnegierobotics.com, IRAD, Created file.
#

import argparse
import cv2
from jsondiff import diff

import libmultisense as lms

def main():
    """Command-line utility that enables the on-camera feature detector,
    streams left mono images plus ORB features, and renders the keypoints
    on the live image until 'q' is pressed.

    Requires MultiSense firmware v7.34+ for the onboard feature detector.
    """
    parser = argparse.ArgumentParser("LibMultiSense feature detector utility")
    parser.add_argument("-a", "--ip_address", default="10.66.171.21", help="The IPv4 address of the MultiSense.")
    parser.add_argument("-m", "--mtu", type=int, default=1500, help="The MTU to use to communicate with the camera.")
    parser.add_argument("-n", "--num-features", type=int, default=1500, help="The max number of features to target.")
    parser.add_argument("-f", "--fps", type=int, default=10, help="The framerate of the camera.")
    args = parser.parse_args()

    channel_config = lms.ChannelConfig()
    channel_config.ip_address = args.ip_address
    channel_config.mtu = args.mtu

    with lms.Channel.create(channel_config) as channel:
        if not channel:
            print("Invalid channel")
            exit(1)

        config = channel.get_config()
        if not config:
            # Without a valid config the feature detector can never be
            # enabled, so streaming features below would silently do nothing
            print("Unable to query the current camera configuration")
            exit(1)

        config.frames_per_second = args.fps

        # Set feature configuration to enable the feature detector
        config.feature_detector_config = lms.FeatureDetectorConfig()
        config.feature_detector_config.number_of_features = args.num_features
        config.feature_detector_config.grouping_enabled = True
        config.feature_detector_config.motion_octave = 1
        status = channel.set_config(config)
        if status != lms.Status.OK:
            # Best-effort: warn (e.g. firmware older than v7.34) but continue
            print("Cannot enable feature detector", lms.to_string(status))

        # Start both the left mono image stream and the corresponding feature stream
        sources = [lms.DataSource.LEFT_MONO_RAW, lms.DataSource.LEFT_ORB_FEATURES]
        if channel.start_streams(sources) != lms.Status.OK:
            print("Unable to start streams")
            exit(1)

        while True:
            frame = channel.get_next_image_frame()
            if frame and frame.has_image(lms.DataSource.LEFT_MONO_RAW):
                img = frame.get_image(lms.DataSource.LEFT_MONO_RAW).as_array

                # Convert grayscale to BGR for color rendering
                display_img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)

                # Draw features on the image
                if frame.has_feature(lms.DataSource.LEFT_ORB_FEATURES):
                    features = frame.get_feature(lms.DataSource.LEFT_ORB_FEATURES)
                    print(f"Frame {frame.frame_id}: Received {len(features.keypoints)} features")

                    for kp in features.keypoints:
                        cv2.circle(display_img, (int(kp.x), int(kp.y)), 3, (0, 255, 0), -1)

                cv2.imshow("MultiSense Features", display_img)
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break

if __name__ == '__main__':
    main()
2 changes: 1 addition & 1 deletion source/Legacy/include/MultiSense/details/storage.hh
Original file line number Diff line number Diff line change
Expand Up @@ -251,7 +251,7 @@ namespace details {

std::pair<bool, KEY> will_drop_() {
const bool will_drop = m_map.size() == m_depth;
KEY drop_key; // If will_drop is false, this value is intentionally uninitialized
KEY drop_key{};

if (will_drop)
{
Expand Down
Loading
Loading