From f752e1d4cda1cee2ee59193a6a3a8566992c271a Mon Sep 17 00:00:00 2001 From: Matt Alvarado Date: Tue, 17 Mar 2026 22:37:12 -0400 Subject: [PATCH 01/20] Natively support feature streams from the MultiSense --- README.md | 117 ++++++++++++++++ python/bindings.cc | 45 +++++- .../LibMultiSense/details/legacy/channel.cc | 128 +++++++++++++++++- .../LibMultiSense/details/legacy/utilities.cc | 28 ++++ source/LibMultiSense/details/utilities.cc | 29 ++++ .../MultiSense/MultiSenseSerialization.hh | 8 +- .../include/MultiSense/MultiSenseTypes.hh | 91 ++++++++++++- .../include/details/legacy/channel.hh | 24 +++- .../include/details/legacy/utilities.hh | 5 + .../LibMultiSense/test/multi_channel_test.cc | 8 +- .../test/multisense_utilities_test.cc | 6 +- .../FeatureDetectorUtility.cc | 8 +- .../include/FeatureDetectorUtilities.hh | 26 ++-- .../include/wire/FeatureMessage.hh} | 15 +- .../include/wire/FeatureMetaMessage.hh} | 14 +- .../include/wire/SecondaryAppDataMessage.hh | 2 +- .../include/wire/SecondaryAppMetaMessage.hh | 2 +- 17 files changed, 518 insertions(+), 38 deletions(-) rename source/{Utilities/Legacy/FeatureDetectorUtility/include/FeatureDetectorMessage.hh => Wire/include/wire/FeatureMessage.hh} (93%) rename source/{Utilities/Legacy/FeatureDetectorUtility/include/FeatureDetectorMetaMessage.hh => Wire/include/wire/FeatureMetaMessage.hh} (95%) diff --git a/README.md b/README.md index 3a5b579d..87405bfc 100644 --- a/README.md +++ b/README.md @@ -62,6 +62,9 @@ The LibMultiSense C++ and Python library has been tested with the following oper - [Color Image Generation](#color-image-generation) - [Python](#python-7) - [C++](#c-7) +- [Feature Rendering](#feature-rendering) + - [Python](#python-8) + - [C++](#c-8) ## Client Networking Prerequisite @@ -734,3 +737,117 @@ int main(int argc, char** argv) return 0; } ``` + +--- + +## Feature Rendering + +LibMultiSense supports retrieving image features computed on-camera. These are synchronized with the +corresponding image frames. 
+ +The following example demonstrates how to retrieve and render features on a rectified image. + +### Python + +```python +import libmultisense as lms +import cv2 + +def main(): + channel_config = lms.ChannelConfig() + channel_config.ip_address = "10.66.171.21" + + with lms.Channel.create(channel_config) as channel: + if not channel: + print("Invalid channel") + exit(1) + + # Start both the rectified image and the corresponding feature stream + sources = [lms.DataSource.LEFT_RECTIFIED_RAW, lms.DataSource.LEFT_RECTIFIED_ORB_FEATURES] + if channel.start_streams(sources) != lms.Status.OK: + print("Unable to start streams") + exit(1) + + while True: + frame = channel.get_next_image_frame() + if frame and frame.has_image(lms.DataSource.LEFT_RECTIFIED_RAW): + img = frame.get_image(lms.DataSource.LEFT_RECTIFIED_RAW).as_array + + # Convert grayscale to BGR for color rendering + display_img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) + + if frame.has_feature(lms.DataSource.LEFT_RECTIFIED_ORB_FEATURES): + features = frame.get_feature(lms.DataSource.LEFT_RECTIFIED_ORB_FEATURES) + print(f"Frame {frame.frame_id}: Received {len(features.keypoints)} features") + + for kp in features.keypoints: + cv2.circle(display_img, (int(kp.x), int(kp.y)), 3, (0, 255, 0), -1) + + cv2.imshow("MultiSense Features", display_img) + if cv2.waitKey(1) & 0xFF == ord('q'): + break + +if __name__ == "__main__": + main() +``` + +### C++ + +```c++ +#include +#include + +#define HAVE_OPENCV 1 +#include +#include + +namespace lms = multisense; + +int main(int argc, char** argv) +{ + const auto channel = lms::Channel::create(lms::Channel::Config{"10.66.171.21"}); + if (!channel) + { + std::cerr << "Failed to create channel" << std::endl; + return 1; + } + + // Start both the rectified image and the corresponding feature stream + const std::vector sources = { + lms::DataSource::LEFT_RECTIFIED_RAW, + lms::DataSource::LEFT_RECTIFIED_ORB_FEATURES + }; + + if (const auto status = channel->start_streams(sources); 
status != lms::Status::OK) + { + std::cerr << "Cannot start streams: " << lms::to_string(status) << std::endl; + return 1; + } + + while(true) + { + if (const auto frame = channel->get_next_image_frame(); frame) + { + if (frame->has_image(lms::DataSource::LEFT_RECTIFIED_RAW)) + { + cv::Mat img = frame->get_image(lms::DataSource::LEFT_RECTIFIED_RAW).cv_mat(); + cv::Mat display_img; + cv::cvtColor(img, display_img, cv::COLOR_GRAY2BGR); + + if (frame->has_feature(lms::DataSource::LEFT_RECTIFIED_ORB_FEATURES)) + { + const auto& features = frame->get_feature(lms::DataSource::LEFT_RECTIFIED_ORB_FEATURES); + + // Use the native OpenCV utility to convert keypoints and draw them + cv::drawKeypoints(display_img, features.cv_keypoints(), display_img, cv::Scalar(0, 255, 0)); + } + + cv::imshow("MultiSense Features", display_img); + if (cv::waitKey(1) == 'q') break; + } + } + } + + return 0; +} +``` diff --git a/python/bindings.cc b/python/bindings.cc index 8b910d69..8caed7ab 100644 --- a/python/bindings.cc +++ b/python/bindings.cc @@ -116,7 +116,13 @@ PYBIND11_MODULE(_libmultisense, m) { .value("AUX_CHROMA_RECTIFIED_RAW", multisense::DataSource::AUX_CHROMA_RECTIFIED_RAW) .value("AUX_RAW", multisense::DataSource::AUX_RAW) .value("AUX_RECTIFIED_RAW", multisense::DataSource::AUX_RECTIFIED_RAW) - .value("COST_RAW", multisense::DataSource::COST_RAW); + .value("COST_RAW", multisense::DataSource::COST_RAW) + .value("LEFT_ORB_FEATURES", multisense::DataSource::LEFT_ORB_FEATURES) + .value("RIGHT_ORB_FEATURES", multisense::DataSource::RIGHT_ORB_FEATURES) + .value("AUX_ORB_FEATURES", multisense::DataSource::AUX_ORB_FEATURES) + .value("LEFT_RECTIFIED_ORB_FEATURES", multisense::DataSource::LEFT_RECTIFIED_ORB_FEATURES) + .value("RIGHT_RECTIFIED_ORB_FEATURES", multisense::DataSource::RIGHT_RECTIFIED_ORB_FEATURES) + .value("AUX_RECTIFIED_ORB_FEATURES", multisense::DataSource::AUX_RECTIFIED_ORB_FEATURES); // ColorImageEncoding py::enum_(m, "ColorImageEncoding") @@ -157,6 +163,40 @@ 
PYBIND11_MODULE(_libmultisense, m) { .value("JPEG", multisense::Image::PixelFormat::JPEG) .value("H264", multisense::Image::PixelFormat::H264); + // KeyPoint + py::class_(m, "KeyPoint") + .def(py::init<>()) + .def_readwrite("x", &multisense::KeyPoint::x) + .def_readwrite("y", &multisense::KeyPoint::y) + .def_readwrite("angle", &multisense::KeyPoint::angle) + .def_readwrite("response", &multisense::KeyPoint::response) + .def_readwrite("octave", &multisense::KeyPoint::octave) + .def_readwrite("class_id", &multisense::KeyPoint::class_id); + + // DescriptorType + py::enum_(m, "DescriptorType") + .value("UNKNOWN", multisense::DescriptorType::UNKNOWN) + .value("ORB", multisense::DescriptorType::ORB); + + // FeatureMessage + py::class_(m, "FeatureMessage") + .def(py::init<>()) + .def_readonly("source", &multisense::FeatureMessage::source) + .def_readonly("descriptor_type", &multisense::FeatureMessage::descriptor_type) + .def_readonly("keypoints", &multisense::FeatureMessage::keypoints) + .def_property_readonly("descriptors", [](const multisense::FeatureMessage& msg) + { + const SSizeVector shape = {static_cast(msg.descriptors.size())}; + const SSizeVector strides = {static_cast(sizeof(uint8_t))}; + return py::array(py::buffer_info( + const_cast(msg.descriptors.data()), + static_cast(sizeof(uint8_t)), + py::format_descriptor::format(), + 1, + shape, + strides)); + }); + // Image py::class_(m, "Image") .def(py::init<>()) @@ -269,8 +309,11 @@ PYBIND11_MODULE(_libmultisense, m) { .def("add_image", &multisense::ImageFrame::add_image) .def("get_image", &multisense::ImageFrame::get_image) .def("has_image", &multisense::ImageFrame::has_image) + .def("get_feature", &multisense::ImageFrame::get_feature) + .def("has_feature", &multisense::ImageFrame::has_feature) .def_readonly("frame_id", &multisense::ImageFrame::frame_id) .def_readonly("images", &multisense::ImageFrame::images) + .def_readonly("features", &multisense::ImageFrame::features) .def_readonly("calibration", 
&multisense::ImageFrame::calibration) .def_readonly("frame_time", &multisense::ImageFrame::frame_time) .def_readonly("ptp_frame_time", &multisense::ImageFrame::ptp_frame_time) diff --git a/source/LibMultiSense/details/legacy/channel.cc b/source/LibMultiSense/details/legacy/channel.cc index d772c60b..827369d7 100644 --- a/source/LibMultiSense/details/legacy/channel.cc +++ b/source/LibMultiSense/details/legacy/channel.cc @@ -92,6 +92,10 @@ #include #include #include +#include +#include +#include +#include #include "details/legacy/channel.hh" #include "details/legacy/calibration.hh" @@ -250,6 +254,12 @@ Status LegacyChannel::connect(const Config &config) m_message_assembler.register_callback(wire::ImuData::ID, std::bind(&LegacyChannel::imu_callback, this, std::placeholders::_1)); + m_message_assembler.register_callback(wire::SecondaryAppData::ID, + std::bind(&LegacyChannel::secondary_app_data_callback, this, std::placeholders::_1)); + + m_message_assembler.register_callback(wire::SecondaryAppMetadata::ID, + std::bind(&LegacyChannel::secondary_app_meta_callback, this, std::placeholders::_1)); + // // Main thread which services incoming data and dispatches to callbacks and conditions attached to the channel // @@ -354,7 +364,13 @@ void LegacyChannel::disconnect() m_connected = false; + m_message_assembler.remove_callback(wire::ImageMeta::ID); m_message_assembler.remove_callback(wire::Image::ID); + m_message_assembler.remove_callback(wire::CompressedImage::ID); + m_message_assembler.remove_callback(wire::Disparity::ID); + m_message_assembler.remove_callback(wire::ImuData::ID); + m_message_assembler.remove_callback(wire::SecondaryAppData::ID); + m_message_assembler.remove_callback(wire::SecondaryAppMetadata::ID); m_socket = NetworkSocket{}; @@ -1046,6 +1062,111 @@ Status LegacyChannel::stop_streams_internal(const std::vector &sourc return Status::TIMEOUT; } +void LegacyChannel::secondary_app_meta_callback(std::shared_ptr> data) +{ + using namespace 
crl::multisense::details; + const auto wire_meta = deserialize(*data); + m_secondary_app_meta_cache[wire_meta.frameId] = wire_meta; +} + +void LegacyChannel::secondary_app_data_callback(std::shared_ptr> data) +{ + using namespace crl::multisense::details; + using namespace std::chrono; + + const auto wire_data = deserialize(*data); + const auto source = convert_sources(static_cast(wire_data.sourceExtended) << 32 | wire_data.source); + + if (source.size() != 1 || !is_feature_source(source[0])) + { + return; + } + + const auto meta = m_secondary_app_meta_cache.find(wire_data.frameId); + if (meta == std::end(m_secondary_app_meta_cache)) + { + CRL_DEBUG("Missing corresponding meta for feature frame_id %" PRIu64 "\n", wire_data.frameId); + return; + } + + utility::BufferStreamReader metaStream(reinterpret_cast(meta->second.dataP), meta->second.dataLength); + wire::FeatureDetectorMeta feature_meta(metaStream, wire::FeatureDetectorMeta::VERSION); + + utility::BufferStreamReader stream(reinterpret_cast(wire_data.dataP), wire_data.length); + wire::FeatureDetector featureDetector(stream, wire::FeatureDetector::VERSION); + + FeatureMessage feature_msg; + feature_msg.source = source[0]; + feature_msg.descriptor_type = DescriptorType::ORB; + + const size_t startDescriptor = featureDetector.numFeatures * sizeof(wire::Feature); + uint8_t * feature_data = reinterpret_cast(featureDetector.dataP); + + feature_msg.keypoints.reserve(featureDetector.numFeatures); + for (size_t i = 0; i < featureDetector.numFeatures; i++) { + wire::Feature f = *reinterpret_cast(feature_data + (i * sizeof(wire::Feature))); + KeyPoint kp; + kp.x = f.x; + kp.y = f.y; + kp.angle = f.angle; + kp.response = f.resp; + kp.octave = f.octave; + kp.class_id = f.descriptor; + feature_msg.keypoints.push_back(kp); + } + + feature_msg.descriptors.reserve(featureDetector.numDescriptors * sizeof(wire::Descriptor)); + for (size_t j = 0; j < featureDetector.numDescriptors; j++) { + wire::Descriptor d = 
*reinterpret_cast(feature_data + (startDescriptor + (j * sizeof(wire::Descriptor)))); + const uint8_t* d_bytes = reinterpret_cast(d.d); + feature_msg.descriptors.insert(feature_msg.descriptors.end(), d_bytes, d_bytes + sizeof(wire::Descriptor)); + } + + const nanoseconds capture_time{seconds{wire_data.timeSeconds} + + microseconds{wire_data.timeMicroSeconds}}; + const TimeT capture_time_point{capture_time}; + const TimeT ptp_capture_time_point{nanoseconds{feature_meta.ptpNanoSeconds}}; + + handle_and_dispatch_feature(std::move(feature_msg), wire_data.frameId); +} + +void LegacyChannel::handle_and_dispatch_feature(FeatureMessage feature, int64_t frame_id) +{ + // Create a new frame if one does not exist, or add the input feature to the corresponding frame + if (m_frame_buffer.count(frame_id) == 0) + { + // Actually, without image metadata it's hard to fill the complete frame structure. + // We will initialize a partial frame and it will be completed when the image arrives. + ImageFrame frame; + frame.frame_id = frame_id; + frame.add_feature(std::move(feature)); + m_frame_buffer.emplace(frame_id, std::move(frame)); + } + else + { + m_frame_buffer[frame_id].add_feature(std::move(feature)); + } + + // Check if our frame is valid, if so dispatch to our callbacks and notify anyone who is waiting on the next frame + if (const auto &frame = m_frame_buffer[frame_id]; + std::all_of(std::begin(m_active_streams), + std::end(m_active_streams), + [&frame](const auto &e) { + if (is_image_source(e)) return frame.has_image(e); + if (is_feature_source(e)) return frame.has_feature(e); + return true; + })) + { + m_image_frame_notifier.set_and_notify(frame); + + std::lock_guard lock(m_image_callback_mutex); + if (m_user_image_frame_callback) + { + m_user_image_frame_callback(frame); + } + } +} + void LegacyChannel::image_meta_callback(std::shared_ptr> data) { using namespace crl::multisense::details; @@ -1330,6 +1451,7 @@ void LegacyChannel::handle_and_dispatch(Image image, const auto 
source = image.source; ImageFrame frame{frame_id, std::map{std::make_pair(source, std::move(image))}, + {}, calibration, capture_time, ptp_capture_time, @@ -1352,7 +1474,11 @@ void LegacyChannel::handle_and_dispatch(Image image, if (const auto &frame = m_frame_buffer[frame_id]; std::all_of(std::begin(m_active_streams), std::end(m_active_streams), - [&frame](const auto &e){return is_image_source(e) ? frame.has_image(e) : true;})) + [&frame](const auto &e) { + if (is_image_source(e)) return frame.has_image(e); + if (is_feature_source(e)) return frame.has_feature(e); + return true; + })) { // // Notify anyone waiting on the next frame diff --git a/source/LibMultiSense/details/legacy/utilities.cc b/source/LibMultiSense/details/legacy/utilities.cc index e55e93bd..e0bb0b0e 100644 --- a/source/LibMultiSense/details/legacy/utilities.cc +++ b/source/LibMultiSense/details/legacy/utilities.cc @@ -69,6 +69,22 @@ bool is_image_source(const DataSource &source) } } +bool is_feature_source(const DataSource &source) +{ + switch (source) + { + case DataSource::LEFT_ORB_FEATURES: + case DataSource::RIGHT_ORB_FEATURES: + case DataSource::AUX_ORB_FEATURES: + case DataSource::LEFT_RECTIFIED_ORB_FEATURES: + case DataSource::RIGHT_RECTIFIED_ORB_FEATURES: + case DataSource::AUX_RECTIFIED_ORB_FEATURES: + return true; + default: + return false; + } +} + Status get_status(const crl::multisense::details::wire::Ack::AckStatus &status) { using namespace crl::multisense::details::wire; @@ -127,6 +143,12 @@ std::vector convert_sources(const crl::multisense::details::wire::So if (source & wire::SOURCE_CHROMA_RECT_AUX) {sources.push_back(DataSource::AUX_CHROMA_RECTIFIED_RAW);} if (source & wire::SOURCE_DISPARITY_COST) {sources.push_back(DataSource::COST_RAW);} if (source & wire::SOURCE_IMU) {sources.push_back(DataSource::IMU);} + if (source & wire::SOURCE_SECONDARY_APP_DATA_0) {sources.push_back(DataSource::LEFT_ORB_FEATURES);} + if (source & wire::SOURCE_SECONDARY_APP_DATA_1) 
{sources.push_back(DataSource::RIGHT_ORB_FEATURES);} + if (source & wire::SOURCE_SECONDARY_APP_DATA_2) {sources.push_back(DataSource::AUX_ORB_FEATURES);} + if (source & wire::SOURCE_SECONDARY_APP_DATA_3) {sources.push_back(DataSource::LEFT_RECTIFIED_ORB_FEATURES);} + if (source & wire::SOURCE_SECONDARY_APP_DATA_4) {sources.push_back(DataSource::RIGHT_RECTIFIED_ORB_FEATURES);} + if (source & wire::SOURCE_SECONDARY_APP_DATA_5) {sources.push_back(DataSource::AUX_RECTIFIED_ORB_FEATURES);} return sources; } @@ -161,6 +183,12 @@ crl::multisense::details::wire::SourceType convert_sources(const std::vector(source));} } diff --git a/source/LibMultiSense/details/utilities.cc b/source/LibMultiSense/details/utilities.cc index 4588c6c8..8978fe37 100644 --- a/source/LibMultiSense/details/utilities.cc +++ b/source/LibMultiSense/details/utilities.cc @@ -55,6 +55,7 @@ #ifdef HAVE_OPENCV #include +#include #endif #include "MultiSense/MultiSenseUtilities.hh" @@ -174,6 +175,34 @@ cv::Mat Image::cv_mat() const cv_type, const_cast(raw_data->data() + image_data_offset)}; } + +std::vector FeatureMessage::cv_keypoints() const +{ + std::vector kp; + kp.reserve(keypoints.size()); + + for (const auto &k : keypoints) + { + kp.emplace_back(k.x, k.y, 0.0f, k.angle, k.response, k.octave, k.class_id); + } + + return kp; +} + +cv::Mat FeatureMessage::cv_descriptors() const +{ + if (keypoints.empty()) + { + return cv::Mat{}; + } + + const int descriptor_size = descriptors.size() / keypoints.size(); + + return cv::Mat{static_cast(keypoints.size()), + descriptor_size, + CV_8UC1, + const_cast(descriptors.data())}; +} #endif bool write_image(const Image &image, const std::filesystem::path &path) diff --git a/source/LibMultiSense/include/MultiSense/MultiSenseSerialization.hh b/source/LibMultiSense/include/MultiSense/MultiSenseSerialization.hh index cadcb05e..1841d796 100644 --- a/source/LibMultiSense/include/MultiSense/MultiSenseSerialization.hh +++ 
b/source/LibMultiSense/include/MultiSense/MultiSenseSerialization.hh @@ -157,7 +157,13 @@ NLOHMANN_JSON_SERIALIZE_ENUM(DataSource, { {DataSource::AUX_RAW, "AUX_RAW"}, {DataSource::AUX_RECTIFIED_RAW, "AUX_RECTIFIED_RAW"}, {DataSource::COST_RAW, "COST_RAW"}, - {DataSource::IMU, "IMU"} + {DataSource::IMU, "IMU"}, + {DataSource::LEFT_ORB_FEATURES, "LEFT_ORB_FEATURES"}, + {DataSource::RIGHT_ORB_FEATURES, "RIGHT_ORB_FEATURES"}, + {DataSource::AUX_ORB_FEATURES, "AUX_ORB_FEATURES"}, + {DataSource::LEFT_RECTIFIED_ORB_FEATURES, "LEFT_RECTIFIED_ORB_FEATURES"}, + {DataSource::RIGHT_RECTIFIED_ORB_FEATURES, "RIGHT_RECTIFIED_ORB_FEATURES"}, + {DataSource::AUX_RECTIFIED_ORB_FEATURES, "AUX_RECTIFIED_ORB_FEATURES"} }) NLOHMANN_JSON_SERIALIZE_ENUM(CameraCalibration::DistortionType, { diff --git a/source/LibMultiSense/include/MultiSense/MultiSenseTypes.hh b/source/LibMultiSense/include/MultiSense/MultiSenseTypes.hh index fb05aa33..89c466cd 100644 --- a/source/LibMultiSense/include/MultiSense/MultiSenseTypes.hh +++ b/source/LibMultiSense/include/MultiSense/MultiSenseTypes.hh @@ -104,7 +104,13 @@ enum class DataSource : uint16_t AUX_RAW, AUX_RECTIFIED_RAW, COST_RAW, - IMU + IMU, + LEFT_ORB_FEATURES, + RIGHT_ORB_FEATURES, + AUX_ORB_FEATURES, + LEFT_RECTIFIED_ORB_FEATURES, + RIGHT_RECTIFIED_ORB_FEATURES, + AUX_RECTIFIED_ORB_FEATURES }; enum class ColorImageEncoding : uint16_t @@ -316,6 +322,55 @@ struct ImageHistogram std::vector data{}; }; +/// +/// @brief Represents a generic feature keypoint +/// +struct KeyPoint +{ + float x = 0.0f; + float y = 0.0f; + float angle = 0.0f; + float response = 0.0f; + uint32_t octave = 0; + uint32_t class_id = 0; +}; + +/// +/// @brief The descriptor type for features +/// +enum class DescriptorType : uint8_t +{ + UNKNOWN, + ORB +}; + +/// +/// @brief A collection of features and their descriptors +/// +struct FeatureMessage +{ + DataSource source = DataSource::UNKNOWN; + DescriptorType descriptor_type = DescriptorType::UNKNOWN; + std::vector 
keypoints{}; + std::vector descriptors{}; + +#ifdef HAVE_OPENCV + /// + /// @brief Convert keypoints to native OpenCV keypoints + /// + std::vector cv_keypoints() const; + + /// + /// @brief Convert descriptors to a native OpenCV Mat. + /// The cv::Mat returned here wraps the underlying descriptor data pointer associated with + /// the FeatureMessage object. If the input FeatureMessage object goes out of scope while you are + /// still using the corresponding cv::Mat, you will need to `clone` the cv::Mat creating an internal + /// copy of all the data + /// + cv::Mat cv_descriptors() const; +#endif +}; + /// /// @brief A frame containing multiple images (indexed by DataSource). /// @@ -350,6 +405,35 @@ struct ImageFrame return (images.find(source) != images.end()); } + /// + /// @brief Add a feature message to the frame, keyed by its DataSource. + /// + void add_feature(const FeatureMessage& feature) + { + features[feature.source] = feature; + } + + /// + /// @brief Retrieve feature message by DataSource. Throws if not found. 
+ /// + const FeatureMessage& get_feature(const DataSource &source) const + { + auto it = features.find(source); + if (it == features.end()) + { + throw std::runtime_error("No feature found for requested DataSource"); + } + return it->second; + } + + /// + /// @brief Check if we have features for a given data source + /// + bool has_feature(const DataSource &source) const + { + return (features.find(source) != features.end()); + } + /// /// @brief The unique monotonically increasing ID for each frame populated by the MultiSense /// @@ -360,6 +444,11 @@ struct ImageFrame /// std::map images{}; + /// + /// @brief The features associated with each source in the frame + /// + std::map features{}; + /// /// @brief The scaled calibration for the entire camera /// diff --git a/source/LibMultiSense/include/details/legacy/channel.hh b/source/LibMultiSense/include/details/legacy/channel.hh index 9d8407b2..38de2877 100644 --- a/source/LibMultiSense/include/details/legacy/channel.hh +++ b/source/LibMultiSense/include/details/legacy/channel.hh @@ -43,6 +43,7 @@ #include #include #include +#include #include "details/legacy/ip.hh" #include "details/legacy/message.hh" @@ -288,10 +289,20 @@ private: void disparity_callback(std::shared_ptr> data); /// - /// @brief Disparity callback used to internally receive images sent from the MultiSense + /// @brief Imu callback used to internally receive imu sent from the MultiSense /// void imu_callback(std::shared_ptr> data); + /// + /// @brief Secondary app meta callback used to internally receive secondary app meta sent from the MultiSense + /// + void secondary_app_meta_callback(std::shared_ptr> data); + + /// + /// @brief Secondary app data callback used to internally receive secondary app data sent from the MultiSense + /// + void secondary_app_data_callback(std::shared_ptr> data); + /// /// @brief Handle internal process, and potentially dispatch a image /// @@ -302,6 +313,12 @@ private: const TimeT &capture_time, const TimeT 
&ptp_capture_time); + /// + /// @brief Handle internal process, and potentially dispatch a feature + /// + void handle_and_dispatch_feature(FeatureMessage feature, + int64_t frame_id); + /// /// @brief Internal mutex used to handle updates from users /// @@ -387,6 +404,11 @@ private: /// std::map m_meta_cache{}; + /// + /// @brief A cache of secondary app metadata associated with a specific frame id + /// + std::map m_secondary_app_meta_cache{}; + /// /// @brief A cache of image frames associated with a specific frame id /// diff --git a/source/LibMultiSense/include/details/legacy/utilities.hh b/source/LibMultiSense/include/details/legacy/utilities.hh index fc899527..e51b5534 100644 --- a/source/LibMultiSense/include/details/legacy/utilities.hh +++ b/source/LibMultiSense/include/details/legacy/utilities.hh @@ -102,6 +102,11 @@ struct ImuSampleScalars /// bool is_image_source(const DataSource &source); +/// +/// @brief Determine if a datasource is a feature source +/// +bool is_feature_source(const DataSource &source); + /// /// Convert a wire status to a API Status /// diff --git a/source/LibMultiSense/test/multi_channel_test.cc b/source/LibMultiSense/test/multi_channel_test.cc index 49500715..f341b274 100644 --- a/source/LibMultiSense/test/multi_channel_test.cc +++ b/source/LibMultiSense/test/multi_channel_test.cc @@ -47,14 +47,14 @@ TEST(frames_synchronized, basic_test) EXPECT_TRUE(frames_synchronized(frames, 0ns)); - frames.emplace_back(ImageFrame{1, {}, {}, TimeT{10ms}, TimeT{10ms}, {}, {}, {}, {}}); - frames.emplace_back(ImageFrame{3, {}, {}, TimeT{30ms}, TimeT{30ms}, {}, {}, {}, {}}); + frames.emplace_back(ImageFrame{1, {}, {}, {}, TimeT{10ms}, TimeT{10ms}, {}, {}, {}, {}}); + frames.emplace_back(ImageFrame{3, {}, {}, {}, TimeT{30ms}, TimeT{30ms}, {}, {}, {}, {}}); EXPECT_FALSE(frames_synchronized(frames, 0ns)); EXPECT_TRUE(frames_synchronized(frames, 21ms)); - frames.emplace_back(ImageFrame{37, {}, {}, TimeT{38ms}, TimeT{38ms}, {}, {}, {}, {}}); - 
frames.emplace_back(ImageFrame{7, {}, {}, TimeT{31ms}, TimeT{31ms}, {}, {}, {}, {}}); + frames.emplace_back(ImageFrame{37, {}, {}, {}, TimeT{38ms}, TimeT{38ms}, {}, {}, {}, {}}); + frames.emplace_back(ImageFrame{7, {}, {}, {}, TimeT{31ms}, TimeT{31ms}, {}, {}, {}, {}}); EXPECT_FALSE(frames_synchronized(frames, 21ms)); EXPECT_TRUE(frames_synchronized(frames, 29ms)); diff --git a/source/LibMultiSense/test/multisense_utilities_test.cc b/source/LibMultiSense/test/multisense_utilities_test.cc index efa5ac0c..48319dbf 100644 --- a/source/LibMultiSense/test/multisense_utilities_test.cc +++ b/source/LibMultiSense/test/multisense_utilities_test.cc @@ -181,7 +181,7 @@ TEST(create_depth_image, mono_and_float) const auto disparity_image = create_example_disparity_image(left_calibration, right_calibration, 3.0, disk_distance_m, 1920, 1200); - ImageFrame frame{0, {}, StereoCalibration{left_calibration, right_calibration, aux_calibration}, {}, {}, {}, {}, {}, {}}; + ImageFrame frame{0, {}, {}, StereoCalibration{left_calibration, right_calibration, aux_calibration}, {}, {}, {}, {}, {}, {}}; frame.add_image(disparity_image); const float invalid = 7000.0; @@ -266,7 +266,7 @@ TEST(get_aux_3d_point, basic_tests) const auto disparity_image = create_example_disparity_image(left_calibration, right_calibration, 3.0, disk_distance_m, 1920, 1200); - ImageFrame frame{0, {}, StereoCalibration{left_calibration, right_calibration, aux_calibration}, {}, {}, {}, {}, {}, {}}; + ImageFrame frame{0, {}, {}, StereoCalibration{left_calibration, right_calibration, aux_calibration}, {}, {}, {}, {}, {}, {}}; frame.add_image(disparity_image); const auto invalid_point = get_aux_3d_point(frame, Pixel{0, 0}, 100, 0.01); @@ -371,7 +371,7 @@ TEST(create_pointcloud, basic_tests) const auto disparity_image = create_example_disparity_image(left_calibration, right_calibration, 3.0, disk_distance_m, 1920, 1200); - ImageFrame frame{0, {}, StereoCalibration{left_calibration, right_calibration, std::nullopt}, {}, {}, 
{}, {}, {}, {}}; + ImageFrame frame{0, {}, {}, StereoCalibration{left_calibration, right_calibration, std::nullopt}, {}, {}, {}, {}, {}, {}}; frame.add_image(disparity_image); const auto point_cloud = create_pointcloud(frame, disk_distance_m + 3.0); diff --git a/source/Utilities/Legacy/FeatureDetectorUtility/FeatureDetectorUtility.cc b/source/Utilities/Legacy/FeatureDetectorUtility/FeatureDetectorUtility.cc index 38825606..7fd8a3e3 100644 --- a/source/Utilities/Legacy/FeatureDetectorUtility/FeatureDetectorUtility.cc +++ b/source/Utilities/Legacy/FeatureDetectorUtility/FeatureDetectorUtility.cc @@ -461,8 +461,8 @@ void featureDetectorCallback(const secondary_app::Header& header, } else { - if ((fHeader.source == feature_detector::Source_Feature_Left) - || (fHeader.source == feature_detector::Source_Feature_Rectified_Left)) + if ((fHeader.source == feature_detector::Source_ORB_Feature_Left) + || (fHeader.source == feature_detector::Source_ORB_Feature_Left_Rectified)) { auto it = userData->elapsedTime.find(fHeader.frameId); if (it == userData->elapsedTime.end()) { @@ -747,11 +747,11 @@ int main(int argc, { status = channelP->startStreams((operatingMode.supportedDataSources & Source_Luma_Left) | (operatingMode.supportedDataSources & Source_Luma_Right) | - feature_detector::Source_Feature_Left|feature_detector::Source_Feature_Right); + feature_detector::Source_ORB_Feature_Left|feature_detector::Source_ORB_Feature_Right); } else { - status = channelP->startStreams(feature_detector::Source_Feature_Left|feature_detector::Source_Feature_Right); + status = channelP->startStreams(feature_detector::Source_ORB_Feature_Left|feature_detector::Source_ORB_Feature_Right); } diff --git a/source/Utilities/Legacy/FeatureDetectorUtility/include/FeatureDetectorUtilities.hh b/source/Utilities/Legacy/FeatureDetectorUtility/include/FeatureDetectorUtilities.hh index 59f3d373..e0d16c2f 100644 --- a/source/Utilities/Legacy/FeatureDetectorUtility/include/FeatureDetectorUtilities.hh +++ 
b/source/Utilities/Legacy/FeatureDetectorUtility/include/FeatureDetectorUtilities.hh @@ -37,8 +37,8 @@ #ifndef __FEATURE_DETECTOR_UTILITIES_H__ #define __FEATURE_DETECTOR_UTILITIES_H__ -#include "FeatureDetectorMessage.hh" -#include "FeatureDetectorMetaMessage.hh" +#include +#include #include "FeatureDetectorConfig.hh" using namespace crl::multisense; @@ -47,12 +47,12 @@ using namespace crl::multisense::details; namespace feature_detector { -static CRL_CONSTEXPR DataSource Source_Feature_Left = Source_Secondary_App_Data_0; -static CRL_CONSTEXPR DataSource Source_Feature_Right = Source_Secondary_App_Data_1; -static CRL_CONSTEXPR DataSource Source_Feature_Aux = Source_Secondary_App_Data_2; -static CRL_CONSTEXPR DataSource Source_Feature_Rectified_Left = Source_Secondary_App_Data_3; -static CRL_CONSTEXPR DataSource Source_Feature_Rectified_Right = Source_Secondary_App_Data_4; -static CRL_CONSTEXPR DataSource Source_Feature_Rectified_Aux = Source_Secondary_App_Data_5; +static CRL_CONSTEXPR DataSource Source_ORB_Feature_Left = Source_Secondary_App_Data_0; +static CRL_CONSTEXPR DataSource Source_ORB_Feature_Right = Source_Secondary_App_Data_1; +static CRL_CONSTEXPR DataSource Source_ORB_Feature_Aux = Source_Secondary_App_Data_2; +static CRL_CONSTEXPR DataSource Source_ORB_Feature_Left_Rectified = Source_Secondary_App_Data_3; +static CRL_CONSTEXPR DataSource Source_ORB_Feature_Right_Rectified = Source_Secondary_App_Data_4; +static CRL_CONSTEXPR DataSource Source_ORB_Feature_Aux_Rectified = Source_Secondary_App_Data_5; /** The recommended maximum number of features for full resolution camera operation */ static CRL_CONSTEXPR int RECOMMENDED_MAX_FEATURES_FULL_RES = 5000; @@ -229,10 +229,10 @@ Status secondaryAppDataExtract(feature_detector::Header &header, const secondary using namespace crl::multisense::details; utility::BufferStreamReader stream(reinterpret_cast(orig.secondaryAppDataP), orig.secondaryAppDataLength); - FeatureDetector featureDetector(stream, 
FeatureDetector::VERSION); + wire::FeatureDetector featureDetector(stream, wire::FeatureDetector::VERSION); utility::BufferStreamReader metaStream(reinterpret_cast(orig.secondaryAppMetadataP), orig.secondaryAppMetadataLength); - FeatureDetectorMeta _meta(metaStream, FeatureDetectorMeta::VERSION); + wire::FeatureDetectorMeta _meta(metaStream, wire::FeatureDetectorMeta::VERSION); header.source = featureDetector.source; @@ -256,16 +256,16 @@ Status secondaryAppDataExtract(feature_detector::Header &header, const secondary header.numFeatures = featureDetector.numFeatures; header.numDescriptors = featureDetector.numDescriptors; - const size_t startDescriptor=featureDetector.numFeatures*sizeof(Feature); + const size_t startDescriptor=featureDetector.numFeatures*sizeof(wire::Feature); uint8_t * dataP = reinterpret_cast(featureDetector.dataP); for (size_t i = 0; i < featureDetector.numFeatures; i++) { - feature_detector::Feature f = *reinterpret_cast(dataP + (i * sizeof(Feature))); + feature_detector::Feature f = *reinterpret_cast(dataP + (i * sizeof(wire::Feature))); header.features->push_back(f); } for (size_t j = 0;j < featureDetector.numDescriptors; j++) { - feature_detector::Descriptor d = *reinterpret_cast(dataP + (startDescriptor + (j * sizeof(Descriptor)))); + feature_detector::Descriptor d = *reinterpret_cast(dataP + (startDescriptor + (j * sizeof(wire::Descriptor)))); header.descriptors->push_back(d); } diff --git a/source/Utilities/Legacy/FeatureDetectorUtility/include/FeatureDetectorMessage.hh b/source/Wire/include/wire/FeatureMessage.hh similarity index 93% rename from source/Utilities/Legacy/FeatureDetectorUtility/include/FeatureDetectorMessage.hh rename to source/Wire/include/wire/FeatureMessage.hh index e3ca9030..18fc178b 100644 --- a/source/Utilities/Legacy/FeatureDetectorUtility/include/FeatureDetectorMessage.hh +++ b/source/Wire/include/wire/FeatureMessage.hh @@ -1,5 +1,5 @@ /** - * @file LibMultiSense/FeatureDetectorMessage.hh + * @file FeatureMessage.hh 
* * This message contains first class feature data. * @@ -37,8 +37,8 @@ * 2024-22-11, patrick.smith@carnegierobotics.com, IRAD, Moved file. **/ -#ifndef LibMultiSense_FeatureDetectorMessage -#define LibMultiSense_FeatureDetectorMessage +#ifndef LibMultiSense_FeatureMessage +#define LibMultiSense_FeatureMessage #include @@ -46,7 +46,10 @@ #include #include -using namespace crl::multisense::details; +namespace crl { +namespace multisense { +namespace details { +namespace wire { #pragma pack(push, 1) @@ -122,5 +125,9 @@ public: #endif // !SENSORPOD_FIRMWARE +} // namespace wire +} // namespace details +} // namespace multisense +} // namespace crl #endif diff --git a/source/Utilities/Legacy/FeatureDetectorUtility/include/FeatureDetectorMetaMessage.hh b/source/Wire/include/wire/FeatureMetaMessage.hh similarity index 95% rename from source/Utilities/Legacy/FeatureDetectorUtility/include/FeatureDetectorMetaMessage.hh rename to source/Wire/include/wire/FeatureMetaMessage.hh index e84743c3..6366fcd5 100644 --- a/source/Utilities/Legacy/FeatureDetectorUtility/include/FeatureDetectorMetaMessage.hh +++ b/source/Wire/include/wire/FeatureMetaMessage.hh @@ -1,5 +1,5 @@ /** - * @file LibMultiSense/FeatureDetectorMetaMessage.hh + * @file FeatureMetaMessage.hh * * Copyright 2013-2025 * Carnegie Robotics, LLC @@ -41,9 +41,12 @@ #include #include "utility/Portability.hh" -#include +#include -using namespace crl::multisense::details; +namespace crl { +namespace multisense { +namespace details { +namespace wire { class WIRE_HEADER_ATTRIBS_ FeatureDetectorMetaHeader { public: @@ -151,4 +154,9 @@ public: #endif // !SENSORPOD_FIRMWARE +} // namespace wire +} // namespace details +} // namespace multisense +} // namespace crl + #endif diff --git a/source/Wire/include/wire/SecondaryAppDataMessage.hh b/source/Wire/include/wire/SecondaryAppDataMessage.hh index c039fb98..37afaa82 100644 --- a/source/Wire/include/wire/SecondaryAppDataMessage.hh +++ 
b/source/Wire/include/wire/SecondaryAppDataMessage.hh @@ -40,7 +40,7 @@ #include #include -#include "MultiSense/details/utility/Portability.hh" +#include "utility/Portability.hh" namespace crl { namespace multisense { diff --git a/source/Wire/include/wire/SecondaryAppMetaMessage.hh b/source/Wire/include/wire/SecondaryAppMetaMessage.hh index 29c2d9c6..e66ec128 100644 --- a/source/Wire/include/wire/SecondaryAppMetaMessage.hh +++ b/source/Wire/include/wire/SecondaryAppMetaMessage.hh @@ -40,7 +40,7 @@ #include #include -#include "MultiSense/details/utility/Portability.hh" +#include "utility/Portability.hh" namespace crl { namespace multisense { From c24662c83fc4420dbeecd775f14f89459fecbd8e Mon Sep 17 00:00:00 2001 From: Matt Alvarado Date: Tue, 17 Mar 2026 23:05:40 -0400 Subject: [PATCH 02/20] Add unit tests --- source/LibMultiSense/test/CMakeLists.txt | 1 + source/LibMultiSense/test/feature_test.cc | 191 ++++++++++++++++++++++ 2 files changed, 192 insertions(+) create mode 100644 source/LibMultiSense/test/feature_test.cc diff --git a/source/LibMultiSense/test/CMakeLists.txt b/source/LibMultiSense/test/CMakeLists.txt index a7e7bcf3..3e404366 100644 --- a/source/LibMultiSense/test/CMakeLists.txt +++ b/source/LibMultiSense/test/CMakeLists.txt @@ -3,6 +3,7 @@ find_package(GTest REQUIRED) set(TEST_NAMES calibration_test configuration_test + feature_test info_test message_test multi_channel_test diff --git a/source/LibMultiSense/test/feature_test.cc b/source/LibMultiSense/test/feature_test.cc new file mode 100644 index 00000000..c8fd9deb --- /dev/null +++ b/source/LibMultiSense/test/feature_test.cc @@ -0,0 +1,191 @@ +/** + * @file feature_test.cc + * + * Copyright 2024-2025 + * Carnegie Robotics, LLC + * 4501 Hatfield Street, Pittsburgh, PA 15201 + * http://www.carnegierobotics.com + * + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Carnegie Robotics, LLC nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL CARNEGIE ROBOTICS, LLC BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ **/ + +#include + +#include +#include + +#include +#include +#include + +#ifdef HAVE_OPENCV +#include +#include +#endif + +using namespace multisense; + +TEST(FeatureMessage, Basic) +{ + FeatureMessage msg; + msg.source = DataSource::LEFT_ORB_FEATURES; + msg.descriptor_type = DescriptorType::ORB; + + KeyPoint kp1; + kp1.x = 10.5f; + kp1.y = 20.2f; + kp1.response = 100.0f; + kp1.octave = 1; + kp1.class_id = 5; + + msg.keypoints.push_back(kp1); + + // ORB descriptor is 32 bytes + std::vector desc(32, 0xAA); + msg.descriptors.insert(msg.descriptors.end(), desc.begin(), desc.end()); + + EXPECT_EQ(msg.source, DataSource::LEFT_ORB_FEATURES); + EXPECT_EQ(msg.descriptor_type, DescriptorType::ORB); + EXPECT_EQ(msg.keypoints.size(), 1); + EXPECT_EQ(msg.descriptors.size(), 32); + EXPECT_FLOAT_EQ(msg.keypoints[0].x, 10.5f); +} + +TEST(ImageFrame, Features) +{ + ImageFrame frame; + frame.frame_id = 1234; + + FeatureMessage msg; + msg.source = DataSource::LEFT_ORB_FEATURES; + msg.keypoints.resize(10); + + frame.add_feature(msg); + + EXPECT_TRUE(frame.has_feature(DataSource::LEFT_ORB_FEATURES)); + EXPECT_FALSE(frame.has_feature(DataSource::RIGHT_ORB_FEATURES)); + EXPECT_EQ(frame.get_feature(DataSource::LEFT_ORB_FEATURES).keypoints.size(), 10); + EXPECT_EQ(frame.frame_id, 1234); +} + +#ifdef HAVE_OPENCV +TEST(FeatureMessage, OpenCVConversion) +{ + FeatureMessage msg; + msg.source = DataSource::LEFT_ORB_FEATURES; + msg.descriptor_type = DescriptorType::ORB; + + for (int i = 0; i < 5; ++i) + { + KeyPoint kp; + kp.x = static_cast(i * 10); + kp.y = static_cast(i * 20); + kp.angle = 45.0f; + kp.response = 50.0f; + kp.octave = 0; + kp.class_id = i; + msg.keypoints.push_back(kp); + } + + // 5 features, 32 bytes each = 160 bytes + msg.descriptors.resize(5 * 32, 0xBB); + + auto cv_kp = msg.cv_keypoints(); + auto cv_desc = msg.cv_descriptors(); + + ASSERT_EQ(cv_kp.size(), 5); + EXPECT_FLOAT_EQ(cv_kp[0].pt.x, 0.0f); + EXPECT_FLOAT_EQ(cv_kp[4].pt.x, 40.0f); + EXPECT_FLOAT_EQ(cv_kp[2].angle, 
45.0f); + EXPECT_EQ(cv_kp[3].class_id, 3); + + ASSERT_EQ(cv_desc.rows, 5); + ASSERT_EQ(cv_desc.cols, 32); + ASSERT_EQ(cv_desc.type(), CV_8UC1); + EXPECT_EQ(cv_desc.at(0, 0), 0xBB); +} +#endif + +TEST(FeatureWire, Serialization) +{ + using namespace crl::multisense::details; + + // Simulate FeatureDetectorMeta + wire::FeatureDetectorMeta meta; + meta.frameId = 555; + meta.timeSeconds = 100; + meta.timeNanoSeconds = 200; + meta.ptpNanoSeconds = 300; + meta.numFeatures = 2; + meta.numDescriptors = 2; + + std::vector meta_buf(512); + utility::BufferStreamWriter meta_writer(meta_buf.data(), meta_buf.size()); + meta.serialize(meta_writer, wire::FeatureDetectorMeta::VERSION); + + // Simulate FeatureDetector data + std::vector data_buf(1024); + utility::BufferStreamWriter data_writer(data_buf.data(), data_buf.size()); + + // FeatureDetector has a specialized serialize that includes writing raw data + // In our implementation it just writes the header then seeks past data + // So we manually write the data after the header + data_writer & wire::FeatureDetectorHeader::VERSION; // version + data_writer & (uint64_t)wire::SOURCE_SECONDARY_APP_DATA_0; // source + data_writer & (int64_t)555; // frameId + data_writer & (uint16_t)2; // numFeatures + data_writer & (uint16_t)2; // numDescriptors + + wire::Feature f1{10, 20, 30, 40, 50, 60}; + wire::Feature f2{100, 200, 30, 40, 50, 60}; + data_writer.write(&f1, sizeof(f1)); + data_writer.write(&f2, sizeof(f2)); + + wire::Descriptor d1{{1,2,3,4,5,6,7,8}}; + wire::Descriptor d2{{10,20,30,40,50,60,70,80}}; + data_writer.write(&d1, sizeof(d1)); + data_writer.write(&d2, sizeof(d2)); + + // Now deserialize + utility::BufferStreamReader meta_reader(meta_buf.data(), meta_buf.size()); + wire::FeatureDetectorMeta meta_decoded(meta_reader, wire::FeatureDetectorMeta::VERSION); + EXPECT_EQ(meta_decoded.frameId, 555); + EXPECT_EQ(meta_decoded.numFeatures, 2); + + utility::BufferStreamReader data_reader(data_buf.data(), data_buf.size()); + 
wire::FeatureDetector data_decoded(data_reader, wire::FeatureDetector::VERSION); + EXPECT_EQ(data_decoded.frameId, 555); + EXPECT_EQ(data_decoded.numFeatures, 2); + + // Verify raw data pointer + wire::Feature* features_ptr = reinterpret_cast(data_decoded.dataP); + EXPECT_EQ(features_ptr[0].x, 10); + EXPECT_EQ(features_ptr[1].x, 100); + + wire::Descriptor* desc_ptr = reinterpret_cast(reinterpret_cast(data_decoded.dataP) + 2*sizeof(wire::Feature)); + EXPECT_EQ(desc_ptr[0].d[0], 1); + EXPECT_EQ(desc_ptr[1].d[0], 10); +} + From f241ae6d0ddc5336361e3c697e8791603c345f8c Mon Sep 17 00:00:00 2001 From: Matt Alvarado Date: Wed, 18 Mar 2026 22:35:06 -0400 Subject: [PATCH 03/20] Support configuraton --- README.md | 63 ++++++++++++++ python/bindings.cc | 9 ++ .../LibMultiSense/details/legacy/channel.cc | 86 +++++++++++++++++++ .../include/MultiSense/MultiSenseChannel.hh | 10 +++ .../include/MultiSense/MultiSenseTypes.hh | 10 +++ .../include/details/legacy/channel.hh | 12 ++- source/LibMultiSense/test/feature_test.cc | 13 +++ .../include/wire/FeatureConfig.hh} | 27 +++--- .../wire/SecondaryAppGetConfigMessage.hh | 2 +- .../SecondaryAppGetRegisteredAppsMessage.hh | 2 +- .../wire/SecondaryAppRegisteredAppsMessage.hh | 2 +- 11 files changed, 220 insertions(+), 16 deletions(-) rename source/{Utilities/Legacy/FeatureDetectorUtility/include/FeatureDetectorConfig.hh => Wire/include/wire/FeatureConfig.hh} (84%) diff --git a/README.md b/README.md index 87405bfc..2e132279 100644 --- a/README.md +++ b/README.md @@ -65,6 +65,9 @@ The LibMultiSense C++ and Python library has been tested with the following oper - [Feature Rendering](#feature-rendering) - [Python](#python-8) - [C++](#c-8) +- [Feature Configuration](#feature-configuration) + - [Python](#python-9) + - [C++](#c-9) ## Client Networking Prerequisite @@ -851,3 +854,63 @@ int main(int argc, char** argv) return 0; } ``` + +--- + +## Feature Configuration + +You can configure the on-camera feature detector's parameters, such as the 
maximum number of features +and whether to enable feature grouping. + +### Python + +```python +import libmultisense as lms + +def main(): + channel_config = lms.ChannelConfig() + channel_config.ip_address = "10.66.171.21" + + with lms.Channel.create(channel_config) as channel: + # Get current configuration + config = channel.get_feature_config() + if config: + print(f"Current features: {config.number_of_features}") + + # Update configuration + config.number_of_features = 2000 + config.grouping_enabled = True + channel.set_feature_config(config) + print("Configuration updated") + +if __name__ == "__main__": + main() +``` + +### C++ + +```c++ +#include +#include + +namespace lms = multisense; + +int main() +{ + const auto channel = lms::Channel::create(lms::Channel::Config{"10.66.171.21"}); + + // Get current configuration + if (auto config = channel->get_feature_config(); config) + { + std::cout << "Current features: " << config->number_of_features << std::endl; + + // Update configuration + config->number_of_features = 2000; + config->grouping_enabled = true; + channel->set_feature_config(*config); + std::cout << "Configuration updated" << std::endl; + } + + return 0; +} +``` diff --git a/python/bindings.cc b/python/bindings.cc index 8caed7ab..0e18785a 100644 --- a/python/bindings.cc +++ b/python/bindings.cc @@ -178,6 +178,13 @@ PYBIND11_MODULE(_libmultisense, m) { .value("UNKNOWN", multisense::DescriptorType::UNKNOWN) .value("ORB", multisense::DescriptorType::ORB); + // FeatureDetectorConfig + py::class_(m, "FeatureDetectorConfig") + .def(py::init<>()) + .def_readwrite("number_of_features", &multisense::FeatureDetectorConfig::number_of_features) + .def_readwrite("grouping_enabled", &multisense::FeatureDetectorConfig::grouping_enabled) + .def_readwrite("motion_octave", &multisense::FeatureDetectorConfig::motion_octave); + // FeatureMessage py::class_(m, "FeatureMessage") .def(py::init<>()) @@ -749,6 +756,8 @@ PYBIND11_MODULE(_libmultisense, m) { 
.def("get_next_image_frame", &multisense::Channel::get_next_image_frame, py::call_guard()) .def("get_config", &multisense::Channel::get_config, py::call_guard()) .def("set_config", &multisense::Channel::set_config, py::call_guard()) + .def("get_feature_config", &multisense::Channel::get_feature_config, py::call_guard()) + .def("set_feature_config", &multisense::Channel::set_feature_config, py::call_guard()) .def("get_calibration", &multisense::Channel::get_calibration, py::call_guard()) .def("set_calibration", &multisense::Channel::set_calibration, py::call_guard()) .def("get_info", &multisense::Channel::get_info, py::call_guard()) diff --git a/source/LibMultiSense/details/legacy/channel.cc b/source/LibMultiSense/details/legacy/channel.cc index 827369d7..af9f0d09 100644 --- a/source/LibMultiSense/details/legacy/channel.cc +++ b/source/LibMultiSense/details/legacy/channel.cc @@ -92,10 +92,15 @@ #include #include #include +#include +#include +#include +#include #include #include #include #include +#include #include "details/legacy/channel.hh" #include "details/legacy/calibration.hh" @@ -600,6 +605,87 @@ Status LegacyChannel::set_config(const MultiSenseConfig &config) return Status::INTERNAL_ERROR; } +std::optional LegacyChannel::get_feature_config() +{ + using namespace crl::multisense::details; + + if (!m_connected) + { + return std::nullopt; + } + + const auto secondary_config = wait_for_data(m_message_assembler, + m_socket, + wire::SecondaryAppGetConfig(), + m_transmit_id++, + m_current_mtu, + m_config.receive_timeout); + + if (secondary_config) + { + if (secondary_config->dataLength < sizeof(wire::FeatureDetectorConfigParams)) + { + CRL_DEBUG("Feature detector config response too small: %u < %zu\n", + secondary_config->dataLength, sizeof(wire::FeatureDetectorConfigParams)); + return std::nullopt; + } + + const auto *params = reinterpret_cast(secondary_config->data); + FeatureDetectorConfig config; + config.number_of_features = params->numberOfFeatures; + 
config.grouping_enabled = params->grouping; + config.motion_octave = params->motion; + + return std::make_optional(config); + } + + return std::nullopt; +} + +Status LegacyChannel::set_feature_config(const FeatureDetectorConfig &config) +{ + using namespace crl::multisense::details; + + if (!m_connected) + { + return Status::UNINITIALIZED; + } + + // We first need to get the current config to preserve internal options + const auto secondary_config = wait_for_data(m_message_assembler, + m_socket, + wire::SecondaryAppGetConfig(), + m_transmit_id++, + m_current_mtu, + m_config.receive_timeout); + + wire::FeatureDetectorConfigParams params; + if (secondary_config && secondary_config->dataLength >= sizeof(wire::FeatureDetectorConfigParams)) + { + std::memcpy(¶ms, secondary_config->data, sizeof(wire::FeatureDetectorConfigParams)); + } + + params.numberOfFeatures = config.number_of_features; + params.grouping = config.grouping_enabled; + params.motion = config.motion_octave; + + wire::SecondaryAppControl control; + control.dataLength = sizeof(wire::FeatureDetectorConfigParams); + std::memcpy(control.data, ¶ms, sizeof(wire::FeatureDetectorConfigParams)); + + if (const auto ack = wait_for_ack(m_message_assembler, + m_socket, + control, + m_transmit_id++, + m_current_mtu, + m_config.receive_timeout); ack) + { + return get_status(ack->status); + } + + return Status::TIMEOUT; +} + StereoCalibration LegacyChannel::get_calibration() { std::lock_guard lock(m_mutex); diff --git a/source/LibMultiSense/include/MultiSense/MultiSenseChannel.hh b/source/LibMultiSense/include/MultiSense/MultiSenseChannel.hh index 9153a08d..ea142cff 100644 --- a/source/LibMultiSense/include/MultiSense/MultiSenseChannel.hh +++ b/source/LibMultiSense/include/MultiSense/MultiSenseChannel.hh @@ -209,6 +209,16 @@ public: /// virtual Status set_config(const MultiSenseConfig &config) = 0; + /// + /// @brief Get the camera's feature detector configuration + /// + virtual std::optional get_feature_config() = 0; + 
+ /// + /// @brief Set the camera's feature detector configuration + /// + virtual Status set_feature_config(const FeatureDetectorConfig &config) = 0; + /// /// @brief Get the current stereo calibration. The output calibration will correspond to the full-resolution /// operating mode of the camera diff --git a/source/LibMultiSense/include/MultiSense/MultiSenseTypes.hh b/source/LibMultiSense/include/MultiSense/MultiSenseTypes.hh index 89c466cd..21388bb0 100644 --- a/source/LibMultiSense/include/MultiSense/MultiSenseTypes.hh +++ b/source/LibMultiSense/include/MultiSense/MultiSenseTypes.hh @@ -344,6 +344,16 @@ enum class DescriptorType : uint8_t ORB }; +/// +/// @brief Configuration for the on-camera feature detector +/// +struct FeatureDetectorConfig +{ + uint32_t number_of_features = 1500; + bool grouping_enabled = true; + uint32_t motion_octave = 1; +}; + /// /// @brief A collection of features and their descriptors /// diff --git a/source/LibMultiSense/include/details/legacy/channel.hh b/source/LibMultiSense/include/details/legacy/channel.hh index 38de2877..227da03b 100644 --- a/source/LibMultiSense/include/details/legacy/channel.hh +++ b/source/LibMultiSense/include/details/legacy/channel.hh @@ -194,7 +194,17 @@ public: /// /// @brief Get set the current MultiSense configuration /// - Status set_config(const MultiSenseConfig &config) final override; + Status set_config(const MultiSenseConfig &config) override; + + /// + /// @brief Get the camera's feature detector configuration + /// + std::optional get_feature_config() override; + + /// + /// @brief Set the camera's feature detector configuration + /// + Status set_feature_config(const FeatureDetectorConfig &config) override; /// /// @brief Get the current stereo calibration. 
The output calibration will correspond to the full-resolution diff --git a/source/LibMultiSense/test/feature_test.cc b/source/LibMultiSense/test/feature_test.cc index c8fd9deb..5093dd31 100644 --- a/source/LibMultiSense/test/feature_test.cc +++ b/source/LibMultiSense/test/feature_test.cc @@ -189,3 +189,16 @@ TEST(FeatureWire, Serialization) EXPECT_EQ(desc_ptr[1].d[0], 10); } +TEST(FeatureConfig, Basic) +{ + FeatureDetectorConfig config; + config.number_of_features = 2000; + config.grouping_enabled = false; + config.motion_octave = 2; + + EXPECT_EQ(config.number_of_features, 2000); + EXPECT_FALSE(config.grouping_enabled); + EXPECT_EQ(config.motion_octave, 2); +} + + diff --git a/source/Utilities/Legacy/FeatureDetectorUtility/include/FeatureDetectorConfig.hh b/source/Wire/include/wire/FeatureConfig.hh similarity index 84% rename from source/Utilities/Legacy/FeatureDetectorUtility/include/FeatureDetectorConfig.hh rename to source/Wire/include/wire/FeatureConfig.hh index 05927d8f..c8911786 100644 --- a/source/Utilities/Legacy/FeatureDetectorUtility/include/FeatureDetectorConfig.hh +++ b/source/Wire/include/wire/FeatureConfig.hh @@ -1,5 +1,5 @@ /** - * @file LibMultiSense/FeatureDetectorMessage.hh + * @file FeatureConfig.hh * * Copyright 2024-2025 * Carnegie Robotics, LLC @@ -29,20 +29,18 @@ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * Significant history (date, user, job code, action): - * 2024-22-11, patrick.smith@carnegierobotics.com, IRAD, Created file. 
- * 2025-14-01, hshibata@carnegierobotics.com, IRAD, options added **/ +#ifndef LibMultiSense_FeatureConfig_hh +#define LibMultiSense_FeatureConfig_hh -#ifndef __FEATURE_DETECTOR_CONFIG_H__ -#define __FEATURE_DETECTOR_CONFIG_H__ - -#include -#include +#include +#include -using namespace crl::multisense::details; +namespace crl { +namespace multisense { +namespace details { +namespace wire { #pragma pack(push, 1) @@ -91,4 +89,9 @@ struct FeatureDetectorConfigParams { #pragma pack(pop) -#endif /* end of include guard: __FEATURE_DETECTOR_CONFIG_H__ */ +} // namespace wire +} // namespace details +} // namespace multisense +} // namespace crl + +#endif diff --git a/source/Wire/include/wire/SecondaryAppGetConfigMessage.hh b/source/Wire/include/wire/SecondaryAppGetConfigMessage.hh index 4d7a711a..683153c2 100644 --- a/source/Wire/include/wire/SecondaryAppGetConfigMessage.hh +++ b/source/Wire/include/wire/SecondaryAppGetConfigMessage.hh @@ -39,7 +39,7 @@ #ifndef LibMultiSense_SecondaryAppGetConfig #define LibMultiSense_SecondaryAppGetConfig -#include "MultiSense/details/utility/Portability.hh" +#include "utility/Portability.hh" namespace crl { namespace multisense { diff --git a/source/Wire/include/wire/SecondaryAppGetRegisteredAppsMessage.hh b/source/Wire/include/wire/SecondaryAppGetRegisteredAppsMessage.hh index d254dd83..e0552f7d 100644 --- a/source/Wire/include/wire/SecondaryAppGetRegisteredAppsMessage.hh +++ b/source/Wire/include/wire/SecondaryAppGetRegisteredAppsMessage.hh @@ -39,7 +39,7 @@ #ifndef LibMultiSense_SecondaryAppGetRegisteredApps #define LibMultiSense_SecondaryAppGetRegisteredApps -#include "MultiSense/details/utility/Portability.hh" +#include "utility/Portability.hh" namespace crl { namespace multisense { diff --git a/source/Wire/include/wire/SecondaryAppRegisteredAppsMessage.hh b/source/Wire/include/wire/SecondaryAppRegisteredAppsMessage.hh index c01e75ea..904d80a5 100644 --- a/source/Wire/include/wire/SecondaryAppRegisteredAppsMessage.hh +++ 
b/source/Wire/include/wire/SecondaryAppRegisteredAppsMessage.hh @@ -39,7 +39,7 @@ #ifndef LibMultiSense_SecondaryAppRegisteredApps #define LibMultiSense_SecondaryAppRegisteredApps -#include "MultiSense/details/utility/Portability.hh" +#include "utility/Portability.hh" namespace crl { namespace multisense { From 127bd86720bb047082fe6482cefca4bb55c6bf13 Mon Sep 17 00:00:00 2001 From: Matt Alvarado Date: Wed, 18 Mar 2026 22:57:16 -0400 Subject: [PATCH 04/20] Fix build --- source/Legacy/include/MultiSense/details/storage.hh | 2 +- .../Legacy/FeatureDetectorUtility/FeatureDetectorUtility.cc | 2 +- .../FeatureDetectorUtility/include/FeatureDetectorUtilities.hh | 3 ++- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/source/Legacy/include/MultiSense/details/storage.hh b/source/Legacy/include/MultiSense/details/storage.hh index 18054385..c6331ae6 100644 --- a/source/Legacy/include/MultiSense/details/storage.hh +++ b/source/Legacy/include/MultiSense/details/storage.hh @@ -251,7 +251,7 @@ namespace details { std::pair will_drop_() { const bool will_drop = m_map.size() == m_depth; - KEY drop_key; // If will_drop is false, this value is intentionally uninitialized + KEY drop_key{}; if (will_drop) { diff --git a/source/Utilities/Legacy/FeatureDetectorUtility/FeatureDetectorUtility.cc b/source/Utilities/Legacy/FeatureDetectorUtility/FeatureDetectorUtility.cc index 7fd8a3e3..a6ce9362 100644 --- a/source/Utilities/Legacy/FeatureDetectorUtility/FeatureDetectorUtility.cc +++ b/source/Utilities/Legacy/FeatureDetectorUtility/FeatureDetectorUtility.cc @@ -547,7 +547,7 @@ int main(int argc, Status status; system::VersionInfo v; - VersionType version; + crl::multisense::VersionType version; std::vector deviceModes; system::DeviceMode operatingMode; image::Calibration calibration; diff --git a/source/Utilities/Legacy/FeatureDetectorUtility/include/FeatureDetectorUtilities.hh b/source/Utilities/Legacy/FeatureDetectorUtility/include/FeatureDetectorUtilities.hh index 
e0d16c2f..e51015f5 100644 --- a/source/Utilities/Legacy/FeatureDetectorUtility/include/FeatureDetectorUtilities.hh +++ b/source/Utilities/Legacy/FeatureDetectorUtility/include/FeatureDetectorUtilities.hh @@ -39,10 +39,11 @@ #include #include -#include "FeatureDetectorConfig.hh" +#include using namespace crl::multisense; using namespace crl::multisense::details; +using namespace crl::multisense::details::wire; namespace feature_detector { From 0c2693ad11e28a73d333a99e9cf944b7b3a79644 Mon Sep 17 00:00:00 2001 From: Matt Alvarado Date: Thu, 19 Mar 2026 12:18:28 -0400 Subject: [PATCH 05/20] Serialization updates --- README.md | 4 +-- python/bindings.cc | 1 + .../MultiSense/MultiSenseSerialization.hh | 5 ++++ .../include/MultiSense/MultiSenseTypes.hh | 30 +++++++++++++++++++ 4 files changed, 38 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 2e132279..0aa6a2a0 100644 --- a/README.md +++ b/README.md @@ -876,7 +876,7 @@ def main(): config = channel.get_feature_config() if config: print(f"Current features: {config.number_of_features}") - + # Update configuration config.number_of_features = 2000 config.grouping_enabled = True @@ -898,7 +898,7 @@ namespace lms = multisense; int main() { const auto channel = lms::Channel::create(lms::Channel::Config{"10.66.171.21"}); - + // Get current configuration if (auto config = channel->get_feature_config(); config) { diff --git a/python/bindings.cc b/python/bindings.cc index 0e18785a..2c4b4353 100644 --- a/python/bindings.cc +++ b/python/bindings.cc @@ -181,6 +181,7 @@ PYBIND11_MODULE(_libmultisense, m) { // FeatureDetectorConfig py::class_(m, "FeatureDetectorConfig") .def(py::init<>()) + PYBIND11_JSON_SUPPORT(multisense::FeatureDetectorConfig) .def_readwrite("number_of_features", &multisense::FeatureDetectorConfig::number_of_features) .def_readwrite("grouping_enabled", &multisense::FeatureDetectorConfig::grouping_enabled) .def_readwrite("motion_octave", &multisense::FeatureDetectorConfig::motion_octave); diff 
--git a/source/LibMultiSense/include/MultiSense/MultiSenseSerialization.hh b/source/LibMultiSense/include/MultiSense/MultiSenseSerialization.hh index 1841d796..902ee81d 100644 --- a/source/LibMultiSense/include/MultiSense/MultiSenseSerialization.hh +++ b/source/LibMultiSense/include/MultiSense/MultiSenseSerialization.hh @@ -439,6 +439,11 @@ NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE(MultiSenseInfo, imu, network) +NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE(FeatureDetectorConfig, + number_of_features, + grouping_enabled, + motion_octave) + NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE(Channel::ReceiveBufferConfig, num_small_buffers, small_buffer_size, diff --git a/source/LibMultiSense/include/MultiSense/MultiSenseTypes.hh b/source/LibMultiSense/include/MultiSense/MultiSenseTypes.hh index 21388bb0..6a3f26fb 100644 --- a/source/LibMultiSense/include/MultiSense/MultiSenseTypes.hh +++ b/source/LibMultiSense/include/MultiSense/MultiSenseTypes.hh @@ -327,11 +327,34 @@ struct ImageHistogram /// struct KeyPoint { + /// + /// @brief x location of the keypoint in the output image + /// float x = 0.0f; + + /// + /// @brief y location of the keypoint in the output image + /// float y = 0.0f; + + /// + /// @brief the angle of the intensity centroid + /// float angle = 0.0f; + + /// + /// @brief harris conerness core + /// float response = 0.0f; + + /// + /// @brief the image pyramid layer the keypoint was generated from + /// uint32_t octave = 0; + + /// + /// @brief optional grouping id + /// uint32_t class_id = 0; }; @@ -349,7 +372,14 @@ enum class DescriptorType : uint8_t /// struct FeatureDetectorConfig { + /// + /// @brief Max number of features to detect + /// uint32_t number_of_features = 1500; + + /// + /// @brief Attempt to group features with similar class ids + /// bool grouping_enabled = true; uint32_t motion_octave = 1; }; From c4e01ac69e83177f0c854c10f548b6e2dd4fdfc3 Mon Sep 17 00:00:00 2001 From: Matt Alvarado Date: Tue, 24 Mar 2026 16:11:39 -0400 Subject: [PATCH 06/20] Better formatting --- 
source/LibMultiSense/details/legacy/channel.cc | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/source/LibMultiSense/details/legacy/channel.cc b/source/LibMultiSense/details/legacy/channel.cc index af9f0d09..5225fc89 100644 --- a/source/LibMultiSense/details/legacy/channel.cc +++ b/source/LibMultiSense/details/legacy/channel.cc @@ -1162,7 +1162,7 @@ void LegacyChannel::secondary_app_data_callback(std::shared_ptr(*data); const auto source = convert_sources(static_cast(wire_data.sourceExtended) << 32 | wire_data.source); - + if (source.size() != 1 || !is_feature_source(source[0])) { return; @@ -1187,9 +1187,10 @@ void LegacyChannel::secondary_app_data_callback(std::shared_ptr(featureDetector.dataP); - + feature_msg.keypoints.reserve(featureDetector.numFeatures); - for (size_t i = 0; i < featureDetector.numFeatures; i++) { + for (size_t i = 0; i < featureDetector.numFeatures; i++) + { wire::Feature f = *reinterpret_cast(feature_data + (i * sizeof(wire::Feature))); KeyPoint kp; kp.x = f.x; @@ -1202,7 +1203,8 @@ void LegacyChannel::secondary_app_data_callback(std::shared_ptr(feature_data + (startDescriptor + (j * sizeof(wire::Descriptor)))); const uint8_t* d_bytes = reinterpret_cast(d.d); feature_msg.descriptors.insert(feature_msg.descriptors.end(), d_bytes, d_bytes + sizeof(wire::Descriptor)); @@ -1221,8 +1223,6 @@ void LegacyChannel::handle_and_dispatch_feature(FeatureMessage feature, int64_t // Create a new frame if one does not exist, or add the input feature to the corresponding frame if (m_frame_buffer.count(frame_id) == 0) { - // Actually, without image metadata it's hard to fill the complete frame structure. - // We will initialize a partial frame and it will be completed when the image arrives. 
ImageFrame frame; frame.frame_id = frame_id; frame.add_feature(std::move(feature)); From 180ce604c659131704c7ceda7222a550669fc541 Mon Sep 17 00:00:00 2001 From: Matt Alvarado Date: Tue, 24 Mar 2026 16:46:34 -0400 Subject: [PATCH 07/20] Stop secondary stream sources too --- source/LibMultiSense/include/details/legacy/utilities.hh | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/source/LibMultiSense/include/details/legacy/utilities.hh b/source/LibMultiSense/include/details/legacy/utilities.hh index e51b5534..994e4f65 100644 --- a/source/LibMultiSense/include/details/legacy/utilities.hh +++ b/source/LibMultiSense/include/details/legacy/utilities.hh @@ -72,7 +72,13 @@ constexpr crl::multisense::details::wire::SourceType all_sources = { crl::multisense::details::wire::SOURCE_CHROMA_AUX | crl::multisense::details::wire::SOURCE_CHROMA_RECT_AUX | crl::multisense::details::wire::SOURCE_DISPARITY_COST | - crl::multisense::details::wire::SOURCE_IMU + crl::multisense::details::wire::SOURCE_IMU | + crl::multisense::details::wire::SOURCE_SECONDARY_APP_DATA_0 | + crl::multisense::details::wire::SOURCE_SECONDARY_APP_DATA_1 | + crl::multisense::details::wire::SOURCE_SECONDARY_APP_DATA_2 | + crl::multisense::details::wire::SOURCE_SECONDARY_APP_DATA_3 | + crl::multisense::details::wire::SOURCE_SECONDARY_APP_DATA_4 | + crl::multisense::details::wire::SOURCE_SECONDARY_APP_DATA_5 }; /// From b39754875909bf6edeccf3b3af6580d725a05648 Mon Sep 17 00:00:00 2001 From: Matt Alvarado Date: Tue, 24 Mar 2026 17:50:21 -0400 Subject: [PATCH 08/20] Use convert functions --- .../LibMultiSense/details/legacy/channel.cc | 15 +++++--------- .../details/legacy/configuration.cc | 20 +++++++++++++++++++ .../include/details/legacy/configuration.hh | 11 ++++++++++ 3 files changed, 36 insertions(+), 10 deletions(-) diff --git a/source/LibMultiSense/details/legacy/channel.cc b/source/LibMultiSense/details/legacy/channel.cc index 5225fc89..e70384c9 100644 --- 
a/source/LibMultiSense/details/legacy/channel.cc +++ b/source/LibMultiSense/details/legacy/channel.cc @@ -631,12 +631,8 @@ std::optional LegacyChannel::get_feature_config() } const auto *params = reinterpret_cast(secondary_config->data); - FeatureDetectorConfig config; - config.number_of_features = params->numberOfFeatures; - config.grouping_enabled = params->grouping; - config.motion_octave = params->motion; - return std::make_optional(config); + return std::make_optional(convert(*params)); } return std::nullopt; @@ -659,16 +655,15 @@ Status LegacyChannel::set_feature_config(const FeatureDetectorConfig &config) m_current_mtu, m_config.receive_timeout); - wire::FeatureDetectorConfigParams params; + wire::FeatureDetectorConfigParams params = convert(config); if (secondary_config && secondary_config->dataLength >= sizeof(wire::FeatureDetectorConfigParams)) { std::memcpy(¶ms, secondary_config->data, sizeof(wire::FeatureDetectorConfigParams)); + params.numberOfFeatures = config.number_of_features; + params.grouping = config.grouping_enabled; + params.motion = config.motion_octave; } - params.numberOfFeatures = config.number_of_features; - params.grouping = config.grouping_enabled; - params.motion = config.motion_octave; - wire::SecondaryAppControl control; control.dataLength = sizeof(wire::FeatureDetectorConfigParams); std::memcpy(control.data, ¶ms, sizeof(wire::FeatureDetectorConfigParams)); diff --git a/source/LibMultiSense/details/legacy/configuration.cc b/source/LibMultiSense/details/legacy/configuration.cc index 3813e1a2..92212a6e 100644 --- a/source/LibMultiSense/details/legacy/configuration.cc +++ b/source/LibMultiSense/details/legacy/configuration.cc @@ -42,6 +42,26 @@ namespace multisense { namespace legacy { +FeatureDetectorConfig convert(const crl::multisense::details::wire::FeatureDetectorConfigParams ¶ms) +{ + FeatureDetectorConfig config; + config.number_of_features = params.numberOfFeatures; + config.grouping_enabled = params.grouping; + 
config.motion_octave = params.motion; + + return config; +} + +crl::multisense::details::wire::FeatureDetectorConfigParams convert(const FeatureDetectorConfig &config) +{ + crl::multisense::details::wire::FeatureDetectorConfigParams params{}; + params.numberOfFeatures = config.number_of_features; + params.grouping = config.grouping_enabled; + params.motion = config.motion_octave; + + return params; +} + MultiSenseConfig convert(const crl::multisense::details::wire::CamConfig &config, const std::optional &aux_config, const std::optional &imu_config, diff --git a/source/LibMultiSense/include/details/legacy/configuration.hh b/source/LibMultiSense/include/details/legacy/configuration.hh index dde89803..8d40621d 100644 --- a/source/LibMultiSense/include/details/legacy/configuration.hh +++ b/source/LibMultiSense/include/details/legacy/configuration.hh @@ -50,6 +50,7 @@ #include #include #include +#include #include #include @@ -58,6 +59,16 @@ namespace multisense { namespace legacy { +/// +/// @brief Convert a wire feature detector config to our API config object +/// +FeatureDetectorConfig convert(const crl::multisense::details::wire::FeatureDetectorConfigParams ¶ms); + +/// +/// @brief Convert our API config object to a wire message +/// +crl::multisense::details::wire::FeatureDetectorConfigParams convert(const FeatureDetectorConfig &config); + /// /// @brief Convert a wire AuxCameraConfiguration to our AuxConfig API /// From 382dc4191fc4e245c613f1744b7060116894ce9c Mon Sep 17 00:00:00 2001 From: Matt Alvarado Date: Tue, 24 Mar 2026 18:08:25 -0400 Subject: [PATCH 09/20] Move the feature detector config as part of the MultiSenseConfig --- README.md | 23 +-- python/bindings.cc | 2 - .../LibMultiSense/details/legacy/channel.cc | 132 ++++++++---------- .../details/legacy/configuration.cc | 4 + .../include/MultiSense/MultiSenseChannel.hh | 10 -- .../MultiSense/MultiSenseSerialization.hh | 3 +- .../include/MultiSense/MultiSenseTypes.hh | 18 ++- 
.../include/details/legacy/channel.hh | 10 -- .../include/details/legacy/configuration.hh | 1 + .../LibMultiSense/test/configuration_test.cc | 6 + 10 files changed, 98 insertions(+), 111 deletions(-) diff --git a/README.md b/README.md index a1948dd5..74cd8ced 100644 --- a/README.md +++ b/README.md @@ -1054,14 +1054,14 @@ def main(): with lms.Channel.create(channel_config) as channel: # Get current configuration - config = channel.get_feature_config() - if config: - print(f"Current features: {config.number_of_features}") + config = channel.get_config() + if config.feature_detector_config: + print(f"Current features: {config.feature_detector_config.number_of_features}") # Update configuration - config.number_of_features = 2000 - config.grouping_enabled = True - channel.set_feature_config(config) + config.feature_detector_config.number_of_features = 2000 + config.feature_detector_config.grouping_enabled = True + channel.set_config(config) print("Configuration updated") if __name__ == "__main__": @@ -1081,14 +1081,15 @@ int main() const auto channel = lms::Channel::create(lms::Channel::Config{"10.66.171.21"}); // Get current configuration - if (auto config = channel->get_feature_config(); config) + auto config = channel->get_config(); + if (config.feature_detector_config) { - std::cout << "Current features: " << config->number_of_features << std::endl; + std::cout << "Current features: " << config.feature_detector_config->number_of_features << std::endl; // Update configuration - config->number_of_features = 2000; - config->grouping_enabled = true; - channel->set_feature_config(*config); + config.feature_detector_config->number_of_features = 2000; + config.feature_detector_config->grouping_enabled = true; + channel->set_config(config); std::cout << "Configuration updated" << std::endl; } diff --git a/python/bindings.cc b/python/bindings.cc index 64d4238f..8f7236cb 100644 --- a/python/bindings.cc +++ b/python/bindings.cc @@ -783,8 +783,6 @@ 
PYBIND11_MODULE(_libmultisense, m) { .def("get_next_imu_frame", &multisense::Channel::get_next_imu_frame, py::call_guard()) .def("get_config", &multisense::Channel::get_config, py::call_guard()) .def("set_config", &multisense::Channel::set_config, py::call_guard()) - .def("get_feature_config", &multisense::Channel::get_feature_config, py::call_guard()) - .def("set_feature_config", &multisense::Channel::set_feature_config, py::call_guard()) .def("get_calibration", &multisense::Channel::get_calibration, py::call_guard()) .def("set_calibration", &multisense::Channel::set_calibration, py::call_guard()) .def("get_info", &multisense::Channel::get_info, py::call_guard()) diff --git a/source/LibMultiSense/details/legacy/channel.cc b/source/LibMultiSense/details/legacy/channel.cc index e70384c9..739f22bc 100644 --- a/source/LibMultiSense/details/legacy/channel.cc +++ b/source/LibMultiSense/details/legacy/channel.cc @@ -565,6 +565,46 @@ Status LegacyChannel::set_config(const MultiSenseConfig &config) } } + // + // Set our feature detector config + // + if (config.feature_detector_config) + { + // We first need to get the current config to preserve internal options + const auto secondary_config = wait_for_data(m_message_assembler, + m_socket, + wire::SecondaryAppGetConfig(), + m_transmit_id++, + m_current_mtu, + m_config.receive_timeout); + + wire::FeatureDetectorConfigParams params = convert(config.feature_detector_config.value()); + if (secondary_config && secondary_config->dataLength >= sizeof(wire::FeatureDetectorConfigParams)) + { + std::memcpy(¶ms, secondary_config->data, sizeof(wire::FeatureDetectorConfigParams)); + params.numberOfFeatures = config.feature_detector_config->number_of_features; + params.grouping = config.feature_detector_config->grouping_enabled; + params.motion = config.feature_detector_config->motion_octave; + } + + wire::SecondaryAppControl control; + control.dataLength = sizeof(wire::FeatureDetectorConfigParams); + std::memcpy(control.data, ¶ms, 
sizeof(wire::FeatureDetectorConfigParams)); + + const auto feature_ack = wait_for_ack(m_message_assembler, + m_socket, + control, + m_transmit_id++, + m_current_mtu, + m_config.receive_timeout, + setting_retries); + + if (!feature_ack || feature_ack->status != wire::Ack::Status_Ok) + { + responses.push_back(get_status(feature_ack->status)); + } + } + const auto errors = std::any_of(std::begin(responses), std::end(responses), [](const auto &e) { @@ -605,82 +645,6 @@ Status LegacyChannel::set_config(const MultiSenseConfig &config) return Status::INTERNAL_ERROR; } -std::optional LegacyChannel::get_feature_config() -{ - using namespace crl::multisense::details; - - if (!m_connected) - { - return std::nullopt; - } - - const auto secondary_config = wait_for_data(m_message_assembler, - m_socket, - wire::SecondaryAppGetConfig(), - m_transmit_id++, - m_current_mtu, - m_config.receive_timeout); - - if (secondary_config) - { - if (secondary_config->dataLength < sizeof(wire::FeatureDetectorConfigParams)) - { - CRL_DEBUG("Feature detector config response too small: %u < %zu\n", - secondary_config->dataLength, sizeof(wire::FeatureDetectorConfigParams)); - return std::nullopt; - } - - const auto *params = reinterpret_cast(secondary_config->data); - - return std::make_optional(convert(*params)); - } - - return std::nullopt; -} - -Status LegacyChannel::set_feature_config(const FeatureDetectorConfig &config) -{ - using namespace crl::multisense::details; - - if (!m_connected) - { - return Status::UNINITIALIZED; - } - - // We first need to get the current config to preserve internal options - const auto secondary_config = wait_for_data(m_message_assembler, - m_socket, - wire::SecondaryAppGetConfig(), - m_transmit_id++, - m_current_mtu, - m_config.receive_timeout); - - wire::FeatureDetectorConfigParams params = convert(config); - if (secondary_config && secondary_config->dataLength >= sizeof(wire::FeatureDetectorConfigParams)) - { - std::memcpy(¶ms, secondary_config->data, 
sizeof(wire::FeatureDetectorConfigParams)); - params.numberOfFeatures = config.number_of_features; - params.grouping = config.grouping_enabled; - params.motion = config.motion_octave; - } - - wire::SecondaryAppControl control; - control.dataLength = sizeof(wire::FeatureDetectorConfigParams); - std::memcpy(control.data, ¶ms, sizeof(wire::FeatureDetectorConfigParams)); - - if (const auto ack = wait_for_ack(m_message_assembler, - m_socket, - control, - m_transmit_id++, - m_current_mtu, - m_config.receive_timeout); ack) - { - return get_status(ack->status); - } - - return Status::TIMEOUT; -} - StereoCalibration LegacyChannel::get_calibration() { std::lock_guard lock(m_mutex); @@ -986,12 +950,28 @@ std::optional LegacyChannel::query_configuration(bool has_aux_ m_current_mtu, m_config.receive_timeout); + const auto secondary_config = wait_for_data(m_message_assembler, + m_socket, + wire::SecondaryAppGetConfig(), + m_transmit_id++, + m_current_mtu, + m_config.receive_timeout); + + std::optional feature_config = std::nullopt; + if (secondary_config && secondary_config->dataLength >= sizeof(wire::FeatureDetectorConfigParams)) + { + wire::FeatureDetectorConfigParams params; + std::memcpy(¶ms, secondary_config->data, sizeof(wire::FeatureDetectorConfigParams)); + feature_config = params; + } + if (camera_config) { return convert(camera_config.value(), aux_config, imu_config, led_config, + feature_config, packet_delay ? 
packet_delay.value() : wire::SysPacketDelay{false}, ptp_enabled, m_info.device, diff --git a/source/LibMultiSense/details/legacy/configuration.cc b/source/LibMultiSense/details/legacy/configuration.cc index 92212a6e..29267d0b 100644 --- a/source/LibMultiSense/details/legacy/configuration.cc +++ b/source/LibMultiSense/details/legacy/configuration.cc @@ -66,6 +66,7 @@ MultiSenseConfig convert(const crl::multisense::details::wire::CamConfig &config const std::optional &aux_config, const std::optional &imu_config, const std::optional &led_config, + const std::optional &feature_config, const crl::multisense::details::wire::SysPacketDelay &packet_delay, bool ptp_enabled, const MultiSenseInfo::DeviceInfo &info, @@ -120,6 +121,9 @@ MultiSenseConfig convert(const crl::multisense::details::wire::CamConfig &config std::nullopt, (led_config && led_config->available) ? std::make_optional(convert(led_config.value(), info.lighting_type)) : + std::nullopt, + feature_config ? + std::make_optional(convert(feature_config.value())) : std::nullopt}; } diff --git a/source/LibMultiSense/include/MultiSense/MultiSenseChannel.hh b/source/LibMultiSense/include/MultiSense/MultiSenseChannel.hh index ea142cff..9153a08d 100644 --- a/source/LibMultiSense/include/MultiSense/MultiSenseChannel.hh +++ b/source/LibMultiSense/include/MultiSense/MultiSenseChannel.hh @@ -209,16 +209,6 @@ public: /// virtual Status set_config(const MultiSenseConfig &config) = 0; - /// - /// @brief Get the camera's feature detector configuration - /// - virtual std::optional get_feature_config() = 0; - - /// - /// @brief Set the camera's feature detector configuration - /// - virtual Status set_feature_config(const FeatureDetectorConfig &config) = 0; - /// /// @brief Get the current stereo calibration. 
The output calibration will correspond to the full-resolution /// operating mode of the camera diff --git a/source/LibMultiSense/include/MultiSense/MultiSenseSerialization.hh b/source/LibMultiSense/include/MultiSense/MultiSenseSerialization.hh index 5e3847cf..b3aedf90 100644 --- a/source/LibMultiSense/include/MultiSense/MultiSenseSerialization.hh +++ b/source/LibMultiSense/include/MultiSense/MultiSenseSerialization.hh @@ -349,7 +349,8 @@ NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE(MultiSenseConfig, time_config, network_config, imu_config, - lighting_config) + lighting_config, + feature_detector_config) NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE(MultiSenseStatus::PtpStatus, grandmaster_present, diff --git a/source/LibMultiSense/include/MultiSense/MultiSenseTypes.hh b/source/LibMultiSense/include/MultiSense/MultiSenseTypes.hh index 6a3f26fb..fd4d419c 100644 --- a/source/LibMultiSense/include/MultiSense/MultiSenseTypes.hh +++ b/source/LibMultiSense/include/MultiSense/MultiSenseTypes.hh @@ -382,6 +382,16 @@ struct FeatureDetectorConfig /// bool grouping_enabled = true; uint32_t motion_octave = 1; + + /// + /// @brief Equality operator + /// + bool operator==(const FeatureDetectorConfig &rhs) const + { + return number_of_features == rhs.number_of_features && + grouping_enabled == rhs.grouping_enabled && + motion_octave == rhs.motion_octave; + } }; /// @@ -1222,6 +1232,11 @@ struct MultiSenseConfig /// std::optional lighting_config = std::nullopt; + /// + /// @brief The feature detector configuration for the camera. 
If invalid, the camera does not support feature detection + /// + std::optional feature_detector_config = std::nullopt; + /// /// @brief Equality operator /// @@ -1237,7 +1252,8 @@ struct MultiSenseConfig time_config == rhs.time_config && network_config == rhs.network_config && imu_config == rhs.imu_config && - lighting_config == rhs.lighting_config; + lighting_config == rhs.lighting_config && + feature_detector_config == rhs.feature_detector_config; } }; diff --git a/source/LibMultiSense/include/details/legacy/channel.hh b/source/LibMultiSense/include/details/legacy/channel.hh index 227da03b..2dd06479 100644 --- a/source/LibMultiSense/include/details/legacy/channel.hh +++ b/source/LibMultiSense/include/details/legacy/channel.hh @@ -196,16 +196,6 @@ public: /// Status set_config(const MultiSenseConfig &config) override; - /// - /// @brief Get the camera's feature detector configuration - /// - std::optional get_feature_config() override; - - /// - /// @brief Set the camera's feature detector configuration - /// - Status set_feature_config(const FeatureDetectorConfig &config) override; - /// /// @brief Get the current stereo calibration. 
The output calibration will correspond to the full-resolution /// operating mode of the camera diff --git a/source/LibMultiSense/include/details/legacy/configuration.hh b/source/LibMultiSense/include/details/legacy/configuration.hh index 8d40621d..0eee198f 100644 --- a/source/LibMultiSense/include/details/legacy/configuration.hh +++ b/source/LibMultiSense/include/details/legacy/configuration.hh @@ -81,6 +81,7 @@ MultiSenseConfig convert(const crl::multisense::details::wire::CamConfig &config const std::optional &aux_config, const std::optional &imu_config, const std::optional &led_config, + const std::optional &feature_config, const crl::multisense::details::wire::SysPacketDelay &packet_delay, bool ptp_enabled, const MultiSenseInfo::DeviceInfo &info, diff --git a/source/LibMultiSense/test/configuration_test.cc b/source/LibMultiSense/test/configuration_test.cc index 9ddfdb17..57c0f29b 100644 --- a/source/LibMultiSense/test/configuration_test.cc +++ b/source/LibMultiSense/test/configuration_test.cc @@ -600,6 +600,7 @@ TEST(convert, cam_config) wire_aux_config, std::nullopt, std::nullopt, + std::nullopt, packet_config, false, create_device_info(1920, 1200), @@ -620,6 +621,7 @@ TEST(convert, cam_config_invalid_aux) std::nullopt, std::nullopt, std::nullopt, + std::nullopt, packet_config, false, create_device_info(1920, 1200), @@ -639,6 +641,7 @@ TEST(convert, cam_config_invalid_imu) std::nullopt, std::nullopt, std::nullopt, + std::nullopt, packet_config, false, create_device_info(1920, 1200), @@ -661,6 +664,7 @@ TEST(convert, cam_config_invalid_led) std::nullopt, std::nullopt, std::nullopt, + std::nullopt, packet_config, false, create_device_info(1920, 1200), @@ -687,6 +691,7 @@ TEST(convert, cam_config_valid_led_but_no_lights) std::nullopt, std::nullopt, std::make_optional(lighting_config), + std::nullopt, packet_config, false, create_device_info(1920, 1200), @@ -744,6 +749,7 @@ TEST(convert, network_config) std::nullopt, std::nullopt, std::nullopt, + std::nullopt, 
packet_config, false, create_device_info(1920, 1200), From 15014580065275a40f6e34aeab892da857d63e5e Mon Sep 17 00:00:00 2001 From: Matt Alvarado Date: Wed, 25 Mar 2026 23:39:36 -0400 Subject: [PATCH 10/20] Refined support including app registration --- python/bindings.cc | 45 ++-- .../LibMultiSense/details/legacy/channel.cc | 222 ++++++++++++++---- .../details/legacy/configuration.cc | 18 +- .../LibMultiSense/details/legacy/utilities.cc | 35 +++ .../MultiSense/MultiSenseSerialization.hh | 10 +- .../include/MultiSense/MultiSenseTypes.hh | 76 ++++-- .../include/details/legacy/channel.hh | 27 +++ .../include/details/legacy/configuration.hh | 10 +- .../include/details/legacy/utilities.hh | 15 ++ source/LibMultiSense/test/feature_test.cc | 12 +- source/Wire/include/wire/Protocol.hh | 5 + 11 files changed, 363 insertions(+), 112 deletions(-) diff --git a/python/bindings.cc b/python/bindings.cc index 8f7236cb..d275a378 100644 --- a/python/bindings.cc +++ b/python/bindings.cc @@ -164,28 +164,20 @@ PYBIND11_MODULE(_libmultisense, m) { .value("JPEG", multisense::Image::PixelFormat::JPEG) .value("H264", multisense::Image::PixelFormat::H264); - // KeyPoint - py::class_(m, "KeyPoint") + // FeatureKeyPoint + py::class_(m, "FeatureKeyPoint") .def(py::init<>()) - .def_readwrite("x", &multisense::KeyPoint::x) - .def_readwrite("y", &multisense::KeyPoint::y) - .def_readwrite("angle", &multisense::KeyPoint::angle) - .def_readwrite("response", &multisense::KeyPoint::response) - .def_readwrite("octave", &multisense::KeyPoint::octave) - .def_readwrite("class_id", &multisense::KeyPoint::class_id); - - // DescriptorType - py::enum_(m, "DescriptorType") - .value("UNKNOWN", multisense::DescriptorType::UNKNOWN) - .value("ORB", multisense::DescriptorType::ORB); - - // FeatureDetectorConfig - py::class_(m, "FeatureDetectorConfig") - .def(py::init<>()) - PYBIND11_JSON_SUPPORT(multisense::FeatureDetectorConfig) - .def_readwrite("number_of_features", 
&multisense::FeatureDetectorConfig::number_of_features) - .def_readwrite("grouping_enabled", &multisense::FeatureDetectorConfig::grouping_enabled) - .def_readwrite("motion_octave", &multisense::FeatureDetectorConfig::motion_octave); + .def_readwrite("x", &multisense::FeatureKeyPoint::x) + .def_readwrite("y", &multisense::FeatureKeyPoint::y) + .def_readwrite("angle", &multisense::FeatureKeyPoint::angle) + .def_readwrite("response", &multisense::FeatureKeyPoint::response) + .def_readwrite("octave", &multisense::FeatureKeyPoint::octave) + .def_readwrite("class_id", &multisense::FeatureKeyPoint::class_id); + + // FeatureDescriptorType + py::enum_(m, "FeatureDescriptorType") + .value("UNKNOWN", multisense::FeatureDescriptorType::UNKNOWN) + .value("ORB", multisense::FeatureDescriptorType::ORB); // FeatureMessage py::class_(m, "FeatureMessage") @@ -520,6 +512,14 @@ PYBIND11_MODULE(_libmultisense, m) { .def_readwrite("internal", &multisense::MultiSenseConfig::LightingConfig::internal) .def_readwrite("external", &multisense::MultiSenseConfig::LightingConfig::external); + // FeatureDetectorConfig + py::class_(m, "FeatureDetectorConfig") + .def(py::init<>()) + PYBIND11_JSON_SUPPORT(multisense::MultiSenseConfig::FeatureDetectorConfig) + .def_readwrite("number_of_features", &multisense::MultiSenseConfig::FeatureDetectorConfig::number_of_features) + .def_readwrite("grouping_enabled", &multisense::MultiSenseConfig::FeatureDetectorConfig::grouping_enabled) + .def_readwrite("motion_octave", &multisense::MultiSenseConfig::FeatureDetectorConfig::motion_octave); + // MultiSenseConfig py::class_(m, "MultiSenseConfig") .def(py::init<>()) @@ -535,7 +535,8 @@ PYBIND11_MODULE(_libmultisense, m) { .def_readwrite("time_config", &multisense::MultiSenseConfig::time_config) .def_readwrite("network_config", &multisense::MultiSenseConfig::network_config) .def_readwrite("imu_config", &multisense::MultiSenseConfig::imu_config) - .def_readwrite("lighting_config", 
&multisense::MultiSenseConfig::lighting_config); + .def_readwrite("lighting_config", &multisense::MultiSenseConfig::lighting_config) + .def_readwrite("feature_detector_config", &multisense::MultiSenseConfig::feature_detector_config); // MultiSenseStatus::PtpStatus py::class_(m, "PtpStatus") diff --git a/source/LibMultiSense/details/legacy/channel.cc b/source/LibMultiSense/details/legacy/channel.cc index 739f22bc..0e0b4956 100644 --- a/source/LibMultiSense/details/legacy/channel.cc +++ b/source/LibMultiSense/details/legacy/channel.cc @@ -98,6 +98,8 @@ #include #include #include +#include +#include #include #include #include @@ -346,6 +348,11 @@ Status LegacyChannel::connect(const Config &config) CRL_EXCEPTION("Unable to query the camera's configuration"); } + // + // Check secondary applications available + // + m_avaliable_secondary_applications = query_available_secondary_applications(); + m_connected = true; return Status::OK; @@ -570,38 +577,60 @@ Status LegacyChannel::set_config(const MultiSenseConfig &config) // if (config.feature_detector_config) { - // We first need to get the current config to preserve internal options - const auto secondary_config = wait_for_data(m_message_assembler, - m_socket, - wire::SecondaryAppGetConfig(), - m_transmit_id++, - m_current_mtu, - m_config.receive_timeout); - - wire::FeatureDetectorConfigParams params = convert(config.feature_detector_config.value()); - if (secondary_config && secondary_config->dataLength >= sizeof(wire::FeatureDetectorConfigParams)) + // + // Activate the application + // + if (const auto status = manage_secondary_application(SecondaryApplication::FEATURE_DETECTOR, true); status != Status::OK) { - std::memcpy(¶ms, secondary_config->data, sizeof(wire::FeatureDetectorConfigParams)); - params.numberOfFeatures = config.feature_detector_config->number_of_features; - params.grouping = config.feature_detector_config->grouping_enabled; - params.motion = config.feature_detector_config->motion_octave; + 
responses.push_back(status); } - wire::SecondaryAppControl control; - control.dataLength = sizeof(wire::FeatureDetectorConfigParams); - std::memcpy(control.data, ¶ms, sizeof(wire::FeatureDetectorConfigParams)); + if (m_active_secondary_application && + m_active_secondary_application.value() == SecondaryApplication::FEATURE_DETECTOR) + { + // We first need to get the current config to preserve internal options + const auto secondary_config = wait_for_data(m_message_assembler, + m_socket, + wire::SecondaryAppGetConfig(), + m_transmit_id++, + m_current_mtu, + m_config.receive_timeout); + + wire::FeatureDetectorConfigParams params = convert(config.feature_detector_config.value()); + if (secondary_config && secondary_config->dataLength >= sizeof(wire::FeatureDetectorConfigParams)) + { + std::memcpy(¶ms, secondary_config->data, sizeof(wire::FeatureDetectorConfigParams)); + params.numberOfFeatures = config.feature_detector_config->number_of_features; + params.grouping = config.feature_detector_config->grouping_enabled; + params.motion = config.feature_detector_config->motion_octave; + } - const auto feature_ack = wait_for_ack(m_message_assembler, - m_socket, - control, - m_transmit_id++, - m_current_mtu, - m_config.receive_timeout, - setting_retries); + wire::SecondaryAppControl control; + control.dataLength = sizeof(wire::FeatureDetectorConfigParams); + std::memcpy(control.data, ¶ms, sizeof(wire::FeatureDetectorConfigParams)); - if (!feature_ack || feature_ack->status != wire::Ack::Status_Ok) + const auto feature_ack = wait_for_ack(m_message_assembler, + m_socket, + control, + m_transmit_id++, + m_current_mtu, + m_config.receive_timeout, + setting_retries); + + if (!feature_ack || feature_ack->status != wire::Ack::Status_Ok) + { + responses.push_back(get_status(feature_ack->status)); + } + } + } + else + { + // + // Deactivate the application + // + if (const auto status = manage_secondary_application(SecondaryApplication::FEATURE_DETECTOR, false); status != Status::OK) 
{ - responses.push_back(get_status(feature_ack->status)); + responses.push_back(status); } } @@ -962,7 +991,7 @@ std::optional LegacyChannel::query_configuration(bool has_aux_ { wire::FeatureDetectorConfigParams params; std::memcpy(¶ms, secondary_config->data, sizeof(wire::FeatureDetectorConfigParams)); - feature_config = params; + feature_config = std::move(params); } if (camera_config) @@ -1123,6 +1152,97 @@ Status LegacyChannel::stop_streams_internal(const std::vector &sourc return Status::TIMEOUT; } +std::optional> LegacyChannel::query_available_secondary_applications() +{ + using namespace crl::multisense::details; + + if (const auto applications = wait_for_data(m_message_assembler, + m_socket, + wire::SecondaryAppGetRegisteredApps(), + m_transmit_id++, + m_current_mtu, + m_config.receive_timeout); applications) + { + return std::make_optional(convert(applications.value())); + } + + return std::nullopt; +} + +Status LegacyChannel::manage_secondary_application(const SecondaryApplication &app, bool activate) +{ + if (m_active_secondary_application && m_active_secondary_application.value() == app && activate) + { + return Status::OK; + } + + if (!m_active_secondary_application && !activate) + { + return Status::OK; + } + + if (!m_avaliable_secondary_applications || !supported_application(m_avaliable_secondary_applications.value(), app)) + { + return Status::UNSUPPORTED; + } + + // + // Deactivate the current application first if one is running and we want to activate a new application + // + if (activate && m_active_secondary_application && m_active_secondary_application.value() != app) + { + if (const auto status = manage_secondary_application_raw(m_active_secondary_application.value(), false); status == Status::OK) + { + m_active_secondary_application = std::nullopt; + } + else + { + return status; + } + } + + // + // Manage the application + // + if (const auto status = manage_secondary_application_raw(app, activate); status == Status::OK) + { + 
m_active_secondary_application = activate ? std::make_optional(app) : std::nullopt; + } + else + { + return status; + } + + return Status::OK; +} + +Status LegacyChannel::manage_secondary_application_raw(const SecondaryApplication &app, bool activate) +{ + using namespace crl::multisense::details; + + const auto app_string = application_string(app); + + wire::SecondaryAppActivate cmd(activate ? 1 : 0, app_string); + + if (const auto ack = wait_for_ack(m_message_assembler, + m_socket, + cmd, + m_transmit_id++, + m_current_mtu, + m_config.receive_timeout); ack) + { + if (ack->status != wire::Ack::Status_Ok) + { + CRL_DEBUG("Manage secondary app %s ack invalid: %" PRIi32 "\n", app_string.c_str(), ack->status); + return get_status(ack->status); + } + + return Status::OK; + } + + return Status::TIMEOUT; +} + void LegacyChannel::secondary_app_meta_callback(std::shared_ptr> data) { using namespace crl::multisense::details; @@ -1150,43 +1270,43 @@ void LegacyChannel::secondary_app_data_callback(std::shared_ptr(meta->second.dataP), meta->second.dataLength); - wire::FeatureDetectorMeta feature_meta(metaStream, wire::FeatureDetectorMeta::VERSION); + utility::BufferStreamReader meta_stream(reinterpret_cast(meta->second.dataP), meta->second.dataLength); + wire::FeatureDetectorMeta feature_meta(meta_stream, wire::FeatureDetectorMeta::VERSION); utility::BufferStreamReader stream(reinterpret_cast(wire_data.dataP), wire_data.length); - wire::FeatureDetector featureDetector(stream, wire::FeatureDetector::VERSION); + wire::FeatureDetector detector(stream, wire::FeatureDetector::VERSION); FeatureMessage feature_msg; feature_msg.source = source[0]; - feature_msg.descriptor_type = DescriptorType::ORB; + feature_msg.descriptor_type = FeatureDescriptorType::ORB; - const size_t startDescriptor = featureDetector.numFeatures * sizeof(wire::Feature); - uint8_t * feature_data = reinterpret_cast(featureDetector.dataP); + const size_t start_descriptor = detector.numFeatures * 
sizeof(wire::Feature); + uint8_t * feature_data = reinterpret_cast(detector.dataP); - feature_msg.keypoints.reserve(featureDetector.numFeatures); - for (size_t i = 0; i < featureDetector.numFeatures; i++) + feature_msg.keypoints.reserve(detector.numFeatures); + for (size_t i = 0; i < detector.numFeatures; i++) { - wire::Feature f = *reinterpret_cast(feature_data + (i * sizeof(wire::Feature))); - KeyPoint kp; - kp.x = f.x; - kp.y = f.y; - kp.angle = f.angle; - kp.response = f.resp; - kp.octave = f.octave; - kp.class_id = f.descriptor; - feature_msg.keypoints.push_back(kp); + const auto f = reinterpret_cast(feature_data + (i * sizeof(wire::Feature))); + + FeatureKeyPoint keypoint; + keypoint.x = f->x; + keypoint.y = f->y; + keypoint.angle = f->angle; + keypoint.response = f->resp; + keypoint.octave = f->octave; + keypoint.class_id = f->descriptor; + feature_msg.keypoints.push_back(std::move(keypoint)); } - feature_msg.descriptors.reserve(featureDetector.numDescriptors * sizeof(wire::Descriptor)); - for (size_t j = 0; j < featureDetector.numDescriptors; j++) + feature_msg.descriptors.reserve(detector.numDescriptors * sizeof(wire::Descriptor)); + for (size_t j = 0; j < detector.numDescriptors; j++) { - wire::Descriptor d = *reinterpret_cast(feature_data + (startDescriptor + (j * sizeof(wire::Descriptor)))); + wire::Descriptor d = *reinterpret_cast(feature_data + (start_descriptor + (j * sizeof(wire::Descriptor)))); const uint8_t* d_bytes = reinterpret_cast(d.d); feature_msg.descriptors.insert(feature_msg.descriptors.end(), d_bytes, d_bytes + sizeof(wire::Descriptor)); } - const nanoseconds capture_time{seconds{wire_data.timeSeconds} + - microseconds{wire_data.timeMicroSeconds}}; + const nanoseconds capture_time{seconds{wire_data.timeSeconds} + microseconds{wire_data.timeMicroSeconds}}; const TimeT capture_time_point{capture_time}; const TimeT ptp_capture_time_point{nanoseconds{feature_meta.ptpNanoSeconds}}; @@ -1195,7 +1315,9 @@ void 
LegacyChannel::secondary_app_data_callback(std::shared_ptr convert(const crl::multisense::details::wire::SecondaryAppRegisteredApps &apps) +{ + std::vector output; + + for (const auto &app: apps.apps) + { + output.emplace_back(secondary_application(app.appName)); + } + + return output; +} + MultiSenseConfig::NetworkTransmissionConfig convert(const crl::multisense::details::wire::SysPacketDelay &packet) { diff --git a/source/LibMultiSense/details/legacy/utilities.cc b/source/LibMultiSense/details/legacy/utilities.cc index e0bb0b0e..3bc7c129 100644 --- a/source/LibMultiSense/details/legacy/utilities.cc +++ b/source/LibMultiSense/details/legacy/utilities.cc @@ -213,6 +213,41 @@ std::vector expand_source(const DataSource &source) } } +std::string application_string(const SecondaryApplication &app) +{ + switch (app) + { + case SecondaryApplication::FEATURE_DETECTOR: + { + return crl::multisense::details::wire::SECONDARY_APP_FEATURE_DETECTOR; + } + case SecondaryApplication::NONE: + default: + { + return ""; + } + + } +} + +SecondaryApplication secondary_application(const std::string &app) +{ + if (app == crl::multisense::details::wire::SECONDARY_APP_FEATURE_DETECTOR) + { + return SecondaryApplication::FEATURE_DETECTOR; + } + else + { + return SecondaryApplication::NONE; + } +} + + +bool supported_application(const std::vector &available_apps, const SecondaryApplication &target_app) +{ + return std::find(std::begin(available_apps), std::end(available_apps), target_app) != std::end(available_apps); +} + ImuSample add_wire_sample(ImuSample sample, const crl::multisense::details::wire::ImuSample &wire, const ImuSampleScalars &scalars) diff --git a/source/LibMultiSense/include/MultiSense/MultiSenseSerialization.hh b/source/LibMultiSense/include/MultiSense/MultiSenseSerialization.hh index b3aedf90..ab90d520 100644 --- a/source/LibMultiSense/include/MultiSense/MultiSenseSerialization.hh +++ b/source/LibMultiSense/include/MultiSense/MultiSenseSerialization.hh @@ -338,6 +338,11 
@@ NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE(MultiSenseConfig::ImuConfig, gyroscope, magnetometer) +NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE(MultiSenseConfig::FeatureDetectorConfig, + number_of_features, + grouping_enabled, + motion_octave) + NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE(MultiSenseConfig, width, height, @@ -469,11 +474,6 @@ NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE(MultiSenseInfo, imu, network) -NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE(FeatureDetectorConfig, - number_of_features, - grouping_enabled, - motion_octave) - NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE(Channel::ReceiveBufferConfig, num_small_buffers, small_buffer_size, diff --git a/source/LibMultiSense/include/MultiSense/MultiSenseTypes.hh b/source/LibMultiSense/include/MultiSense/MultiSenseTypes.hh index fd4d419c..3d4590ca 100644 --- a/source/LibMultiSense/include/MultiSense/MultiSenseTypes.hh +++ b/source/LibMultiSense/include/MultiSense/MultiSenseTypes.hh @@ -325,7 +325,7 @@ struct ImageHistogram /// /// @brief Represents a generic feature keypoint /// -struct KeyPoint +struct FeatureKeyPoint { /// /// @brief x location of the keypoint in the output image /// @@ -361,47 +361,35 @@ struct KeyPoint /// /// @brief The descriptor type for features /// -enum class DescriptorType : uint8_t +enum class FeatureDescriptorType : uint8_t { UNKNOWN, ORB }; /// -/// @brief Configuration for the on-camera feature detector +/// @brief A collection of features and their descriptors /// -struct FeatureDetectorConfig +struct FeatureMessage { /// - /// @brief Max number of features to detect + /// @brief DataSource associated with the features /// - uint32_t number_of_features = 1500; + DataSource source = DataSource::UNKNOWN; /// - /// @brief Attempt to group features with similar class ids + /// @brief The type of feature descriptor used /// - bool grouping_enabled = true; - uint32_t motion_octave = 1; + FeatureDescriptorType descriptor_type = FeatureDescriptorType::UNKNOWN; /// - /// @brief Equality operator + /// @brief Image keypoints for features 
/// - bool operator==(const FeatureDetectorConfig &rhs) const - { - return number_of_features == rhs.number_of_features && - grouping_enabled == rhs.grouping_enabled && - motion_octave == rhs.motion_octave; - } -}; + std::vector keypoints{}; -/// -/// @brief A collection of features and their descriptors -/// -struct FeatureMessage -{ - DataSource source = DataSource::UNKNOWN; - DescriptorType descriptor_type = DescriptorType::UNKNOWN; - std::vector keypoints{}; + /// + /// @brief Raw descriptors for the features + /// std::vector descriptors{}; #ifdef HAVE_OPENCV @@ -647,6 +635,15 @@ struct ImuRange } }; +/// +/// @brief supported secondary applications +/// +enum class SecondaryApplication : uint8_t +{ + NONE, + FEATURE_DETECTOR +}; + /// /// @brief Complete configuration object for configuring the MultiSense. Can be updated during camera operation @@ -1174,6 +1171,35 @@ struct MultiSenseConfig } }; + /// + /// @brief Configuration for the on-camera feature detector + /// + struct FeatureDetectorConfig + { + + /// + /// @brief Max number of features to detect + /// + uint32_t number_of_features = 1500; + + /// + /// @brief Attempt to group features with similar class ids + /// + bool grouping_enabled = true; + + uint32_t motion_octave = 1; + + /// + /// @brief Equality operator + /// + bool operator==(const FeatureDetectorConfig &rhs) const + { + return number_of_features == rhs.number_of_features && + grouping_enabled == rhs.grouping_enabled && + motion_octave == rhs.motion_octave; + } + }; + /// /// @brief The operating width of the MultiSense in pixels. 
For available operating resolutions /// see the MultiSenseInfo::SupportedOperatingMode diff --git a/source/LibMultiSense/include/details/legacy/channel.hh b/source/LibMultiSense/include/details/legacy/channel.hh index 2dd06479..e900df0a 100644 --- a/source/LibMultiSense/include/details/legacy/channel.hh +++ b/source/LibMultiSense/include/details/legacy/channel.hh @@ -268,6 +268,23 @@ private: /// Status stop_streams_internal(const std::vector &sources); + /// + /// @brief Query the full set of secondary applications which the camera supports. Note there is an assumption + /// only one secondary application can be active at a given time + /// + std::optional> query_available_secondary_applications(); + + /// + /// @brief Manage secondary application on the camera. Handle auto deactivate + /// + Status manage_secondary_application(const SecondaryApplication &app, bool activate); + + /// + /// @brief Manage secondary application on the camera. Send raw control messages without regard for + /// the current state + /// + Status manage_secondary_application_raw(const SecondaryApplication &app, bool activate); + /// /// @brief Image meta callback used to internally receive images sent from the MultiSense /// @@ -379,6 +396,16 @@ private: /// std::set m_active_streams{}; + /// + /// @brief The current active secondary application + /// + std::optional m_active_secondary_application{}; + + /// + /// @brief Secondary applications which are available + /// + std::optional> m_avaliable_secondary_applications{}; + /// /// @brief The currently active image frame user callback /// diff --git a/source/LibMultiSense/include/details/legacy/configuration.hh b/source/LibMultiSense/include/details/legacy/configuration.hh index 0eee198f..83b4d8b3 100644 --- a/source/LibMultiSense/include/details/legacy/configuration.hh +++ b/source/LibMultiSense/include/details/legacy/configuration.hh @@ -51,6 +51,7 @@ #include #include #include +#include #include #include @@ -62,12 +63,12 @@ namespace 
legacy { /// /// @brief Convert a wire feature detector config to our API config object /// -FeatureDetectorConfig convert(const crl::multisense::details::wire::FeatureDetectorConfigParams &params); +MultiSenseConfig::FeatureDetectorConfig convert(const crl::multisense::details::wire::FeatureDetectorConfigParams &params); /// /// @brief Convert our API config object to a wire message /// -crl::multisense::details::wire::FeatureDetectorConfigParams convert(const FeatureDetectorConfig &config); +crl::multisense::details::wire::FeatureDetectorConfigParams convert(const MultiSenseConfig::FeatureDetectorConfig &config); /// /// @brief Convert a wire AuxCameraConfiguration to our AuxConfig API /// @@ -127,6 +128,11 @@ MultiSenseConfig::LightingConfig convert(const crl::multisense::details::wire::L /// crl::multisense::details::wire::LedSet convert (const MultiSenseConfig::LightingConfig &led); +/// +/// @brief Convert a wire secondary application configuration to a vector of applications +/// +std::vector convert(const crl::multisense::details::wire::SecondaryAppRegisteredApps &apps); + /// /// @brief Create a API transmission config object from wire types /// diff --git a/source/LibMultiSense/include/details/legacy/utilities.hh b/source/LibMultiSense/include/details/legacy/utilities.hh index 994e4f65..e35a63fb 100644 --- a/source/LibMultiSense/include/details/legacy/utilities.hh +++ b/source/LibMultiSense/include/details/legacy/utilities.hh @@ -143,6 +143,21 @@ crl::multisense::details::wire::SourceType convert_sources(const std::vector expand_source(const DataSource &source); +/// +/// @brief Convert a secondary application to a string +/// +std::string application_string(const SecondaryApplication &app); + +/// +/// @brief Convert a string to a secondary application +/// +SecondaryApplication secondary_application(const std::string &app); + +/// +/// @brief Check if an application is supported +/// +bool supported_application(const std::vector &available_apps, const 
SecondaryApplication &target_app); + /// /// @brief Add a wire sample to a ImuSample /// diff --git a/source/LibMultiSense/test/feature_test.cc b/source/LibMultiSense/test/feature_test.cc index 5093dd31..e7819eda 100644 --- a/source/LibMultiSense/test/feature_test.cc +++ b/source/LibMultiSense/test/feature_test.cc @@ -51,9 +51,9 @@ TEST(FeatureMessage, Basic) { FeatureMessage msg; msg.source = DataSource::LEFT_ORB_FEATURES; - msg.descriptor_type = DescriptorType::ORB; + msg.descriptor_type = FeatureDescriptorType::ORB; - KeyPoint kp1; + FeatureKeyPoint kp1; kp1.x = 10.5f; kp1.y = 20.2f; kp1.response = 100.0f; @@ -67,7 +67,7 @@ TEST(FeatureMessage, Basic) msg.descriptors.insert(msg.descriptors.end(), desc.begin(), desc.end()); EXPECT_EQ(msg.source, DataSource::LEFT_ORB_FEATURES); - EXPECT_EQ(msg.descriptor_type, DescriptorType::ORB); + EXPECT_EQ(msg.descriptor_type, FeatureDescriptorType::ORB); EXPECT_EQ(msg.keypoints.size(), 1); EXPECT_EQ(msg.descriptors.size(), 32); EXPECT_FLOAT_EQ(msg.keypoints[0].x, 10.5f); @@ -95,11 +95,11 @@ TEST(FeatureMessage, OpenCVConversion) { FeatureMessage msg; msg.source = DataSource::LEFT_ORB_FEATURES; - msg.descriptor_type = DescriptorType::ORB; + msg.descriptor_type = FeatureDescriptorType::ORB; for (int i = 0; i < 5; ++i) { - KeyPoint kp; + FeatureKeyPoint kp; kp.x = static_cast(i * 10); kp.y = static_cast(i * 20); kp.angle = 45.0f; @@ -191,7 +191,7 @@ TEST(FeatureWire, Serialization) TEST(FeatureConfig, Basic) { - FeatureDetectorConfig config; + MultiSenseConfig::FeatureDetectorConfig config; config.number_of_features = 2000; config.grouping_enabled = false; config.motion_octave = 2; diff --git a/source/Wire/include/wire/Protocol.hh b/source/Wire/include/wire/Protocol.hh index e444fa90..f06d3864 100644 --- a/source/Wire/include/wire/Protocol.hh +++ b/source/Wire/include/wire/Protocol.hh @@ -338,6 +338,11 @@ static CRL_CONSTEXPR uint32_t MAX_LIGHTS = 8; static CRL_CONSTEXPR float WIRE_IMAGER_GAIN_MAX = 1000.0f; +// +// Secondary 
Application names + +static CRL_CONSTEXPR char SECONDARY_APP_FEATURE_DETECTOR[] = "FeatureDetector"; + // // Some helper macros From 4d5c16824e2c35d03e30d2a22bbccede0d05db32 Mon Sep 17 00:00:00 2001 From: Matt Alvarado Date: Thu, 26 Mar 2026 10:22:28 -0400 Subject: [PATCH 11/20] Add feature detector utility --- source/Utilities/LibMultiSense/CMakeLists.txt | 1 + .../FeatureDetectorUtility/CMakeLists.txt | 12 ++ .../FeatureDetectorUtility.cc | 180 ++++++++++++++++++ .../feature_detector_utility.py | 100 ++++++++++ .../SaveImageUtility/CMakeLists.txt | 1 - 5 files changed, 293 insertions(+), 1 deletion(-) create mode 100644 source/Utilities/LibMultiSense/FeatureDetectorUtility/CMakeLists.txt create mode 100644 source/Utilities/LibMultiSense/FeatureDetectorUtility/FeatureDetectorUtility.cc create mode 100755 source/Utilities/LibMultiSense/FeatureDetectorUtility/feature_detector_utility.py diff --git a/source/Utilities/LibMultiSense/CMakeLists.txt b/source/Utilities/LibMultiSense/CMakeLists.txt index c6900859..7a8e52c4 100644 --- a/source/Utilities/LibMultiSense/CMakeLists.txt +++ b/source/Utilities/LibMultiSense/CMakeLists.txt @@ -14,6 +14,7 @@ endif() add_subdirectory(ChangeIpUtility) add_subdirectory(DeviceInfoUtility) +add_subdirectory(FeatureDetectorUtility) add_subdirectory(ImageCalUtility) add_subdirectory(MultiChannelUtility) add_subdirectory(PointCloudUtility) diff --git a/source/Utilities/LibMultiSense/FeatureDetectorUtility/CMakeLists.txt b/source/Utilities/LibMultiSense/FeatureDetectorUtility/CMakeLists.txt new file mode 100644 index 00000000..5b087015 --- /dev/null +++ b/source/Utilities/LibMultiSense/FeatureDetectorUtility/CMakeLists.txt @@ -0,0 +1,12 @@ +if(OpenCV_FOUND) + add_executable(FeatureDetectorUtility FeatureDetectorUtility.cc) + + target_link_libraries (FeatureDetectorUtility ${MULTISENSE_UTILITY_LIBS}) + + install(TARGETS FeatureDetectorUtility RUNTIME DESTINATION "bin") +else() + message("Missing OpenCV. 
Not building the c++ FeatureDetectorUtility") +endif() +install(FILES feature_detector_utility.py PERMISSIONS OWNER_EXECUTE OWNER_WRITE OWNER_READ + DESTINATION "bin") + diff --git a/source/Utilities/LibMultiSense/FeatureDetectorUtility/FeatureDetectorUtility.cc b/source/Utilities/LibMultiSense/FeatureDetectorUtility/FeatureDetectorUtility.cc new file mode 100644 index 00000000..e08c41f6 --- /dev/null +++ b/source/Utilities/LibMultiSense/FeatureDetectorUtility/FeatureDetectorUtility.cc @@ -0,0 +1,180 @@ +/** + * @file FeatureDetectorUtility.cc + * + * Copyright 2013-2026 + * Carnegie Robotics, LLC + * 4501 Hatfield Street, Pittsburgh, PA 15201 + * http://www.carnegierobotics.com + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Carnegie Robotics, LLC nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL CARNEGIE ROBOTICS, LLC BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Significant history (date, user, job code, action): + * 2026-03-26, malvarado@carnegierobotics.com, IRAD, Created file. + **/ + +#ifdef WIN32 +#ifndef WIN32_LEAN_AND_MEAN +#define WIN32_LEAN_AND_MEAN 1 +#endif + +#include +#include +#else +#include +#endif + +#include +#include +#include + +#define HAVE_OPENCV 1 +#include + +#include +#include + +#include "getopt/getopt.h" + +namespace lms = multisense; + +namespace +{ + +volatile bool done = false; + +void usage(const char *name) +{ + std::cerr << "USAGE: " << name << " []" << std::endl; + std::cerr << "Where are:" << std::endl; + std::cerr << "\t-a : CURRENT IPV4 address (default=10.66.171.21)" << std::endl; + std::cerr << "\t-m : MTU to use to communicate with the camera (default=1500)" << std::endl; + std::cerr << "\t-n : Max number of features to detect (default=1500)" << std::endl; + std::cerr << "\t-f : Framerate (default=10)" << std::endl; + std::cerr << "\t-h : Show help" << std::endl; + exit(1); +} + +#ifdef WIN32 +BOOL WINAPI signal_handler(DWORD dwCtrlType) +{ + (void) dwCtrlType; + done = true; + return TRUE; +} +#else +void signal_handler(int sig) +{ + (void) sig; + done = true; +} +#endif + +} + +int main(int argc, char** argv) +{ + using namespace std::chrono_literals; + +#if WIN32 + SetConsoleCtrlHandler (signal_handler, TRUE); +#else + signal(SIGINT, signal_handler); +#endif + + std::string ip_address = "10.66.171.21"; + int16_t mtu = 1500; + uint32_t number_of_features = 
1500; + size_t fps = 10; + + int c; + while(-1 != (c = getopt(argc, argv, "a:m:n:f:h"))) + { + switch(c) + { + case 'a': ip_address = std::string(optarg); break; + case 'm': mtu = static_cast(atoi(optarg)); break; + case 'n': number_of_features = static_cast(atoi(optarg)); break; + case 'f': fps = static_cast(atoi(optarg)); break; + default: usage(*argv); break; + } + } + + const auto channel = lms::Channel::create(lms::Channel::Config{ip_address, mtu}); + if (!channel) + { + std::cerr << "Failed to create channel" << std::endl;; + return 1; + } + + auto config = channel->get_config(); + config.frames_per_second = fps; + config.feature_detector_config = lms::MultiSenseConfig::FeatureDetectorConfig{number_of_features, true, 1}; + if (const auto status = channel->set_config(config); status != lms::Status::OK && status != lms::Status::INCOMPLETE_APPLICATION) + { + std::cout << "Feature detector not supported: " << lms::to_string(status) << std::endl; + exit(1); + } + + + // Start both the rectified image and the corresponding feature stream + const std::vector sources = {lms::DataSource::LEFT_MONO_RAW, lms::DataSource::LEFT_ORB_FEATURES}; + + if (const auto status = channel->start_streams(sources); status != lms::Status::OK) + { + std::cerr << "Cannot start streams: " << lms::to_string(status) << std::endl; + return 1; + } + + while(!done) + { + if (const auto frame = channel->get_next_image_frame(); frame) + { + if (frame->has_image(lms::DataSource::LEFT_MONO_RAW)) + { + cv::Mat img = frame->get_image(lms::DataSource::LEFT_MONO_RAW).cv_mat(); + cv::Mat display_img; + cv::cvtColor(img, display_img, cv::COLOR_GRAY2BGR); + + if (frame->has_feature(lms::DataSource::LEFT_ORB_FEATURES)) + { + const auto& features = frame->get_feature(lms::DataSource::LEFT_ORB_FEATURES); + + // Use the native OpenCV utility to convert keypoints and draw them + cv::drawKeypoints(display_img, features.cv_keypoints(), display_img, cv::Scalar(0, 255, 0)); + } + + cv::imshow("MultiSense 
Features", display_img); + if (cv::waitKey(1) == 'q') + { + break; + } + } + } + } + + channel->stop_streams({lms::DataSource::ALL}); + + return 0; +} diff --git a/source/Utilities/LibMultiSense/FeatureDetectorUtility/feature_detector_utility.py b/source/Utilities/LibMultiSense/FeatureDetectorUtility/feature_detector_utility.py new file mode 100755 index 00000000..3a9ce2ee --- /dev/null +++ b/source/Utilities/LibMultiSense/FeatureDetectorUtility/feature_detector_utility.py @@ -0,0 +1,100 @@ +#!/usr/bin/env python +# +# @file image_cal_utility.cc +# +# Copyright 2013-2026 +# Carnegie Robotics, LLC +# 4501 Hatfield Street, Pittsburgh, PA 15201 +# http://www.carnegierobotics.com +# +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of the Carnegie Robotics, LLC nor the +# names of its contributors may be used to endorse or promote products +# derived from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. 
IN NO EVENT SHALL CARNEGIE ROBOTICS, LLC BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# Significant history (date, user, job code, action): +# 2026-03-26, malvarado@carnegierobotics.com, IRAD, Created file. +# + +import argparse +import cv2 +from jsondiff import diff + +import libmultisense as lms + +def main(args): + channel_config = lms.ChannelConfig() + channel_config.ip_address = args.ip_address + channel_config.mtu = args.mtu + + with lms.Channel.create(channel_config) as channel: + if not channel: + print("Invalid channel") + exit(1) + + config = channel.get_config() + if config: + config.frames_per_second = args.fps + # Set feature configuration + config.feature_detector_config = lms.FeatureDetectorConfig() + config.feature_detector_config.number_of_features = args.num_features + config.feature_detector_config.grouping_enabled = True + config.feature_detector_config.motion_octave = 1 + status = channel.set_config(config) + if status != lms.Status.OK or status != lms.Status.INCOMPLETE_APPLICATION: + print("Cannot enable feature detector", lms.to_string(status)) + + + # Start both the rectified image and the corresponding feature stream + sources = [lms.DataSource.LEFT_MONO_RAW, lms.DataSource.LEFT_ORB_FEATURES] + if channel.start_streams(sources) != lms.Status.OK: + print("Unable to start streams") + exit(1) + + while True: + frame = channel.get_next_image_frame() + if frame and frame.has_image(lms.DataSource.LEFT_MONO_RAW): + img = frame.get_image(lms.DataSource.LEFT_MONO_RAW).as_array + + # Convert grayscale to BGR for color 
rendering + display_img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) + + # Draw features on the image + if frame.has_feature(lms.DataSource.LEFT_ORB_FEATURES): + features = frame.get_feature(lms.DataSource.LEFT_ORB_FEATURES) + print(f"Frame {frame.frame_id}: Received {len(features.keypoints)} features") + + for kp in features.keypoints: + cv2.circle(display_img, (int(kp.x), int(kp.y)), 3, (0, 255, 0), -1) + + cv2.imshow("MultiSense Features", display_img) + if cv2.waitKey(1) & 0xFF == ord('q'): + break + +if __name__ == '__main__': + parser = argparse.ArgumentParser("LibMultiSense feature detector utility") + parser.add_argument("-a", "--ip_address", default="10.66.171.21", help="The IPv4 address of the MultiSense.") + parser.add_argument("-m", "--mtu", type=int, default=1500, help="The MTU to use to communicate with the camera.") + parser.add_argument("-n", "--num-features", type=int, default=1500, help="The max number of features to target.") + parser.add_argument("-f", "--fps", type=int, default=10, help="The framerate of the camera.") + main(parser.parse_args()) + diff --git a/source/Utilities/LibMultiSense/SaveImageUtility/CMakeLists.txt b/source/Utilities/LibMultiSense/SaveImageUtility/CMakeLists.txt index 5c551430..5647b281 100644 --- a/source/Utilities/LibMultiSense/SaveImageUtility/CMakeLists.txt +++ b/source/Utilities/LibMultiSense/SaveImageUtility/CMakeLists.txt @@ -1,4 +1,3 @@ - add_executable(SaveImageUtility SaveImageUtility.cc) target_link_libraries (SaveImageUtility ${MULTISENSE_UTILITY_LIBS}) From eec3c3ead3f124e2056538fea276f35c14fddcd7 Mon Sep 17 00:00:00 2001 From: Matt Alvarado Date: Thu, 26 Mar 2026 11:20:36 -0400 Subject: [PATCH 12/20] In process debug --- CMakeLists.txt | 3 +- .../LibMultiSense/details/legacy/channel.cc | 52 +++++++++++++++++-- .../FeatureDetectorUtility.cc | 4 +- 3 files changed, 54 insertions(+), 5 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 2bcbb16c..dd4aebc3 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt 
@@ -61,7 +61,8 @@ endif() if (NOT CMAKE_BUILD_TYPE AND NOT CMAKE_CONFIGURATION_TYPES) message(STATUS "Selecting default build type: Release") - set(CMAKE_BUILD_TYPE "Release" CACHE STRING "Build type (default: Release)" FORCE) + #set(CMAKE_BUILD_TYPE "Release" CACHE STRING "Build type (default: Release)" FORCE) + set(CMAKE_BUILD_TYPE "RelWithDebInfo" CACHE STRING "Build type (default: RelWithDebInfo)" FORCE) endif () # diff --git a/source/LibMultiSense/details/legacy/channel.cc b/source/LibMultiSense/details/legacy/channel.cc index 0e0b4956..f159c9a7 100644 --- a/source/LibMultiSense/details/legacy/channel.cc +++ b/source/LibMultiSense/details/legacy/channel.cc @@ -367,6 +367,14 @@ void LegacyChannel::disconnect() return; } + // + // Deactivate our current secondary application if one exists + // + if (m_active_secondary_application) + { + manage_secondary_application(m_active_secondary_application.value(), false); + } + // // Stop all our streams before disconnecting // @@ -1245,6 +1253,7 @@ Status LegacyChannel::manage_secondary_application_raw(const SecondaryApplicatio void LegacyChannel::secondary_app_meta_callback(std::shared_ptr> data) { + std::cout << "got feature meta" << std::endl; using namespace crl::multisense::details; const auto wire_meta = deserialize(*data); m_secondary_app_meta_cache[wire_meta.frameId] = wire_meta; @@ -1252,6 +1261,7 @@ void LegacyChannel::secondary_app_meta_callback(std::shared_ptr> data) { + std::cout << "got feature" << std::endl; using namespace crl::multisense::details; using namespace std::chrono; @@ -1283,6 +1293,8 @@ void LegacyChannel::secondary_app_data_callback(std::shared_ptr(detector.dataP); + std::cout << wire_data.frameId << " " << detector.numFeatures << std::endl; + feature_msg.keypoints.reserve(detector.numFeatures); for (size_t i = 0; i < detector.numFeatures; i++) { @@ -1330,6 +1342,20 @@ void LegacyChannel::handle_and_dispatch_feature(FeatureMessage feature, int64_t 
m_frame_buffer[frame_id].add_feature(std::move(feature)); } + const auto &f = m_frame_buffer[frame_id]; + for (const auto & e : m_active_streams) + { + std::cout << static_cast(e) << std::endl; + if (is_image_source(e)) + { + std::cout << f.has_image(e) << std::endl; + } + if (is_feature_source(e)) + { + std::cout << f.has_feature(e) << std::endl; + } + } + // // Check if our frame is valid, if so dispatch to our callbacks and notify anyone who is waiting on the next frame // @@ -1342,13 +1368,30 @@ void LegacyChannel::handle_and_dispatch_feature(FeatureMessage feature, int64_t return true; })) { + // + // Notify anyone waiting on the next frame + // m_image_frame_notifier.set_and_notify(frame); - std::lock_guard lock(m_image_callback_mutex); - if (m_user_image_frame_callback) + // + // Service the callback if it's valid + // { - m_user_image_frame_callback(frame); + std::lock_guard lock(m_image_callback_mutex); + if (m_user_image_frame_callback) + { + m_user_image_frame_callback(frame); + } } + + + // + // Remove our image frame from our frame buffer and the associated image metadata since we are + // now done with it internally + // + m_frame_buffer.erase(frame_id); + m_meta_cache.erase(frame_id); + //m_secondary_app_meta_cache.erase(frame_id); } } @@ -1397,6 +1440,8 @@ void LegacyChannel::image_callback(std::shared_ptr> d return; } + std::cout << "got image " << wire_image.frameId << std::endl; + // // Copy our calibration and device info locally to make this thread safe // @@ -1688,6 +1733,7 @@ void LegacyChannel::handle_and_dispatch(Image image, // m_frame_buffer.erase(frame_id); m_meta_cache.erase(frame_id); + //m_secondary_app_meta_cache.erase(frame_id); } // diff --git a/source/Utilities/LibMultiSense/FeatureDetectorUtility/FeatureDetectorUtility.cc b/source/Utilities/LibMultiSense/FeatureDetectorUtility/FeatureDetectorUtility.cc index e08c41f6..7252acf0 100644 --- a/source/Utilities/LibMultiSense/FeatureDetectorUtility/FeatureDetectorUtility.cc +++ 
b/source/Utilities/LibMultiSense/FeatureDetectorUtility/FeatureDetectorUtility.cc @@ -50,7 +50,9 @@ #include #define HAVE_OPENCV 1 -#include +#include +#include +#include #include #include From 62197aee294a102b4681a6e1051d57526acbb0b1 Mon Sep 17 00:00:00 2001 From: Matt Alvarado Date: Thu, 26 Mar 2026 17:37:54 -0400 Subject: [PATCH 13/20] Solve memory issues --- CMakeLists.txt | 2 +- .../LibMultiSense/details/legacy/channel.cc | 36 +++++++------------ .../include/details/legacy/channel.hh | 2 +- .../FeatureDetectorUtility.cc | 2 +- .../include/wire/SecondaryAppMetaMessage.hh | 21 +++++++++++ 5 files changed, 37 insertions(+), 26 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index dd4aebc3..d79fd91d 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -62,7 +62,7 @@ endif() if (NOT CMAKE_BUILD_TYPE AND NOT CMAKE_CONFIGURATION_TYPES) message(STATUS "Selecting default build type: Release") #set(CMAKE_BUILD_TYPE "Release" CACHE STRING "Build type (default: Release)" FORCE) - set(CMAKE_BUILD_TYPE "RelWithDebInfo" CACHE STRING "Build type (default: RelWithDebInfo)" FORCE) + set(CMAKE_BUILD_TYPE "DebugWithRelInfo" CACHE STRING "Build type (default: DebugWithRelInfo)" FORCE) endif () # diff --git a/source/LibMultiSense/details/legacy/channel.cc b/source/LibMultiSense/details/legacy/channel.cc index f159c9a7..ff5c51a6 100644 --- a/source/LibMultiSense/details/legacy/channel.cc +++ b/source/LibMultiSense/details/legacy/channel.cc @@ -1253,15 +1253,14 @@ Status LegacyChannel::manage_secondary_application_raw(const SecondaryApplicatio void LegacyChannel::secondary_app_meta_callback(std::shared_ptr> data) { - std::cout << "got feature meta" << std::endl; using namespace crl::multisense::details; - const auto wire_meta = deserialize(*data); - m_secondary_app_meta_cache[wire_meta.frameId] = wire_meta; + auto wire_meta = std::make_shared(deserialize(*data)); + const auto frame_id = wire_meta->frameId; + m_secondary_app_meta_cache[frame_id] = std::move(wire_meta); } void 
LegacyChannel::secondary_app_data_callback(std::shared_ptr> data) { - std::cout << "got feature" << std::endl; using namespace crl::multisense::details; using namespace std::chrono; @@ -1280,7 +1279,7 @@ void LegacyChannel::secondary_app_data_callback(std::shared_ptr(meta->second.dataP), meta->second.dataLength); + utility::BufferStreamReader meta_stream(reinterpret_cast(meta->second->dataP), meta->second->dataLength); wire::FeatureDetectorMeta feature_meta(meta_stream, wire::FeatureDetectorMeta::VERSION); utility::BufferStreamReader stream(reinterpret_cast(wire_data.dataP), wire_data.length); @@ -1293,8 +1292,6 @@ void LegacyChannel::secondary_app_data_callback(std::shared_ptr(detector.dataP); - std::cout << wire_data.frameId << " " << detector.numFeatures << std::endl; - feature_msg.keypoints.reserve(detector.numFeatures); for (size_t i = 0; i < detector.numFeatures; i++) { @@ -1342,20 +1339,6 @@ void LegacyChannel::handle_and_dispatch_feature(FeatureMessage feature, int64_t m_frame_buffer[frame_id].add_feature(std::move(feature)); } - const auto &f = m_frame_buffer[frame_id]; - for (const auto & e : m_active_streams) - { - std::cout << static_cast(e) << std::endl; - if (is_image_source(e)) - { - std::cout << f.has_image(e) << std::endl; - } - if (is_feature_source(e)) - { - std::cout << f.has_feature(e) << std::endl; - } - } - // // Check if our frame is valid, if so dispatch to our callbacks and notify anyone who is waiting on the next frame // @@ -1393,6 +1376,14 @@ void LegacyChannel::handle_and_dispatch_feature(FeatureMessage feature, int64_t m_meta_cache.erase(frame_id); //m_secondary_app_meta_cache.erase(frame_id); } + + // + // Since frames will only monotonically increase, it's safe to also delete all the frame_ids earlier than + // the current frame id. 
+ // + m_frame_buffer.erase(std::begin(m_frame_buffer), m_frame_buffer.lower_bound(frame_id)); + m_meta_cache.erase(std::begin(m_meta_cache), m_meta_cache.lower_bound(frame_id)); + //m_secondary_app_meta_cache.erase(std::begin(m_secondary_app_meta_cache), m_meta_cache.lower_bound(frame_id)); } void LegacyChannel::image_meta_callback(std::shared_ptr> data) @@ -1440,8 +1431,6 @@ void LegacyChannel::image_callback(std::shared_ptr> d return; } - std::cout << "got image " << wire_image.frameId << std::endl; - // // Copy our calibration and device info locally to make this thread safe // @@ -1742,6 +1731,7 @@ void LegacyChannel::handle_and_dispatch(Image image, // m_frame_buffer.erase(std::begin(m_frame_buffer), m_frame_buffer.lower_bound(frame_id)); m_meta_cache.erase(std::begin(m_meta_cache), m_meta_cache.lower_bound(frame_id)); + //m_secondary_app_meta_cache.erase(std::begin(m_secondary_app_meta_cache), m_meta_cache.lower_bound(frame_id)); } } diff --git a/source/LibMultiSense/include/details/legacy/channel.hh b/source/LibMultiSense/include/details/legacy/channel.hh index e900df0a..4b28ef22 100644 --- a/source/LibMultiSense/include/details/legacy/channel.hh +++ b/source/LibMultiSense/include/details/legacy/channel.hh @@ -434,7 +434,7 @@ private: /// /// @brief A cache of secondary app metadata associated with a specific frame id /// - std::map m_secondary_app_meta_cache{}; + std::map> m_secondary_app_meta_cache{}; /// /// @brief A cache of image frames associated with a specific frame id diff --git a/source/Utilities/LibMultiSense/FeatureDetectorUtility/FeatureDetectorUtility.cc b/source/Utilities/LibMultiSense/FeatureDetectorUtility/FeatureDetectorUtility.cc index 7252acf0..81520efd 100644 --- a/source/Utilities/LibMultiSense/FeatureDetectorUtility/FeatureDetectorUtility.cc +++ b/source/Utilities/LibMultiSense/FeatureDetectorUtility/FeatureDetectorUtility.cc @@ -140,7 +140,7 @@ int main(int argc, char** argv) } - // Start both the rectified image and the 
corresponding feature stream + // Start both the rectified image and the corresponding feature stream const std::vector sources = {lms::DataSource::LEFT_MONO_RAW, lms::DataSource::LEFT_ORB_FEATURES}; if (const auto status = channel->start_streams(sources); status != lms::Status::OK) diff --git a/source/Wire/include/wire/SecondaryAppMetaMessage.hh b/source/Wire/include/wire/SecondaryAppMetaMessage.hh index e66ec128..bfb4a561 100644 --- a/source/Wire/include/wire/SecondaryAppMetaMessage.hh +++ b/source/Wire/include/wire/SecondaryAppMetaMessage.hh @@ -89,6 +89,27 @@ public: SecondaryAppMetadata() {}; + SecondaryAppMetadata(const SecondaryAppMetadata& other) = delete; + + SecondaryAppMetadata(SecondaryAppMetadata&& other) noexcept : + SecondaryAppMetaHeader(std::move(other)), + mRef(other.mRef) + { + other.mRef = nullptr; + } + + SecondaryAppMetadata& operator=(SecondaryAppMetadata&& other) noexcept { + if (this != &other) { + delete mRef; + + SecondaryAppMetaHeader::operator=(std::move(other)); + + mRef = other.mRef; + other.mRef = nullptr; + } + return *this; + } + ~SecondaryAppMetadata() { if (mRef) delete mRef; From fac8168e94a0cd9babdd0d4c6d4fb27d5c678d1d Mon Sep 17 00:00:00 2001 From: Matt Alvarado Date: Thu, 26 Mar 2026 17:54:52 -0400 Subject: [PATCH 14/20] Logic fixes --- .../FeatureDetectorUtility/FeatureDetectorUtility.cc | 11 ++++++++--- .../feature_detector_utility.py | 8 ++++---- 2 files changed, 12 insertions(+), 7 deletions(-) diff --git a/source/Utilities/LibMultiSense/FeatureDetectorUtility/FeatureDetectorUtility.cc b/source/Utilities/LibMultiSense/FeatureDetectorUtility/FeatureDetectorUtility.cc index 81520efd..11ce9aff 100644 --- a/source/Utilities/LibMultiSense/FeatureDetectorUtility/FeatureDetectorUtility.cc +++ b/source/Utilities/LibMultiSense/FeatureDetectorUtility/FeatureDetectorUtility.cc @@ -132,15 +132,20 @@ int main(int argc, char** argv) auto config = channel->get_config(); config.frames_per_second = fps; + + // + // Set the feature 
detector to enable the feature detector config + // config.feature_detector_config = lms::MultiSenseConfig::FeatureDetectorConfig{number_of_features, true, 1}; if (const auto status = channel->set_config(config); status != lms::Status::OK && status != lms::Status::INCOMPLETE_APPLICATION) { - std::cout << "Feature detector not supported: " << lms::to_string(status) << std::endl; + std::cerr << "Feature detector not supported: " << lms::to_string(status) << std::endl; exit(1); } - - // Start both the rectified image and the corresponding feature stream + // + // Start both the left image and the corresponding feature stream + // const std::vector sources = {lms::DataSource::LEFT_MONO_RAW, lms::DataSource::LEFT_ORB_FEATURES}; if (const auto status = channel->start_streams(sources); status != lms::Status::OK) diff --git a/source/Utilities/LibMultiSense/FeatureDetectorUtility/feature_detector_utility.py b/source/Utilities/LibMultiSense/FeatureDetectorUtility/feature_detector_utility.py index 3a9ce2ee..4f4bb4f8 100755 --- a/source/Utilities/LibMultiSense/FeatureDetectorUtility/feature_detector_utility.py +++ b/source/Utilities/LibMultiSense/FeatureDetectorUtility/feature_detector_utility.py @@ -1,6 +1,6 @@ #!/usr/bin/env python # -# @file image_cal_utility.cc +# @file feature_detector_utility.cc # # Copyright 2013-2026 # Carnegie Robotics, LLC @@ -54,13 +54,14 @@ def main(args): config = channel.get_config() if config: config.frames_per_second = args.fps - # Set feature configuration + + # Set feature configuration to enable the feature detector config.feature_detector_config = lms.FeatureDetectorConfig() config.feature_detector_config.number_of_features = args.num_features config.feature_detector_config.grouping_enabled = True config.feature_detector_config.motion_octave = 1 status = channel.set_config(config) - if status != lms.Status.OK or status != lms.Status.INCOMPLETE_APPLICATION: + if status != lms.Status.OK: print("Cannot enable feature detector", 
lms.to_string(status)) @@ -97,4 +98,3 @@ def main(args): parser.add_argument("-n", "--num-features", type=int, default=1500, help="The max number of features to target.") parser.add_argument("-f", "--fps", type=int, default=10, help="The framerate of the camera.") main(parser.parse_args()) - From 3208889734aff8f2270430816a877e5614d936d9 Mon Sep 17 00:00:00 2001 From: Matt Alvarado Date: Thu, 26 Mar 2026 17:59:58 -0400 Subject: [PATCH 15/20] Updates to README --- README.md | 100 +++++------------- .../FeatureDetectorUtility.cc | 4 +- 2 files changed, 28 insertions(+), 76 deletions(-) diff --git a/README.md b/README.md index 74cd8ced..e28e7260 100644 --- a/README.md +++ b/README.md @@ -68,9 +68,6 @@ The LibMultiSense C++ and Python library has been tested with the following oper - [Feature Rendering](#feature-rendering) - [Python](#python-9) - [C++](#c-9) -- [Feature Configuration](#feature-configuration) - - [Python](#python-10) - - [C++](#c-10) ## Client Networking Prerequisite @@ -946,22 +943,29 @@ def main(): print("Invalid channel") exit(1) + # Set the feature detector config to enable the feature detector + config = channel.get_config() + config.feature_detector_config = lms.FeatureDetectorConfig() + config.feature_detector_config.number_of_features = 1500 + config.feature_detector_config.grouping_enabled = True + channel.set_config(config) + # Start both the rectified image and the corresponding feature stream - sources = [lms.DataSource.LEFT_RECTIFIED_RAW, lms.DataSource.LEFT_RECTIFIED_ORB_FEATURES] + sources = [lms.DataSource.LEFT_MONO_RAW, lms.DataSource.LEFT_ORB_FEATURES] if channel.start_streams(sources) != lms.Status.OK: print("Unable to start streams") exit(1) while True: frame = channel.get_next_image_frame() - if frame and frame.has_image(lms.DataSource.LEFT_RECTIFIED_RAW): - img = frame.get_image(lms.DataSource.LEFT_RECTIFIED_RAW).as_array + if frame and frame.has_image(lms.DataSource.LEFT_MONO_RAW): + img = 
frame.get_image(lms.DataSource.LEFT_MONO_RAW).as_array # Convert grayscale to BGR for color rendering display_img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) - if frame.has_feature(lms.DataSource.LEFT_RECTIFIED_ORB_FEATURES): - features = frame.get_feature(lms.DataSource.LEFT_RECTIFIED_ORB_FEATURES) + if frame.has_feature(lms.DataSource.LEFT_ORB_FEATURES): + features = frame.get_feature(lms.DataSource.LEFT_ORB_FEATURES) print(f"Frame {frame.frame_id}: Received {len(features.keypoints)} features") for kp in features.keypoints: @@ -996,10 +1000,17 @@ int main(int argc, char** argv) return 1; } + // + // Set the feature detector config to enable the feature detector + // + auto config = channel->get_config(); + config.feature_detector_config = lms::MultiSenseConfig::FeatureDetectorConfig{number_of_features, true, 1}; + channel->set_config(config); + // Start both the rectified image and the corresponding feature stream const std::vector sources = { - lms::DataSource::LEFT_RECTIFIED_RAW, - lms::DataSource::LEFT_RECTIFIED_ORB_FEATURES + lms::DataSource::LEFT_MONO_RAW, + lms::DataSource::LEFT_ORB_FEATURES }; if (const auto status = channel->start_streams(sources); status != lms::Status::OK) @@ -1012,15 +1023,15 @@ int main(int argc, char** argv) { if (const auto frame = channel->get_next_image_frame(); frame) { - if (frame->has_image(lms::DataSource::LEFT_RECTIFIED_RAW)) + if (frame->has_image(lms::DataSource::LEFT_MONO_RAW)) { - cv::Mat img = frame->get_image(lms::DataSource::LEFT_RECTIFIED_RAW).cv_mat(); + cv::Mat img = frame->get_image(lms::DataSource::LEFT_MONO_RAW).cv_mat(); cv::Mat display_img; cv::cvtColor(img, display_img, cv::COLOR_GRAY2BGR); - if (frame->has_feature(lms::DataSource::LEFT_RECTIFIED_ORB_FEATURES)) + if (frame->has_feature(lms::DataSource::LEFT_ORB_FEATURES)) { - const auto& features = frame->get_feature(lms::DataSource::LEFT_RECTIFIED_ORB_FEATURES); + const auto& features = frame->get_feature(lms::DataSource::LEFT_ORB_FEATURES); // Use the native 
OpenCV utility to convert keypoints and draw them cv::drawKeypoints(display_img, features.cv_keypoints(), display_img, cv::Scalar(0, 255, 0)); @@ -1035,64 +1046,3 @@ int main(int argc, char** argv) return 0; } ``` - ---- - -## Feature Configuration - -You can configure the on-camera feature detector's parameters, such as the maximum number of features -and whether to enable feature grouping. - -### Python - -```python -import libmultisense as lms - -def main(): - channel_config = lms.ChannelConfig() - channel_config.ip_address = "10.66.171.21" - - with lms.Channel.create(channel_config) as channel: - # Get current configuration - config = channel.get_config() - if config.feature_detector_config: - print(f"Current features: {config.feature_detector_config.number_of_features}") - - # Update configuration - config.feature_detector_config.number_of_features = 2000 - config.feature_detector_config.grouping_enabled = True - channel.set_config(config) - print("Configuration updated") - -if __name__ == "__main__": - main() -``` - -### C++ - -```c++ -#include -#include - -namespace lms = multisense; - -int main() -{ - const auto channel = lms::Channel::create(lms::Channel::Config{"10.66.171.21"}); - - // Get current configuration - auto config = channel->get_config(); - if (config.feature_detector_config) - { - std::cout << "Current features: " << config.feature_detector_config->number_of_features << std::endl; - - // Update configuration - config.feature_detector_config->number_of_features = 2000; - config.feature_detector_config->grouping_enabled = true; - channel->set_config(config); - std::cout << "Configuration updated" << std::endl; - } - - return 0; -} -``` diff --git a/source/Utilities/LibMultiSense/FeatureDetectorUtility/FeatureDetectorUtility.cc b/source/Utilities/LibMultiSense/FeatureDetectorUtility/FeatureDetectorUtility.cc index 11ce9aff..e3717574 100644 --- a/source/Utilities/LibMultiSense/FeatureDetectorUtility/FeatureDetectorUtility.cc +++ 
b/source/Utilities/LibMultiSense/FeatureDetectorUtility/FeatureDetectorUtility.cc @@ -134,7 +134,7 @@ int main(int argc, char** argv) config.frames_per_second = fps; // - // Set the feature detector to enable the feature detector config + // Set the feature detector config to enable the feature detector // config.feature_detector_config = lms::MultiSenseConfig::FeatureDetectorConfig{number_of_features, true, 1}; if (const auto status = channel->set_config(config); status != lms::Status::OK && status != lms::Status::INCOMPLETE_APPLICATION) @@ -168,7 +168,9 @@ int main(int argc, char** argv) { const auto& features = frame->get_feature(lms::DataSource::LEFT_ORB_FEATURES); + // // Use the native OpenCV utility to convert keypoints and draw them + // cv::drawKeypoints(display_img, features.cv_keypoints(), display_img, cv::Scalar(0, 255, 0)); } From f5872610ae67e67bf07df3af0cd30a17763ef572 Mon Sep 17 00:00:00 2001 From: Matt Alvarado Date: Fri, 27 Mar 2026 14:50:40 -0400 Subject: [PATCH 16/20] Cleanup our feature metadata cache --- source/LibMultiSense/details/legacy/channel.cc | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/source/LibMultiSense/details/legacy/channel.cc b/source/LibMultiSense/details/legacy/channel.cc index ff5c51a6..3a78ce0b 100644 --- a/source/LibMultiSense/details/legacy/channel.cc +++ b/source/LibMultiSense/details/legacy/channel.cc @@ -1256,6 +1256,7 @@ void LegacyChannel::secondary_app_meta_callback(std::shared_ptr(deserialize(*data)); const auto frame_id = wire_meta->frameId; + std::cout << "meta frame id: " << frame_id << std::endl; m_secondary_app_meta_cache[frame_id] = std::move(wire_meta); } @@ -1267,11 +1268,13 @@ void LegacyChannel::secondary_app_data_callback(std::shared_ptr(*data); const auto source = convert_sources(static_cast(wire_data.sourceExtended) << 32 | wire_data.source); - if (source.size() != 1 || !is_feature_source(source[0])) + if (source.size() != 1 || !is_feature_source(source.front())) { 
return; } + std::cout << "features: " << wire_data.frameId << " " << static_cast(source.front()) << std::endl; + const auto meta = m_secondary_app_meta_cache.find(wire_data.frameId); if (meta == std::end(m_secondary_app_meta_cache)) { @@ -1286,7 +1289,7 @@ void LegacyChannel::secondary_app_data_callback(std::shared_ptr> data) @@ -1722,7 +1725,7 @@ void LegacyChannel::handle_and_dispatch(Image image, // m_frame_buffer.erase(frame_id); m_meta_cache.erase(frame_id); - //m_secondary_app_meta_cache.erase(frame_id); + m_secondary_app_meta_cache.erase(frame_id); } // @@ -1731,7 +1734,7 @@ void LegacyChannel::handle_and_dispatch(Image image, // m_frame_buffer.erase(std::begin(m_frame_buffer), m_frame_buffer.lower_bound(frame_id)); m_meta_cache.erase(std::begin(m_meta_cache), m_meta_cache.lower_bound(frame_id)); - //m_secondary_app_meta_cache.erase(std::begin(m_secondary_app_meta_cache), m_meta_cache.lower_bound(frame_id)); + m_secondary_app_meta_cache.erase(std::begin(m_secondary_app_meta_cache), m_secondary_app_meta_cache.lower_bound(frame_id)); } } From ea49f4cd4974fea7ba7e7ebd9c41392861fad583 Mon Sep 17 00:00:00 2001 From: Matt Alvarado Date: Fri, 27 Mar 2026 15:56:06 -0400 Subject: [PATCH 17/20] Let users know they need new firmware for the feature detector --- README.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/README.md b/README.md index e28e7260..960b6ead 100644 --- a/README.md +++ b/README.md @@ -928,6 +928,9 @@ corresponding image frames. The following example demonstrates how to retrieve and render features on a rectified image. 
+> [!NOTE] +> MultiSense firmware version v7.34 or newer is required to use the onboard feature detector + ### Python ```python From 2d5f5806c756078cd7213ced6c487d553ba285d9 Mon Sep 17 00:00:00 2001 From: Matt Alvarado Date: Tue, 31 Mar 2026 13:27:10 -0400 Subject: [PATCH 18/20] Get rid of debug prints --- source/LibMultiSense/details/legacy/channel.cc | 3 --- 1 file changed, 3 deletions(-) diff --git a/source/LibMultiSense/details/legacy/channel.cc b/source/LibMultiSense/details/legacy/channel.cc index 3a78ce0b..3123144b 100644 --- a/source/LibMultiSense/details/legacy/channel.cc +++ b/source/LibMultiSense/details/legacy/channel.cc @@ -1256,7 +1256,6 @@ void LegacyChannel::secondary_app_meta_callback(std::shared_ptr(deserialize(*data)); const auto frame_id = wire_meta->frameId; - std::cout << "meta frame id: " << frame_id << std::endl; m_secondary_app_meta_cache[frame_id] = std::move(wire_meta); } @@ -1273,8 +1272,6 @@ void LegacyChannel::secondary_app_data_callback(std::shared_ptr(source.front()) << std::endl; - const auto meta = m_secondary_app_meta_cache.find(wire_data.frameId); if (meta == std::end(m_secondary_app_meta_cache)) { From 3fc9d10393693301eb0f9228cfaab98221944620 Mon Sep 17 00:00:00 2001 From: Matt Alvarado Date: Tue, 7 Apr 2026 15:19:01 -0400 Subject: [PATCH 19/20] Fix bad cmake install --- .../LibMultiSense/FeatureDetectorUtility/CMakeLists.txt | 3 --- 1 file changed, 3 deletions(-) diff --git a/source/Utilities/LibMultiSense/FeatureDetectorUtility/CMakeLists.txt b/source/Utilities/LibMultiSense/FeatureDetectorUtility/CMakeLists.txt index 5b087015..90ad4f9d 100644 --- a/source/Utilities/LibMultiSense/FeatureDetectorUtility/CMakeLists.txt +++ b/source/Utilities/LibMultiSense/FeatureDetectorUtility/CMakeLists.txt @@ -7,6 +7,3 @@ if(OpenCV_FOUND) else() message("Missing OpenCV. 
Not building the c++ FeatureDetectorUtility") endif() -install(FILES feature_detector_utility.py PERMISSIONS OWNER_EXECUTE OWNER_WRITE OWNER_READ - DESTINATION "bin") - From 97bf035799e1c83647405850e4d27ab4afe8d2c2 Mon Sep 17 00:00:00 2001 From: Matt Alvarado Date: Tue, 7 Apr 2026 15:20:35 -0400 Subject: [PATCH 20/20] Fix utility and install --- pyproject.toml | 2 +- python/libmultisense/utilities/feature_detector_utility.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 4ddbbf24..77360e45 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -30,4 +30,4 @@ multisense_ptp_utility = "libmultisense.utilities.ptp_utility:main" multisense_rectified_focal_length_utility = "libmultisense.utilities.rectified_focal_length_utility:main" multisense_save_image_utility = "libmultisense.utilities.save_image_utility:main" multisense_version_info_utility = "libmultisense.utilities.version_info_utility:main" -multisense_feature_detectorutility = "libmultisense.utilities.feature_detector_utility:main" +multisense_feature_detector_utility = "libmultisense.utilities.feature_detector_utility:main" diff --git a/python/libmultisense/utilities/feature_detector_utility.py b/python/libmultisense/utilities/feature_detector_utility.py index 5f4767df..b6aab0fc 100755 --- a/python/libmultisense/utilities/feature_detector_utility.py +++ b/python/libmultisense/utilities/feature_detector_utility.py @@ -41,7 +41,7 @@ import libmultisense as lms -def main(args): +def main(): parser = argparse.ArgumentParser("LibMultiSense feature detector utility") parser.add_argument("-a", "--ip_address", default="10.66.171.21", help="The IPv4 address of the MultiSense.") parser.add_argument("-m", "--mtu", type=int, default=1500, help="The MTU to use to communicate with the camera.")