diff --git a/CMakeLists.txt b/CMakeLists.txt
index 6afea221..fc5f59cd 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -61,7 +61,8 @@ endif()
 if (NOT CMAKE_BUILD_TYPE AND NOT CMAKE_CONFIGURATION_TYPES)
     message(STATUS "Selecting default build type: Release")
-    set(CMAKE_BUILD_TYPE "Release" CACHE STRING "Build type (default: Release)" FORCE)
+    #set(CMAKE_BUILD_TYPE "Release" CACHE STRING "Build type (default: Release)" FORCE)
+    set(CMAKE_BUILD_TYPE "RelWithDebInfo" CACHE STRING "Build type (default: RelWithDebInfo)" FORCE)
 endif ()
 
 #
diff --git a/README.md b/README.md
index 859930cc..d1f14e3e 100644
--- a/README.md
+++ b/README.md
@@ -69,6 +69,9 @@ The LibMultiSense C++ and Python library has been tested with the following oper
 - [IMU Data Streaming](#imu-data-streaming)
   - [Python](#python-9)
   - [C++](#c-9)
+- [Feature Rendering](#feature-rendering)
+  - [Python](#python-10)
+  - [C++](#c-10)
 
 ## Client Networking Prerequisite
@@ -900,6 +903,7 @@ and ranges.
 ### Python
 
 ```python
+
 import libmultisense as lms
 
 def main():
@@ -1064,3 +1068,134 @@ int main(int argc, char** argv)
     return 0;
 }
 ```
+
+---
+
+## Feature Rendering
+
+LibMultiSense supports retrieving image features computed on-camera. These are synchronized with the
+corresponding image frames.
+
+The following example demonstrates how to retrieve and render features on a rectified image.
+ +> [!NOTE] +> MultiSense firmware version v7.34 or newer is required to use the onboard feature detector + +### Python + +```python +import libmultisense as lms +import cv2 + +def main(): + channel_config = lms.ChannelConfig() + channel_config.ip_address = "10.66.171.21" + + with lms.Channel.create(channel_config) as channel: + if not channel: + print("Invalid channel") + exit(1) + + # Set the feature detector config to enable the feature detector + config = channel.get_config() + config.feature_detector_config = lms.FeatureDetectorConfig() + config.feature_detector_config.number_of_features = 1500 + config.feature_detector_config.grouping_enabled = True + channel.set_config(config) + + # Start both the rectified image and the corresponding feature stream + sources = [lms.DataSource.LEFT_MONO_RAW, lms.DataSource.LEFT_ORB_FEATURES] + if channel.start_streams(sources) != lms.Status.OK: + print("Unable to start streams") + exit(1) + + while True: + frame = channel.get_next_image_frame() + if frame and frame.has_image(lms.DataSource.LEFT_MONO_RAW): + img = frame.get_image(lms.DataSource.LEFT_MONO_RAW).as_array + + # Convert grayscale to BGR for color rendering + display_img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) + + if frame.has_feature(lms.DataSource.LEFT_ORB_FEATURES): + features = frame.get_feature(lms.DataSource.LEFT_ORB_FEATURES) + print(f"Frame {frame.frame_id}: Received {len(features.keypoints)} features") + + for kp in features.keypoints: + cv2.circle(display_img, (int(kp.x), int(kp.y)), 3, (0, 255, 0), -1) + + cv2.imshow("MultiSense Features", display_img) + if cv2.waitKey(1) & 0xFF == ord('q'): + break + +if __name__ == "__main__": + main() +``` + +### C++ + +```c++ +#include +#include + +#define HAVE_OPENCV 1 +#include +#include + +namespace lms = multisense; + +int main(int argc, char** argv) +{ + const auto channel = lms::Channel::create(lms::Channel::Config{"10.66.171.21"}); + if (!channel) + { + std::cerr << "Failed to create channel" << std::endl; + 
+        return 1;
+    }
+
+    //
+    // Set the feature detector config to enable the feature detector
+    //
+    auto config = channel->get_config();
+    config.feature_detector_config = lms::MultiSenseConfig::FeatureDetectorConfig{1500, true, 1};
+    channel->set_config(config);
+
+    // Start both the rectified image and the corresponding feature stream
+    const std::vector<lms::DataSource> sources = {
+        lms::DataSource::LEFT_MONO_RAW,
+        lms::DataSource::LEFT_ORB_FEATURES
+    };
+
+    if (const auto status = channel->start_streams(sources); status != lms::Status::OK)
+    {
+        std::cerr << "Cannot start streams: " << lms::to_string(status) << std::endl;
+        return 1;
+    }
+
+    while(true)
+    {
+        if (const auto frame = channel->get_next_image_frame(); frame)
+        {
+            if (frame->has_image(lms::DataSource::LEFT_MONO_RAW))
+            {
+                cv::Mat img = frame->get_image(lms::DataSource::LEFT_MONO_RAW).cv_mat();
+                cv::Mat display_img;
+                cv::cvtColor(img, display_img, cv::COLOR_GRAY2BGR);
+
+                if (frame->has_feature(lms::DataSource::LEFT_ORB_FEATURES))
+                {
+                    const auto& features = frame->get_feature(lms::DataSource::LEFT_ORB_FEATURES);
+
+                    // Use the native OpenCV utility to convert keypoints and draw them
+                    cv::drawKeypoints(display_img, features.cv_keypoints(), display_img, cv::Scalar(0, 255, 0));
+                }
+
+                cv::imshow("MultiSense Features", display_img);
+                if (cv::waitKey(1) == 'q') break;
+            }
+        }
+    }
+
+    return 0;
+}
+```
diff --git a/pyproject.toml b/pyproject.toml
index ea7b60cd..f0b58228 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -30,3 +30,4 @@ multisense_ptp_utility = "libmultisense.utilities.ptp_utility:main"
 multisense_rectified_focal_length_utility = "libmultisense.utilities.rectified_focal_length_utility:main"
 multisense_save_image_utility = "libmultisense.utilities.save_image_utility:main"
 multisense_version_info_utility = "libmultisense.utilities.version_info_utility:main"
+multisense_feature_detector_utility = "libmultisense.utilities.feature_detector_utility:main"
diff --git a/python/bindings.cc
b/python/bindings.cc index 9998eaa9..6642079a 100644 --- a/python/bindings.cc +++ b/python/bindings.cc @@ -119,6 +119,12 @@ PYBIND11_MODULE(_libmultisense, m) { .value("AUX_RAW", multisense::DataSource::AUX_RAW) .value("AUX_RECTIFIED_RAW", multisense::DataSource::AUX_RECTIFIED_RAW) .value("COST_RAW", multisense::DataSource::COST_RAW) + .value("LEFT_ORB_FEATURES", multisense::DataSource::LEFT_ORB_FEATURES) + .value("RIGHT_ORB_FEATURES", multisense::DataSource::RIGHT_ORB_FEATURES) + .value("AUX_ORB_FEATURES", multisense::DataSource::AUX_ORB_FEATURES) + .value("LEFT_RECTIFIED_ORB_FEATURES", multisense::DataSource::LEFT_RECTIFIED_ORB_FEATURES) + .value("RIGHT_RECTIFIED_ORB_FEATURES", multisense::DataSource::RIGHT_RECTIFIED_ORB_FEATURES) + .value("AUX_RECTIFIED_ORB_FEATURES", multisense::DataSource::AUX_RECTIFIED_ORB_FEATURES) .value("IMU", multisense::DataSource::IMU) .def("__str__", py::overload_cast(&multisense::to_string), py::prepend()) .def("__repr__", py::overload_cast(&multisense::to_string), py::prepend()); @@ -162,6 +168,40 @@ PYBIND11_MODULE(_libmultisense, m) { .value("JPEG", multisense::Image::PixelFormat::JPEG) .value("H264", multisense::Image::PixelFormat::H264); + // FeatureKeyPoint + py::class_(m, "FeatureKeyPoint") + .def(py::init<>()) + .def_readwrite("x", &multisense::FeatureKeyPoint::x) + .def_readwrite("y", &multisense::FeatureKeyPoint::y) + .def_readwrite("angle", &multisense::FeatureKeyPoint::angle) + .def_readwrite("response", &multisense::FeatureKeyPoint::response) + .def_readwrite("octave", &multisense::FeatureKeyPoint::octave) + .def_readwrite("class_id", &multisense::FeatureKeyPoint::class_id); + + // FeatureDescriptorType + py::enum_(m, "FeatureDescriptorType") + .value("UNKNOWN", multisense::FeatureDescriptorType::UNKNOWN) + .value("ORB", multisense::FeatureDescriptorType::ORB); + + // FeatureMessage + py::class_(m, "FeatureMessage") + .def(py::init<>()) + .def_readonly("source", &multisense::FeatureMessage::source) + 
.def_readonly("descriptor_type", &multisense::FeatureMessage::descriptor_type) + .def_readonly("keypoints", &multisense::FeatureMessage::keypoints) + .def_property_readonly("descriptors", [](const multisense::FeatureMessage& msg) + { + const SSizeVector shape = {static_cast(msg.descriptors.size())}; + const SSizeVector strides = {static_cast(sizeof(uint8_t))}; + return py::array(py::buffer_info( + const_cast(msg.descriptors.data()), + static_cast(sizeof(uint8_t)), + py::format_descriptor::format(), + 1, + shape, + strides)); + }); + // Image py::class_(m, "Image") .def(py::init<>()) @@ -274,8 +314,11 @@ PYBIND11_MODULE(_libmultisense, m) { .def("add_image", &multisense::ImageFrame::add_image) .def("get_image", &multisense::ImageFrame::get_image) .def("has_image", &multisense::ImageFrame::has_image) + .def("get_feature", &multisense::ImageFrame::get_feature) + .def("has_feature", &multisense::ImageFrame::has_feature) .def_readonly("frame_id", &multisense::ImageFrame::frame_id) .def_readonly("images", &multisense::ImageFrame::images) + .def_readonly("features", &multisense::ImageFrame::features) .def_readonly("calibration", &multisense::ImageFrame::calibration) .def_readonly("frame_time", &multisense::ImageFrame::frame_time) .def_readonly("ptp_frame_time", &multisense::ImageFrame::ptp_frame_time) @@ -473,6 +516,14 @@ PYBIND11_MODULE(_libmultisense, m) { .def_readwrite("internal", &multisense::MultiSenseConfig::LightingConfig::internal) .def_readwrite("external", &multisense::MultiSenseConfig::LightingConfig::external); + // FeatureDetectorConfig + py::class_(m, "FeatureDetectorConfig") + .def(py::init<>()) + PYBIND11_JSON_SUPPORT(multisense::MultiSenseConfig::FeatureDetectorConfig) + .def_readwrite("number_of_features", &multisense::MultiSenseConfig::FeatureDetectorConfig::number_of_features) + .def_readwrite("grouping_enabled", &multisense::MultiSenseConfig::FeatureDetectorConfig::grouping_enabled) + .def_readwrite("motion_octave", 
&multisense::MultiSenseConfig::FeatureDetectorConfig::motion_octave); + // MultiSenseConfig py::class_(m, "MultiSenseConfig") .def(py::init<>()) @@ -488,7 +539,8 @@ PYBIND11_MODULE(_libmultisense, m) { .def_readwrite("time_config", &multisense::MultiSenseConfig::time_config) .def_readwrite("network_config", &multisense::MultiSenseConfig::network_config) .def_readwrite("imu_config", &multisense::MultiSenseConfig::imu_config) - .def_readwrite("lighting_config", &multisense::MultiSenseConfig::lighting_config); + .def_readwrite("lighting_config", &multisense::MultiSenseConfig::lighting_config) + .def_readwrite("feature_detector_config", &multisense::MultiSenseConfig::feature_detector_config); // MultiSenseStatus::PtpStatus py::class_(m, "PtpStatus") diff --git a/python/libmultisense/utilities/feature_detector_utility.py b/python/libmultisense/utilities/feature_detector_utility.py new file mode 100755 index 00000000..b6aab0fc --- /dev/null +++ b/python/libmultisense/utilities/feature_detector_utility.py @@ -0,0 +1,102 @@ +#!/usr/bin/env python +# +# @file feature_detector_utility.cc +# +# Copyright 2013-2026 +# Carnegie Robotics, LLC +# 4501 Hatfield Street, Pittsburgh, PA 15201 +# http://www.carnegierobotics.com +# +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of the Carnegie Robotics, LLC nor the +# names of its contributors may be used to endorse or promote products +# derived from this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL CARNEGIE ROBOTICS, LLC BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# Significant history (date, user, job code, action): +# 2026-03-26, malvarado@carnegierobotics.com, IRAD, Created file. +# + +import argparse +import cv2 +from jsondiff import diff + +import libmultisense as lms + +def main(): + parser = argparse.ArgumentParser("LibMultiSense feature detector utility") + parser.add_argument("-a", "--ip_address", default="10.66.171.21", help="The IPv4 address of the MultiSense.") + parser.add_argument("-m", "--mtu", type=int, default=1500, help="The MTU to use to communicate with the camera.") + parser.add_argument("-n", "--num-features", type=int, default=1500, help="The max number of features to target.") + parser.add_argument("-f", "--fps", type=int, default=10, help="The framerate of the camera.") + args = parser.parse_args() + + channel_config = lms.ChannelConfig() + channel_config.ip_address = args.ip_address + channel_config.mtu = args.mtu + + with lms.Channel.create(channel_config) as channel: + if not channel: + print("Invalid channel") + exit(1) + + config = channel.get_config() + if config: + config.frames_per_second = args.fps + + # Set feature configuration to enable the feature detector + config.feature_detector_config = lms.FeatureDetectorConfig() + 
config.feature_detector_config.number_of_features = args.num_features + config.feature_detector_config.grouping_enabled = True + config.feature_detector_config.motion_octave = 1 + status = channel.set_config(config) + if status != lms.Status.OK: + print("Cannot enable feature detector", lms.to_string(status)) + + + # Start both the rectified image and the corresponding feature stream + sources = [lms.DataSource.LEFT_MONO_RAW, lms.DataSource.LEFT_ORB_FEATURES] + if channel.start_streams(sources) != lms.Status.OK: + print("Unable to start streams") + exit(1) + + while True: + frame = channel.get_next_image_frame() + if frame and frame.has_image(lms.DataSource.LEFT_MONO_RAW): + img = frame.get_image(lms.DataSource.LEFT_MONO_RAW).as_array + + # Convert grayscale to BGR for color rendering + display_img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) + + # Draw features on the image + if frame.has_feature(lms.DataSource.LEFT_ORB_FEATURES): + features = frame.get_feature(lms.DataSource.LEFT_ORB_FEATURES) + print(f"Frame {frame.frame_id}: Received {len(features.keypoints)} features") + + for kp in features.keypoints: + cv2.circle(display_img, (int(kp.x), int(kp.y)), 3, (0, 255, 0), -1) + + cv2.imshow("MultiSense Features", display_img) + if cv2.waitKey(1) & 0xFF == ord('q'): + break + +if __name__ == '__main__': + main() diff --git a/source/Legacy/include/MultiSense/details/storage.hh b/source/Legacy/include/MultiSense/details/storage.hh index 18054385..c6331ae6 100644 --- a/source/Legacy/include/MultiSense/details/storage.hh +++ b/source/Legacy/include/MultiSense/details/storage.hh @@ -251,7 +251,7 @@ namespace details { std::pair will_drop_() { const bool will_drop = m_map.size() == m_depth; - KEY drop_key; // If will_drop is false, this value is intentionally uninitialized + KEY drop_key{}; if (will_drop) { diff --git a/source/LibMultiSense/details/legacy/channel.cc b/source/LibMultiSense/details/legacy/channel.cc index ca94a977..a3cdeaeb 100644 --- 
a/source/LibMultiSense/details/legacy/channel.cc +++ b/source/LibMultiSense/details/legacy/channel.cc @@ -92,6 +92,17 @@ #include #include #include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include #include "details/legacy/channel.hh" #include "details/legacy/calibration.hh" @@ -252,6 +263,12 @@ Status LegacyChannel::connect(const Config &config) m_message_assembler.register_callback(wire::ImuData::ID, std::bind(&LegacyChannel::imu_callback, this, std::placeholders::_1)); + m_message_assembler.register_callback(wire::SecondaryAppData::ID, + std::bind(&LegacyChannel::secondary_app_data_callback, this, std::placeholders::_1)); + + m_message_assembler.register_callback(wire::SecondaryAppMetadata::ID, + std::bind(&LegacyChannel::secondary_app_meta_callback, this, std::placeholders::_1)); + // // Main thread which services incoming data and dispatches to callbacks and conditions attached to the channel // @@ -342,6 +359,11 @@ Status LegacyChannel::connect(const Config &config) return Status::FAILED; } + // + // Check secondary applications available + // + m_avaliable_secondary_applications = query_available_secondary_applications(); + m_connected = true; return Status::OK; @@ -349,6 +371,14 @@ Status LegacyChannel::connect(const Config &config) void LegacyChannel::disconnect() { + // + // Deactivate our current secondary application if one exists + // + if (m_active_secondary_application) + { + manage_secondary_application(m_active_secondary_application.value(), false); + } + // // Stop all our streams before disconnecting // @@ -370,11 +400,15 @@ void LegacyChannel::disconnect_internal() m_connected = false; - m_message_assembler.remove_callback(wire::Image::ID); m_message_assembler.remove_callback(wire::ImageMeta::ID); + m_message_assembler.remove_callback(wire::Image::ID); m_message_assembler.remove_callback(wire::CompressedImage::ID); m_message_assembler.remove_callback(wire::Disparity::ID); 
m_message_assembler.remove_callback(wire::ImuData::ID); + m_message_assembler.remove_callback(wire::SecondaryAppData::ID); + m_message_assembler.remove_callback(wire::SecondaryAppMetadata::ID); + + m_socket = NetworkSocket{}; m_udp_receiver = nullptr; @@ -567,6 +601,68 @@ Status LegacyChannel::set_config(const MultiSenseConfig &config) } } + // + // Set our feature detector config + // + if (config.feature_detector_config) + { + // + // Activate the application + // + if (const auto status = manage_secondary_application(SecondaryApplication::FEATURE_DETECTOR, true); status != Status::OK) + { + responses.push_back(status); + } + + if (m_active_secondary_application && + m_active_secondary_application.value() == SecondaryApplication::FEATURE_DETECTOR) + { + // We first need to get the current config to preserve internal options + const auto secondary_config = wait_for_data(m_message_assembler, + m_socket, + wire::SecondaryAppGetConfig(), + m_transmit_id++, + m_current_mtu, + m_config.receive_timeout); + + wire::FeatureDetectorConfigParams params = convert(config.feature_detector_config.value()); + if (secondary_config && secondary_config->dataLength >= sizeof(wire::FeatureDetectorConfigParams)) + { + std::memcpy(¶ms, secondary_config->data, sizeof(wire::FeatureDetectorConfigParams)); + params.numberOfFeatures = config.feature_detector_config->number_of_features; + params.grouping = config.feature_detector_config->grouping_enabled; + params.motion = config.feature_detector_config->motion_octave; + } + + wire::SecondaryAppControl control; + control.dataLength = sizeof(wire::FeatureDetectorConfigParams); + std::memcpy(control.data, ¶ms, sizeof(wire::FeatureDetectorConfigParams)); + + const auto feature_ack = wait_for_ack(m_message_assembler, + m_socket, + control, + m_transmit_id++, + m_current_mtu, + m_config.receive_timeout, + setting_retries); + + if (!feature_ack || feature_ack->status != wire::Ack::Status_Ok) + { + 
+                responses.push_back(feature_ack ? get_status(feature_ack->status) : Status::TIMEOUT);
+            }
+        }
+    }
+    else
+    {
+        //
+        // Deactivate the application
+        //
+        if (const auto status = manage_secondary_application(SecondaryApplication::FEATURE_DETECTOR, false); status != Status::OK)
+        {
+            responses.push_back(status);
+        }
+    }
+
     const auto errors = std::any_of(std::begin(responses), std::end(responses),
                                     [](const auto &e)
                                     {
@@ -912,12 +1008,28 @@ std::optional<MultiSenseConfig> LegacyChannel::query_configuration(bool has_aux_
                                              m_current_mtu,
                                              m_config.receive_timeout);
 
+    const auto secondary_config = wait_for_data(m_message_assembler,
+                                                m_socket,
+                                                wire::SecondaryAppGetConfig(),
+                                                m_transmit_id++,
+                                                m_current_mtu,
+                                                m_config.receive_timeout);
+
+    std::optional<wire::FeatureDetectorConfigParams> feature_config = std::nullopt;
+    if (secondary_config && secondary_config->dataLength >= sizeof(wire::FeatureDetectorConfigParams))
+    {
+        wire::FeatureDetectorConfigParams params;
+        std::memcpy(&params, secondary_config->data, sizeof(wire::FeatureDetectorConfigParams));
+        feature_config = std::move(params);
+    }
+
     if (camera_config)
     {
         return convert(camera_config.value(),
                        aux_config,
                        imu_config,
                        led_config,
+                       feature_config,
                        packet_delay ?
packet_delay.value() : wire::SysPacketDelay{false}, ptp_enabled, m_info.device, @@ -1069,6 +1181,232 @@ Status LegacyChannel::stop_streams_internal(const std::vector &sourc return Status::TIMEOUT; } +std::optional> LegacyChannel::query_available_secondary_applications() +{ + using namespace crl::multisense::details; + + if (const auto applications = wait_for_data(m_message_assembler, + m_socket, + wire::SecondaryAppGetRegisteredApps(), + m_transmit_id++, + m_current_mtu, + m_config.receive_timeout); applications) + { + return std::make_optional(convert(applications.value())); + } + + return std::nullopt; +} + +Status LegacyChannel::manage_secondary_application(const SecondaryApplication &app, bool activate) +{ + if (m_active_secondary_application && m_active_secondary_application.value() == app && activate) + { + return Status::OK; + } + + if (!m_active_secondary_application && !activate) + { + return Status::OK; + } + + if (!m_avaliable_secondary_applications || !supported_application(m_avaliable_secondary_applications.value(), app)) + { + return Status::UNSUPPORTED; + } + + // + // Deactivate the current application first if one is running and we want to activate a new application + // + if (activate && m_active_secondary_application && m_active_secondary_application.value() != app) + { + if (const auto status = manage_secondary_application_raw(m_active_secondary_application.value(), false); status == Status::OK) + { + m_active_secondary_application = std::nullopt; + } + else + { + return status; + } + } + + // + // Manage the application + // + if (const auto status = manage_secondary_application_raw(app, activate); status == Status::OK) + { + m_active_secondary_application = activate ? 
std::make_optional(app) : std::nullopt; + } + else + { + return status; + } + + return Status::OK; +} + +Status LegacyChannel::manage_secondary_application_raw(const SecondaryApplication &app, bool activate) +{ + using namespace crl::multisense::details; + + const auto app_string = application_string(app); + + wire::SecondaryAppActivate cmd(activate ? 1 : 0, app_string); + + if (const auto ack = wait_for_ack(m_message_assembler, + m_socket, + cmd, + m_transmit_id++, + m_current_mtu, + m_config.receive_timeout); ack) + { + if (ack->status != wire::Ack::Status_Ok) + { + CRL_DEBUG("Manage secondary app %s ack invalid: %" PRIi32 "\n", app_string.c_str(), ack->status); + return get_status(ack->status); + } + + return Status::OK; + } + + return Status::TIMEOUT; +} + +void LegacyChannel::secondary_app_meta_callback(std::shared_ptr> data) +{ + using namespace crl::multisense::details; + auto wire_meta = std::make_shared(deserialize(*data)); + const auto frame_id = wire_meta->frameId; + m_secondary_app_meta_cache[frame_id] = std::move(wire_meta); +} + +void LegacyChannel::secondary_app_data_callback(std::shared_ptr> data) +{ + using namespace crl::multisense::details; + using namespace std::chrono; + + const auto wire_data = deserialize(*data); + const auto source = convert_sources(static_cast(wire_data.sourceExtended) << 32 | wire_data.source); + + if (source.size() != 1 || !is_feature_source(source.front())) + { + return; + } + + const auto meta = m_secondary_app_meta_cache.find(wire_data.frameId); + if (meta == std::end(m_secondary_app_meta_cache)) + { + CRL_DEBUG("Missing corresponding meta for feature frame_id %" PRIu64 "\n", wire_data.frameId); + return; + } + + utility::BufferStreamReader meta_stream(reinterpret_cast(meta->second->dataP), meta->second->dataLength); + wire::FeatureDetectorMeta feature_meta(meta_stream, wire::FeatureDetectorMeta::VERSION); + + utility::BufferStreamReader stream(reinterpret_cast(wire_data.dataP), wire_data.length); + 
wire::FeatureDetector detector(stream, wire::FeatureDetector::VERSION); + + FeatureMessage feature_msg; + feature_msg.source = source.front(); + feature_msg.descriptor_type = FeatureDescriptorType::ORB; + + const size_t start_descriptor = detector.numFeatures * sizeof(wire::Feature); + uint8_t * feature_data = reinterpret_cast(detector.dataP); + + feature_msg.keypoints.reserve(detector.numFeatures); + for (size_t i = 0; i < detector.numFeatures; i++) + { + const auto f = reinterpret_cast(feature_data + (i * sizeof(wire::Feature))); + + FeatureKeyPoint keypoint; + keypoint.x = f->x; + keypoint.y = f->y; + keypoint.angle = f->angle; + keypoint.response = f->resp; + keypoint.octave = f->octave; + keypoint.class_id = f->descriptor; + feature_msg.keypoints.push_back(std::move(keypoint)); + } + + feature_msg.descriptors.reserve(detector.numDescriptors * sizeof(wire::Descriptor)); + for (size_t j = 0; j < detector.numDescriptors; j++) + { + wire::Descriptor d = *reinterpret_cast(feature_data + (start_descriptor + (j * sizeof(wire::Descriptor)))); + const uint8_t* d_bytes = reinterpret_cast(d.d); + feature_msg.descriptors.insert(feature_msg.descriptors.end(), d_bytes, d_bytes + sizeof(wire::Descriptor)); + } + + const nanoseconds capture_time{seconds{wire_data.timeSeconds} + microseconds{wire_data.timeMicroSeconds}}; + const TimeT capture_time_point{capture_time}; + const TimeT ptp_capture_time_point{nanoseconds{feature_meta.ptpNanoSeconds}}; + + handle_and_dispatch_feature(std::move(feature_msg), wire_data.frameId); +} + +void LegacyChannel::handle_and_dispatch_feature(FeatureMessage feature, int64_t frame_id) +{ + // + // Create a new frame if one does not exist, or add the input feature to the corresponding frame + // + if (m_frame_buffer.count(frame_id) == 0) + { + ImageFrame frame; + frame.frame_id = frame_id; + frame.add_feature(std::move(feature)); + m_frame_buffer.emplace(frame_id, std::move(frame)); + } + else + { + 
m_frame_buffer[frame_id].add_feature(std::move(feature)); + } + + // + // Check if our frame is valid, if so dispatch to our callbacks and notify anyone who is waiting on the next frame + // + if (const auto &frame = m_frame_buffer[frame_id]; + std::all_of(std::begin(m_active_streams), + std::end(m_active_streams), + [&frame](const auto &e) { + if (is_image_source(e)) return frame.has_image(e); + if (is_feature_source(e)) return frame.has_feature(e); + return true; + })) + { + // + // Notify anyone waiting on the next frame + // + m_image_frame_notifier.set_and_notify(frame); + + // + // Service the callback if it's valid + // + { + std::lock_guard lock(m_image_callback_mutex); + if (m_user_image_frame_callback) + { + m_user_image_frame_callback(frame); + } + } + + + // + // Remove our image frame from our frame buffer and the associated image metadata since we are + // now done with it internally + // + m_frame_buffer.erase(frame_id); + m_meta_cache.erase(frame_id); + m_secondary_app_meta_cache.erase(frame_id); + } + + // + // Since frames will only monotonically increase, it's safe to also delete all the frame_ids earlier than + // the current frame id. 
+ // + m_frame_buffer.erase(std::begin(m_frame_buffer), m_frame_buffer.lower_bound(frame_id)); + m_meta_cache.erase(std::begin(m_meta_cache), m_meta_cache.lower_bound(frame_id)); + m_secondary_app_meta_cache.erase(std::begin(m_secondary_app_meta_cache), m_secondary_app_meta_cache.lower_bound(frame_id)); +} + void LegacyChannel::image_meta_callback(std::shared_ptr> data) { using namespace crl::multisense::details; @@ -1353,6 +1691,7 @@ void LegacyChannel::handle_and_dispatch(Image image, const auto source = image.source; ImageFrame frame{frame_id, std::map{std::make_pair(source, std::move(image))}, + {}, calibration, capture_time, ptp_capture_time, @@ -1375,7 +1714,11 @@ void LegacyChannel::handle_and_dispatch(Image image, if (const auto &frame = m_frame_buffer[frame_id]; std::all_of(std::begin(m_active_streams), std::end(m_active_streams), - [&frame](const auto &e){return is_image_source(e) ? frame.has_image(e) : true;})) + [&frame](const auto &e) { + if (is_image_source(e)) return frame.has_image(e); + if (is_feature_source(e)) return frame.has_feature(e); + return true; + })) { // // Notify anyone waiting on the next frame @@ -1400,6 +1743,7 @@ void LegacyChannel::handle_and_dispatch(Image image, // m_frame_buffer.erase(frame_id); m_meta_cache.erase(frame_id); + m_secondary_app_meta_cache.erase(frame_id); } // @@ -1408,6 +1752,7 @@ void LegacyChannel::handle_and_dispatch(Image image, // m_frame_buffer.erase(std::begin(m_frame_buffer), m_frame_buffer.lower_bound(frame_id)); m_meta_cache.erase(std::begin(m_meta_cache), m_meta_cache.lower_bound(frame_id)); + m_secondary_app_meta_cache.erase(std::begin(m_secondary_app_meta_cache), m_secondary_app_meta_cache.lower_bound(frame_id)); } } diff --git a/source/LibMultiSense/details/legacy/configuration.cc b/source/LibMultiSense/details/legacy/configuration.cc index 7b8c8cc7..a13c9e6f 100644 --- a/source/LibMultiSense/details/legacy/configuration.cc +++ b/source/LibMultiSense/details/legacy/configuration.cc @@ -42,10 +42,31 
@@ namespace multisense { namespace legacy { +MultiSenseConfig::FeatureDetectorConfig convert(const crl::multisense::details::wire::FeatureDetectorConfigParams ¶ms) +{ + MultiSenseConfig::FeatureDetectorConfig config; + config.number_of_features = params.numberOfFeatures; + config.grouping_enabled = params.grouping; + config.motion_octave = params.motion; + + return config; +} + +crl::multisense::details::wire::FeatureDetectorConfigParams convert(const MultiSenseConfig::FeatureDetectorConfig &config) +{ + crl::multisense::details::wire::FeatureDetectorConfigParams params{}; + params.numberOfFeatures = config.number_of_features; + params.grouping = config.grouping_enabled; + params.motion = config.motion_octave; + + return params; +} + MultiSenseConfig convert(const crl::multisense::details::wire::CamConfig &config, const std::optional &aux_config, const std::optional &imu_config, const std::optional &led_config, + const std::optional &feature_config, const crl::multisense::details::wire::SysPacketDelay &packet_delay, bool ptp_enabled, const MultiSenseInfo::DeviceInfo &info, @@ -100,6 +121,9 @@ MultiSenseConfig convert(const crl::multisense::details::wire::CamConfig &config std::nullopt, (led_config && led_config->available) ? std::make_optional(convert(led_config.value(), info)) : + std::nullopt, + feature_config ? 
+ std::make_optional(convert(feature_config.value())) : std::nullopt}; } @@ -507,6 +531,18 @@ crl::multisense::details::wire::LedSet convert(const MultiSenseConfig::LightingC return output; } +std::vector convert(const crl::multisense::details::wire::SecondaryAppRegisteredApps &apps) +{ + std::vector output; + + for (const auto &app: apps.apps) + { + output.emplace_back(secondary_application(app.appName)); + } + + return output; +} + MultiSenseConfig::NetworkTransmissionConfig convert(const crl::multisense::details::wire::SysPacketDelay &packet) { diff --git a/source/LibMultiSense/details/legacy/utilities.cc b/source/LibMultiSense/details/legacy/utilities.cc index e55e93bd..3bc7c129 100644 --- a/source/LibMultiSense/details/legacy/utilities.cc +++ b/source/LibMultiSense/details/legacy/utilities.cc @@ -69,6 +69,22 @@ bool is_image_source(const DataSource &source) } } +bool is_feature_source(const DataSource &source) +{ + switch (source) + { + case DataSource::LEFT_ORB_FEATURES: + case DataSource::RIGHT_ORB_FEATURES: + case DataSource::AUX_ORB_FEATURES: + case DataSource::LEFT_RECTIFIED_ORB_FEATURES: + case DataSource::RIGHT_RECTIFIED_ORB_FEATURES: + case DataSource::AUX_RECTIFIED_ORB_FEATURES: + return true; + default: + return false; + } +} + Status get_status(const crl::multisense::details::wire::Ack::AckStatus &status) { using namespace crl::multisense::details::wire; @@ -127,6 +143,12 @@ std::vector convert_sources(const crl::multisense::details::wire::So if (source & wire::SOURCE_CHROMA_RECT_AUX) {sources.push_back(DataSource::AUX_CHROMA_RECTIFIED_RAW);} if (source & wire::SOURCE_DISPARITY_COST) {sources.push_back(DataSource::COST_RAW);} if (source & wire::SOURCE_IMU) {sources.push_back(DataSource::IMU);} + if (source & wire::SOURCE_SECONDARY_APP_DATA_0) {sources.push_back(DataSource::LEFT_ORB_FEATURES);} + if (source & wire::SOURCE_SECONDARY_APP_DATA_1) {sources.push_back(DataSource::RIGHT_ORB_FEATURES);} + if (source & wire::SOURCE_SECONDARY_APP_DATA_2) 
{sources.push_back(DataSource::AUX_ORB_FEATURES);} + if (source & wire::SOURCE_SECONDARY_APP_DATA_3) {sources.push_back(DataSource::LEFT_RECTIFIED_ORB_FEATURES);} + if (source & wire::SOURCE_SECONDARY_APP_DATA_4) {sources.push_back(DataSource::RIGHT_RECTIFIED_ORB_FEATURES);} + if (source & wire::SOURCE_SECONDARY_APP_DATA_5) {sources.push_back(DataSource::AUX_RECTIFIED_ORB_FEATURES);} return sources; } @@ -161,6 +183,12 @@ crl::multisense::details::wire::SourceType convert_sources(const std::vector(source));} } @@ -185,6 +213,41 @@ std::vector expand_source(const DataSource &source) } } +std::string application_string(const SecondaryApplication &app) +{ + switch (app) + { + case SecondaryApplication::FEATURE_DETECTOR: + { + return crl::multisense::details::wire::SECONDARY_APP_FEATURE_DETECTOR; + } + case SecondaryApplication::NONE: + default: + { + return ""; + } + + } +} + +SecondaryApplication secondary_application(const std::string &app) +{ + if (app == crl::multisense::details::wire::SECONDARY_APP_FEATURE_DETECTOR) + { + return SecondaryApplication::FEATURE_DETECTOR; + } + else + { + return SecondaryApplication::NONE; + } +} + + +bool supported_application(const std::vector &available_apps, const SecondaryApplication &target_app) +{ + return std::find(std::begin(available_apps), std::end(available_apps), target_app) != std::end(available_apps); +} + ImuSample add_wire_sample(ImuSample sample, const crl::multisense::details::wire::ImuSample &wire, const ImuSampleScalars &scalars) diff --git a/source/LibMultiSense/details/utilities.cc b/source/LibMultiSense/details/utilities.cc index 7ca321c3..880757a4 100644 --- a/source/LibMultiSense/details/utilities.cc +++ b/source/LibMultiSense/details/utilities.cc @@ -55,6 +55,7 @@ #ifdef HAVE_OPENCV #include +#include #endif #include "MultiSense/MultiSenseUtilities.hh" @@ -181,6 +182,12 @@ std::string to_string(const DataSource &source) case DataSource::AUX_RECTIFIED_RAW: {return "AUX_RECTIFIED";} case DataSource::COST_RAW: 
{return "COST";} case DataSource::IMU: {return "IMU";} + case DataSource::LEFT_ORB_FEATURES: {return "LEFT_ORB";} + case DataSource::RIGHT_ORB_FEATURES: {return "RIGHT_ORB";} + case DataSource::AUX_ORB_FEATURES: {return "AUX_ORB";} + case DataSource::LEFT_RECTIFIED_ORB_FEATURES: {return "LEFT_RECTIFIED_ORB";} + case DataSource::RIGHT_RECTIFIED_ORB_FEATURES: {return "RIGHT_RECTIFIED_ORB";} + case DataSource::AUX_RECTIFIED_ORB_FEATURES: {return "AUX_RECTIFIED_ORB";} } return "UNKNOWN"; } @@ -204,6 +211,34 @@ cv::Mat Image::cv_mat() const cv_type, const_cast(raw_data->data() + image_data_offset)}; } + +std::vector FeatureMessage::cv_keypoints() const +{ + std::vector kp; + kp.reserve(keypoints.size()); + + for (const auto &k : keypoints) + { + kp.emplace_back(k.x, k.y, 0.0f, k.angle, k.response, k.octave, k.class_id); + } + + return kp; +} + +cv::Mat FeatureMessage::cv_descriptors() const +{ + if (keypoints.empty()) + { + return cv::Mat{}; + } + + const int descriptor_size = descriptors.size() / keypoints.size(); + + return cv::Mat{static_cast(keypoints.size()), + descriptor_size, + CV_8UC1, + const_cast(descriptors.data())}; +} #endif bool write_image(const Image &image, const std::filesystem::path &path) diff --git a/source/LibMultiSense/include/MultiSense/MultiSenseSerialization.hh b/source/LibMultiSense/include/MultiSense/MultiSenseSerialization.hh index 03607c20..ab90d520 100644 --- a/source/LibMultiSense/include/MultiSense/MultiSenseSerialization.hh +++ b/source/LibMultiSense/include/MultiSense/MultiSenseSerialization.hh @@ -171,7 +171,13 @@ NLOHMANN_JSON_SERIALIZE_ENUM(DataSource, { {DataSource::AUX_RAW, "AUX_RAW"}, {DataSource::AUX_RECTIFIED_RAW, "AUX_RECTIFIED_RAW"}, {DataSource::COST_RAW, "COST_RAW"}, - {DataSource::IMU, "IMU"} + {DataSource::IMU, "IMU"}, + {DataSource::LEFT_ORB_FEATURES, "LEFT_ORB_FEATURES"}, + {DataSource::RIGHT_ORB_FEATURES, "RIGHT_ORB_FEATURES"}, + {DataSource::AUX_ORB_FEATURES, "AUX_ORB_FEATURES"}, + 
{DataSource::LEFT_RECTIFIED_ORB_FEATURES, "LEFT_RECTIFIED_ORB_FEATURES"}, + {DataSource::RIGHT_RECTIFIED_ORB_FEATURES, "RIGHT_RECTIFIED_ORB_FEATURES"}, + {DataSource::AUX_RECTIFIED_ORB_FEATURES, "AUX_RECTIFIED_ORB_FEATURES"} }) NLOHMANN_JSON_SERIALIZE_ENUM(CameraCalibration::DistortionType, { @@ -332,6 +338,11 @@ NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE(MultiSenseConfig::ImuConfig, gyroscope, magnetometer) +NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE(MultiSenseConfig::FeatureDetectorConfig, + number_of_features, + grouping_enabled, + motion_octave) + NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE(MultiSenseConfig, width, height, @@ -343,7 +354,8 @@ NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE(MultiSenseConfig, time_config, network_config, imu_config, - lighting_config) + lighting_config, + feature_detector_config) NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE(MultiSenseStatus::PtpStatus, grandmaster_present, diff --git a/source/LibMultiSense/include/MultiSense/MultiSenseTypes.hh b/source/LibMultiSense/include/MultiSense/MultiSenseTypes.hh index fb05aa33..3d4590ca 100644 --- a/source/LibMultiSense/include/MultiSense/MultiSenseTypes.hh +++ b/source/LibMultiSense/include/MultiSense/MultiSenseTypes.hh @@ -104,7 +104,13 @@ enum class DataSource : uint16_t AUX_RAW, AUX_RECTIFIED_RAW, COST_RAW, - IMU + IMU, + LEFT_ORB_FEATURES, + RIGHT_ORB_FEATURES, + AUX_ORB_FEATURES, + LEFT_RECTIFIED_ORB_FEATURES, + RIGHT_RECTIFIED_ORB_FEATURES, + AUX_RECTIFIED_ORB_FEATURES }; enum class ColorImageEncoding : uint16_t @@ -316,6 +322,93 @@ struct ImageHistogram std::vector data{}; }; +/// +/// @brief Represents a generic feature keypoint +/// +struct FeatureKeyPoint +{ + /// + /// @brief x location of the keypoint in the output image + /// + float x = 0.0f; + + /// + /// @brief y location of the keypoint in the output image + /// + float y = 0.0f; + + /// + /// @brief the angle of the intensity centroid + /// + float angle = 0.0f; + + /// + /// @brief harris cornerness score + /// + float response = 0.0f; + + /// + /// @brief the image
pyramid layer the keypoint was generated from + /// + uint32_t octave = 0; + + /// + /// @brief optional grouping id + /// + uint32_t class_id = 0; +}; + +/// +/// @brief The descriptor type for features +/// +enum class FeatureDescriptorType : uint8_t +{ + UNKNOWN, + ORB +}; + +/// +/// @brief A collection of features and their descriptors +/// +struct FeatureMessage +{ + /// + /// @brief DataSource associated with the features + /// + DataSource source = DataSource::UNKNOWN; + + /// + /// @brief The type of feature descriptor used + /// + FeatureDescriptorType descriptor_type = FeatureDescriptorType::UNKNOWN; + + /// + /// @brief Image keypoints for features + /// + std::vector keypoints{}; + + /// + /// @brief Raw descriptors for the features + /// + std::vector descriptors{}; + +#ifdef HAVE_OPENCV + /// + /// @brief Convert keypoints to native OpenCV keypoints + /// + std::vector cv_keypoints() const; + + /// + /// @brief Convert descriptors to a native OpenCV Mat. + /// The cv::Mat returned here wraps the underlying descriptor data pointer associated with + /// the FeatureMessage object. If the input FeatureMessage object goes out of scope while you are + /// still using the corresponding cv::Mat, you will need to `clone` the cv::Mat creating an internal + /// copy of all the data + /// + cv::Mat cv_descriptors() const; +#endif +}; + /// /// @brief A frame containing multiple images (indexed by DataSource). /// @@ -350,6 +443,35 @@ struct ImageFrame return (images.find(source) != images.end()); } + /// + /// @brief Add a feature message to the frame, keyed by its DataSource. + /// + void add_feature(const FeatureMessage& feature) + { + features[feature.source] = feature; + } + + /// + /// @brief Retrieve feature message by DataSource. Throws if not found.
+ /// + const FeatureMessage& get_feature(const DataSource &source) const + { + auto it = features.find(source); + if (it == features.end()) + { + throw std::runtime_error("No feature found for requested DataSource"); + } + return it->second; + } + + /// + /// @brief Check if we have features for a given data source + /// + bool has_feature(const DataSource &source) const + { + return (features.find(source) != features.end()); + } + /// /// @brief The unique monotonically increasing ID for each frame populated by the MultiSense /// @@ -360,6 +482,11 @@ struct ImageFrame /// std::map images{}; + /// + /// @brief The features associated with each source in the frame + /// + std::map features{}; + /// /// @brief The scaled calibration for the entire camera /// @@ -508,6 +635,15 @@ struct ImuRange } }; +/// +/// @brief supported secondary applications +/// +enum class SecondaryApplication : uint8_t +{ + NONE, + FEATURE_DETECTOR +}; + /// /// @brief Complete configuration object for configuring the MultiSense. Can be updated during camera operation @@ -1035,6 +1171,35 @@ struct MultiSenseConfig } }; + /// + /// @brief Configuration for the on-camera feature detector + /// + struct FeatureDetectorConfig + { + + /// + /// @brief Max number of features to detect + /// + uint32_t number_of_features = 1500; + + /// + /// @brief Attempt to group features with similar class ids + /// + bool grouping_enabled = true; + + uint32_t motion_octave = 1; + + /// + /// @brief Equality operator + /// + bool operator==(const FeatureDetectorConfig &rhs) const + { + return number_of_features == rhs.number_of_features && + grouping_enabled == rhs.grouping_enabled && + motion_octave == rhs.motion_octave; + } + }; + /// /// @brief The operating width of the MultiSense in pixels. 
For available operating resolutions /// see the MultiSenseInfo::SupportedOperatingMode @@ -1093,6 +1258,11 @@ struct MultiSenseConfig /// std::optional lighting_config = std::nullopt; + /// + /// @brief The feature detector configuration for the camera. If invalid, the camera does not support feature detection + /// + std::optional feature_detector_config = std::nullopt; + /// /// @brief Equality operator /// @@ -1108,7 +1278,8 @@ struct MultiSenseConfig time_config == rhs.time_config && network_config == rhs.network_config && imu_config == rhs.imu_config && - lighting_config == rhs.lighting_config; + lighting_config == rhs.lighting_config && + feature_detector_config == rhs.feature_detector_config; } }; diff --git a/source/LibMultiSense/include/details/legacy/channel.hh b/source/LibMultiSense/include/details/legacy/channel.hh index b060fd5b..48416e11 100644 --- a/source/LibMultiSense/include/details/legacy/channel.hh +++ b/source/LibMultiSense/include/details/legacy/channel.hh @@ -43,6 +43,7 @@ #include #include #include +#include #include "details/legacy/ip.hh" #include "details/legacy/message.hh" @@ -193,7 +194,7 @@ public: /// /// @brief Get set the current MultiSense configuration /// - Status set_config(const MultiSenseConfig &config) final override; + Status set_config(const MultiSenseConfig &config) override; /// /// @brief Get the current stereo calibration. The output calibration will correspond to the full-resolution @@ -272,6 +273,23 @@ private: /// Status stop_streams_internal(const std::vector &sources); + /// + /// @brief Query the full set of secondary applications which the camera supports. Note there is an assumption + /// only one secondary application can be active at a given time + /// + std::optional> query_available_secondary_applications(); + + /// + /// @brief Manage secondary application on the camera. 
Handle auto deactivate + /// + Status manage_secondary_application(const SecondaryApplication &app, bool activate); + + /// + /// @brief Manage secondary application on the camera. Send raw control messages without regard for + /// the current state + /// + Status manage_secondary_application_raw(const SecondaryApplication &app, bool activate); + /// /// @brief Image meta callback used to internally receive images sent from the MultiSense /// @@ -293,10 +311,20 @@ private: void disparity_callback(std::shared_ptr> data); /// - /// @brief Disparity callback used to internally receive images sent from the MultiSense + /// @brief Imu callback used to internally receive IMU data sent from the MultiSense /// void imu_callback(std::shared_ptr> data); + /// + /// @brief Secondary app meta callback used to internally receive secondary app meta sent from the MultiSense + /// + void secondary_app_meta_callback(std::shared_ptr> data); + + /// + /// @brief Secondary app data callback used to internally receive secondary app data sent from the MultiSense + /// + void secondary_app_data_callback(std::shared_ptr> data); + /// /// @brief Handle internal process, and potentially dispatch a image /// @@ -307,6 +335,12 @@ private: const TimeT &capture_time, const TimeT &ptp_capture_time); + /// + /// @brief Handle internal process, and potentially dispatch a feature + /// + void handle_and_dispatch_feature(FeatureMessage feature, + int64_t frame_id); + /// /// @brief Internal mutex used to handle updates from users /// @@ -367,6 +401,16 @@ private: /// std::set m_active_streams{}; + /// + /// @brief The current active secondary application + /// + std::optional m_active_secondary_application{}; + + /// + /// @brief Secondary applications which are available + /// + std::optional> m_avaliable_secondary_applications{}; + /// /// @brief The currently active image frame user callback /// @@ -392,6 +436,11 @@ private: /// std::map m_meta_cache{}; + /// + /// @brief A cache of secondary app
metadata associated with a specific frame id + /// + std::map> m_secondary_app_meta_cache{}; + /// /// @brief A cache of image frames associated with a specific frame id /// diff --git a/source/LibMultiSense/include/details/legacy/configuration.hh b/source/LibMultiSense/include/details/legacy/configuration.hh index 8bb60e02..af982280 100644 --- a/source/LibMultiSense/include/details/legacy/configuration.hh +++ b/source/LibMultiSense/include/details/legacy/configuration.hh @@ -50,6 +50,8 @@ #include #include #include +#include +#include #include #include @@ -58,6 +60,16 @@ namespace multisense { namespace legacy { +/// +/// @brief Convert a wire feature detector config to our API config object +/// +MultiSenseConfig::FeatureDetectorConfig convert(const crl::multisense::details::wire::FeatureDetectorConfigParams ¶ms); + +/// +/// @brief Convert our API config object to a wire message +/// +crl::multisense::details::wire::FeatureDetectorConfigParams convert(const MultiSenseConfig::FeatureDetectorConfig &config); + /// /// @brief Convert a wire AuxCameraConfiguration to our AuxConfig API /// @@ -70,6 +82,7 @@ MultiSenseConfig convert(const crl::multisense::details::wire::CamConfig &config const std::optional &aux_config, const std::optional &imu_config, const std::optional &led_config, + const std::optional &feature_config, const crl::multisense::details::wire::SysPacketDelay &packet_delay, bool ptp_enabled, const MultiSenseInfo::DeviceInfo &info, @@ -115,6 +128,11 @@ MultiSenseConfig::LightingConfig convert(const crl::multisense::details::wire::L /// crl::multisense::details::wire::LedSet convert (const MultiSenseConfig::LightingConfig &led); +/// +/// @brief Convert a wire secondary application configuration to a vector of applications +/// +std::vector convert(const crl::multisense::details::wire::SecondaryAppRegisteredApps &apps); + /// /// @brief Create a API transmission config object from wire types /// diff --git 
a/source/LibMultiSense/include/details/legacy/utilities.hh b/source/LibMultiSense/include/details/legacy/utilities.hh index fc899527..e35a63fb 100644 --- a/source/LibMultiSense/include/details/legacy/utilities.hh +++ b/source/LibMultiSense/include/details/legacy/utilities.hh @@ -72,7 +72,13 @@ constexpr crl::multisense::details::wire::SourceType all_sources = { crl::multisense::details::wire::SOURCE_CHROMA_AUX | crl::multisense::details::wire::SOURCE_CHROMA_RECT_AUX | crl::multisense::details::wire::SOURCE_DISPARITY_COST | - crl::multisense::details::wire::SOURCE_IMU + crl::multisense::details::wire::SOURCE_IMU | + crl::multisense::details::wire::SOURCE_SECONDARY_APP_DATA_0 | + crl::multisense::details::wire::SOURCE_SECONDARY_APP_DATA_1 | + crl::multisense::details::wire::SOURCE_SECONDARY_APP_DATA_2 | + crl::multisense::details::wire::SOURCE_SECONDARY_APP_DATA_3 | + crl::multisense::details::wire::SOURCE_SECONDARY_APP_DATA_4 | + crl::multisense::details::wire::SOURCE_SECONDARY_APP_DATA_5 }; /// @@ -102,6 +108,11 @@ struct ImuSampleScalars /// bool is_image_source(const DataSource &source); +/// +/// @brief Determine if a datasource is a feature source +/// +bool is_feature_source(const DataSource &source); + /// /// Convert a wire status to a API Status /// @@ -132,6 +143,21 @@ crl::multisense::details::wire::SourceType convert_sources(const std::vector expand_source(const DataSource &source); +/// +/// @brief Convert a secondary application to a string +/// +std::string application_string(const SecondaryApplication &app); + +/// +/// @brief Convert a string to a secondary application +/// +SecondaryApplication secondary_application(const std::string &app); + +/// +/// @brief Check if an application is supported +/// +bool supported_application(const std::vector &available_apps, const SecondaryApplication &target_app); + /// /// @brief Add a wire sample to a ImuSample /// diff --git a/source/LibMultiSense/test/CMakeLists.txt 
b/source/LibMultiSense/test/CMakeLists.txt index a7e7bcf3..3e404366 100644 --- a/source/LibMultiSense/test/CMakeLists.txt +++ b/source/LibMultiSense/test/CMakeLists.txt @@ -3,6 +3,7 @@ find_package(GTest REQUIRED) set(TEST_NAMES calibration_test configuration_test + feature_test info_test message_test multi_channel_test diff --git a/source/LibMultiSense/test/configuration_test.cc b/source/LibMultiSense/test/configuration_test.cc index c551dee9..efe3592b 100644 --- a/source/LibMultiSense/test/configuration_test.cc +++ b/source/LibMultiSense/test/configuration_test.cc @@ -600,6 +600,7 @@ TEST(convert, cam_config) wire_aux_config, std::nullopt, std::nullopt, + std::nullopt, packet_config, false, create_device_info(1920, 1200), @@ -620,6 +621,7 @@ TEST(convert, cam_config_invalid_aux) std::nullopt, std::nullopt, std::nullopt, + std::nullopt, packet_config, false, create_device_info(1920, 1200), @@ -639,6 +641,7 @@ TEST(convert, cam_config_invalid_imu) std::nullopt, std::nullopt, std::nullopt, + std::nullopt, packet_config, false, create_device_info(1920, 1200), @@ -661,6 +664,7 @@ TEST(convert, cam_config_invalid_led) std::nullopt, std::nullopt, std::nullopt, + std::nullopt, packet_config, false, create_device_info(1920, 1200), @@ -687,6 +691,7 @@ TEST(convert, cam_config_valid_led_but_no_lights) std::nullopt, std::nullopt, std::make_optional(lighting_config), + std::nullopt, packet_config, false, create_device_info(1920, 1200), @@ -745,6 +750,7 @@ TEST(convert, network_config) std::nullopt, std::nullopt, std::nullopt, + std::nullopt, packet_config, false, create_device_info(1920, 1200), diff --git a/source/LibMultiSense/test/feature_test.cc b/source/LibMultiSense/test/feature_test.cc new file mode 100644 index 00000000..e7819eda --- /dev/null +++ b/source/LibMultiSense/test/feature_test.cc @@ -0,0 +1,204 @@ +/** + * @file feature_test.cc + * + * Copyright 2024-2025 + * Carnegie Robotics, LLC + * 4501 Hatfield Street, Pittsburgh, PA 15201 + * 
http://www.carnegierobotics.com + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Carnegie Robotics, LLC nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL CARNEGIE ROBOTICS, LLC BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ **/ + +#include + +#include +#include + +#include +#include +#include + +#ifdef HAVE_OPENCV +#include +#include +#endif + +using namespace multisense; + +TEST(FeatureMessage, Basic) +{ + FeatureMessage msg; + msg.source = DataSource::LEFT_ORB_FEATURES; + msg.descriptor_type = FeatureDescriptorType::ORB; + + FeatureKeyPoint kp1; + kp1.x = 10.5f; + kp1.y = 20.2f; + kp1.response = 100.0f; + kp1.octave = 1; + kp1.class_id = 5; + + msg.keypoints.push_back(kp1); + + // ORB descriptor is 32 bytes + std::vector desc(32, 0xAA); + msg.descriptors.insert(msg.descriptors.end(), desc.begin(), desc.end()); + + EXPECT_EQ(msg.source, DataSource::LEFT_ORB_FEATURES); + EXPECT_EQ(msg.descriptor_type, FeatureDescriptorType::ORB); + EXPECT_EQ(msg.keypoints.size(), 1); + EXPECT_EQ(msg.descriptors.size(), 32); + EXPECT_FLOAT_EQ(msg.keypoints[0].x, 10.5f); +} + +TEST(ImageFrame, Features) +{ + ImageFrame frame; + frame.frame_id = 1234; + + FeatureMessage msg; + msg.source = DataSource::LEFT_ORB_FEATURES; + msg.keypoints.resize(10); + + frame.add_feature(msg); + + EXPECT_TRUE(frame.has_feature(DataSource::LEFT_ORB_FEATURES)); + EXPECT_FALSE(frame.has_feature(DataSource::RIGHT_ORB_FEATURES)); + EXPECT_EQ(frame.get_feature(DataSource::LEFT_ORB_FEATURES).keypoints.size(), 10); + EXPECT_EQ(frame.frame_id, 1234); +} + +#ifdef HAVE_OPENCV +TEST(FeatureMessage, OpenCVConversion) +{ + FeatureMessage msg; + msg.source = DataSource::LEFT_ORB_FEATURES; + msg.descriptor_type = FeatureDescriptorType::ORB; + + for (int i = 0; i < 5; ++i) + { + FeatureKeyPoint kp; + kp.x = static_cast(i * 10); + kp.y = static_cast(i * 20); + kp.angle = 45.0f; + kp.response = 50.0f; + kp.octave = 0; + kp.class_id = i; + msg.keypoints.push_back(kp); + } + + // 5 features, 32 bytes each = 160 bytes + msg.descriptors.resize(5 * 32, 0xBB); + + auto cv_kp = msg.cv_keypoints(); + auto cv_desc = msg.cv_descriptors(); + + ASSERT_EQ(cv_kp.size(), 5); + EXPECT_FLOAT_EQ(cv_kp[0].pt.x, 0.0f); + EXPECT_FLOAT_EQ(cv_kp[4].pt.x, 
40.0f); + EXPECT_FLOAT_EQ(cv_kp[2].angle, 45.0f); + EXPECT_EQ(cv_kp[3].class_id, 3); + + ASSERT_EQ(cv_desc.rows, 5); + ASSERT_EQ(cv_desc.cols, 32); + ASSERT_EQ(cv_desc.type(), CV_8UC1); + EXPECT_EQ(cv_desc.at(0, 0), 0xBB); +} +#endif + +TEST(FeatureWire, Serialization) +{ + using namespace crl::multisense::details; + + // Simulate FeatureDetectorMeta + wire::FeatureDetectorMeta meta; + meta.frameId = 555; + meta.timeSeconds = 100; + meta.timeNanoSeconds = 200; + meta.ptpNanoSeconds = 300; + meta.numFeatures = 2; + meta.numDescriptors = 2; + + std::vector meta_buf(512); + utility::BufferStreamWriter meta_writer(meta_buf.data(), meta_buf.size()); + meta.serialize(meta_writer, wire::FeatureDetectorMeta::VERSION); + + // Simulate FeatureDetector data + std::vector data_buf(1024); + utility::BufferStreamWriter data_writer(data_buf.data(), data_buf.size()); + + // FeatureDetector has a specialized serialize that includes writing raw data + // In our implementation it just writes the header then seeks past data + // So we manually write the data after the header + data_writer & wire::FeatureDetectorHeader::VERSION; // version + data_writer & (uint64_t)wire::SOURCE_SECONDARY_APP_DATA_0; // source + data_writer & (int64_t)555; // frameId + data_writer & (uint16_t)2; // numFeatures + data_writer & (uint16_t)2; // numDescriptors + + wire::Feature f1{10, 20, 30, 40, 50, 60}; + wire::Feature f2{100, 200, 30, 40, 50, 60}; + data_writer.write(&f1, sizeof(f1)); + data_writer.write(&f2, sizeof(f2)); + + wire::Descriptor d1{{1,2,3,4,5,6,7,8}}; + wire::Descriptor d2{{10,20,30,40,50,60,70,80}}; + data_writer.write(&d1, sizeof(d1)); + data_writer.write(&d2, sizeof(d2)); + + // Now deserialize + utility::BufferStreamReader meta_reader(meta_buf.data(), meta_buf.size()); + wire::FeatureDetectorMeta meta_decoded(meta_reader, wire::FeatureDetectorMeta::VERSION); + EXPECT_EQ(meta_decoded.frameId, 555); + EXPECT_EQ(meta_decoded.numFeatures, 2); + + utility::BufferStreamReader 
data_reader(data_buf.data(), data_buf.size()); + wire::FeatureDetector data_decoded(data_reader, wire::FeatureDetector::VERSION); + EXPECT_EQ(data_decoded.frameId, 555); + EXPECT_EQ(data_decoded.numFeatures, 2); + + // Verify raw data pointer + wire::Feature* features_ptr = reinterpret_cast(data_decoded.dataP); + EXPECT_EQ(features_ptr[0].x, 10); + EXPECT_EQ(features_ptr[1].x, 100); + + wire::Descriptor* desc_ptr = reinterpret_cast(reinterpret_cast(data_decoded.dataP) + 2*sizeof(wire::Feature)); + EXPECT_EQ(desc_ptr[0].d[0], 1); + EXPECT_EQ(desc_ptr[1].d[0], 10); +} + +TEST(FeatureConfig, Basic) +{ + MultiSenseConfig::FeatureDetectorConfig config; + config.number_of_features = 2000; + config.grouping_enabled = false; + config.motion_octave = 2; + + EXPECT_EQ(config.number_of_features, 2000); + EXPECT_FALSE(config.grouping_enabled); + EXPECT_EQ(config.motion_octave, 2); +} + + diff --git a/source/LibMultiSense/test/multi_channel_test.cc b/source/LibMultiSense/test/multi_channel_test.cc index 49500715..f341b274 100644 --- a/source/LibMultiSense/test/multi_channel_test.cc +++ b/source/LibMultiSense/test/multi_channel_test.cc @@ -47,14 +47,14 @@ TEST(frames_synchronized, basic_test) EXPECT_TRUE(frames_synchronized(frames, 0ns)); - frames.emplace_back(ImageFrame{1, {}, {}, TimeT{10ms}, TimeT{10ms}, {}, {}, {}, {}}); - frames.emplace_back(ImageFrame{3, {}, {}, TimeT{30ms}, TimeT{30ms}, {}, {}, {}, {}}); + frames.emplace_back(ImageFrame{1, {}, {}, {}, TimeT{10ms}, TimeT{10ms}, {}, {}, {}, {}}); + frames.emplace_back(ImageFrame{3, {}, {}, {}, TimeT{30ms}, TimeT{30ms}, {}, {}, {}, {}}); EXPECT_FALSE(frames_synchronized(frames, 0ns)); EXPECT_TRUE(frames_synchronized(frames, 21ms)); - frames.emplace_back(ImageFrame{37, {}, {}, TimeT{38ms}, TimeT{38ms}, {}, {}, {}, {}}); - frames.emplace_back(ImageFrame{7, {}, {}, TimeT{31ms}, TimeT{31ms}, {}, {}, {}, {}}); + frames.emplace_back(ImageFrame{37, {}, {}, {}, TimeT{38ms}, TimeT{38ms}, {}, {}, {}, {}}); + 
frames.emplace_back(ImageFrame{7, {}, {}, {}, TimeT{31ms}, TimeT{31ms}, {}, {}, {}, {}}); EXPECT_FALSE(frames_synchronized(frames, 21ms)); EXPECT_TRUE(frames_synchronized(frames, 29ms)); diff --git a/source/LibMultiSense/test/multisense_utilities_test.cc b/source/LibMultiSense/test/multisense_utilities_test.cc index efa5ac0c..48319dbf 100644 --- a/source/LibMultiSense/test/multisense_utilities_test.cc +++ b/source/LibMultiSense/test/multisense_utilities_test.cc @@ -181,7 +181,7 @@ TEST(create_depth_image, mono_and_float) const auto disparity_image = create_example_disparity_image(left_calibration, right_calibration, 3.0, disk_distance_m, 1920, 1200); - ImageFrame frame{0, {}, StereoCalibration{left_calibration, right_calibration, aux_calibration}, {}, {}, {}, {}, {}, {}}; + ImageFrame frame{0, {}, {}, StereoCalibration{left_calibration, right_calibration, aux_calibration}, {}, {}, {}, {}, {}, {}}; frame.add_image(disparity_image); const float invalid = 7000.0; @@ -266,7 +266,7 @@ TEST(get_aux_3d_point, basic_tests) const auto disparity_image = create_example_disparity_image(left_calibration, right_calibration, 3.0, disk_distance_m, 1920, 1200); - ImageFrame frame{0, {}, StereoCalibration{left_calibration, right_calibration, aux_calibration}, {}, {}, {}, {}, {}, {}}; + ImageFrame frame{0, {}, {}, StereoCalibration{left_calibration, right_calibration, aux_calibration}, {}, {}, {}, {}, {}, {}}; frame.add_image(disparity_image); const auto invalid_point = get_aux_3d_point(frame, Pixel{0, 0}, 100, 0.01); @@ -371,7 +371,7 @@ TEST(create_pointcloud, basic_tests) const auto disparity_image = create_example_disparity_image(left_calibration, right_calibration, 3.0, disk_distance_m, 1920, 1200); - ImageFrame frame{0, {}, StereoCalibration{left_calibration, right_calibration, std::nullopt}, {}, {}, {}, {}, {}, {}}; + ImageFrame frame{0, {}, {}, StereoCalibration{left_calibration, right_calibration, std::nullopt}, {}, {}, {}, {}, {}, {}}; frame.add_image(disparity_image); const 
auto point_cloud = create_pointcloud(frame, disk_distance_m + 3.0); diff --git a/source/Utilities/Legacy/FeatureDetectorUtility/FeatureDetectorUtility.cc b/source/Utilities/Legacy/FeatureDetectorUtility/FeatureDetectorUtility.cc index 38825606..a6ce9362 100644 --- a/source/Utilities/Legacy/FeatureDetectorUtility/FeatureDetectorUtility.cc +++ b/source/Utilities/Legacy/FeatureDetectorUtility/FeatureDetectorUtility.cc @@ -461,8 +461,8 @@ void featureDetectorCallback(const secondary_app::Header& header, } else { - if ((fHeader.source == feature_detector::Source_Feature_Left) - || (fHeader.source == feature_detector::Source_Feature_Rectified_Left)) + if ((fHeader.source == feature_detector::Source_ORB_Feature_Left) + || (fHeader.source == feature_detector::Source_ORB_Feature_Left_Rectified)) { auto it = userData->elapsedTime.find(fHeader.frameId); if (it == userData->elapsedTime.end()) { @@ -547,7 +547,7 @@ int main(int argc, Status status; system::VersionInfo v; - VersionType version; + crl::multisense::VersionType version; std::vector deviceModes; system::DeviceMode operatingMode; image::Calibration calibration; @@ -747,11 +747,11 @@ int main(int argc, { status = channelP->startStreams((operatingMode.supportedDataSources & Source_Luma_Left) | (operatingMode.supportedDataSources & Source_Luma_Right) | - feature_detector::Source_Feature_Left|feature_detector::Source_Feature_Right); + feature_detector::Source_ORB_Feature_Left|feature_detector::Source_ORB_Feature_Right); } else { - status = channelP->startStreams(feature_detector::Source_Feature_Left|feature_detector::Source_Feature_Right); + status = channelP->startStreams(feature_detector::Source_ORB_Feature_Left|feature_detector::Source_ORB_Feature_Right); } diff --git a/source/Utilities/Legacy/FeatureDetectorUtility/include/FeatureDetectorUtilities.hh b/source/Utilities/Legacy/FeatureDetectorUtility/include/FeatureDetectorUtilities.hh index 59f3d373..e51015f5 100644 --- 
a/source/Utilities/Legacy/FeatureDetectorUtility/include/FeatureDetectorUtilities.hh +++ b/source/Utilities/Legacy/FeatureDetectorUtility/include/FeatureDetectorUtilities.hh @@ -37,22 +37,23 @@ #ifndef __FEATURE_DETECTOR_UTILITIES_H__ #define __FEATURE_DETECTOR_UTILITIES_H__ -#include "FeatureDetectorMessage.hh" -#include "FeatureDetectorMetaMessage.hh" -#include "FeatureDetectorConfig.hh" +#include +#include +#include using namespace crl::multisense; using namespace crl::multisense::details; +using namespace crl::multisense::details::wire; namespace feature_detector { -static CRL_CONSTEXPR DataSource Source_Feature_Left = Source_Secondary_App_Data_0; -static CRL_CONSTEXPR DataSource Source_Feature_Right = Source_Secondary_App_Data_1; -static CRL_CONSTEXPR DataSource Source_Feature_Aux = Source_Secondary_App_Data_2; -static CRL_CONSTEXPR DataSource Source_Feature_Rectified_Left = Source_Secondary_App_Data_3; -static CRL_CONSTEXPR DataSource Source_Feature_Rectified_Right = Source_Secondary_App_Data_4; -static CRL_CONSTEXPR DataSource Source_Feature_Rectified_Aux = Source_Secondary_App_Data_5; +static CRL_CONSTEXPR DataSource Source_ORB_Feature_Left = Source_Secondary_App_Data_0; +static CRL_CONSTEXPR DataSource Source_ORB_Feature_Right = Source_Secondary_App_Data_1; +static CRL_CONSTEXPR DataSource Source_ORB_Feature_Aux = Source_Secondary_App_Data_2; +static CRL_CONSTEXPR DataSource Source_ORB_Feature_Left_Rectified = Source_Secondary_App_Data_3; +static CRL_CONSTEXPR DataSource Source_ORB_Feature_Right_Rectified = Source_Secondary_App_Data_4; +static CRL_CONSTEXPR DataSource Source_ORB_Feature_Aux_Rectified = Source_Secondary_App_Data_5; /** The recommended maximum number of features for full resolution camera operation */ static CRL_CONSTEXPR int RECOMMENDED_MAX_FEATURES_FULL_RES = 5000; @@ -229,10 +230,10 @@ Status secondaryAppDataExtract(feature_detector::Header &header, const secondary using namespace crl::multisense::details; utility::BufferStreamReader 
stream(reinterpret_cast(orig.secondaryAppDataP), orig.secondaryAppDataLength); - FeatureDetector featureDetector(stream, FeatureDetector::VERSION); + wire::FeatureDetector featureDetector(stream, wire::FeatureDetector::VERSION); utility::BufferStreamReader metaStream(reinterpret_cast(orig.secondaryAppMetadataP), orig.secondaryAppMetadataLength); - FeatureDetectorMeta _meta(metaStream, FeatureDetectorMeta::VERSION); + wire::FeatureDetectorMeta _meta(metaStream, wire::FeatureDetectorMeta::VERSION); header.source = featureDetector.source; @@ -256,16 +257,16 @@ Status secondaryAppDataExtract(feature_detector::Header &header, const secondary header.numFeatures = featureDetector.numFeatures; header.numDescriptors = featureDetector.numDescriptors; - const size_t startDescriptor=featureDetector.numFeatures*sizeof(Feature); + const size_t startDescriptor=featureDetector.numFeatures*sizeof(wire::Feature); uint8_t * dataP = reinterpret_cast(featureDetector.dataP); for (size_t i = 0; i < featureDetector.numFeatures; i++) { - feature_detector::Feature f = *reinterpret_cast(dataP + (i * sizeof(Feature))); + feature_detector::Feature f = *reinterpret_cast(dataP + (i * sizeof(wire::Feature))); header.features->push_back(f); } for (size_t j = 0;j < featureDetector.numDescriptors; j++) { - feature_detector::Descriptor d = *reinterpret_cast(dataP + (startDescriptor + (j * sizeof(Descriptor)))); + feature_detector::Descriptor d = *reinterpret_cast(dataP + (startDescriptor + (j * sizeof(wire::Descriptor)))); header.descriptors->push_back(d); } diff --git a/source/Utilities/LibMultiSense/CMakeLists.txt b/source/Utilities/LibMultiSense/CMakeLists.txt index c6900859..7a8e52c4 100644 --- a/source/Utilities/LibMultiSense/CMakeLists.txt +++ b/source/Utilities/LibMultiSense/CMakeLists.txt @@ -14,6 +14,7 @@ endif() add_subdirectory(ChangeIpUtility) add_subdirectory(DeviceInfoUtility) +add_subdirectory(FeatureDetectorUtility) add_subdirectory(ImageCalUtility) 
add_subdirectory(MultiChannelUtility) add_subdirectory(PointCloudUtility) diff --git a/source/Utilities/LibMultiSense/FeatureDetectorUtility/CMakeLists.txt b/source/Utilities/LibMultiSense/FeatureDetectorUtility/CMakeLists.txt new file mode 100644 index 00000000..90ad4f9d --- /dev/null +++ b/source/Utilities/LibMultiSense/FeatureDetectorUtility/CMakeLists.txt @@ -0,0 +1,9 @@ +if(OpenCV_FOUND) + add_executable(FeatureDetectorUtility FeatureDetectorUtility.cc) + + target_link_libraries (FeatureDetectorUtility ${MULTISENSE_UTILITY_LIBS}) + + install(TARGETS FeatureDetectorUtility RUNTIME DESTINATION "bin") +else() + message("Missing OpenCV. Not building the c++ FeatureDetectorUtility") +endif() diff --git a/source/Utilities/LibMultiSense/FeatureDetectorUtility/FeatureDetectorUtility.cc b/source/Utilities/LibMultiSense/FeatureDetectorUtility/FeatureDetectorUtility.cc new file mode 100644 index 00000000..e3717574 --- /dev/null +++ b/source/Utilities/LibMultiSense/FeatureDetectorUtility/FeatureDetectorUtility.cc @@ -0,0 +1,189 @@ +/** + * @file FeatureDetectorUtility.cc + * + * Copyright 2013-2026 + * Carnegie Robotics, LLC + * 4501 Hatfield Street, Pittsburgh, PA 15201 + * http://www.carnegierobotics.com + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the Carnegie Robotics, LLC nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL CARNEGIE ROBOTICS, LLC BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Significant history (date, user, job code, action): + * 2026-03-26, malvarado@carnegierobotics.com, IRAD, Created file. + **/ + +#ifdef WIN32 +#ifndef WIN32_LEAN_AND_MEAN +#define WIN32_LEAN_AND_MEAN 1 +#endif + +#include +#include +#else +#include +#endif + +#include +#include +#include + +#define HAVE_OPENCV 1 +#include +#include +#include + +#include +#include + +#include "getopt/getopt.h" + +namespace lms = multisense; + +namespace +{ + +volatile bool done = false; + +void usage(const char *name) +{ + std::cerr << "USAGE: " << name << " []" << std::endl; + std::cerr << "Where are:" << std::endl; + std::cerr << "\t-a : CURRENT IPV4 address (default=10.66.171.21)" << std::endl; + std::cerr << "\t-m : MTU to use to communicate with the camera (default=1500)" << std::endl; + std::cerr << "\t-n : Max number of features to detect (default=1500)" << std::endl; + std::cerr << "\t-f : Framerate (default=10)" << std::endl; + std::cerr << "\t-h : Show help" << std::endl; + exit(1); +} + +#ifdef WIN32 +BOOL WINAPI signal_handler(DWORD dwCtrlType) +{ + (void) dwCtrlType; + done = true; + return TRUE; +} +#else +void signal_handler(int sig) +{ + (void) sig; + done = true; +} +#endif + +} + +int 
main(int argc, char** argv) +{ + using namespace std::chrono_literals; + +#if WIN32 + SetConsoleCtrlHandler (signal_handler, TRUE); +#else + signal(SIGINT, signal_handler); +#endif + + std::string ip_address = "10.66.171.21"; + int16_t mtu = 1500; + uint32_t number_of_features = 1500; + size_t fps = 10; + + int c; + while(-1 != (c = getopt(argc, argv, "a:m:n:f:h"))) + { + switch(c) + { + case 'a': ip_address = std::string(optarg); break; + case 'm': mtu = static_cast(atoi(optarg)); break; + case 'n': number_of_features = static_cast(atoi(optarg)); break; + case 'f': fps = static_cast(atoi(optarg)); break; + default: usage(*argv); break; + } + } + + const auto channel = lms::Channel::create(lms::Channel::Config{ip_address, mtu}); + if (!channel) + { + std::cerr << "Failed to create channel" << std::endl;; + return 1; + } + + auto config = channel->get_config(); + config.frames_per_second = fps; + + // + // Set the feature detector config to enable the feature detector + // + config.feature_detector_config = lms::MultiSenseConfig::FeatureDetectorConfig{number_of_features, true, 1}; + if (const auto status = channel->set_config(config); status != lms::Status::OK && status != lms::Status::INCOMPLETE_APPLICATION) + { + std::cerr << "Feature detector not supported: " << lms::to_string(status) << std::endl; + exit(1); + } + + // + // Start both the left image and the corresponding feature stream + // + const std::vector sources = {lms::DataSource::LEFT_MONO_RAW, lms::DataSource::LEFT_ORB_FEATURES}; + + if (const auto status = channel->start_streams(sources); status != lms::Status::OK) + { + std::cerr << "Cannot start streams: " << lms::to_string(status) << std::endl; + return 1; + } + + while(!done) + { + if (const auto frame = channel->get_next_image_frame(); frame) + { + if (frame->has_image(lms::DataSource::LEFT_MONO_RAW)) + { + cv::Mat img = frame->get_image(lms::DataSource::LEFT_MONO_RAW).cv_mat(); + cv::Mat display_img; + cv::cvtColor(img, display_img, 
cv::COLOR_GRAY2BGR); + + if (frame->has_feature(lms::DataSource::LEFT_ORB_FEATURES)) + { + const auto& features = frame->get_feature(lms::DataSource::LEFT_ORB_FEATURES); + + // + // Use the native OpenCV utility to convert keypoints and draw them + // + cv::drawKeypoints(display_img, features.cv_keypoints(), display_img, cv::Scalar(0, 255, 0)); + } + + cv::imshow("MultiSense Features", display_img); + if (cv::waitKey(1) == 'q') + { + break; + } + } + } + } + + channel->stop_streams({lms::DataSource::ALL}); + + return 0; +} diff --git a/source/Utilities/LibMultiSense/SaveImageUtility/CMakeLists.txt b/source/Utilities/LibMultiSense/SaveImageUtility/CMakeLists.txt index acdca067..f5ea6913 100644 --- a/source/Utilities/LibMultiSense/SaveImageUtility/CMakeLists.txt +++ b/source/Utilities/LibMultiSense/SaveImageUtility/CMakeLists.txt @@ -1,4 +1,3 @@ - add_executable(SaveImageUtility SaveImageUtility.cc) target_link_libraries (SaveImageUtility ${MULTISENSE_UTILITY_LIBS}) diff --git a/source/Utilities/Legacy/FeatureDetectorUtility/include/FeatureDetectorConfig.hh b/source/Wire/include/wire/FeatureConfig.hh similarity index 84% rename from source/Utilities/Legacy/FeatureDetectorUtility/include/FeatureDetectorConfig.hh rename to source/Wire/include/wire/FeatureConfig.hh index 05927d8f..c8911786 100644 --- a/source/Utilities/Legacy/FeatureDetectorUtility/include/FeatureDetectorConfig.hh +++ b/source/Wire/include/wire/FeatureConfig.hh @@ -1,5 +1,5 @@ /** - * @file LibMultiSense/FeatureDetectorMessage.hh + * @file FeatureConfig.hh * * Copyright 2024-2025 * Carnegie Robotics, LLC @@ -29,20 +29,18 @@ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * Significant history (date, user, job code, action): - * 2024-22-11, patrick.smith@carnegierobotics.com, IRAD, Created file. 
- * 2025-14-01, hshibata@carnegierobotics.com, IRAD, options added **/ +#ifndef LibMultiSense_FeatureConfig_hh +#define LibMultiSense_FeatureConfig_hh -#ifndef __FEATURE_DETECTOR_CONFIG_H__ -#define __FEATURE_DETECTOR_CONFIG_H__ - -#include -#include +#include +#include -using namespace crl::multisense::details; +namespace crl { +namespace multisense { +namespace details { +namespace wire { #pragma pack(push, 1) @@ -91,4 +89,9 @@ struct FeatureDetectorConfigParams { #pragma pack(pop) -#endif /* end of include guard: __FEATURE_DETECTOR_CONFIG_H__ */ +} // namespace wire +} // namespace details +} // namespace multisense +} // namespace crl + +#endif diff --git a/source/Utilities/Legacy/FeatureDetectorUtility/include/FeatureDetectorMessage.hh b/source/Wire/include/wire/FeatureMessage.hh similarity index 93% rename from source/Utilities/Legacy/FeatureDetectorUtility/include/FeatureDetectorMessage.hh rename to source/Wire/include/wire/FeatureMessage.hh index e3ca9030..18fc178b 100644 --- a/source/Utilities/Legacy/FeatureDetectorUtility/include/FeatureDetectorMessage.hh +++ b/source/Wire/include/wire/FeatureMessage.hh @@ -1,5 +1,5 @@ /** - * @file LibMultiSense/FeatureDetectorMessage.hh + * @file FeatureMessage.hh * * This message contains first class feature data. * @@ -37,8 +37,8 @@ * 2024-22-11, patrick.smith@carnegierobotics.com, IRAD, Moved file. 
**/ -#ifndef LibMultiSense_FeatureDetectorMessage -#define LibMultiSense_FeatureDetectorMessage +#ifndef LibMultiSense_FeatureMessage +#define LibMultiSense_FeatureMessage #include @@ -46,7 +46,10 @@ #include #include -using namespace crl::multisense::details; +namespace crl { +namespace multisense { +namespace details { +namespace wire { #pragma pack(push, 1) @@ -122,5 +125,9 @@ public: #endif // !SENSORPOD_FIRMWARE +} // namespace wire +} // namespace details +} // namespace multisense +} // namespace crl #endif diff --git a/source/Utilities/Legacy/FeatureDetectorUtility/include/FeatureDetectorMetaMessage.hh b/source/Wire/include/wire/FeatureMetaMessage.hh similarity index 95% rename from source/Utilities/Legacy/FeatureDetectorUtility/include/FeatureDetectorMetaMessage.hh rename to source/Wire/include/wire/FeatureMetaMessage.hh index e84743c3..6366fcd5 100644 --- a/source/Utilities/Legacy/FeatureDetectorUtility/include/FeatureDetectorMetaMessage.hh +++ b/source/Wire/include/wire/FeatureMetaMessage.hh @@ -1,5 +1,5 @@ /** - * @file LibMultiSense/FeatureDetectorMetaMessage.hh + * @file FeatureMetaMessage.hh * * Copyright 2013-2025 * Carnegie Robotics, LLC @@ -41,9 +41,12 @@ #include #include "utility/Portability.hh" -#include +#include -using namespace crl::multisense::details; +namespace crl { +namespace multisense { +namespace details { +namespace wire { class WIRE_HEADER_ATTRIBS_ FeatureDetectorMetaHeader { public: @@ -151,4 +154,9 @@ public: #endif // !SENSORPOD_FIRMWARE +} // namespace wire +} // namespace details +} // namespace multisense +} // namespace crl + #endif diff --git a/source/Wire/include/wire/Protocol.hh b/source/Wire/include/wire/Protocol.hh index 7376e4ce..cfdff7b1 100644 --- a/source/Wire/include/wire/Protocol.hh +++ b/source/Wire/include/wire/Protocol.hh @@ -340,6 +340,11 @@ static CRL_CONSTEXPR uint32_t MAX_LIGHTS = 8; static CRL_CONSTEXPR float WIRE_IMAGER_GAIN_MAX = 1000.0f; +// +// Secondary Application names + +static CRL_CONSTEXPR char 
SECONDARY_APP_FEATURE_DETECTOR[] = "FeatureDetector"; + // // Some helper macros diff --git a/source/Wire/include/wire/SecondaryAppDataMessage.hh b/source/Wire/include/wire/SecondaryAppDataMessage.hh index c039fb98..37afaa82 100644 --- a/source/Wire/include/wire/SecondaryAppDataMessage.hh +++ b/source/Wire/include/wire/SecondaryAppDataMessage.hh @@ -40,7 +40,7 @@ #include #include -#include "MultiSense/details/utility/Portability.hh" +#include "utility/Portability.hh" namespace crl { namespace multisense { diff --git a/source/Wire/include/wire/SecondaryAppGetConfigMessage.hh b/source/Wire/include/wire/SecondaryAppGetConfigMessage.hh index 4d7a711a..683153c2 100644 --- a/source/Wire/include/wire/SecondaryAppGetConfigMessage.hh +++ b/source/Wire/include/wire/SecondaryAppGetConfigMessage.hh @@ -39,7 +39,7 @@ #ifndef LibMultiSense_SecondaryAppGetConfig #define LibMultiSense_SecondaryAppGetConfig -#include "MultiSense/details/utility/Portability.hh" +#include "utility/Portability.hh" namespace crl { namespace multisense { diff --git a/source/Wire/include/wire/SecondaryAppGetRegisteredAppsMessage.hh b/source/Wire/include/wire/SecondaryAppGetRegisteredAppsMessage.hh index d254dd83..e0552f7d 100644 --- a/source/Wire/include/wire/SecondaryAppGetRegisteredAppsMessage.hh +++ b/source/Wire/include/wire/SecondaryAppGetRegisteredAppsMessage.hh @@ -39,7 +39,7 @@ #ifndef LibMultiSense_SecondaryAppGetRegisteredApps #define LibMultiSense_SecondaryAppGetRegisteredApps -#include "MultiSense/details/utility/Portability.hh" +#include "utility/Portability.hh" namespace crl { namespace multisense { diff --git a/source/Wire/include/wire/SecondaryAppMetaMessage.hh b/source/Wire/include/wire/SecondaryAppMetaMessage.hh index 29c2d9c6..bfb4a561 100644 --- a/source/Wire/include/wire/SecondaryAppMetaMessage.hh +++ b/source/Wire/include/wire/SecondaryAppMetaMessage.hh @@ -40,7 +40,7 @@ #include #include -#include "MultiSense/details/utility/Portability.hh" +#include "utility/Portability.hh" 
namespace crl { namespace multisense { @@ -89,6 +89,27 @@ public: SecondaryAppMetadata() {}; + SecondaryAppMetadata(const SecondaryAppMetadata& other) = delete; + + SecondaryAppMetadata(SecondaryAppMetadata&& other) noexcept : + SecondaryAppMetaHeader(std::move(other)), + mRef(other.mRef) + { + other.mRef = nullptr; + } + + SecondaryAppMetadata& operator=(SecondaryAppMetadata&& other) noexcept { + if (this != &other) { + delete mRef; + + SecondaryAppMetaHeader::operator=(std::move(other)); + + mRef = other.mRef; + other.mRef = nullptr; + } + return *this; + } + ~SecondaryAppMetadata() { if (mRef) delete mRef; diff --git a/source/Wire/include/wire/SecondaryAppRegisteredAppsMessage.hh b/source/Wire/include/wire/SecondaryAppRegisteredAppsMessage.hh index c01e75ea..904d80a5 100644 --- a/source/Wire/include/wire/SecondaryAppRegisteredAppsMessage.hh +++ b/source/Wire/include/wire/SecondaryAppRegisteredAppsMessage.hh @@ -39,7 +39,7 @@ #ifndef LibMultiSense_SecondaryAppRegisteredApps #define LibMultiSense_SecondaryAppRegisteredApps -#include "MultiSense/details/utility/Portability.hh" +#include "utility/Portability.hh" namespace crl { namespace multisense {