diff --git a/launch/tier4_perception_launch/launch/traffic_light_recognition/traffic_light.launch.xml b/launch/tier4_perception_launch/launch/traffic_light_recognition/traffic_light.launch.xml
index 9e63832f5e522..ea50101719d65 100644
--- a/launch/tier4_perception_launch/launch/traffic_light_recognition/traffic_light.launch.xml
+++ b/launch/tier4_perception_launch/launch/traffic_light_recognition/traffic_light.launch.xml
@@ -2,30 +2,62 @@
@@ -33,13 +65,21 @@
diff --git a/launch/tier4_perception_launch/launch/traffic_light_recognition/traffic_light_node_container.launch.py b/launch/tier4_perception_launch/launch/traffic_light_recognition/traffic_light_node_container.launch.py
index a3fd412577e1f..964008dafa5cd 100644
--- a/launch/tier4_perception_launch/launch/traffic_light_recognition/traffic_light_node_container.launch.py
+++ b/launch/tier4_perception_launch/launch/traffic_light_recognition/traffic_light_node_container.launch.py
@@ -35,38 +35,39 @@ def add_launch_arg(name: str, default_value=None, description=None):
        DeclareLaunchArgument(name, default_value=default_value, description=description)
    )
-   ssd_fine_detector_share_dir = get_package_share_directory("traffic_light_ssd_fine_detector")
+   fine_detector_share_dir = get_package_share_directory("traffic_light_fine_detector")
    classifier_share_dir = get_package_share_directory("traffic_light_classifier")
    add_launch_arg("enable_image_decompressor", "True")
    add_launch_arg("enable_fine_detection", "True")
    add_launch_arg("input/image", "/sensing/camera/traffic_light/image_raw")
-   # traffic_light_ssd_fine_detector
+   # traffic_light_fine_detector
    add_launch_arg(
-       "onnx_file", os.path.join(ssd_fine_detector_share_dir, "data", "mb2-ssd-lite-tlr.onnx")
+       "fine_detector_model_path",
+       os.path.join(fine_detector_share_dir, "data", "tlr_yolox_s.onnx"),
    )
    add_launch_arg(
-       "label_file", os.path.join(ssd_fine_detector_share_dir, "data", "voc_labels_tl.txt")
+       "fine_detector_label_path",
+       os.path.join(fine_detector_share_dir, "data", "tlr_labels.txt"),
    )
-   add_launch_arg("dnn_header_type", "pytorch")
-   add_launch_arg("fine_detector_precision", "FP32")
-   add_launch_arg("score_thresh", "0.7")
-   add_launch_arg("max_batch_size", "8")
+   add_launch_arg("fine_detector_precision", "fp16")
+   add_launch_arg("fine_detector_score_thresh", "0.3")
+   add_launch_arg("fine_detector_nms_thresh", "0.65")
+   add_launch_arg("approximate_sync", "False")
-   add_launch_arg("mean", "[0.5, 0.5, 0.5]")
-   add_launch_arg("std", "[0.5, 0.5, 0.5]")
    # traffic_light_classifier
    add_launch_arg("classifier_type", "1")
    add_launch_arg(
-       "model_file_path",
-       os.path.join(classifier_share_dir, "data", "traffic_light_classifier_mobilenetv2.onnx"),
+       "classifier_model_path",
+       os.path.join(classifier_share_dir, "data", "traffic_light_classifier_efficientNet_b1.onnx"),
+   )
+   add_launch_arg(
+       "classifier_label_path", os.path.join(classifier_share_dir, "data", "lamp_labels.txt")
    )
-   add_launch_arg("label_file_path", os.path.join(classifier_share_dir, "data", "lamp_labels.txt"))
-   add_launch_arg("precision", "fp16")
-   add_launch_arg("input_c", "3")
-   add_launch_arg("input_h", "224")
-   add_launch_arg("input_w", "224")
+   add_launch_arg("classifier_precision", "fp16")
+   add_launch_arg("classifier_mean", "[123.675, 116.28, 103.53]")
+   add_launch_arg("classifier_std", "[58.395, 57.12, 57.375]")
    add_launch_arg("use_crosswalk_traffic_light_estimator", "True")
    add_launch_arg("use_intra_process", "False")
@@ -80,7 +81,7 @@ def create_parameter_dict(*args):
    container = ComposableNodeContainer(
        name="traffic_light_node_container",
-       namespace="/perception/traffic_light_recognition",
+       namespace="",
        package="rclcpp_components",
        executable=LaunchConfiguration("container_executable"),
        composable_node_descriptions=[
@@ -92,12 +93,11 @@ def create_parameter_dict(*args):
                create_parameter_dict(
                    "approximate_sync",
                    "classifier_type",
-                   "model_file_path",
-                   "label_file_path",
-                   "precision",
-                   "input_c",
-                   "input_h",
-                   "input_w",
+                   "classifier_model_path",
+                   "classifier_label_path",
+                   "classifier_precision",
+                   "classifier_mean",
+                   "classifier_std",
                )
            ],
            remappings=[
@@ -195,28 +195,25 @@ def create_parameter_dict(*args):
        condition=IfCondition(LaunchConfiguration("enable_image_decompressor")),
    )
-   ssd_fine_detector_param = create_parameter_dict(
-       "onnx_file",
-       "label_file",
-       "dnn_header_type",
-       "score_thresh",
-       "max_batch_size",
-       "approximate_sync",
-       "mean",
-       "std",
+   fine_detector_param = create_parameter_dict(
+       "fine_detector_model_path",
+       "fine_detector_label_path",
+       "fine_detector_precision",
+       "fine_detector_score_thresh",
+       "fine_detector_nms_thresh",
    )
-   ssd_fine_detector_param["mode"] = LaunchConfiguration("fine_detector_precision")
    fine_detector_loader = LoadComposableNodes(
        composable_node_descriptions=[
            ComposableNode(
-               package="traffic_light_ssd_fine_detector",
-               plugin="traffic_light::TrafficLightSSDFineDetectorNodelet",
-               name="traffic_light_ssd_fine_detector",
-               parameters=[ssd_fine_detector_param],
+               package="traffic_light_fine_detector",
+               plugin="traffic_light::TrafficLightFineDetectorNodelet",
+               name="traffic_light_fine_detector",
+               parameters=[fine_detector_param],
                remappings=[
                    ("~/input/image", LaunchConfiguration("input/image")),
                    ("~/input/rois", "rough/rois"),
+                   ("~/expect/rois", "expect/rois"),
                    ("~/output/rois", "rois"),
                ],
                extra_arguments=[
diff --git a/launch/tier4_planning_launch/launch/scenario_planning/lane_driving/motion_planning/motion_planning.launch.py b/launch/tier4_planning_launch/launch/scenario_planning/lane_driving/motion_planning/motion_planning.launch.py
index d491e14bcf3e4..94461c1748439 100644
--- a/launch/tier4_planning_launch/launch/scenario_planning/lane_driving/motion_planning/motion_planning.launch.py
+++ b/launch/tier4_planning_launch/launch/scenario_planning/lane_driving/motion_planning/motion_planning.launch.py
@@ -108,7 +108,6 @@ def launch_setup(context, *args, **kwargs):
        parameters=[
            obstacle_velocity_limiter_param,
            vehicle_info_param,
-           {"obstacles.dynamic_source": "static_only"},
        ],
        extra_arguments=[{"use_intra_process_comms": LaunchConfiguration("use_intra_process")}],
    )
diff --git a/localization/yabloc/README.md b/localization/yabloc/README.md
index 34d6c70453c6c..f80dcfc88cf2e 100644
--- a/localization/yabloc/README.md
+++ b/localization/yabloc/README.md
@@ -4,6 +4,10 @@

[![thumbnail](docs/yabloc_thumbnail.jpg)](https://youtu.be/Eaf6r_BNFfk)

+It estimates the vehicle position by matching road surface markings extracted from images against a vector map.
+Point cloud maps and LiDAR are not required.
+YabLoc enables users to localize vehicles that are not equipped with LiDAR, even in environments where point cloud maps are not available.
+
## Packages

- [yabloc_common](yabloc_common/README.md)

@@ -13,8 +17,8 @@

## How to launch YabLoc instead of NDT

-When launching autoware, if you set `localization_mode:=yabloc` as an argument, YabLoc will be launched instead of NDT.
-By default, `localization_mode` is `ndt`.
+When launching autoware, if you set `localization_mode:=camera` as an argument, YabLoc will be launched instead of NDT.
+By default, `localization_mode` is `lidar`.

A sample command to run YabLoc is as follows

@@ -23,9 +27,58 @@
ros2 launch autoware_launch logging_simulator.launch.xml \
  map_path:=$HOME/autoware_map/sample-map-rosbag \
  vehicle_model:=sample_vehicle \
  sensor_model:=sample_sensor_kit \
-  localization_mode:=yabloc
+  localization_mode:=camera
```

## Architecture

![node_diagram](docs/yabloc_architecture.drawio.svg)
+
+## Principle
+
+The diagram below illustrates the basic principle of YabLoc.
+It extracts line segments from the image and keeps those lying in the road area obtained from graph-based segmentation as road surface markings.
+The red line at the center-top of the diagram represents the line segments identified as road surface markings.
+YabLoc transforms these segments for each particle and determines the particle's weight by comparing them with the cost map generated from Lanelet2.
+
+![principle](docs/yabloc_principle.png)
+
+## Visualization
+
+### Core visualization topics
+
+These topics are not visualized by default.
+
+
+| index | topic name | description |
+| ----- | --------------------------------------------------------------- | ----------- |
+| 1 | `/localization/yabloc/pf/predicted_particle_marker` | particle distribution of the particle filter. Red particles are probable candidates. |
+| 2 | `/localization/yabloc/pf/scored_cloud` | 3D projected line segments. The color indicates how well they match the map. |
+| 3 | `/localization/yabloc/image_processing/lanelet2_overlay_image` | overlay of lanelet2 (yellow lines) onto the image based on the estimated pose. If they match the actual road markings well, the localization is performing well. |
+
+### Image topics for debug
+
+These topics are not visualized by default.
+
+
+| index | topic name | description |
+| ----- | ------------------------------------------------------------------------ | ----------- |
+| 1 | `/localization/yabloc/pf/cost_map_image` | cost map made from lanelet2 |
+| 2 | `/localization/yabloc/pf/match_image` | projected line segments |
+| 3 | `/localization/yabloc/image_processing/image_with_colored_line_segment` | classified line segments. Green line segments are used in particle correction. |
+| 4 | `/localization/yabloc/image_processing/lanelet2_overlay_image` | overlay of lanelet2 |
+| 5 | `/localization/yabloc/image_processing/segmented_image` | graph-based segmentation result |
+
+## Limitation
+
+- Running YabLoc and NDT simultaneously is not supported.
+  - This is because running both at the same time may be computationally too expensive.
+  - Also, in most cases, NDT is superior to YabLoc, so there is little benefit to running them at the same time.
+- It does not estimate roll and pitch, so some of the perception nodes may not work well.
+- It does not support multiple cameras yet, but support is planned.
+- In places with few road surface markings, such as intersections, the estimation relies heavily on GNSS, IMU, and the vehicle's wheel odometry.
+- If the road boundary or road surface markings are not included in the Lanelet2 map, the estimation is likely to fail.
+- The sample rosbag provided in the autoware tutorial does not include images, so it is not possible to run YabLoc with it.
+  - If you want to test the functionality of YabLoc, the sample test data provided in this [PR](https://github.com/autowarefoundation/autoware.universe/pull/3946) is useful.
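To make the weighting step in the Principle section concrete, here is a minimal, self-contained sketch of how a particle's weight could be computed from the extracted segments and the Lanelet2-derived cost map. All type and function names are illustrative assumptions, not the actual YabLoc implementation.

```cpp
#include <cmath>
#include <vector>

// Hypothetical, simplified types -- not the actual YabLoc API.
struct Pose2d
{
  double x, y, yaw;
};
struct LineSegment
{
  double x0, y0, x1, y1;  // end points in the vehicle frame
};
struct CostMap
{
  // Stub: the real node rasterizes Lanelet2 linestrings into a cost image.
  double at(double /*x*/, double /*y*/) const { return 0.0; }
};

// Project a vehicle-frame point into the map frame with the particle's pose.
inline void toMap(const Pose2d & p, double x, double y, double & mx, double & my)
{
  const double c = std::cos(p.yaw), s = std::sin(p.yaw);
  mx = p.x + c * x - s * y;
  my = p.y + s * x + c * y;
}

// Weight a particle by sampling the cost map along every projected segment:
// the better the segments agree with the map markings, the larger the weight.
double computeWeight(
  const Pose2d & particle, const std::vector<LineSegment> & segments, const CostMap & cost_map)
{
  double total_cost = 0.0;
  int n_samples = 0;
  for (const auto & seg : segments) {
    constexpr int kSteps = 10;
    for (int i = 0; i <= kSteps; ++i) {
      const double t = static_cast<double>(i) / kSteps;
      double mx, my;
      toMap(particle, seg.x0 + t * (seg.x1 - seg.x0), seg.y0 + t * (seg.y1 - seg.y0), mx, my);
      total_cost += cost_map.at(mx, my);
      ++n_samples;
    }
  }
  // Lower average cost (better agreement with the map) yields a higher weight.
  return n_samples > 0 ? std::exp(-total_cost / n_samples) : 1.0;
}
```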
diff --git a/localization/yabloc/docs/yabloc_image_description.png b/localization/yabloc/docs/yabloc_image_description.png
new file mode 100644
index 0000000000000..d76977535d306
Binary files /dev/null and b/localization/yabloc/docs/yabloc_image_description.png differ
diff --git a/localization/yabloc/docs/yabloc_principle.png b/localization/yabloc/docs/yabloc_principle.png
new file mode 100644
index 0000000000000..0d7a8294babed
Binary files /dev/null and b/localization/yabloc/docs/yabloc_principle.png differ
diff --git a/localization/yabloc/docs/yabloc_rviz_description.png b/localization/yabloc/docs/yabloc_rviz_description.png
new file mode 100644
index 0000000000000..bd931b139f93b
Binary files /dev/null and b/localization/yabloc/docs/yabloc_rviz_description.png differ
diff --git a/perception/crosswalk_traffic_light_estimator/README.md b/perception/crosswalk_traffic_light_estimator/README.md
index 54d7c561e05c2..e23b454905081 100644
--- a/perception/crosswalk_traffic_light_estimator/README.md
+++ b/perception/crosswalk_traffic_light_estimator/README.md
@@ -8,17 +8,17 @@

### Input

-| Name | Type | Description |
-| ------------------------------------ | -------------------------------------------------------- | ------------------ |
-| `~/input/vector_map` | `autoware_auto_mapping_msgs::msg::HADMapBin` | vector map |
-| `~/input/route` | `autoware_planning_msgs::msg::LaneletRoute` | route |
-| `~/input/classified/traffic_signals` | `autoware_auto_perception_msgs::msg::TrafficSignalArray` | classified signals |
+| Name | Type | Description |
+| ------------------------------------ | ------------------------------------------------ | ------------------ |
+| `~/input/vector_map` | `autoware_auto_mapping_msgs::msg::HADMapBin` | vector map |
+| `~/input/route` | `autoware_planning_msgs::msg::LaneletRoute` | route |
+| `~/input/classified/traffic_signals` | `tier4_perception_msgs::msg::TrafficSignalArray` | classified signals |

### Output

-| Name | Type | Description |
-| -------------------------- | -------------------------------------------------------- | --------------------------------------------------------- |
-| `~/output/traffic_signals` | `autoware_auto_perception_msgs::msg::TrafficSignalArray` | output that contains estimated pedestrian traffic signals |
+| Name | Type | Description |
+| -------------------------- | ------------------------------------------------ | --------------------------------------------------------- |
+| `~/output/traffic_signals` | `tier4_perception_msgs::msg::TrafficSignalArray` | output that contains estimated pedestrian traffic signals |

## Parameters

diff --git a/perception/crosswalk_traffic_light_estimator/include/crosswalk_traffic_light_estimator/node.hpp b/perception/crosswalk_traffic_light_estimator/include/crosswalk_traffic_light_estimator/node.hpp
index 852e99ff7ba54..0850436851e6a 100644
--- a/perception/crosswalk_traffic_light_estimator/include/crosswalk_traffic_light_estimator/node.hpp
+++ b/perception/crosswalk_traffic_light_estimator/include/crosswalk_traffic_light_estimator/node.hpp
@@ -22,11 +22,11 @@

 #include
 #include
-#include
-#include
-#include
 #include
 #include
+#include
+#include
+#include

 #include
 #include
@@ -43,13 +43,13 @@ namespace
traffic_light { using autoware_auto_mapping_msgs::msg::HADMapBin; -using autoware_auto_perception_msgs::msg::TrafficLight; -using autoware_auto_perception_msgs::msg::TrafficSignal; -using autoware_auto_perception_msgs::msg::TrafficSignalArray; using autoware_planning_msgs::msg::LaneletRoute; using tier4_autoware_utils::DebugPublisher; using tier4_autoware_utils::StopWatch; using tier4_debug_msgs::msg::Float64Stamped; +using tier4_perception_msgs::msg::TrafficLightElement; +using tier4_perception_msgs::msg::TrafficSignal; +using tier4_perception_msgs::msg::TrafficSignalArray; using TrafficSignalAndTime = std::pair; using TrafficLightIdMap = std::unordered_map; diff --git a/perception/crosswalk_traffic_light_estimator/package.xml b/perception/crosswalk_traffic_light_estimator/package.xml index 1a3b90f617f4e..435ecf6e6fa3d 100644 --- a/perception/crosswalk_traffic_light_estimator/package.xml +++ b/perception/crosswalk_traffic_light_estimator/package.xml @@ -11,13 +11,13 @@ autoware_cmake autoware_auto_mapping_msgs - autoware_auto_perception_msgs autoware_auto_planning_msgs autoware_planning_msgs lanelet2_extension rclcpp rclcpp_components tier4_autoware_utils + tier4_perception_msgs ament_lint_auto autoware_lint_common diff --git a/perception/crosswalk_traffic_light_estimator/src/node.cpp b/perception/crosswalk_traffic_light_estimator/src/node.cpp index 8400a1708e6af..55d272cb71cfe 100644 --- a/perception/crosswalk_traffic_light_estimator/src/node.cpp +++ b/perception/crosswalk_traffic_light_estimator/src/node.cpp @@ -160,7 +160,7 @@ void CrosswalkTrafficLightEstimatorNode::onTrafficLightArray( TrafficLightIdMap traffic_light_id_map; for (const auto & traffic_signal : msg->signals) { - traffic_light_id_map[traffic_signal.map_primitive_id] = + traffic_light_id_map[traffic_signal.traffic_light_id] = std::pair(traffic_signal, get_clock()->now()); } @@ -185,17 +185,17 @@ void CrosswalkTrafficLightEstimatorNode::updateLastDetectedSignal( const TrafficLightIdMap & traffic_light_id_map) { for (const auto & input_traffic_signal : traffic_light_id_map) { - const auto & lights = input_traffic_signal.second.first.lights; + const auto & elements = input_traffic_signal.second.first.elements; - if (lights.empty()) { + if (elements.empty()) { continue; } - if (lights.front().color == TrafficLight::UNKNOWN) { + if (elements.front().color == TrafficLightElement::UNKNOWN) { continue; } - const auto & id = input_traffic_signal.second.first.map_primitive_id; + const auto & id = input_traffic_signal.second.first.traffic_light_id; if (last_detect_color_.count(id) == 0) { last_detect_color_.insert(std::make_pair(id, input_traffic_signal.second)); @@ -207,7 +207,7 @@ void CrosswalkTrafficLightEstimatorNode::updateLastDetectedSignal( std::vector erase_id_list; for (auto & last_traffic_signal : last_detect_color_) { - const auto & id = last_traffic_signal.second.first.map_primitive_id; + const auto & id = last_traffic_signal.second.first.traffic_light_id; if (traffic_light_id_map.count(id) == 0) { // hold signal recognition results for [last_detect_color_hold_time_] seconds. 
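The hold behavior referenced in the comment above boils down to the following sketch; the types are simplified stand-ins (the node's `TrafficLightIdMap` apparently keys a pair of signal and timestamp by traffic light ID), not the actual implementation.

```cpp
#include <cstdint>
#include <unordered_map>
#include <utility>
#include <vector>

// Simplified stand-ins for the node's types (illustration only).
struct Signal
{
  // color, confidence, ...
};
// traffic light ID -> (last confident detection, stamp in seconds)
using IdMap = std::unordered_map<int64_t, std::pair<Signal, double>>;

// Keep the last confidently detected signal per traffic light, and forget it
// once it has not been re-detected for hold_time seconds.
void updateHeldSignals(IdMap & held, const IdMap & latest, double now, double hold_time)
{
  for (const auto & [id, entry] : latest) {
    held[id] = entry;  // a fresh detection overwrites the held one
  }
  std::vector<int64_t> expired;
  for (const auto & [id, entry] : held) {
    if (latest.count(id) == 0 && now - entry.second > hold_time) {
      expired.push_back(id);
    }
  }
  for (const auto id : expired) {
    held.erase(id);
  }
}
```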
@@ -233,11 +233,11 @@ void CrosswalkTrafficLightEstimatorNode::setCrosswalkTrafficSignal( const auto ll_traffic_light = static_cast(traffic_light); TrafficSignal output_traffic_signal; - TrafficLight output_traffic_light; + TrafficLightElement output_traffic_light; output_traffic_light.color = color; output_traffic_light.confidence = 1.0; - output_traffic_signal.lights.push_back(output_traffic_light); - output_traffic_signal.map_primitive_id = ll_traffic_light.id(); + output_traffic_signal.elements.push_back(output_traffic_light); + output_traffic_signal.traffic_light_id = ll_traffic_light.id(); msg.signals.push_back(output_traffic_signal); } } @@ -265,13 +265,14 @@ lanelet::ConstLanelets CrosswalkTrafficLightEstimatorNode::getNonRedLanelets( continue; } - const auto current_is_not_red = current_detected_signal - ? current_detected_signal.get() == TrafficLight::GREEN || - current_detected_signal.get() == TrafficLight::AMBER - : true; + const auto current_is_not_red = + current_detected_signal ? current_detected_signal.get() == TrafficLightElement::GREEN || + current_detected_signal.get() == TrafficLightElement::AMBER + : true; const auto current_is_unknown_or_none = - current_detected_signal ? current_detected_signal.get() == TrafficLight::UNKNOWN : true; + current_detected_signal ? current_detected_signal.get() == TrafficLightElement::UNKNOWN + : true; const auto last_detected_signal = getHighestConfidenceTrafficSignal(traffic_lights_for_vehicle, last_detect_color_); @@ -281,8 +282,8 @@ lanelet::ConstLanelets CrosswalkTrafficLightEstimatorNode::getNonRedLanelets( } const auto was_not_red = current_is_unknown_or_none && - (last_detected_signal.get() == TrafficLight::GREEN || - last_detected_signal.get() == TrafficLight::AMBER) && + (last_detected_signal.get() == TrafficLightElement::GREEN || + last_detected_signal.get() == TrafficLightElement::AMBER) && use_last_detect_color_; if (!current_is_not_red && !was_not_red) { @@ -323,12 +324,13 @@ uint8_t CrosswalkTrafficLightEstimatorNode::estimateCrosswalkTrafficSignal( } if (has_straight_non_red_lane || has_related_non_red_tl) { - return TrafficLight::RED; + return TrafficLightElement::RED; } const auto has_merge_lane = hasMergeLane(non_red_lanelets, routing_graph_ptr_); - return !has_merge_lane && has_left_non_red_lane && has_right_non_red_lane ? TrafficLight::RED - : TrafficLight::UNKNOWN; + return !has_merge_lane && has_left_non_red_lane && has_right_non_red_lane + ? 
TrafficLightElement::RED + : TrafficLightElement::UNKNOWN; } boost::optional CrosswalkTrafficLightEstimatorNode::getHighestConfidenceTrafficSignal( @@ -348,13 +350,13 @@ boost::optional CrosswalkTrafficLightEstimatorNode::getHighestConfidenc continue; } - const auto & lights = traffic_light_id_map.at(id).first.lights; - if (lights.empty()) { + const auto & elements = traffic_light_id_map.at(id).first.elements; + if (elements.empty()) { continue; } - const auto & color = lights.front().color; - const auto & confidence = lights.front().confidence; + const auto & color = elements.front().color; + const auto & confidence = elements.front().confidence; if (confidence < highest_confidence) { continue; } diff --git a/perception/traffic_light_classifier/CMakeLists.txt b/perception/traffic_light_classifier/CMakeLists.txt index e125abc7b435a..aeea36ecde047 100644 --- a/perception/traffic_light_classifier/CMakeLists.txt +++ b/perception/traffic_light_classifier/CMakeLists.txt @@ -84,14 +84,14 @@ function(download FILE_NAME FILE_HASH) else() message(STATUS "diff ${FILE_NAME}") message(STATUS "File hash changes. Downloading now ...") - file(DOWNLOAD https://awf.ml.dev.web.auto/perception/models/${FILE_NAME} ${FILE_PATH} STATUS DOWNLOAD_STATUS TIMEOUT 300) + file(DOWNLOAD https://awf.ml.dev.web.auto/perception/models/traffic_light_classifier/v1/${FILE_NAME} ${FILE_PATH} STATUS DOWNLOAD_STATUS TIMEOUT 300) list(GET DOWNLOAD_STATUS 0 STATUS_CODE) list(GET DOWNLOAD_STATUS 1 ERROR_MESSAGE) endif() else() message(STATUS "not found ${FILE_NAME}") message(STATUS "File doesn't exists. Downloading now ...") - file(DOWNLOAD https://awf.ml.dev.web.auto/perception/models/${FILE_NAME} ${FILE_PATH} STATUS DOWNLOAD_STATUS TIMEOUT 300) + file(DOWNLOAD https://awf.ml.dev.web.auto/perception/models/traffic_light_classifier/v1/${FILE_NAME} ${FILE_PATH} STATUS DOWNLOAD_STATUS TIMEOUT 300) list(GET DOWNLOAD_STATUS 0 STATUS_CODE) list(GET DOWNLOAD_STATUS 1 ERROR_MESSAGE) endif() @@ -101,8 +101,9 @@ function(download FILE_NAME FILE_HASH) message(FATAL_ERROR "Error occurred during download: ${ERROR_MESSAGE}") endif() endfunction() -download(traffic_light_classifier_mobilenetv2.onnx 7dc31c696b0400ddfc2cc5521586fa51) -download(lamp_labels.txt 20167c8e9a1f9d2ec7b0b0088c4100f0) +download(traffic_light_classifier_mobilenetv2.onnx caa51f2080aa2df943e4f884c41898eb) +download(traffic_light_classifier_efficientNet_b1.onnx 82baba4fcf1abe0c040cd55005e34510) +download(lamp_labels.txt 4b2cf910d97d05d464e7c26901af3d4c) find_package(ament_cmake_auto REQUIRED) ament_auto_find_build_dependencies() @@ -113,15 +114,16 @@ if(TRT_AVAIL AND CUDA_AVAIL AND CUDNN_AVAIL) add_definitions(-DENABLE_GPU) include_directories( - utils ${OpenCV_INCLUDE_DIRS} ${CUDA_INCLUDE_DIRS} ) - ament_auto_add_library(libutils SHARED - utils/trt_common.cpp + ament_auto_add_library(traffic_light_classifier_nodelet SHARED + src/color_classifier.cpp + src/cnn_classifier.cpp + src/nodelet.cpp ) - target_link_libraries(libutils + target_link_libraries(traffic_light_classifier_nodelet ${OpenCV_LIBRARIES} ${NVINFER} ${NVONNXPARSER} @@ -131,16 +133,6 @@ if(TRT_AVAIL AND CUDA_AVAIL AND CUDNN_AVAIL) ${CUDNN_LIBRARY} stdc++fs ) - - ament_auto_add_library(traffic_light_classifier_nodelet SHARED - src/color_classifier.cpp - src/cnn_classifier.cpp - src/nodelet.cpp - ) - target_link_libraries(traffic_light_classifier_nodelet - libutils - ${OpenCV_LIBRARIES} - ) rclcpp_components_register_node(traffic_light_classifier_nodelet PLUGIN "traffic_light::TrafficLightClassifierNodelet" 
    EXECUTABLE traffic_light_classifier_node
@@ -153,7 +145,14 @@ if(TRT_AVAIL AND CUDA_AVAIL AND CUDNN_AVAIL)
    src/single_image_debug_inference_node.cpp
  )
  target_link_libraries(single_image_debug_inference_node
-    libutils
+    ${OpenCV_LIBRARIES}
+    ${NVINFER}
+    ${NVONNXPARSER}
+    ${NVINFER_PLUGIN}
+    ${CUDA_LIBRARIES}
+    ${CUBLAS_LIBRARIES}
+    ${CUDNN_LIBRARY}
+    stdc++fs
    opencv_core
    opencv_highgui
  )
diff --git a/perception/traffic_light_classifier/README.md b/perception/traffic_light_classifier/README.md
index 0935a451862e1..e2faf1633d331 100644
--- a/perception/traffic_light_classifier/README.md
+++ b/perception/traffic_light_classifier/README.md
@@ -8,8 +8,14 @@ traffic_light_classifier is a package for classifying traffic light labels using

### cnn_classifier

-Traffic light labels are classified by MobileNetV2.
-Totally 37600 (26300 for training, 6800 for evaluation and 4500 for test) TIER IV internal images of Japanese traffic lights were used for fine-tuning.
+Traffic light labels are classified by EfficientNet-b1 or MobileNet-v2.
+In total, 83,400 TIER IV internal images of Japanese traffic lights (58,600 for training, 14,800 for evaluation, and 10,000 for testing) were used for fine-tuning.
+The information on the models is listed below:
+
+| Name | Input Size | Test Accuracy |
+| --------------- | ---------- | ------------- |
+| EfficientNet-b1 | 128 x 128 | 99.76% |
+| MobileNet-v2 | 224 x 224 | 99.81% |

### hsv_classifier

@@ -30,17 +36,17 @@ These colors and shapes are assigned to the message as follows:

### Input

-| Name | Type | Description |
-| --------------- | ---------------------------------------------------------- | ---------------------- |
-| `~/input/image` | `sensor_msgs::msg::Image` | input image |
-| `~/input/rois` | `autoware_auto_perception_msgs::msg::TrafficLightRoiArray` | rois of traffic lights |
+| Name | Type | Description |
+| --------------- | -------------------------------------------------- | ---------------------- |
+| `~/input/image` | `sensor_msgs::msg::Image` | input image |
+| `~/input/rois` | `tier4_perception_msgs::msg::TrafficLightRoiArray` | rois of traffic lights |

### Output

-| Name | Type | Description |
-| -------------------------- | -------------------------------------------------------- | ------------------- |
-| `~/output/traffic_signals` | `autoware_auto_perception_msgs::msg::TrafficSignalArray` | classified signals |
-| `~/output/debug/image` | `sensor_msgs::msg::Image` | image for debugging |
+| Name | Type | Description |
+| -------------------------- | ------------------------------------------------ | ------------------- |
+| `~/output/traffic_signals` | `tier4_perception_msgs::msg::TrafficSignalArray` | classified signals |
+| `~/output/debug/image` | `sensor_msgs::msg::Image` | image for debugging |

## Parameters

@@ -54,19 +60,14 @@ These colors and shapes are assigned to the message as follows:

#### cnn_classifier

-| Name | Type | Description |
-| ----------------- | ------ | ------------------------------------------------- |
-| `model_file_path` | str | path to the model file |
-| `label_file_path` | str | path to the label file |
-| `precision` | str | TensorRT precision, `fp16` or `int8` |
-| `input_c` | str | the channel size of an input image |
-| `input_h` | str | the height of an input image |
-| `input_w` | str | the width of an input image |
-| `input_name` | str | the name of neural network's input layer |
-| `output_name` | str | the name of neural network's output name |
-| `mean` | double | mean values for image normalization |
-| `std` | double | std values for image normalization |
-| `build_only` | bool | shutdown node after TensorRT engine file is built |
+| Name | Type | Description |
+| ----------------------- | ---------------- | ------------------------------------ |
+| `classifier_label_path` | str | path to the label file |
+| `classifier_model_path` | str | path to the model file |
+| `classifier_precision` | str | TensorRT precision, `fp16` or `int8` |
+| `classifier_mean` | vector\<double\> | 3-channel input image mean |
+| `classifier_std` | vector\<double\> | 3-channel input image std |
+| `apply_softmax` | bool | whether or not to apply softmax |
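For reference, `classifier_mean` and `classifier_std` are given on the raw 0-255 pixel scale and applied per channel, roughly as in the sketch below (simplified; per the `preProcess` diff later in this patch, the node additionally letterboxes the image to the model input size and converts HWC to CHW):

```cpp
#include <cstddef>
#include <cstdint>
#include <vector>

// Minimal sketch of the per-channel normalization implied by classifier_mean /
// classifier_std: out = (pixel - mean[c]) / std[c], on the raw 0-255 scale.
// Input is a 3-channel, 8-bit image in HWC layout.
std::vector<float> normalize(
  const std::vector<uint8_t> & hwc_image, const std::vector<double> & mean,
  const std::vector<double> & std_dev)
{
  std::vector<float> out(hwc_image.size());
  for (std::size_t i = 0; i < hwc_image.size(); ++i) {
    const std::size_t c = i % 3;  // channel index within an HWC pixel triplet
    out[i] = static_cast<float>((hwc_image[i] - mean[c]) / std_dev[c]);
  }
  return out;
}
```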
#### hsv_classifier

@@ -93,10 +94,10 @@ These colors and shapes are assigned to the message as follows:

## Customization of CNN model

-Currently, in Autoware, [MobileNetV2](https://arxiv.org/abs/1801.04381v3) is used as CNN classifier by default. The corresponding onnx file is `data/traffic_light_classifier_mobilenetv2.onnx`(This file will be downloaded during the build process).
+Currently, in Autoware, [MobileNetV2](https://arxiv.org/abs/1801.04381v3) and [EfficientNet-b1](https://arxiv.org/abs/1905.11946v5) are provided.
+The corresponding onnx files are `data/traffic_light_classifier_mobilenetv2.onnx` and `data/traffic_light_classifier_efficientNet_b1.onnx` (These files will be downloaded during the build process).

You can also apply other models, for example the ones shown below.

-- [EfficientNet](https://arxiv.org/abs/1905.11946v5)
- [ResNet](https://openaccess.thecvf.com/content_cvpr_2016/html/He_Deep_Residual_Learning_CVPR_2016_paper.html)
- [MobileNetV3](https://arxiv.org/abs/1905.02244)
  ...

@@ -153,7 +154,7 @@
python3 pytorch2onnx.py YOUR_CONFIG.py ...
```

After obtaining your onnx model, update the parameters defined in the launch file (e.g. `classifier_model_path`, `classifier_label_path`...).
-Note that, we only support labels defined in [autoware_auto_perception_msgs::msg::TrafficLight](https://github.com/tier4/autoware_auto_msgs/blob/tier4/main/autoware_auto_perception_msgs/msg/TrafficLight.idl).
+Note that we only support labels defined in [tier4_perception_msgs::msg::TrafficLightElement](https://github.com/tier4/tier4_autoware_msgs/blob/tier4/universe/tier4_perception_msgs/msg/traffic_light/TrafficLightElement.msg).

## Assumptions / Known limits

@@ -191,6 +192,8 @@ Example:

[1] M. Sandler, A. Howard, M. Zhu, A. Zhmoginov and L. Chen, "MobileNetV2: Inverted Residuals and Linear Bottlenecks," 2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition, Salt Lake City, UT, 2018, pp. 4510-4520, doi: 10.1109/CVPR.2018.00474.
+
+[2] Tan, Mingxing, and Quoc Le. "EfficientNet: Rethinking model scaling for convolutional neural networks." International Conference on Machine Learning. PMLR, 2019.
+
## (Optional) Future extensions / Unimplemented parts

diff --git a/perception/traffic_light_classifier/package.xml b/perception/traffic_light_classifier/package.xml
index 2f7b07962a23b..89129cf0fe9d4 100644
--- a/perception/traffic_light_classifier/package.xml
+++ b/perception/traffic_light_classifier/package.xml
@@ -9,10 +9,11 @@
  Apache License 2.0

  ament_cmake_auto
-  autoware_cmake
  wget
-  autoware_auto_perception_msgs
+  autoware_cmake
+
+  cuda_utils
  cv_bridge
  image_transport
  libboost-filesystem-dev
@@ -20,6 +21,8 @@
  rclcpp
  rclcpp_components
  sensor_msgs
+  tensorrt_common
+  tier4_perception_msgs

  ament_cmake
diff --git a/perception/traffic_light_classifier/src/cnn_classifier.cpp b/perception/traffic_light_classifier/src/cnn_classifier.cpp
index 621466206ac01..45ef5f94af146 100644
--- a/perception/traffic_light_classifier/src/cnn_classifier.cpp
+++ b/perception/traffic_light_classifier/src/cnn_classifier.cpp
@@ -1,4 +1,4 @@
-// Copyright 2020 Tier IV, Inc.
+// Copyright 2023 TIER IV, Inc.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -33,23 +33,41 @@ CNNClassifier::CNNClassifier(rclcpp::Node * node_ptr) : node_ptr_(node_ptr)
  std::string precision;
  std::string label_file_path;
  std::string model_file_path;
-  precision = node_ptr_->declare_parameter("precision", "fp16");
-  label_file_path = node_ptr_->declare_parameter("label_file_path", "labels.txt");
-  model_file_path = node_ptr_->declare_parameter("model_file_path", "model.onnx");
-  input_c_ = node_ptr_->declare_parameter("input_c", 3);
-  input_h_ = node_ptr_->declare_parameter("input_h", 224);
-  input_w_ = node_ptr_->declare_parameter("input_w", 224);
-  mean_ = node_ptr_->declare_parameter("mean", std::vector({0.242, 0.193, 0.201}));
-  std_ = node_ptr_->declare_parameter("std", std::vector({1.0, 1.0, 1.0}));
-  std::string input_name = node_ptr_->declare_parameter("input_name", std::string("input_0"));
-  std::string output_name = node_ptr_->declare_parameter("output_name", std::string("output_0"));
-  apply_softmax_ = node_ptr_->declare_parameter("apply_softmax", true);
+  precision = node_ptr_->declare_parameter("classifier_precision", "fp16");
+  label_file_path = node_ptr_->declare_parameter("classifier_label_path", "labels.txt");
+  model_file_path = node_ptr_->declare_parameter("classifier_model_path", "model.onnx");
+  apply_softmax_ = node_ptr_->declare_parameter("apply_softmax", false);
+  mean_ =
+    node_ptr->declare_parameter("classifier_mean", std::vector{123.675, 116.28, 103.53});
+  std_ = node_ptr->declare_parameter("classifier_std", std::vector{58.395, 57.12, 57.375});
+  if (mean_.size() != 3 || std_.size() != 3) {
+    RCLCPP_ERROR(node_ptr->get_logger(), "classifier_mean and classifier_std must be of size 3");
+    return;
+  }
  readLabelfile(label_file_path, labels_);
-  trt_ = std::make_shared(model_file_path, precision, input_name, output_name);
-  trt_->setup();
+  tensorrt_common::BatchConfig batch_config{1, 1, 1};
+  size_t max_workspace_size = 1 << 30;
+  trt_common_ = std::make_unique(
+    model_file_path, precision, nullptr, batch_config, max_workspace_size);
+  trt_common_->setup();
+  if (!trt_common_->isInitialized()) {
+    return;
+  }
+  const auto input_dims = trt_common_->getBindingDimensions(0);
+  if (input_dims.nbDims != 4) {
+    RCLCPP_ERROR(node_ptr_->get_logger(), "Model input dimension must be 4!");
+  }
+  batch_size_ = input_dims.d[0];
+  input_c_ = input_dims.d[1];
+  input_h_ =
input_dims.d[2]; + input_w_ = input_dims.d[3]; + num_input_ = batch_size_ * input_c_ * input_h_ * input_w_; + const auto output_dims = trt_common_->getBindingDimensions(1); + num_output_ = + std::accumulate(output_dims.d, output_dims.d + output_dims.nbDims, 1, std::multiplies()); if (node_ptr_->declare_parameter("build_only", false)) { RCLCPP_INFO(node_ptr_->get_logger(), "TensorRT engine is built and shutdown node."); rclcpp::shutdown(); @@ -57,36 +75,33 @@ CNNClassifier::CNNClassifier(rclcpp::Node * node_ptr) : node_ptr_(node_ptr) } bool CNNClassifier::getTrafficSignal( - const cv::Mat & input_image, autoware_auto_perception_msgs::msg::TrafficSignal & traffic_signal) + const cv::Mat & input_image, tier4_perception_msgs::msg::TrafficSignal & traffic_signal) { - if (!trt_->isInitialized()) { + if (!trt_common_->isInitialized()) { RCLCPP_WARN(node_ptr_->get_logger(), "failed to init tensorrt"); return false; } - int num_input = trt_->getNumInput(); - int num_output = trt_->getNumOutput(); - - std::vector input_data_host(num_input); + std::vector input_data_host(num_input_); cv::Mat image = input_image.clone(); preProcess(image, input_data_host, true); - auto input_data_device = Tn::make_unique(num_input); + auto input_data_device = cuda_utils::make_unique(num_input_); cudaMemcpy( - input_data_device.get(), input_data_host.data(), num_input * sizeof(float), + input_data_device.get(), input_data_host.data(), num_input_ * sizeof(float), cudaMemcpyHostToDevice); - auto output_data_device = Tn::make_unique(num_output); + auto output_data_device = cuda_utils::make_unique(num_output_); // do inference std::vector bindings = {input_data_device.get(), output_data_device.get()}; - trt_->context_->executeV2(bindings.data()); + trt_common_->enqueueV2(bindings.data(), *stream_, nullptr); - std::vector output_data_host(num_output); + std::vector output_data_host(num_output_); cudaMemcpy( - output_data_host.data(), output_data_device.get(), num_output * sizeof(float), + output_data_host.data(), output_data_device.get(), num_output_ * sizeof(float), cudaMemcpyDeviceToHost); postProcess(output_data_host, traffic_signal, apply_softmax_); @@ -101,17 +116,17 @@ bool CNNClassifier::getTrafficSignal( } void CNNClassifier::outputDebugImage( - cv::Mat & debug_image, const autoware_auto_perception_msgs::msg::TrafficSignal & traffic_signal) + cv::Mat & debug_image, const tier4_perception_msgs::msg::TrafficSignal & traffic_signal) { float probability; std::string label; - for (std::size_t i = 0; i < traffic_signal.lights.size(); i++) { - auto light = traffic_signal.lights.at(i); + for (std::size_t i = 0; i < traffic_signal.elements.size(); i++) { + auto light = traffic_signal.elements.at(i); const auto light_label = state2label_[light.color] + "-" + state2label_[light.shape]; label += light_label; // all lamp confidence are the same probability = light.confidence; - if (i < traffic_signal.lights.size() - 1) { + if (i < traffic_signal.elements.size() - 1) { label += ","; } } @@ -134,11 +149,13 @@ void CNNClassifier::outputDebugImage( void CNNClassifier::preProcess(cv::Mat & image, std::vector & input_tensor, bool normalize) { - /* normalize */ - /* ((channel[0] / 255) - mean[0]) / std[0] */ - - // cv::cvtColor(image, image, cv::COLOR_BGR2RGB, 3); - cv::resize(image, image, cv::Size(input_w_, input_h_)); + const float scale = + std::min(static_cast(input_w_) / image.cols, static_cast(input_h_) / image.rows); + const auto scale_size = cv::Size(image.cols * scale, image.rows * scale); + cv::resize(image, image, 
scale_size, 0, 0, cv::INTER_CUBIC); + const auto bottom = input_h_ - image.rows; + const auto right = input_w_ - image.cols; + copyMakeBorder(image, image, 0, bottom, 0, right, cv::BORDER_CONSTANT, {0, 0, 0}); const size_t strides_cv[3] = { static_cast(input_w_ * input_c_), static_cast(input_c_), 1}; @@ -151,8 +168,7 @@ void CNNClassifier::preProcess(cv::Mat & image, std::vector & input_tenso const size_t offset_cv = i * strides_cv[0] + j * strides_cv[1] + k * strides_cv[2]; const size_t offset = k * strides[0] + i * strides[1] + j * strides[2]; if (normalize) { - input_tensor[offset] = - ((static_cast(image.data[offset_cv]) / 255) - mean_[k]) / std_[k]; + input_tensor[offset] = (static_cast(image.data[offset_cv]) - mean_[k]) / std_[k]; } else { input_tensor[offset] = static_cast(image.data[offset_cv]); } @@ -162,19 +178,14 @@ void CNNClassifier::preProcess(cv::Mat & image, std::vector & input_tenso } bool CNNClassifier::postProcess( - std::vector & output_tensor, - autoware_auto_perception_msgs::msg::TrafficSignal & traffic_signal, bool apply_softmax) + std::vector & output_tensor, tier4_perception_msgs::msg::TrafficSignal & traffic_signal, + bool apply_softmax) { std::vector probs; - int num_output = trt_->getNumOutput(); if (apply_softmax) { - calcSoftmax(output_tensor, probs, num_output); + calcSoftmax(output_tensor, probs, num_output_); } - std::vector sorted_indices = argsort(output_tensor, num_output); - - // ROS_INFO("label: %s, score: %.2f\%", - // labels_[sorted_indices[0]].c_str(), - // probs[sorted_indices[0]] * 100); + std::vector sorted_indices = argsort(output_tensor, num_output_); size_t max_indice = sorted_indices.front(); std::string match_label = labels_[max_indice]; @@ -194,27 +205,27 @@ bool CNNClassifier::postProcess( node_ptr_->get_logger(), "cnn_classifier does not have a key [%s]", label.c_str()); continue; } - autoware_auto_perception_msgs::msg::TrafficLight light; + tier4_perception_msgs::msg::TrafficLightElement element; if (label.find("-") != std::string::npos) { // found "-" delimiter in label string std::vector color_and_shape; boost::algorithm::split(color_and_shape, label, boost::is_any_of("-")); - light.color = label2state_[color_and_shape.at(0)]; - light.shape = label2state_[color_and_shape.at(1)]; + element.color = label2state_[color_and_shape.at(0)]; + element.shape = label2state_[color_and_shape.at(1)]; } else { - if (label == state2label_[autoware_auto_perception_msgs::msg::TrafficLight::UNKNOWN]) { - light.color = autoware_auto_perception_msgs::msg::TrafficLight::UNKNOWN; - light.shape = autoware_auto_perception_msgs::msg::TrafficLight::UNKNOWN; + if (label == state2label_[tier4_perception_msgs::msg::TrafficLightElement::UNKNOWN]) { + element.color = tier4_perception_msgs::msg::TrafficLightElement::UNKNOWN; + element.shape = tier4_perception_msgs::msg::TrafficLightElement::UNKNOWN; } else if (isColorLabel(label)) { - light.color = label2state_[label]; - light.shape = autoware_auto_perception_msgs::msg::TrafficLight::CIRCLE; + element.color = label2state_[label]; + element.shape = tier4_perception_msgs::msg::TrafficLightElement::CIRCLE; } else { - light.color = autoware_auto_perception_msgs::msg::TrafficLight::GREEN; - light.shape = label2state_[label]; + element.color = tier4_perception_msgs::msg::TrafficLightElement::GREEN; + element.shape = label2state_[label]; } } - light.confidence = probability; - traffic_signal.lights.push_back(light); + element.confidence = probability; + traffic_signal.elements.push_back(element); } return true; @@ -262,12 
+273,12 @@ std::vector CNNClassifier::argsort(std::vector & tensor, int num_ bool CNNClassifier::isColorLabel(const std::string label) { - using autoware_auto_perception_msgs::msg::TrafficSignal; + using tier4_perception_msgs::msg::TrafficSignal; if ( - label == state2label_[autoware_auto_perception_msgs::msg::TrafficLight::GREEN] || - label == state2label_[autoware_auto_perception_msgs::msg::TrafficLight::AMBER] || - label == state2label_[autoware_auto_perception_msgs::msg::TrafficLight::RED] || - label == state2label_[autoware_auto_perception_msgs::msg::TrafficLight::WHITE]) { + label == state2label_[tier4_perception_msgs::msg::TrafficLightElement::GREEN] || + label == state2label_[tier4_perception_msgs::msg::TrafficLightElement::AMBER] || + label == state2label_[tier4_perception_msgs::msg::TrafficLightElement::RED] || + label == state2label_[tier4_perception_msgs::msg::TrafficLightElement::WHITE]) { return true; } return false; diff --git a/perception/traffic_light_classifier/src/color_classifier.cpp b/perception/traffic_light_classifier/src/color_classifier.cpp index 0267b3cd3c3ac..4b7fb37096021 100644 --- a/perception/traffic_light_classifier/src/color_classifier.cpp +++ b/perception/traffic_light_classifier/src/color_classifier.cpp @@ -1,4 +1,4 @@ -// Copyright 2020 Tier IV, Inc. +// Copyright 2023 TIER IV, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -52,7 +52,7 @@ ColorClassifier::ColorClassifier(rclcpp::Node * node_ptr) : node_ptr_(node_ptr) } bool ColorClassifier::getTrafficSignal( - const cv::Mat & input_image, autoware_auto_perception_msgs::msg::TrafficSignal & traffic_signal) + const cv::Mat & input_image, tier4_perception_msgs::msg::TrafficSignal & traffic_signal) { cv::Mat green_image; cv::Mat yellow_image; @@ -157,25 +157,25 @@ bool ColorClassifier::getTrafficSignal( static_cast(red_filtered_bin_image.rows * red_filtered_bin_image.cols); if (yellow_ratio < green_ratio && red_ratio < green_ratio) { - autoware_auto_perception_msgs::msg::TrafficLight light; - light.color = autoware_auto_perception_msgs::msg::TrafficLight::GREEN; - light.confidence = std::min(1.0, static_cast(green_pixel_num) / (20.0 * 20.0)); - traffic_signal.lights.push_back(light); + tier4_perception_msgs::msg::TrafficLightElement element; + element.color = tier4_perception_msgs::msg::TrafficLightElement::GREEN; + element.confidence = std::min(1.0, static_cast(green_pixel_num) / (20.0 * 20.0)); + traffic_signal.elements.push_back(element); } else if (green_ratio < yellow_ratio && red_ratio < yellow_ratio) { - autoware_auto_perception_msgs::msg::TrafficLight light; - light.color = autoware_auto_perception_msgs::msg::TrafficLight::AMBER; - light.confidence = std::min(1.0, static_cast(yellow_pixel_num) / (20.0 * 20.0)); - traffic_signal.lights.push_back(light); + tier4_perception_msgs::msg::TrafficLightElement element; + element.color = tier4_perception_msgs::msg::TrafficLightElement::AMBER; + element.confidence = std::min(1.0, static_cast(yellow_pixel_num) / (20.0 * 20.0)); + traffic_signal.elements.push_back(element); } else if (green_ratio < red_ratio && yellow_ratio < red_ratio) { - autoware_auto_perception_msgs::msg::TrafficLight light; - light.color = ::autoware_auto_perception_msgs::msg::TrafficLight::RED; - light.confidence = std::min(1.0, static_cast(red_pixel_num) / (20.0 * 20.0)); - traffic_signal.lights.push_back(light); + tier4_perception_msgs::msg::TrafficLightElement element; + element.color = 
::tier4_perception_msgs::msg::TrafficLightElement::RED; + element.confidence = std::min(1.0, static_cast(red_pixel_num) / (20.0 * 20.0)); + traffic_signal.elements.push_back(element); } else { - autoware_auto_perception_msgs::msg::TrafficLight light; - light.color = ::autoware_auto_perception_msgs::msg::TrafficLight::UNKNOWN; - light.confidence = 0.0; - traffic_signal.lights.push_back(light); + tier4_perception_msgs::msg::TrafficLightElement element; + element.color = ::tier4_perception_msgs::msg::TrafficLightElement::UNKNOWN; + element.confidence = 0.0; + traffic_signal.elements.push_back(element); } return true; } diff --git a/perception/traffic_light_classifier/src/nodelet.cpp b/perception/traffic_light_classifier/src/nodelet.cpp index a593f34fd8273..5332f5d3baacc 100644 --- a/perception/traffic_light_classifier/src/nodelet.cpp +++ b/perception/traffic_light_classifier/src/nodelet.cpp @@ -1,4 +1,4 @@ -// Copyright 2020 Tier IV, Inc. +// Copyright 2023 TIER IV, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -37,7 +37,7 @@ TrafficLightClassifierNodelet::TrafficLightClassifierNodelet(const rclcpp::NodeO } traffic_signal_array_pub_ = - this->create_publisher( + this->create_publisher( "~/output/traffic_signals", rclcpp::QoS{1}); using std::chrono_literals::operator""ms; @@ -74,7 +74,7 @@ void TrafficLightClassifierNodelet::connectCb() void TrafficLightClassifierNodelet::imageRoiCallback( const sensor_msgs::msg::Image::ConstSharedPtr & input_image_msg, - const autoware_auto_perception_msgs::msg::TrafficLightRoiArray::ConstSharedPtr & input_rois_msg) + const tier4_perception_msgs::msg::TrafficLightRoiArray::ConstSharedPtr & input_rois_msg) { if (classifier_ptr_.use_count() == 0) { return; @@ -89,15 +89,15 @@ void TrafficLightClassifierNodelet::imageRoiCallback( input_image_msg->encoding.c_str()); } - autoware_auto_perception_msgs::msg::TrafficSignalArray output_msg; + tier4_perception_msgs::msg::TrafficSignalArray output_msg; for (size_t i = 0; i < input_rois_msg->rois.size(); ++i) { const sensor_msgs::msg::RegionOfInterest & roi = input_rois_msg->rois.at(i).roi; cv::Mat clipped_image( cv_ptr->image, cv::Rect(roi.x_offset, roi.y_offset, roi.width, roi.height)); - autoware_auto_perception_msgs::msg::TrafficSignal traffic_signal; - traffic_signal.map_primitive_id = input_rois_msg->rois.at(i).id; + tier4_perception_msgs::msg::TrafficSignal traffic_signal; + traffic_signal.traffic_light_id = input_rois_msg->rois.at(i).traffic_light_id; if (!classifier_ptr_->getTrafficSignal(clipped_image, traffic_signal)) { RCLCPP_ERROR(this->get_logger(), "failed classify image, abort callback"); return; diff --git a/perception/traffic_light_classifier/src/single_image_debug_inference_node.cpp b/perception/traffic_light_classifier/src/single_image_debug_inference_node.cpp index f324b04f1628c..10d36978224ab 100644 --- a/perception/traffic_light_classifier/src/single_image_debug_inference_node.cpp +++ b/perception/traffic_light_classifier/src/single_image_debug_inference_node.cpp @@ -1,4 +1,4 @@ -// Copyright 2023 Tier IV, Inc. +// Copyright 2023 TIER IV, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -28,31 +28,31 @@ namespace { std::string toString(const uint8_t state) { - if (state == autoware_auto_perception_msgs::msg::TrafficLight::RED) { + if (state == tier4_perception_msgs::msg::TrafficLightElement::RED) { return "red"; - } else if (state == autoware_auto_perception_msgs::msg::TrafficLight::AMBER) { + } else if (state == tier4_perception_msgs::msg::TrafficLightElement::AMBER) { return "yellow"; - } else if (state == autoware_auto_perception_msgs::msg::TrafficLight::GREEN) { + } else if (state == tier4_perception_msgs::msg::TrafficLightElement::GREEN) { return "green"; - } else if (state == autoware_auto_perception_msgs::msg::TrafficLight::WHITE) { + } else if (state == tier4_perception_msgs::msg::TrafficLightElement::WHITE) { return "white"; - } else if (state == autoware_auto_perception_msgs::msg::TrafficLight::CIRCLE) { + } else if (state == tier4_perception_msgs::msg::TrafficLightElement::CIRCLE) { return "circle"; - } else if (state == autoware_auto_perception_msgs::msg::TrafficLight::LEFT_ARROW) { + } else if (state == tier4_perception_msgs::msg::TrafficLightElement::LEFT_ARROW) { return "left"; - } else if (state == autoware_auto_perception_msgs::msg::TrafficLight::RIGHT_ARROW) { + } else if (state == tier4_perception_msgs::msg::TrafficLightElement::RIGHT_ARROW) { return "right"; - } else if (state == autoware_auto_perception_msgs::msg::TrafficLight::UP_ARROW) { + } else if (state == tier4_perception_msgs::msg::TrafficLightElement::UP_ARROW) { return "straight"; - } else if (state == autoware_auto_perception_msgs::msg::TrafficLight::DOWN_ARROW) { + } else if (state == tier4_perception_msgs::msg::TrafficLightElement::DOWN_ARROW) { return "down"; - } else if (state == autoware_auto_perception_msgs::msg::TrafficLight::DOWN_LEFT_ARROW) { + } else if (state == tier4_perception_msgs::msg::TrafficLightElement::DOWN_LEFT_ARROW) { return "down_left"; - } else if (state == autoware_auto_perception_msgs::msg::TrafficLight::DOWN_RIGHT_ARROW) { + } else if (state == tier4_perception_msgs::msg::TrafficLightElement::DOWN_RIGHT_ARROW) { return "down_right"; - } else if (state == autoware_auto_perception_msgs::msg::TrafficLight::CROSS) { + } else if (state == tier4_perception_msgs::msg::TrafficLightElement::CROSS) { return "cross"; - } else if (state == autoware_auto_perception_msgs::msg::TrafficLight::UNKNOWN) { + } else if (state == tier4_perception_msgs::msg::TrafficLightElement::UNKNOWN) { return "unknown"; } else { return ""; @@ -122,17 +122,17 @@ class SingleImageDebugInferenceNode : public rclcpp::Node return; } cv::cvtColor(crop, crop, cv::COLOR_BGR2RGB); - autoware_auto_perception_msgs::msg::TrafficSignal traffic_signal; + tier4_perception_msgs::msg::TrafficSignal traffic_signal; if (!classifier_ptr_->getTrafficSignal(crop, traffic_signal)) { RCLCPP_ERROR(get_logger(), "failed to classify image"); return; } cv::Scalar color; cv::Scalar text_color; - for (const auto & light : traffic_signal.lights) { - auto color_str = toString(light.color); - auto shape_str = toString(light.shape); - auto confidence_str = std::to_string(light.confidence); + for (const auto & element : traffic_signal.elements) { + auto color_str = toString(element.color); + auto shape_str = toString(element.shape); + auto confidence_str = std::to_string(element.confidence); if (shape_str == "circle") { if (color_str == "red") { color = cv::Scalar(0, 0, 255); diff --git a/perception/traffic_light_classifier/utils/trt_common.cpp b/perception/traffic_light_classifier/utils/trt_common.cpp deleted file mode 100644 
index f12be16879321..0000000000000 --- a/perception/traffic_light_classifier/utils/trt_common.cpp +++ /dev/null @@ -1,170 +0,0 @@ -// Copyright 2020 Tier IV, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include - -#if (defined(_MSC_VER) or (defined(__GNUC__) and (7 <= __GNUC_MAJOR__))) -#include -namespace fs = ::std::filesystem; -#else -#include -namespace fs = ::std::experimental::filesystem; -#endif - -#include -#include - -namespace Tn -{ -void check_error(const ::cudaError_t e, decltype(__FILE__) f, decltype(__LINE__) n) -{ - if (e != ::cudaSuccess) { - std::stringstream s; - s << ::cudaGetErrorName(e) << " (" << e << ")@" << f << "#L" << n << ": " - << ::cudaGetErrorString(e); - throw std::runtime_error{s.str()}; - } -} - -TrtCommon::TrtCommon( - std::string model_path, std::string precision, std::string input_name, std::string output_name) -: model_file_path_(model_path), - precision_(precision), - input_name_(input_name), - output_name_(output_name), - is_initialized_(false) -{ - runtime_ = UniquePtr(nvinfer1::createInferRuntime(logger_)); -} - -void TrtCommon::setup() -{ - const fs::path path(model_file_path_); - std::string extension = path.extension().string(); - - if (fs::exists(path)) { - if (extension == ".engine") { - loadEngine(model_file_path_); - } else if (extension == ".onnx") { - fs::path cache_engine_path{model_file_path_}; - cache_engine_path.replace_extension("engine"); - if (fs::exists(cache_engine_path)) { - loadEngine(cache_engine_path.string()); - } else { - logger_.log(nvinfer1::ILogger::Severity::kINFO, "start build engine"); - buildEngineFromOnnx(model_file_path_, cache_engine_path.string()); - logger_.log(nvinfer1::ILogger::Severity::kINFO, "end build engine"); - } - } else { - is_initialized_ = false; - return; - } - } else { - is_initialized_ = false; - return; - } - - context_ = UniquePtr(engine_->createExecutionContext()); - -#if (NV_TENSORRT_MAJOR * 10000) + (NV_TENSORRT_MINOR * 100) + NV_TENSOR_PATCH >= 80500 - input_dims_ = engine_->getTensorShape(input_name_.c_str()); - output_dims_ = engine_->getTensorShape(output_name_.c_str()); -#else - // Deprecated since 8.5 - input_dims_ = engine_->getBindingDimensions(engine_->getBindingIndex(input_name_.c_str())); - output_dims_ = engine_->getBindingDimensions(engine_->getBindingIndex(output_name_.c_str())); -#endif - - is_initialized_ = true; -} - -bool TrtCommon::loadEngine(std::string engine_file_path) -{ - std::ifstream engine_file(engine_file_path); - std::stringstream engine_buffer; - engine_buffer << engine_file.rdbuf(); - std::string engine_str = engine_buffer.str(); - engine_ = UniquePtr(runtime_->deserializeCudaEngine( - reinterpret_cast(engine_str.data()), engine_str.size())); - return true; -} - -bool TrtCommon::buildEngineFromOnnx(std::string onnx_file_path, std::string output_engine_file_path) -{ - auto builder = UniquePtr(nvinfer1::createInferBuilder(logger_)); - const auto explicitBatch = - 1U << static_cast(nvinfer1::NetworkDefinitionCreationFlag::kEXPLICIT_BATCH); - auto 
network = UniquePtr(builder->createNetworkV2(explicitBatch)); - auto config = UniquePtr(builder->createBuilderConfig()); - - auto parser = UniquePtr(nvonnxparser::createParser(*network, logger_)); - if (!parser->parseFromFile( - onnx_file_path.c_str(), static_cast(nvinfer1::ILogger::Severity::kERROR))) { - return false; - } - -#if (NV_TENSORRT_MAJOR * 1000) + (NV_TENSORRT_MINOR * 100) + NV_TENSOR_PATCH >= 8400 - config->setMemoryPoolLimit(nvinfer1::MemoryPoolType::kWORKSPACE, 16 << 20); -#else - config->setMaxWorkspaceSize(16 << 20); -#endif - - if (precision_ == "fp16") { - config->setFlag(nvinfer1::BuilderFlag::kFP16); - } else if (precision_ == "int8") { - config->setFlag(nvinfer1::BuilderFlag::kINT8); - } else { - return false; - } - - auto plan = UniquePtr(builder->buildSerializedNetwork(*network, *config)); - if (!plan) { - return false; - } - engine_ = - UniquePtr(runtime_->deserializeCudaEngine(plan->data(), plan->size())); - if (!engine_) { - return false; - } - - // save engine - std::ofstream file; - file.open(output_engine_file_path, std::ios::binary | std::ios::out); - if (!file.is_open()) { - return false; - } - file.write((const char *)plan->data(), plan->size()); - file.close(); - - return true; -} - -bool TrtCommon::isInitialized() -{ - return is_initialized_; -} - -int TrtCommon::getNumInput() -{ - return std::accumulate( - input_dims_.d, input_dims_.d + input_dims_.nbDims, 1, std::multiplies()); -} - -int TrtCommon::getNumOutput() -{ - return std::accumulate( - output_dims_.d, output_dims_.d + output_dims_.nbDims, 1, std::multiplies()); -} - -} // namespace Tn diff --git a/perception/traffic_light_classifier/utils/trt_common.hpp b/perception/traffic_light_classifier/utils/trt_common.hpp deleted file mode 100644 index d7e314a3b4705..0000000000000 --- a/perception/traffic_light_classifier/utils/trt_common.hpp +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright 2020 Tier IV, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -#ifndef PERCEPTION__TRAFFIC_LIGHT_CLASSIFIER__UTILS__TRT_COMMON_HPP_ -#define PERCEPTION__TRAFFIC_LIGHT_CLASSIFIER__UTILS__TRT_COMMON_HPP_ - -#include -#include - -#include <./cudnn.h> -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include - -#define CHECK_CUDA_ERROR(e) (Tn::check_error(e, __FILE__, __LINE__)) - -namespace Tn -{ -class Logger : public nvinfer1::ILogger -{ -public: - Logger() : Logger(Severity::kINFO) {} - - explicit Logger(Severity severity) : reportableSeverity(severity) {} - - void log(Severity severity, const char * msg) noexcept override - { - // suppress messages with severity enum value greater than the reportable - if (severity > reportableSeverity) { - return; - } - - switch (severity) { - case Severity::kINTERNAL_ERROR: - std::cerr << "[TRT_COMMON][INTERNAL_ERROR]: "; - break; - case Severity::kERROR: - std::cerr << "[TRT_COMMON][ERROR]: "; - break; - case Severity::kWARNING: - std::cerr << "[TRT_COMMON][WARNING]: "; - break; - case Severity::kINFO: - std::cerr << "[TRT_COMMON][INFO]: "; - break; - default: - std::cerr << "[TRT_COMMON][UNKNOWN]: "; - break; - } - std::cerr << msg << std::endl; - } - - Severity reportableSeverity{Severity::kWARNING}; -}; - -void check_error(const ::cudaError_t e, decltype(__FILE__) f, decltype(__LINE__) n); - -struct InferDeleter -{ - void operator()(void * p) const { ::cudaFree(p); } -}; - -template -using UniquePtr = std::unique_ptr; - -// auto array = Tn::make_unique(n); -// ::cudaMemcpy(array.get(), src_array, sizeof(float)*n, ::cudaMemcpyHostToDevice); -template -typename std::enable_if::value, Tn::UniquePtr>::type make_unique( - const std::size_t n) -{ - using U = typename std::remove_extent::type; - U * p; - ::cudaMalloc(reinterpret_cast(&p), sizeof(U) * n); - return Tn::UniquePtr{p}; -} - -// auto value = Tn::make_unique(); -// ::cudaMemcpy(value.get(), src_value, sizeof(my_class), ::cudaMemcpyHostToDevice); -template -Tn::UniquePtr make_unique() -{ - T * p; - ::cudaMalloc(reinterpret_cast(&p), sizeof(T)); - return Tn::UniquePtr{p}; -} - -class TrtCommon -{ -public: - TrtCommon( - std::string model_path, std::string precision, std::string input_name, std::string output_name); - ~TrtCommon() {} - - bool loadEngine(std::string engine_file_path); - bool buildEngineFromOnnx(std::string onnx_file_path, std::string output_engine_file_path); - void setup(); - - bool isInitialized(); - int getNumInput(); - int getNumOutput(); - - UniquePtr context_; - -private: - Logger logger_; - std::string model_file_path_; - UniquePtr runtime_; - UniquePtr engine_; - - nvinfer1::Dims input_dims_; - nvinfer1::Dims output_dims_; - std::string cache_dir_; - std::string precision_; - std::string input_name_; - std::string output_name_; - bool is_initialized_; -}; - -} // namespace Tn - -#endif // PERCEPTION__TRAFFIC_LIGHT_CLASSIFIER__UTILS__TRT_COMMON_HPP_ diff --git a/perception/traffic_light_fine_detector/.gitignore b/perception/traffic_light_fine_detector/.gitignore new file mode 100644 index 0000000000000..8fce603003c1e --- /dev/null +++ b/perception/traffic_light_fine_detector/.gitignore @@ -0,0 +1 @@ +data/ diff --git a/perception/traffic_light_fine_detector/CMakeLists.txt b/perception/traffic_light_fine_detector/CMakeLists.txt new file mode 100644 index 0000000000000..a26cafff1ee4d --- /dev/null +++ b/perception/traffic_light_fine_detector/CMakeLists.txt @@ -0,0 +1,156 @@ +cmake_minimum_required(VERSION 3.14) +project(traffic_light_fine_detector) + +find_package(autoware_cmake 
REQUIRED)
+autoware_package()
+
+add_compile_options(-Wno-deprecated-declarations)
+
+option(CUDA_VERBOSE "Verbose output of CUDA modules" OFF)
+
+find_package(OpenCV REQUIRED)
+
+# set flags for CUDA availability
+option(CUDA_AVAIL "CUDA available" OFF)
+find_package(CUDA)
+if(CUDA_FOUND)
+  find_library(CUBLAS_LIBRARIES cublas HINTS
+    ${CUDA_TOOLKIT_ROOT_DIR}/lib64
+    ${CUDA_TOOLKIT_ROOT_DIR}/lib
+  )
+  if(CUDA_VERBOSE)
+    message(STATUS "CUDA is available!")
+    message(STATUS "CUDA Libs: ${CUDA_LIBRARIES}")
+    message(STATUS "CUDA Headers: ${CUDA_INCLUDE_DIRS}")
+  endif()
+  set(CUDA_AVAIL ON)
+else()
+  message(STATUS "CUDA NOT FOUND")
+  set(CUDA_AVAIL OFF)
+endif()
+
+# set flags for TensorRT availability
+option(TRT_AVAIL "TensorRT available" OFF)
+# try to find the tensorRT modules
+find_library(NVINFER NAMES nvinfer)
+find_library(NVONNXPARSER nvonnxparser)
+find_library(NVINFER_PLUGIN NAMES nvinfer_plugin)
+if(NVINFER AND NVONNXPARSER AND NVINFER_PLUGIN)
+  if(CUDA_VERBOSE)
+    message(STATUS "TensorRT is available!")
+    message(STATUS "NVINFER: ${NVINFER}")
+    message(STATUS "NVINFER_PLUGIN: ${NVINFER_PLUGIN}")
+    message(STATUS "NVONNXPARSER: ${NVONNXPARSER}")
+  endif()
+  set(TRT_AVAIL ON)
+else()
+  message(STATUS "TensorRT is NOT Available")
+  set(TRT_AVAIL OFF)
+endif()
+
+# set flags for CUDNN availability
+option(CUDNN_AVAIL "CUDNN available" OFF)
+# try to find the CUDNN module
+find_library(CUDNN_LIBRARY
+  NAMES libcudnn.so${__cudnn_ver_suffix} libcudnn${__cudnn_ver_suffix}.dylib ${__cudnn_lib_win_name}
+  PATHS $ENV{LD_LIBRARY_PATH} ${__libpath_cudart} ${CUDNN_ROOT_DIR} ${PC_CUDNN_LIBRARY_DIRS} ${CMAKE_INSTALL_PREFIX}
+  PATH_SUFFIXES lib lib64 bin
+  DOC "CUDNN library."
+)
+if(CUDNN_LIBRARY)
+  if(CUDA_VERBOSE)
+    message(STATUS "CUDNN is available!")
+    message(STATUS "CUDNN_LIBRARY: ${CUDNN_LIBRARY}")
+  endif()
+  set(CUDNN_AVAIL ON)
+else()
+  message(STATUS "CUDNN is NOT Available")
+  set(CUDNN_AVAIL OFF)
+endif()
+
+# Download the pretrained onnx model and the label file
+set(PRETRAINED_MODEL_HASH f3af5bd588c6c99ccf9ca236a07ad41e)
+set(LAMP_LABEL_HASH e9f45efb02f2a9aa8ac27b3d5c164905)
+set(DATA_DIR https://awf.ml.dev.web.auto/perception/models/tlr_yolox_s/v2)
+
+set(DATA_PATH "${CMAKE_CURRENT_SOURCE_DIR}/data")
+if(NOT EXISTS "${DATA_PATH}")
+  execute_process(COMMAND mkdir -p ${DATA_PATH})
+endif()
+function(download FILE_NAME FILE_HASH)
+  message(STATUS "Checking and downloading ${FILE_NAME}")
+  set(FILE_PATH ${DATA_PATH}/${FILE_NAME})
+  set(STATUS_CODE 0)
+  message(STATUS "start ${FILE_NAME}")
+  if(EXISTS ${FILE_PATH})
+    message(STATUS "found ${FILE_NAME}")
+    file(MD5 ${FILE_PATH} EXISTING_FILE_HASH)
+    if(${FILE_HASH} STREQUAL ${EXISTING_FILE_HASH})
+      message(STATUS "same ${FILE_NAME}")
+      message(STATUS "File already exists.")
+    else()
+      message(STATUS "diff ${FILE_NAME}")
+      message(STATUS "File hash has changed. Downloading now ...")
+      file(DOWNLOAD ${DATA_DIR}/${FILE_NAME} ${FILE_PATH} STATUS DOWNLOAD_STATUS TIMEOUT 300)
+      list(GET DOWNLOAD_STATUS 0 STATUS_CODE)
+      list(GET DOWNLOAD_STATUS 1 ERROR_MESSAGE)
+    endif()
+  else()
+    message(STATUS "not found ${FILE_NAME}")
+    message(STATUS "File doesn't exist. 
Downloading now ...")
+    file(DOWNLOAD ${DATA_DIR}/${FILE_NAME} ${FILE_PATH} STATUS DOWNLOAD_STATUS TIMEOUT 300)
+    list(GET DOWNLOAD_STATUS 0 STATUS_CODE)
+    list(GET DOWNLOAD_STATUS 1 ERROR_MESSAGE)
+  endif()
+  if(${STATUS_CODE} EQUAL 0)
+    message(STATUS "Download completed successfully!")
+  else()
+    message(FATAL_ERROR "Error occurred during download: ${ERROR_MESSAGE}")
+  endif()
+endfunction()
+download(tlr_yolox_s.onnx ${PRETRAINED_MODEL_HASH})
+download(tlr_labels.txt ${LAMP_LABEL_HASH})
+
+if(TRT_AVAIL AND CUDA_AVAIL AND CUDNN_AVAIL)
+  include_directories(
+    ${OpenCV_INCLUDE_DIRS}
+    ${CUDA_INCLUDE_DIRS}
+  )
+
+  ament_auto_add_library(traffic_light_fine_detector_nodelet SHARED
+    src/nodelet.cpp
+  )
+
+  target_include_directories(traffic_light_fine_detector_nodelet PUBLIC
+    lib/include
+  )
+
+  target_link_libraries(traffic_light_fine_detector_nodelet
+    ${NVINFER}
+    ${NVONNXPARSER}
+    ${NVINFER_PLUGIN}
+    ${CUDA_LIBRARIES}
+    ${CUBLAS_LIBRARIES}
+    ${CUDNN_LIBRARY}
+    ${OpenCV_LIB}
+    stdc++fs
+  )
+
+  rclcpp_components_register_node(traffic_light_fine_detector_nodelet
+    PLUGIN "traffic_light::TrafficLightFineDetectorNodelet"
+    EXECUTABLE traffic_light_fine_detector_node
+  )
+
+  ament_auto_package(INSTALL_TO_SHARE
+    data
+    launch
+  )
+
+else()
+  message(STATUS "TrafficLightFineDetector won't be built, CUDA and/or TensorRT were not found.")
+  # install the launch files even when no GPU is available, so launch does not fail
+  ament_auto_package(INSTALL_TO_SHARE
+    launch
+  )
+endif()
diff --git a/perception/traffic_light_fine_detector/README.md b/perception/traffic_light_fine_detector/README.md
new file mode 100644
index 0000000000000..1ed6debfeae91
--- /dev/null
+++ b/perception/traffic_light_fine_detector/README.md
@@ -0,0 +1,66 @@
+# traffic_light_fine_detector
+
+## Purpose
+
+It is a package for traffic light detection using YOLOX-s.
+
+## Training Information
+
+### Pretrained Model
+
+The model is based on [YOLOX](https://github.com/Megvii-BaseDetection/YOLOX) and the pretrained model can be downloaded from [here](https://github.com/Megvii-BaseDetection/YOLOX/releases/download/0.1.1rc0/yolox_s.pth).
+
+### Training Data
+
+The model was fine-tuned on around 17,000 TIER IV internal images of Japanese traffic lights.
+
+### Trained Onnx model
+
+-
+
+## Inner-workings / Algorithms
+
+Based on the camera image and the global ROI array detected by the `map_based_detector` node, a CNN-based detection method enables highly accurate traffic light detection.
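+
+For orientation, the following condensed sketch mirrors the per-ROI inference loop that `TrafficLightFineDetectorNodelet::callback` in `src/nodelet.cpp` (added later in this diff) performs; the helper names (`fitInFrame`, `score_thresh_`, `tlr_id_`, `trt_yolox_`, `id2detections`) come from that file, and error handling is simplified here.
+
+```cpp
+// Condensed from TrafficLightFineDetectorNodelet::callback (see src/nodelet.cpp):
+// every rough ROI from map_based_detector is cropped out of the full image and
+// run through YOLOX-s; weak or non-traffic-light detections are discarded.
+for (const auto & rough_roi : rough_roi_msg->rois) {
+  // clamp the rough ROI to the image bounds and crop it (no pixel copy)
+  cv::Point lt(rough_roi.roi.x_offset, rough_roi.roi.y_offset);
+  cv::Point rb(lt.x + rough_roi.roi.width, lt.y + rough_roi.roi.height);
+  fitInFrame(lt, rb, original_image.size());
+  cv::Mat cropped_img(original_image, cv::Rect(lt, rb));
+
+  tensorrt_yolox::ObjectArrays inference_results;
+  if (!trt_yolox_->doInference({cropped_img}, inference_results)) {
+    continue;  // simplified: the real code warns and aborts the whole callback
+  }
+  for (const auto & detection : inference_results[0]) {
+    if (detection.score < score_thresh_ || detection.type != tlr_id_) {
+      continue;  // keep only confident detections of the traffic_light class
+    }
+    // shift from crop coordinates back to full-image coordinates and collect
+    // the candidates per traffic light for the later expect/rois matching step
+    tensorrt_yolox::Object det = detection;
+    det.x_offset += lt.x;
+    det.y_offset += lt.y;
+    id2detections[rough_roi.traffic_light_id].push_back(det);
+  }
+}
+```
+
+The matching step then uses `~/expect/rois` (the zero-offset projections) to select, for each traffic light, the detection whose position best agrees with the relative layout of all expected ROIs; see `detectionMatch` in the header below.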
+
+## Inputs / Outputs
+
+### Input
+
+| Name            | Type                                               | Description                                                          |
+| --------------- | -------------------------------------------------- | -------------------------------------------------------------------- |
+| `~/input/image` | `sensor_msgs/Image`                                | The full size camera image                                           |
+| `~/input/rois`  | `tier4_perception_msgs::msg::TrafficLightRoiArray` | The array of ROIs detected by map_based_detector                     |
+| `~/expect/rois` | `tier4_perception_msgs::msg::TrafficLightRoiArray` | The array of ROIs detected by map_based_detector without any offset  |
+
+### Output
+
+| Name                  | Type                                               | Description                  |
+| --------------------- | -------------------------------------------------- | ---------------------------- |
+| `~/output/rois`       | `tier4_perception_msgs::msg::TrafficLightRoiArray` | The detected accurate rois   |
+| `~/debug/exe_time_ms` | `tier4_debug_msgs::msg::Float32Stamped`            | The time taken for inference |
+
+## Parameters
+
+### Core Parameters
+
+| Name                         | Type   | Default Value | Description                                                             |
+| ---------------------------- | ------ | ------------- | ----------------------------------------------------------------------- |
+| `fine_detector_score_thresh` | double | 0.3           | If the objectness score is less than this value, the object is ignored  |
+| `fine_detector_nms_thresh`   | double | 0.65          | IoU threshold to perform Non-Maximum Suppression                        |
+
+### Node Parameters
+
+| Name                       | Type   | Default Value | Description                                                  |
+| -------------------------- | ------ | ------------- | ------------------------------------------------------------ |
+| `fine_detector_model_path` | string | ""            | The onnx file path of the YOLOX model                         |
+| `fine_detector_label_path` | string | ""            | The label file containing the class names the model detects   |
+| `fine_detector_precision`  | string | "fp32"        | The inference mode: "fp32", "fp16"                            |
+| `approximate_sync`         | bool   | false         | Flag for whether to use the approximate sync policy           |
+
+## Assumptions / Known limits
+
+## Reference repositories
+
+YOLOX github repository
+
+-
diff --git a/perception/traffic_light_fine_detector/include/traffic_light_fine_detector/nodelet.hpp b/perception/traffic_light_fine_detector/include/traffic_light_fine_detector/nodelet.hpp
new file mode 100644
index 0000000000000..f115596453835
--- /dev/null
+++ b/perception/traffic_light_fine_detector/include/traffic_light_fine_detector/nodelet.hpp
@@ -0,0 +1,172 @@
+// Copyright 2023 TIER IV, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +#ifndef TRAFFIC_LIGHT_FINE_DETECTOR__NODELET_HPP_ +#define TRAFFIC_LIGHT_FINE_DETECTOR__NODELET_HPP_ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#if __has_include() +#include +#else +#include +#endif +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +typedef struct Detection +{ + float x, y, w, h, prob; +} Detection; + +namespace traffic_light +{ +class TrafficLightFineDetectorNodelet : public rclcpp::Node +{ + using TrafficLightRoi = tier4_perception_msgs::msg::TrafficLightRoi; + using TrafficLightRoiArray = tier4_perception_msgs::msg::TrafficLightRoiArray; + +public: + explicit TrafficLightFineDetectorNodelet(const rclcpp::NodeOptions & options); + void connectCb(); + /** + * @brief main process function. + * + * @param image_msg ros image message + * @param rough_roi_msg rough rois message + * @param expect_roi_msg expect rois message + */ + void callback( + const sensor_msgs::msg::Image::ConstSharedPtr image_msg, + const TrafficLightRoiArray::ConstSharedPtr rough_roi_msg, + const TrafficLightRoiArray::ConstSharedPtr expect_roi_msg); + +private: + /** + * @brief convert the images into data stream ready for gpu process + * + * @param in_imgs cv::Mat images + * @param num_rois number of rois + * @param data output data stream + * @return true succeed + * @return false failed + */ + bool cvMat2CnnInput( + const std::vector & in_imgs, const int num_rois, std::vector & data); + /** + * @brief Calculate the match score. Details will be explained in docs of evalMatchScore + * + * @param id2expectRoi + * @param id2detections + * @param id2bestDetection + * @return float + */ + float evalMatchScore( + std::map & id2expectRoi, + std::map & id2detections, + std::map & id2bestDetection); + /** + * @brief Every traffic light roi might have several possible detections. This function + * is designed to select the best detection for every traffic light by making use of + * the relative positions between the traffic lights projected on the image (expect/rois). + * To be specified, for every detection, all the expect rois will be transfered so that + * this detection will match the corresponding expect roi. Note that the expect rois + * of other traffic lights will also be transfered equally. Then, for every expect roi, + * it will calculate the match score (which is IoU_detection_roi * detection_confidence) + * with every detection. 
+ * The combination of detections that will get highest match score sum will be the selected + * detections + * + * @param id2expectRoi id to expect/roi map + * @param id2detections id to detections map + * @param out_rois output rois converted from the selected detections + */ + void detectionMatch( + std::map & id2expectRoi, + std::map & id2detections, TrafficLightRoiArray & out_rois); + /** + * @brief convert image message to cv::Mat + * + * @param image_msg input image message + * @param image output cv::Mat image + * @param encode image encode + * @return true succeed + * @return false failed + */ + bool rosMsg2CvMat( + const sensor_msgs::msg::Image::ConstSharedPtr image_msg, cv::Mat & image, + std::string encode = "rgb8"); + bool fitInFrame(cv::Point & lt, cv::Point & rb, const cv::Size & size); + /** + * @brief Read the label file to get class number and traffic_light class index of the model + * + * @param filepath path to the label file + * @param tlr_id output traffic light class index + * @param num_class output class number + * @return true succeed + * @return false failed + */ + bool readLabelFile(const std::string & filepath, int & tlr_id, int & num_class); + + // variables + image_transport::SubscriberFilter image_sub_; + message_filters::Subscriber rough_roi_sub_; + message_filters::Subscriber expect_roi_sub_; + std::mutex connect_mutex_; + rclcpp::Publisher::SharedPtr output_roi_pub_; + rclcpp::Publisher::SharedPtr exe_time_pub_; + rclcpp::TimerBase::SharedPtr timer_; + + typedef message_filters::sync_policies::ExactTime< + sensor_msgs::msg::Image, TrafficLightRoiArray, TrafficLightRoiArray> + SyncPolicy; + typedef message_filters::Synchronizer Sync; + std::shared_ptr sync_; + + typedef message_filters::sync_policies::ApproximateTime< + sensor_msgs::msg::Image, TrafficLightRoiArray, TrafficLightRoiArray> + ApproximateSyncPolicy; + typedef message_filters::Synchronizer ApproximateSync; + std::shared_ptr approximate_sync_; + + bool is_approximate_sync_; + double score_thresh_; + int tlr_id_; + + std::unique_ptr trt_yolox_; +}; // TrafficLightFineDetectorNodelet + +} // namespace traffic_light + +#endif // TRAFFIC_LIGHT_FINE_DETECTOR__NODELET_HPP_ diff --git a/perception/traffic_light_fine_detector/launch/traffic_light_fine_detector.launch.xml b/perception/traffic_light_fine_detector/launch/traffic_light_fine_detector.launch.xml new file mode 100644 index 0000000000000..442aa3f45ea88 --- /dev/null +++ b/perception/traffic_light_fine_detector/launch/traffic_light_fine_detector.launch.xml @@ -0,0 +1,26 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/perception/traffic_light_fine_detector/package.xml b/perception/traffic_light_fine_detector/package.xml new file mode 100644 index 0000000000000..960a90379279a --- /dev/null +++ b/perception/traffic_light_fine_detector/package.xml @@ -0,0 +1,29 @@ + + + + traffic_light_fine_detector + 0.1.0 + The traffic_light_fine_detector package + Mingyu Li + Apache License 2.0 + + ament_cmake_auto + + autoware_cmake + + cv_bridge + image_transport + message_filters + rclcpp + rclcpp_components + sensor_msgs + tensorrt_yolox + tier4_debug_msgs + tier4_perception_msgs + + autoware_lint_common + + + ament_cmake + + diff --git a/perception/traffic_light_fine_detector/src/nodelet.cpp b/perception/traffic_light_fine_detector/src/nodelet.cpp new file mode 100644 index 0000000000000..75812a072f4c6 --- /dev/null +++ b/perception/traffic_light_fine_detector/src/nodelet.cpp @@ -0,0 +1,310 @@ +// Copyright 2023 TIER IV, Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "traffic_light_fine_detector/nodelet.hpp" + +#if (defined(_MSC_VER) or (defined(__GNUC__) and (7 <= __GNUC_MAJOR__))) +#include +namespace fs = ::std::filesystem; +#else +#include +namespace fs = ::std::experimental::filesystem; +#endif + +#include +#include +#include +#include + +namespace +{ +float calWeightedIou( + const sensor_msgs::msg::RegionOfInterest & bbox1, const tensorrt_yolox::Object & bbox2) +{ + int x1 = std::max(static_cast(bbox1.x_offset), bbox2.x_offset); + int x2 = std::min(static_cast(bbox1.x_offset + bbox1.width), bbox2.x_offset + bbox2.width); + int y1 = std::max(static_cast(bbox1.y_offset), bbox2.y_offset); + int y2 = std::min(static_cast(bbox1.y_offset + bbox1.height), bbox2.y_offset + bbox2.height); + int area1 = std::max(x2 - x1, 0) * std::max(y2 - y1, 0); + int area2 = bbox1.width * bbox1.height + bbox2.width * bbox2.height - area1; + if (area2 == 0) { + return 0.0; + } + return bbox2.score * area1 / area2; +} + +} // namespace + +namespace traffic_light +{ +inline std::vector toFloatVector(const std::vector double_vector) +{ + return std::vector(double_vector.begin(), double_vector.end()); +} + +TrafficLightFineDetectorNodelet::TrafficLightFineDetectorNodelet( + const rclcpp::NodeOptions & options) +: Node("traffic_light_fine_detector_node", options) +{ + int num_class = 2; + using std::placeholders::_1; + using std::placeholders::_2; + using std::placeholders::_3; + + std::string model_path = declare_parameter("fine_detector_model_path", ""); + std::string label_path = declare_parameter("fine_detector_label_path", ""); + std::string precision = declare_parameter("fine_detector_precision", "fp32"); + // Objects with a score lower than this value will be ignored. + // This threshold will be ignored if specified model contains EfficientNMS_TRT module in it + score_thresh_ = declare_parameter("fine_detector_score_thresh", 0.3); + // Detection results will be ignored if IoU over this value. 
+ // This threshold will be ignored if specified model contains EfficientNMS_TRT module in it + float nms_threshold = declare_parameter("fine_detector_nms_thresh", 0.65); + is_approximate_sync_ = this->declare_parameter("approximate_sync", false); + + if (!readLabelFile(label_path, tlr_id_, num_class)) { + RCLCPP_ERROR(this->get_logger(), "Could not find tlr id"); + } + trt_yolox_ = std::make_unique( + model_path, precision, num_class, score_thresh_, nms_threshold); + + using std::chrono_literals::operator""ms; + timer_ = rclcpp::create_timer( + this, get_clock(), 100ms, std::bind(&TrafficLightFineDetectorNodelet::connectCb, this)); + + std::lock_guard lock(connect_mutex_); + output_roi_pub_ = this->create_publisher("~/output/rois", 1); + exe_time_pub_ = + this->create_publisher("~/debug/exe_time_ms", 1); + if (is_approximate_sync_) { + approximate_sync_.reset( + new ApproximateSync(ApproximateSyncPolicy(10), image_sub_, rough_roi_sub_, expect_roi_sub_)); + approximate_sync_->registerCallback( + std::bind(&TrafficLightFineDetectorNodelet::callback, this, _1, _2, _3)); + } else { + sync_.reset(new Sync(SyncPolicy(10), image_sub_, rough_roi_sub_, expect_roi_sub_)); + sync_->registerCallback( + std::bind(&TrafficLightFineDetectorNodelet::callback, this, _1, _2, _3)); + } + + if (declare_parameter("build_only", false)) { + RCLCPP_INFO(get_logger(), "TensorRT engine is built and shutdown node."); + rclcpp::shutdown(); + } +} + +void TrafficLightFineDetectorNodelet::connectCb() +{ + std::lock_guard lock(connect_mutex_); + if (output_roi_pub_->get_subscription_count() == 0) { + image_sub_.unsubscribe(); + rough_roi_sub_.unsubscribe(); + expect_roi_sub_.unsubscribe(); + } else if (!image_sub_.getSubscriber()) { + image_sub_.subscribe(this, "~/input/image", "raw", rmw_qos_profile_sensor_data); + rough_roi_sub_.subscribe(this, "~/input/rois", rclcpp::QoS{1}.get_rmw_qos_profile()); + expect_roi_sub_.subscribe(this, "~/expect/rois", rclcpp::QoS{1}.get_rmw_qos_profile()); + } +} + +void TrafficLightFineDetectorNodelet::callback( + const sensor_msgs::msg::Image::ConstSharedPtr in_image_msg, + const TrafficLightRoiArray::ConstSharedPtr rough_roi_msg, + const TrafficLightRoiArray::ConstSharedPtr expect_roi_msg) +{ + if (in_image_msg->width < 2 || in_image_msg->height < 2) { + return; + } + + using std::chrono::high_resolution_clock; + using std::chrono::milliseconds; + const auto exe_start_time = high_resolution_clock::now(); + cv::Mat original_image; + TrafficLightRoiArray out_rois; + std::map id2expectRoi; + std::map id2detections; + for (const auto & expect_roi : expect_roi_msg->rois) { + id2expectRoi[expect_roi.traffic_light_id] = expect_roi; + } + + rosMsg2CvMat(in_image_msg, original_image, "bgr8"); + for (const auto & rough_roi : rough_roi_msg->rois) { + cv::Point lt(rough_roi.roi.x_offset, rough_roi.roi.y_offset); + cv::Point rb( + rough_roi.roi.x_offset + rough_roi.roi.width, rough_roi.roi.y_offset + rough_roi.roi.height); + fitInFrame(lt, rb, cv::Size(original_image.size())); + cv::Mat cropped_img = cv::Mat(original_image, cv::Rect(lt, rb)); + tensorrt_yolox::ObjectArrays inference_results; + if (!trt_yolox_->doInference({cropped_img}, inference_results)) { + RCLCPP_WARN(this->get_logger(), "Fail to inference"); + return; + } + for (tensorrt_yolox::Object & detection : inference_results[0]) { + if (detection.score < score_thresh_ || detection.type != tlr_id_) { + continue; + } + cv::Point lt_roi(lt.x + detection.x_offset, lt.y + detection.y_offset); + cv::Point rb_roi(lt_roi.x + detection.width, 
lt_roi.y + detection.height); + fitInFrame(lt_roi, rb_roi, cv::Size(original_image.size())); + tensorrt_yolox::Object det = detection; + det.x_offset = lt_roi.x; + det.y_offset = lt_roi.y; + det.width = rb_roi.x - lt_roi.x; + det.height = rb_roi.y - lt_roi.y; + id2detections[rough_roi.traffic_light_id].push_back(det); + } + } + detectionMatch(id2expectRoi, id2detections, out_rois); + out_rois.header = rough_roi_msg->header; + output_roi_pub_->publish(out_rois); + const auto exe_end_time = high_resolution_clock::now(); + const double exe_time = + std::chrono::duration_cast(exe_end_time - exe_start_time).count(); + tier4_debug_msgs::msg::Float32Stamped exe_time_msg; + exe_time_msg.data = exe_time; + exe_time_msg.stamp = this->now(); + exe_time_pub_->publish(exe_time_msg); +} + +float TrafficLightFineDetectorNodelet::evalMatchScore( + std::map & id2expectRoi, + std::map & id2detections, + std::map & id2bestDetection) +{ + float score_sum = 0.0f; + id2bestDetection.clear(); + for (const auto & roi_p : id2expectRoi) { + int tlr_id = roi_p.first; + float max_score = 0.0f; + const sensor_msgs::msg::RegionOfInterest & expected_roi = roi_p.second.roi; + for (const tensorrt_yolox::Object & detection : id2detections[tlr_id]) { + float score = ::calWeightedIou(expected_roi, detection); + if (score >= max_score) { + max_score = score; + id2bestDetection[tlr_id] = detection; + } + } + score_sum += max_score; + } + return score_sum; +} + +void TrafficLightFineDetectorNodelet::detectionMatch( + std::map & id2expectRoi, + std::map & id2detections, TrafficLightRoiArray & out_rois) +{ + float max_score = 0.0f; + std::map bestDetections; + for (const auto & roi_pair : id2expectRoi) { + int tlr_id = roi_pair.first; + // the expected roi calculated from tf information + const sensor_msgs::msg::RegionOfInterest & expect_roi = roi_pair.second.roi; + int expect_cx = expect_roi.x_offset + expect_roi.width / 2; + int expect_cy = expect_roi.y_offset + expect_roi.height / 2; + for (const tensorrt_yolox::Object & det : id2detections[tlr_id]) { + // for every detection, calculate the center offset between the detection and the + // corresponding expected roi + int det_cx = det.x_offset + det.width / 2; + int det_cy = det.y_offset + det.height / 2; + int dx = det_cx - expect_cx; + int dy = det_cy - expect_cy; + // transfer all the rough rois by the offset + std::map id2expectRoi_copy = id2expectRoi; + for (auto & p : id2expectRoi_copy) { + p.second.roi.x_offset += dx; + p.second.roi.y_offset += dy; + } + // calculate the "match score" between expected rois and id2detections_copy + std::map id2bestDetection; + float score = evalMatchScore(id2expectRoi_copy, id2detections, id2bestDetection); + if (score > max_score) { + max_score = score; + bestDetections = id2bestDetection; + } + } + } + + out_rois.rois.clear(); + for (const auto & p : bestDetections) { + TrafficLightRoi tlr; + tlr.traffic_light_id = p.first; + tlr.roi.x_offset = p.second.x_offset; + tlr.roi.y_offset = p.second.y_offset; + tlr.roi.width = p.second.width; + tlr.roi.height = p.second.height; + out_rois.rois.push_back(tlr); + } +} + +bool TrafficLightFineDetectorNodelet::rosMsg2CvMat( + const sensor_msgs::msg::Image::ConstSharedPtr image_msg, cv::Mat & image, std::string encode) +{ + try { + cv_bridge::CvImagePtr cv_image = cv_bridge::toCvCopy(image_msg, encode); + image = cv_image->image; + } catch (cv_bridge::Exception & e) { + RCLCPP_ERROR( + this->get_logger(), "Failed to convert sensor_msgs::msg::Image to cv::Mat \n%s", e.what()); + return false; + } + 
+ return true; +} + +bool TrafficLightFineDetectorNodelet::fitInFrame( + cv::Point & lt, cv::Point & rb, const cv::Size & size) +{ + const int width = static_cast(size.width); + const int height = static_cast(size.height); + { + const int x_min = 0, x_max = width - 2; + const int y_min = 0, y_max = height - 2; + lt.x = std::min(std::max(lt.x, x_min), x_max); + lt.y = std::min(std::max(lt.y, y_min), y_max); + } + { + const int x_min = lt.x + 1, x_max = width - 1; + const int y_min = lt.y + 1, y_max = height - 1; + rb.x = std::min(std::max(rb.x, x_min), x_max); + rb.y = std::min(std::max(rb.y, y_min), y_max); + } + + return true; +} + +bool TrafficLightFineDetectorNodelet::readLabelFile( + const std::string & filepath, int & tlr_id, int & num_class) +{ + tlr_id = -1; + std::ifstream labelsFile(filepath); + if (!labelsFile.is_open()) { + RCLCPP_ERROR(this->get_logger(), "Could not open label file. [%s]", filepath.c_str()); + return false; + } + std::string label; + int idx = 0; + while (getline(labelsFile, label)) { + if (label == "traffic_light") { + tlr_id = idx; + } + idx++; + } + num_class = idx; + return tlr_id != -1; +} + +} // namespace traffic_light + +#include +RCLCPP_COMPONENTS_REGISTER_NODE(traffic_light::TrafficLightFineDetectorNodelet) diff --git a/perception/traffic_light_map_based_detector/README.md b/perception/traffic_light_map_based_detector/README.md index 435969e6e98a1..e03a4c6dd8f69 100644 --- a/perception/traffic_light_map_based_detector/README.md +++ b/perception/traffic_light_map_based_detector/README.md @@ -21,17 +21,22 @@ If the node receives no route information, it looks at a radius of 200 meters an ## Output topics -| Name | Type | Description | -| ---------------- | --------------------------------------------------- | -------------------------------------------------------------------- | -| `~output/rois` | autoware_auto_perception_msgs::TrafficLightRoiArray | location of traffic lights in image corresponding to the camera info | -| `~debug/markers` | visualization_msgs::MarkerArray | visualization to debug | +| Name | Type | Description | +| ---------------- | ------------------------------------------- | -------------------------------------------------------------------- | +| `~output/rois` | tier4_perception_msgs::TrafficLightRoiArray | location of traffic lights in image corresponding to the camera info | +| `~expect/rois` | tier4_perception_msgs::TrafficLightRoiArray | location of traffic lights in image without any offset | +| `~debug/markers` | visualization_msgs::MarkerArray | visualization to debug | ## Node parameters -| Parameter | Type | Description | -| ---------------------- | ------ | ----------------------------------------------------------- | -| `max_vibration_pitch` | double | Maximum error in pitch direction. If -5~+5, it will be 10. | -| `max_vibration_yaw` | double | Maximum error in yaw direction. If -5~+5, it will be 10. | -| `max_vibration_height` | double | Maximum error in height direction. If -5~+5, it will be 10. | -| `max_vibration_width` | double | Maximum error in width direction. If -5~+5, it will be 10. | -| `max_vibration_depth` | double | Maximum error in depth direction. If -5~+5, it will be 10. | +| Parameter | Type | Description | +| ---------------------- | ------ | --------------------------------------------------------------------- | +| `max_vibration_pitch` | double | Maximum error in pitch direction. If -5~+5, it will be 10. | +| `max_vibration_yaw` | double | Maximum error in yaw direction. If -5~+5, it will be 10. 
| +| `max_vibration_height` | double | Maximum error in height direction. If -5~+5, it will be 10. | +| `max_vibration_width` | double | Maximum error in width direction. If -5~+5, it will be 10. | +| `max_vibration_depth` | double | Maximum error in depth direction. If -5~+5, it will be 10. | +| `max_detection_range` | double | Maximum detection range in meters. Must be positive | +| `min_timestamp_offset` | double | Minimum timestamp offset when searching for corresponding tf | +| `max_timestamp_offset` | double | Maximum timestamp offset when searching for corresponding tf | +| `timestamp_sample_len` | double | sampling length between min_timestamp_offset and max_timestamp_offset | diff --git a/perception/traffic_light_map_based_detector/config/traffic_light_map_based_detector.param.yaml b/perception/traffic_light_map_based_detector/config/traffic_light_map_based_detector.param.yaml index 6acb255a79aa2..2b8ff4aa737b7 100644 --- a/perception/traffic_light_map_based_detector/config/traffic_light_map_based_detector.param.yaml +++ b/perception/traffic_light_map_based_detector/config/traffic_light_map_based_detector.param.yaml @@ -1,7 +1,8 @@ /**: ros__parameters: - max_vibration_pitch: 0.00436332312 # -0.5 ~ 0.5 deg - max_vibration_yaw: 0.00436332312 # -0.5 ~ 0.5 deg - max_vibration_height: 0.01 # -0.05 ~ 0.05 m - max_vibration_width: 0.01 # -0.05 ~ 0.05 m - max_vibration_depth: 0.01 # -0.05 ~ 0.05 m + max_vibration_pitch: 0.01745329251 # -0.5 ~ 0.5 deg + max_vibration_yaw: 0.01745329251 # -0.5 ~ 0.5 deg + max_vibration_height: 0.5 # -0.25 ~ 0.25 m + max_vibration_width: 0.5 # -0.25 ~ 0.25 m + max_vibration_depth: 0.5 # -0.25 ~ 0.25 m + max_detection_range: 200.0 diff --git a/perception/traffic_light_map_based_detector/include/traffic_light_map_based_detector/node.hpp b/perception/traffic_light_map_based_detector/include/traffic_light_map_based_detector/node.hpp index 941e0596bf061..9db8d7242c64d 100644 --- a/perception/traffic_light_map_based_detector/include/traffic_light_map_based_detector/node.hpp +++ b/perception/traffic_light_map_based_detector/include/traffic_light_map_based_detector/node.hpp @@ -1,4 +1,4 @@ -// Copyright 2020 Tier IV, Inc. +// Copyright 2023 TIER IV, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,24 +11,6 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -/* - * Copyright 2019 Autoware Foundation. All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Authors: Yukihiro Saito - * - */ #ifndef TRAFFIC_LIGHT_MAP_BASED_DETECTOR__NODE_HPP_ #define TRAFFIC_LIGHT_MAP_BASED_DETECTOR__NODE_HPP_ @@ -38,25 +20,24 @@ #include #include -#include #include #include #include +#include #include -#if __has_include() -#include -#else #include -#endif #include #include #include #include #include +#include +#include #include #include +#include #include namespace traffic_light @@ -74,6 +55,10 @@ class MapBasedDetector : public rclcpp::Node double max_vibration_height; double max_vibration_width; double max_vibration_depth; + double min_timestamp_offset; + double max_timestamp_offset; + double timestamp_sample_len; + double max_detection_range; }; struct IdLessThan @@ -89,8 +74,16 @@ class MapBasedDetector : public rclcpp::Node rclcpp::Subscription::SharedPtr map_sub_; rclcpp::Subscription::SharedPtr camera_info_sub_; rclcpp::Subscription::SharedPtr route_sub_; - - rclcpp::Publisher::SharedPtr roi_pub_; + /** + * @brief publish the rois of traffic lights with angular and distance offset + * + */ + rclcpp::Publisher::SharedPtr roi_pub_; + /** + * @brief publish the rois of traffic lights with zero angular and distance offset + * + */ + rclcpp::Publisher::SharedPtr expect_roi_pub_; rclcpp::Publisher::SharedPtr viz_pub_; tf2_ros::Buffer tf_buffer_; @@ -104,30 +97,92 @@ class MapBasedDetector : public rclcpp::Node lanelet::LaneletMapPtr lanelet_map_ptr_; lanelet::traffic_rules::TrafficRulesPtr traffic_rules_ptr_; lanelet::routing::RoutingGraphPtr routing_graph_ptr_; - Config config_; + Config config_; + /** + * @brief Calculated the transform from map to frame_id at timestamp t + * + * @param t specified timestamp + * @param frame_id specified target frame id + * @param tf calculated transform + * @return true calculation succeed + * @return false calculation failed + */ + bool getTransform( + const rclcpp::Time & t, const std::string & frame_id, tf2::Transform & tf) const; + /** + * @brief callback function for the map message + * + * @param input_msg + */ void mapCallback(const autoware_auto_mapping_msgs::msg::HADMapBin::ConstSharedPtr input_msg); + /** + * @brief callback function for the camera info message. 
The main process function of the node + * + * @param input_msg + */ void cameraInfoCallback(const sensor_msgs::msg::CameraInfo::ConstSharedPtr input_msg); + /** + * @brief callback function for the route message + * + * @param input_msg + */ void routeCallback(const autoware_planning_msgs::msg::LaneletRoute::ConstSharedPtr input_msg); + /** + * @brief Get the Visible Traffic Lights object + * + * @param all_traffic_lights all the traffic lights in the route or in the map + * @param tf_map2camera_vec the transformation sequences from map to camera + * @param pinhole_camera_model pinhole model calculated from camera_info + * @param visible_traffic_lights the visible traffic lights object + */ void getVisibleTrafficLights( - const TrafficLightSet & all_traffic_lights, const geometry_msgs::msg::Pose & camera_pose, + const TrafficLightSet & all_traffic_lights, + const std::vector & tf_map2camera_vec, const image_geometry::PinholeCameraModel & pinhole_camera_model, - std::vector & visible_traffic_lights); - bool isInDistanceRange( - const geometry_msgs::msg::Point & tl_point, const geometry_msgs::msg::Point & camera_point, - const double max_distance_range) const; - bool isInAngleRange( - const double & tl_yaw, const double & camera_yaw, const double max_angle_range) const; - bool isInImageFrame( + std::vector & visible_traffic_lights) const; + /** + * @brief Get the Traffic Light Roi from one tf + * + * @param tf_map2camera the transformation from map to camera + * @param pinhole_camera_model pinhole model calculated from camera_info + * @param traffic_light lanelet traffic light object + * @param config offset configuration + * @param roi computed result result + * @return true the computation succeed + * @return false the computation failed + */ + bool getTrafficLightRoi( + const tf2::Transform & tf_map2camera, const image_geometry::PinholeCameraModel & pinhole_camera_model, - const geometry_msgs::msg::Point & point) const; + const lanelet::ConstLineString3d traffic_light, const Config & config, + tier4_perception_msgs::msg::TrafficLightRoi & roi) const; + /** + * @brief Calculate one traffic light roi for every tf and return the roi containing all of them + * + * @param tf_map2camera_vec the transformation vector + * @param pinhole_camera_model pinhole model calculated from camera_info + * @param traffic_light lanelet traffic light object + * @param config offset configuration + * @param roi computed result result + * @return true the computation succeed + * @return false the computation failed + */ bool getTrafficLightRoi( - const geometry_msgs::msg::Pose & camera_pose, + const std::vector & tf_map2camera_vec, const image_geometry::PinholeCameraModel & pinhole_camera_model, const lanelet::ConstLineString3d traffic_light, const Config & config, - autoware_auto_perception_msgs::msg::TrafficLightRoi & tl_roi); + tier4_perception_msgs::msg::TrafficLightRoi & roi) const; + /** + * @brief Publish the traffic lights for visualization + * + * @param tf_map2camera the transformation from map to camera + * @param cam_info_header header of the camera_info message + * @param visible_traffic_lights the visible traffic light object vector + * @param pub publisher + */ void publishVisibleTrafficLights( - const geometry_msgs::msg::PoseStamped camera_pose_stamped, + const tf2::Transform & tf_map2camera, const std_msgs::msg::Header & cam_info_header, const std::vector & visible_traffic_lights, const rclcpp::Publisher::SharedPtr pub); }; diff --git 
a/perception/traffic_light_map_based_detector/launch/traffic_light_map_based_detector.launch.xml b/perception/traffic_light_map_based_detector/launch/traffic_light_map_based_detector.launch.xml index 465fd4171b9ec..2dc6afa4fa4d5 100644 --- a/perception/traffic_light_map_based_detector/launch/traffic_light_map_based_detector.launch.xml +++ b/perception/traffic_light_map_based_detector/launch/traffic_light_map_based_detector.launch.xml @@ -3,14 +3,26 @@ + + + + + + + + + + + + diff --git a/perception/traffic_light_map_based_detector/package.xml b/perception/traffic_light_map_based_detector/package.xml index 3b19413e5ecd6..1021757e99a07 100644 --- a/perception/traffic_light_map_based_detector/package.xml +++ b/perception/traffic_light_map_based_detector/package.xml @@ -8,10 +8,10 @@ Apache License 2.0 ament_cmake_auto - autoware_cmake + + autoware_cmake autoware_auto_mapping_msgs - autoware_auto_perception_msgs autoware_auto_planning_msgs autoware_planning_msgs geometry_msgs @@ -21,8 +21,10 @@ rclcpp_components sensor_msgs tf2 + tf2_eigen tf2_ros tier4_autoware_utils + tier4_perception_msgs ament_lint_auto autoware_lint_common diff --git a/perception/traffic_light_map_based_detector/src/node.cpp b/perception/traffic_light_map_based_detector/src/node.cpp index d2d54098859c9..99ec2ddb607ad 100644 --- a/perception/traffic_light_map_based_detector/src/node.cpp +++ b/perception/traffic_light_map_based_detector/src/node.cpp @@ -1,4 +1,4 @@ -// Copyright 2020 Tier IV, Inc. +// Copyright 2023 TIER IV, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,49 +11,21 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -/* - * Copyright 2019 Autoware Foundation. All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * Authors: Yukihiro Saito - * - */ #include "traffic_light_map_based_detector/node.hpp" #include #include #include -#include #include -#include - #include -#include #include #include #include -#include #include #include -#include -#include -#include -#include - #define EIGEN_MPL2_ONLY #include #include @@ -68,11 +40,10 @@ cv::Point2d calcRawImagePointFromPoint3D( } cv::Point2d calcRawImagePointFromPoint3D( - const image_geometry::PinholeCameraModel & pinhole_camera_model, - const geometry_msgs::msg::Point & point3d) + const image_geometry::PinholeCameraModel & pinhole_camera_model, const tf2::Vector3 & point3d) { return calcRawImagePointFromPoint3D( - pinhole_camera_model, cv::Point3d(point3d.x, point3d.y, point3d.z)); + pinhole_camera_model, cv::Point3d(point3d.x(), point3d.y(), point3d.z())); } void roundInImageFrame( @@ -84,6 +55,60 @@ void roundInImageFrame( point.y = std::max(std::min(point.y, static_cast(static_cast(camera_info.height) - 1)), 0.0); } + +bool isInDistanceRange( + const tf2::Vector3 & p1, const tf2::Vector3 & p2, const double max_distance_range) +{ + const double sq_dist = + (p1.x() - p2.x()) * (p1.x() - p2.x()) + (p1.y() - p2.y()) * (p1.y() - p2.y()); + return sq_dist < (max_distance_range * max_distance_range); +} + +bool isInAngleRange(const double & tl_yaw, const double & camera_yaw, const double max_angle_range) +{ + Eigen::Vector2d vec1, vec2; + vec1 << std::cos(tl_yaw), std::sin(tl_yaw); + vec2 << std::cos(camera_yaw), std::sin(camera_yaw); + const double diff_angle = std::acos(vec1.dot(vec2)); + return std::fabs(diff_angle) < max_angle_range; +} + +bool isInImageFrame( + const image_geometry::PinholeCameraModel & pinhole_camera_model, const tf2::Vector3 & point) +{ + if (point.z() <= 0.0) { + return false; + } + + cv::Point2d point2d = calcRawImagePointFromPoint3D(pinhole_camera_model, point); + if (0 <= point2d.x && point2d.x < pinhole_camera_model.cameraInfo().width) { + if (0 <= point2d.y && point2d.y < pinhole_camera_model.cameraInfo().height) { + return true; + } + } + return false; +} + +tf2::Vector3 getTrafficLightTopLeft(const lanelet::ConstLineString3d & traffic_light) +{ + const auto & tl_bl = traffic_light.front(); + const double tl_height = traffic_light.attributeOr("height", 0.0); + return tf2::Vector3(tl_bl.x(), tl_bl.y(), tl_bl.z() + tl_height); +} + +tf2::Vector3 getTrafficLightBottomRight(const lanelet::ConstLineString3d & traffic_light) +{ + const auto & tl_bl = traffic_light.back(); + return tf2::Vector3(tl_bl.x(), tl_bl.y(), tl_bl.z()); +} + +tf2::Vector3 getTrafficLightCenter(const lanelet::ConstLineString3d & traffic_light) +{ + tf2::Vector3 top_left = getTrafficLightTopLeft(traffic_light); + tf2::Vector3 bottom_right = getTrafficLightBottomRight(traffic_light); + return (top_left + bottom_right) / 2; +} + } // namespace namespace traffic_light @@ -95,6 +120,36 @@ MapBasedDetector::MapBasedDetector(const rclcpp::NodeOptions & node_options) { using std::placeholders::_1; + // parameter declaration needs default values: are 0.0 goof defaults for this? 
+  config_.max_vibration_pitch = declare_parameter("max_vibration_pitch", 0.0);
+  config_.max_vibration_yaw = declare_parameter("max_vibration_yaw", 0.0);
+  config_.max_vibration_height = declare_parameter("max_vibration_height", 0.0);
+  config_.max_vibration_width = declare_parameter("max_vibration_width", 0.0);
+  config_.max_vibration_depth = declare_parameter("max_vibration_depth", 0.0);
+  config_.min_timestamp_offset = declare_parameter("min_timestamp_offset", 0.0);
+  config_.max_timestamp_offset = declare_parameter("max_timestamp_offset", 0.0);
+  config_.timestamp_sample_len = declare_parameter("timestamp_sample_len", 0.01);
+  config_.max_detection_range = declare_parameter("max_detection_range", 200.0);
+
+  if (config_.max_detection_range <= 0) {
+    RCLCPP_ERROR_STREAM(
+      get_logger(), "Invalid param max_detection_range = " << config_.max_detection_range
+                                                           << ", set to default value = 200");
+    config_.max_detection_range = 200.0;
+  }
+  if (config_.timestamp_sample_len <= 0) {
+    RCLCPP_ERROR_STREAM(
+      get_logger(), "Invalid param timestamp_sample_len = " << config_.timestamp_sample_len
+                                                            << ", set to default value = 0.01");
+    config_.timestamp_sample_len = 0.01;
+  }
+  if (config_.max_timestamp_offset <= config_.min_timestamp_offset) {
+    RCLCPP_ERROR_STREAM(
+      get_logger(), "max_timestamp_offset <= min_timestamp_offset. Set both to 0");
+    config_.max_timestamp_offset = 0.0;
+    config_.min_timestamp_offset = 0.0;
+  }
+
   // subscribers
   map_sub_ = create_subscription<autoware_auto_mapping_msgs::msg::HADMapBin>(
     "~/input/vector_map", rclcpp::QoS{1}.transient_local(),
@@ -107,16 +162,24 @@ MapBasedDetector::MapBasedDetector(const rclcpp::NodeOptions & node_options)
     std::bind(&MapBasedDetector::mapCallback, this, _1));
   camera_info_sub_ = create_subscription<sensor_msgs::msg::CameraInfo>(
     "~/input/camera_info", rclcpp::SensorDataQoS(),
     std::bind(&MapBasedDetector::cameraInfoCallback, this, _1));
   route_sub_ = create_subscription<autoware_planning_msgs::msg::LaneletRoute>(
     "~/input/route", 1, std::bind(&MapBasedDetector::routeCallback, this, _1));

   // publishers
-  roi_pub_ = this->create_publisher<autoware_auto_perception_msgs::msg::TrafficLightRoiArray>(
-    "~/output/rois", 1);
+  roi_pub_ =
+    this->create_publisher<tier4_perception_msgs::msg::TrafficLightRoiArray>("~/output/rois", 1);
+  expect_roi_pub_ =
+    this->create_publisher<tier4_perception_msgs::msg::TrafficLightRoiArray>("~/expect/rois", 1);
   viz_pub_ = this->create_publisher<visualization_msgs::msg::MarkerArray>("~/debug/markers", 1);
+}
-
-  // parameter declaration needs default values: are 0.0 goof defaults for this? 
- config_.max_vibration_pitch = declare_parameter("max_vibration_pitch", 0.0); - config_.max_vibration_yaw = declare_parameter("max_vibration_yaw", 0.0); - config_.max_vibration_height = declare_parameter("max_vibration_height", 0.0); - config_.max_vibration_width = declare_parameter("max_vibration_width", 0.0); - config_.max_vibration_depth = declare_parameter("max_vibration_depth", 0.0); +bool MapBasedDetector::getTransform( + const rclcpp::Time & t, const std::string & frame_id, tf2::Transform & tf) const +{ + try { + geometry_msgs::msg::TransformStamped transform = + tf_buffer_.lookupTransform("map", frame_id, t, rclcpp::Duration::from_seconds(0.2)); + tf2::fromMsg(transform.transform, tf); + } catch (tf2::TransformException & ex) { + return false; + } + return true; } void MapBasedDetector::cameraInfoCallback( @@ -129,23 +192,35 @@ void MapBasedDetector::cameraInfoCallback( image_geometry::PinholeCameraModel pinhole_camera_model; pinhole_camera_model.fromCameraInfo(*input_msg); - autoware_auto_perception_msgs::msg::TrafficLightRoiArray output_msg; + tier4_perception_msgs::msg::TrafficLightRoiArray output_msg; output_msg.header = input_msg->header; - - /* Camera pose */ - geometry_msgs::msg::PoseStamped camera_pose_stamped; - try { - geometry_msgs::msg::TransformStamped transform; - transform = tf_buffer_.lookupTransform( - "map", input_msg->header.frame_id, input_msg->header.stamp, - rclcpp::Duration::from_seconds(0.2)); - camera_pose_stamped.header = input_msg->header; - camera_pose_stamped.pose = tier4_autoware_utils::transform2pose(transform.transform); - } catch (tf2::TransformException & ex) { + tier4_perception_msgs::msg::TrafficLightRoiArray expect_roi_msg; + expect_roi_msg = output_msg; + + /* Camera pose in the period*/ + std::vector tf_map2camera_vec; + rclcpp::Time t1 = rclcpp::Time(input_msg->header.stamp) + + rclcpp::Duration::from_seconds(config_.min_timestamp_offset); + rclcpp::Time t2 = rclcpp::Time(input_msg->header.stamp) + + rclcpp::Duration::from_seconds(config_.max_timestamp_offset); + rclcpp::Duration interval = rclcpp::Duration::from_seconds(0.01); + for (auto t = t1; t <= t2; t += interval) { + tf2::Transform tf; + if (getTransform(t, input_msg->header.frame_id, tf)) { + tf_map2camera_vec.push_back(tf); + } + } + /* camera pose at the exact moment*/ + tf2::Transform tf_map2camera; + if (!getTransform( + rclcpp::Time(input_msg->header.stamp), input_msg->header.frame_id, tf_map2camera)) { RCLCPP_WARN_THROTTLE( get_logger(), *get_clock(), 5000, "cannot get transform from map frame to camera frame"); return; } + if (tf_map2camera_vec.empty()) { + tf_map2camera_vec.push_back(tf_map2camera); + } /* * visible_traffic_lights : for each traffic light in map check if in range and in view angle of @@ -155,13 +230,11 @@ void MapBasedDetector::cameraInfoCallback( // If get a route, use only traffic lights on the route. if (route_traffic_lights_ptr_ != nullptr) { getVisibleTrafficLights( - *route_traffic_lights_ptr_, camera_pose_stamped.pose, pinhole_camera_model, - visible_traffic_lights); + *route_traffic_lights_ptr_, tf_map2camera_vec, pinhole_camera_model, visible_traffic_lights); // If don't get a route, use the traffic lights around ego vehicle. } else if (all_traffic_lights_ptr_ != nullptr) { getVisibleTrafficLights( - *all_traffic_lights_ptr_, camera_pose_stamped.pose, pinhole_camera_model, - visible_traffic_lights); + *all_traffic_lights_ptr_, tf_map2camera_vec, pinhole_camera_model, visible_traffic_lights); // This shouldn't run. 
} else { return; @@ -171,100 +244,134 @@ void MapBasedDetector::cameraInfoCallback( * Get the ROI from the lanelet and the intrinsic matrix of camera to determine where it appears * in image. */ + /** + * set all offset to zero when calculating the expect roi + */ + Config expect_roi_cfg = config_; + expect_roi_cfg.max_vibration_depth = 0; + expect_roi_cfg.max_vibration_height = 0; + expect_roi_cfg.max_vibration_width = 0; + expect_roi_cfg.max_vibration_yaw = 0; + expect_roi_cfg.max_vibration_pitch = 0; for (const auto & traffic_light : visible_traffic_lights) { - autoware_auto_perception_msgs::msg::TrafficLightRoi tl_roi; + tier4_perception_msgs::msg::TrafficLightRoi rough_roi, expect_roi; + if (!getTrafficLightRoi( + tf_map2camera, pinhole_camera_model, traffic_light, expect_roi_cfg, expect_roi)) { + continue; + } if (!getTrafficLightRoi( - camera_pose_stamped.pose, pinhole_camera_model, traffic_light, config_, tl_roi)) { + tf_map2camera_vec, pinhole_camera_model, traffic_light, config_, rough_roi)) { continue; } - output_msg.rois.push_back(tl_roi); + output_msg.rois.push_back(rough_roi); + expect_roi_msg.rois.push_back(expect_roi); } + roi_pub_->publish(output_msg); - publishVisibleTrafficLights(camera_pose_stamped, visible_traffic_lights, viz_pub_); + expect_roi_pub_->publish(expect_roi_msg); + publishVisibleTrafficLights( + tf_map2camera_vec[0], input_msg->header, visible_traffic_lights, viz_pub_); } bool MapBasedDetector::getTrafficLightRoi( - const geometry_msgs::msg::Pose & camera_pose, + const tf2::Transform & tf_map2camera, const image_geometry::PinholeCameraModel & pinhole_camera_model, const lanelet::ConstLineString3d traffic_light, const Config & config, - autoware_auto_perception_msgs::msg::TrafficLightRoi & tl_roi) + tier4_perception_msgs::msg::TrafficLightRoi & roi) const { - const double tl_height = traffic_light.attributeOr("height", 0.0); - const auto & tl_left_down_point = traffic_light.front(); - const auto & tl_right_down_point = traffic_light.back(); - - tf2::Transform tf_map2camera( - tf2::Quaternion( - camera_pose.orientation.x, camera_pose.orientation.y, camera_pose.orientation.z, - camera_pose.orientation.w), - tf2::Vector3(camera_pose.position.x, camera_pose.position.y, camera_pose.position.z)); - // id - tl_roi.id = traffic_light.id(); + roi.traffic_light_id = traffic_light.id(); // for roi.x_offset and roi.y_offset { - tf2::Transform tf_map2tl( - tf2::Quaternion(0, 0, 0, 1), - tf2::Vector3( - tl_left_down_point.x(), tl_left_down_point.y(), tl_left_down_point.z() + tl_height)); - tf2::Transform tf_camera2tl; - tf_camera2tl = tf_map2camera.inverse() * tf_map2tl; + tf2::Vector3 map2tl = getTrafficLightTopLeft(traffic_light); + tf2::Vector3 camera2tl = tf_map2camera.inverse() * map2tl; // max vibration const double max_vibration_x = - std::sin(config.max_vibration_yaw * 0.5) * tf_camera2tl.getOrigin().z() + - config.max_vibration_width * 0.5; - const double max_vibration_y = - std::sin(config.max_vibration_pitch * 0.5) * tf_camera2tl.getOrigin().z() + - config.max_vibration_height * 0.5; + std::sin(config.max_vibration_yaw * 0.5) * camera2tl.z() + config.max_vibration_width * 0.5; + const double max_vibration_y = std::sin(config.max_vibration_pitch * 0.5) * camera2tl.z() + + config.max_vibration_height * 0.5; const double max_vibration_z = config.max_vibration_depth * 0.5; - // target position in camera coordinate - geometry_msgs::msg::Point point3d; - point3d.x = tf_camera2tl.getOrigin().x() - max_vibration_x; - point3d.y = tf_camera2tl.getOrigin().y() - 
max_vibration_y; - point3d.z = tf_camera2tl.getOrigin().z() - max_vibration_z; - if (point3d.z <= 0.0) { - return false; + // enlarged target position in camera coordinate + { + tf2::Vector3 point3d = + camera2tl - tf2::Vector3(max_vibration_x, max_vibration_y, max_vibration_z); + if (point3d.z() <= 0.0) { + return false; + } + cv::Point2d point2d = calcRawImagePointFromPoint3D(pinhole_camera_model, point3d); + roundInImageFrame(pinhole_camera_model, point2d); + roi.roi.x_offset = point2d.x; + roi.roi.y_offset = point2d.y; } - cv::Point2d point2d = calcRawImagePointFromPoint3D(pinhole_camera_model, point3d); - roundInImageFrame(pinhole_camera_model, point2d); - tl_roi.roi.x_offset = point2d.x; - tl_roi.roi.y_offset = point2d.y; } // for roi.width and roi.height { - tf2::Transform tf_map2tl( - tf2::Quaternion(0, 0, 0, 1), - tf2::Vector3(tl_right_down_point.x(), tl_right_down_point.y(), tl_right_down_point.z())); - tf2::Transform tf_camera2tl; - tf_camera2tl = tf_map2camera.inverse() * tf_map2tl; + tf2::Vector3 map2tl = getTrafficLightBottomRight(traffic_light); + tf2::Vector3 camera2tl = tf_map2camera.inverse() * map2tl; // max vibration const double max_vibration_x = - std::sin(config.max_vibration_yaw * 0.5) * tf_camera2tl.getOrigin().z() + - config.max_vibration_width * 0.5; - const double max_vibration_y = - std::sin(config.max_vibration_pitch * 0.5) * tf_camera2tl.getOrigin().z() + - config.max_vibration_height * 0.5; + std::sin(config.max_vibration_yaw * 0.5) * camera2tl.z() + config.max_vibration_width * 0.5; + const double max_vibration_y = std::sin(config.max_vibration_pitch * 0.5) * camera2tl.z() + + config.max_vibration_height * 0.5; const double max_vibration_z = config.max_vibration_depth * 0.5; - // target position in camera coordinate - geometry_msgs::msg::Point point3d; - point3d.x = tf_camera2tl.getOrigin().x() + max_vibration_x; - point3d.y = tf_camera2tl.getOrigin().y() + max_vibration_y; - point3d.z = tf_camera2tl.getOrigin().z() - max_vibration_z; - if (point3d.z <= 0.0) { - return false; + // enlarged target position in camera coordinate + { + tf2::Vector3 point3d = + camera2tl + tf2::Vector3(max_vibration_x, max_vibration_y, -max_vibration_z); + if (point3d.z() <= 0.0) { + return false; + } + cv::Point2d point2d = calcRawImagePointFromPoint3D(pinhole_camera_model, point3d); + roundInImageFrame(pinhole_camera_model, point2d); + roi.roi.width = point2d.x - roi.roi.x_offset; + roi.roi.height = point2d.y - roi.roi.y_offset; } - cv::Point2d point2d = calcRawImagePointFromPoint3D(pinhole_camera_model, point3d); - roundInImageFrame(pinhole_camera_model, point2d); - tl_roi.roi.width = point2d.x - tl_roi.roi.x_offset; - tl_roi.roi.height = point2d.y - tl_roi.roi.y_offset; - if (tl_roi.roi.width < 1 || tl_roi.roi.height < 1) { + + if (roi.roi.width < 1 || roi.roi.height < 1) { return false; } } return true; } +bool MapBasedDetector::getTrafficLightRoi( + const std::vector & tf_map2camera_vec, + const image_geometry::PinholeCameraModel & pinhole_camera_model, + const lanelet::ConstLineString3d traffic_light, const Config & config, + tier4_perception_msgs::msg::TrafficLightRoi & out_roi) const +{ + std::vector rois; + for (const auto & tf_map2camera : tf_map2camera_vec) { + tier4_perception_msgs::msg::TrafficLightRoi roi; + if (getTrafficLightRoi(tf_map2camera, pinhole_camera_model, traffic_light, config, roi)) { + rois.push_back(roi); + } + } + if (rois.empty()) { + return false; + } + out_roi = rois.front(); + /** + * get the maximum possible rough roi among all the tf + */ + 
uint32_t x1 = pinhole_camera_model.cameraInfo().width - 1; + uint32_t x2 = 0; + uint32_t y1 = pinhole_camera_model.cameraInfo().height - 1; + uint32_t y2 = 0; + for (const auto & roi : rois) { + x1 = std::min(x1, roi.roi.x_offset); + x2 = std::max(x2, roi.roi.x_offset + roi.roi.width); + y1 = std::min(y1, roi.roi.y_offset); + y2 = std::max(y2, roi.roi.y_offset + roi.roi.height); + } + out_roi.roi.x_offset = x1; + out_roi.roi.y_offset = y1; + out_roi.roi.width = x2 - x1; + out_roi.roi.height = y2 - y1; + return true; +} + void MapBasedDetector::mapCallback( const autoware_auto_mapping_msgs::msg::HADMapBin::ConstSharedPtr input_msg) { @@ -326,9 +433,9 @@ void MapBasedDetector::routeCallback( void MapBasedDetector::getVisibleTrafficLights( const MapBasedDetector::TrafficLightSet & all_traffic_lights, - const geometry_msgs::msg::Pose & camera_pose, + const std::vector & tf_map2camera_vec, const image_geometry::PinholeCameraModel & pinhole_camera_model, - std::vector & visible_traffic_lights) + std::vector & visible_traffic_lights) const { for (const auto & traffic_light : all_traffic_lights) { // some "Traffic Light" are actually not traffic lights @@ -337,132 +444,62 @@ void MapBasedDetector::getVisibleTrafficLights( traffic_light.attribute("subtype").value() == "solid") { continue; } - const auto & tl_left_down_point = traffic_light.front(); - const auto & tl_right_down_point = traffic_light.back(); - const double tl_height = traffic_light.attributeOr("height", 0.0); - + // traffic light bottom left + const auto & tl_bl = traffic_light.front(); + // traffic light bottom right + const auto & tl_br = traffic_light.back(); // check distance range - geometry_msgs::msg::Point tl_central_point; - tl_central_point.x = (tl_right_down_point.x() + tl_left_down_point.x()) / 2.0; - tl_central_point.y = (tl_right_down_point.y() + tl_left_down_point.y()) / 2.0; - tl_central_point.z = (tl_right_down_point.z() + tl_left_down_point.z() + tl_height) / 2.0; - constexpr double max_distance_range = 200.0; - if (!isInDistanceRange(tl_central_point, camera_pose.position, max_distance_range)) { - continue; - } - - // check angle range - const double tl_yaw = tier4_autoware_utils::normalizeRadian( - std::atan2( - tl_right_down_point.y() - tl_left_down_point.y(), - tl_right_down_point.x() - tl_left_down_point.x()) + - M_PI_2); - constexpr double max_angle_range = tier4_autoware_utils::deg2rad(40.0); - - // get direction of z axis - tf2::Vector3 camera_z_dir(0, 0, 1); - tf2::Matrix3x3 camera_rotation_matrix(tf2::Quaternion( - camera_pose.orientation.x, camera_pose.orientation.y, camera_pose.orientation.z, - camera_pose.orientation.w)); - camera_z_dir = camera_rotation_matrix * camera_z_dir; - double camera_yaw = std::atan2(camera_z_dir.y(), camera_z_dir.x()); - camera_yaw = tier4_autoware_utils::normalizeRadian(camera_yaw); - if (!isInAngleRange(tl_yaw, camera_yaw, max_angle_range)) { - continue; - } - - // check within image frame - tf2::Transform tf_map2camera( - tf2::Quaternion( - camera_pose.orientation.x, camera_pose.orientation.y, camera_pose.orientation.z, - camera_pose.orientation.w), - tf2::Vector3(camera_pose.position.x, camera_pose.position.y, camera_pose.position.z)); - tf2::Transform tf_map2tl( - tf2::Quaternion(0, 0, 0, 1), - tf2::Vector3(tl_central_point.x, tl_central_point.y, tl_central_point.z)); - tf2::Transform tf_camera2tl; - tf_camera2tl = tf_map2camera.inverse() * tf_map2tl; - - geometry_msgs::msg::Point camera2tl_point; - camera2tl_point.x = tf_camera2tl.getOrigin().x(); - camera2tl_point.y = 
tf_camera2tl.getOrigin().y(); - camera2tl_point.z = tf_camera2tl.getOrigin().z(); - if (!isInImageFrame(pinhole_camera_model, camera2tl_point)) { - continue; - } - visible_traffic_lights.push_back(traffic_light); - } -} - -bool MapBasedDetector::isInDistanceRange( - const geometry_msgs::msg::Point & tl_point, const geometry_msgs::msg::Point & camera_point, - const double max_distance_range) const -{ - const double sq_dist = (tl_point.x - camera_point.x) * (tl_point.x - camera_point.x) + - (tl_point.y - camera_point.y) * (tl_point.y - camera_point.y); - return sq_dist < (max_distance_range * max_distance_range); -} - -bool MapBasedDetector::isInAngleRange( - const double & tl_yaw, const double & camera_yaw, const double max_angle_range) const -{ - Eigen::Vector2d vec1, vec2; - vec1 << std::cos(tl_yaw), std::sin(tl_yaw); - vec2 << std::cos(camera_yaw), std::sin(camera_yaw); - const double diff_angle = std::acos(vec1.dot(vec2)); - return std::fabs(diff_angle) < max_angle_range; -} + tf2::Vector3 tl_center = getTrafficLightCenter(traffic_light); + // for every possible transformation, check if the tl is visible. + // If under any tf the tl is visible, keep it + for (const auto & tf_map2camera : tf_map2camera_vec) { + if (!isInDistanceRange(tl_center, tf_map2camera.getOrigin(), config_.max_detection_range)) { + continue; + } -bool MapBasedDetector::isInImageFrame( - const image_geometry::PinholeCameraModel & pinhole_camera_model, - const geometry_msgs::msg::Point & point) const -{ - if (point.z <= 0.0) { - return false; - } + // check angle range + const double tl_yaw = tier4_autoware_utils::normalizeRadian( + std::atan2(tl_br.y() - tl_bl.y(), tl_br.x() - tl_bl.x()) + M_PI_2); + constexpr double max_angle_range = tier4_autoware_utils::deg2rad(40.0); + + // get direction of z axis + tf2::Vector3 camera_z_dir(0, 0, 1); + tf2::Matrix3x3 camera_rotation_matrix(tf_map2camera.getRotation()); + camera_z_dir = camera_rotation_matrix * camera_z_dir; + double camera_yaw = std::atan2(camera_z_dir.y(), camera_z_dir.x()); + camera_yaw = tier4_autoware_utils::normalizeRadian(camera_yaw); + if (!isInAngleRange(tl_yaw, camera_yaw, max_angle_range)) { + continue; + } - cv::Point2d point2d = calcRawImagePointFromPoint3D(pinhole_camera_model, point); - if (0 <= point2d.x && point2d.x < pinhole_camera_model.cameraInfo().width) { - if (0 <= point2d.y && point2d.y < pinhole_camera_model.cameraInfo().height) { - return true; + // check within image frame + tf2::Vector3 tf_camera2tltl = tf_map2camera.inverse() * getTrafficLightTopLeft(traffic_light); + tf2::Vector3 tf_camera2tlbr = + tf_map2camera.inverse() * getTrafficLightBottomRight(traffic_light); + if ( + !isInImageFrame(pinhole_camera_model, tf_camera2tltl) && + !isInImageFrame(pinhole_camera_model, tf_camera2tlbr)) { + continue; + } + visible_traffic_lights.push_back(traffic_light); + break; } } - return false; } void MapBasedDetector::publishVisibleTrafficLights( - const geometry_msgs::msg::PoseStamped camera_pose_stamped, + const tf2::Transform & tf_map2camera, const std_msgs::msg::Header & cam_info_header, const std::vector & visible_traffic_lights, const rclcpp::Publisher::SharedPtr pub) { visualization_msgs::msg::MarkerArray output_msg; for (const auto & traffic_light : visible_traffic_lights) { - const auto & tl_left_down_point = traffic_light.front(); - const auto & tl_right_down_point = traffic_light.back(); - const double tl_height = traffic_light.attributeOr("height", 0.0); const int id = traffic_light.id(); - - geometry_msgs::msg::Point 
tl_central_point; - tl_central_point.x = (tl_right_down_point.x() + tl_left_down_point.x()) / 2.0; - tl_central_point.y = (tl_right_down_point.y() + tl_left_down_point.y()) / 2.0; - tl_central_point.z = (tl_right_down_point.z() + tl_left_down_point.z() + tl_height) / 2.0; + tf2::Vector3 tl_central_point = getTrafficLightCenter(traffic_light); + tf2::Vector3 camera2tl = tf_map2camera.inverse() * tl_central_point; visualization_msgs::msg::Marker marker; - - tf2::Transform tf_map2camera( - tf2::Quaternion( - camera_pose_stamped.pose.orientation.x, camera_pose_stamped.pose.orientation.y, - camera_pose_stamped.pose.orientation.z, camera_pose_stamped.pose.orientation.w), - tf2::Vector3( - camera_pose_stamped.pose.position.x, camera_pose_stamped.pose.position.y, - camera_pose_stamped.pose.position.z)); - tf2::Transform tf_map2tl( - tf2::Quaternion(0, 0, 0, 1), - tf2::Vector3(tl_central_point.x, tl_central_point.y, tl_central_point.z)); - tf2::Transform tf_camera2tl; - tf_camera2tl = tf_map2camera.inverse() * tf_map2tl; - - marker.header = camera_pose_stamped.header; + marker.header = cam_info_header; marker.id = id; marker.type = visualization_msgs::msg::Marker::LINE_LIST; marker.ns = std::string("beam"); @@ -480,9 +517,9 @@ void MapBasedDetector::publishVisibleTrafficLights( point.y = 0.0; point.z = 0.0; marker.points.push_back(point); - point.x = tf_camera2tl.getOrigin().x(); - point.y = tf_camera2tl.getOrigin().y(); - point.z = tf_camera2tl.getOrigin().z(); + point.x = camera2tl.x(); + point.y = camera2tl.y(); + point.z = camera2tl.z(); marker.points.push_back(point); marker.lifetime = rclcpp::Duration::from_seconds(0.2); diff --git a/perception/traffic_light_multi_camera_fusion/CMakeLists.txt b/perception/traffic_light_multi_camera_fusion/CMakeLists.txt new file mode 100644 index 0000000000000..5765ebf58fd45 --- /dev/null +++ b/perception/traffic_light_multi_camera_fusion/CMakeLists.txt @@ -0,0 +1,23 @@ +cmake_minimum_required(VERSION 3.14) +project(traffic_light_multi_camera_fusion) + +find_package(autoware_cmake REQUIRED) +autoware_package() + +include_directories( + SYSTEM +) + +ament_auto_add_library(traffic_light_multi_camera_fusion SHARED + src/node.cpp +) + +rclcpp_components_register_node(traffic_light_multi_camera_fusion + PLUGIN "traffic_light::MultiCameraFusion" + EXECUTABLE traffic_light_multi_camera_fusion_node +) + +ament_auto_package(INSTALL_TO_SHARE + launch + config +) diff --git a/perception/traffic_light_multi_camera_fusion/README.md b/perception/traffic_light_multi_camera_fusion/README.md new file mode 100644 index 0000000000000..ded0613a202ad --- /dev/null +++ b/perception/traffic_light_multi_camera_fusion/README.md @@ -0,0 +1,34 @@ +# The `traffic_light_multi_camera_fusion` Package + +## Overview + +`traffic_light_multi_camera_fusion` performs traffic light signal fusion, which can be summarized as the following two tasks: + +1. Multi-Camera-Fusion: performed on the signals of a single traffic light detected by different cameras. +2. Group-Fusion: performed on traffic light signals within the same group, i.e. traffic lights sharing the same regulatory element id defined in the lanelet2 map.
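+
+For illustration, the following is a minimal, self-contained sketch of this two-stage selection (the `Record` type, `compare()` rule, and helper names here are illustrative only; the actual implementation lives in `src/node.cpp`):
+
+```cpp
+#include <cstdint>
+#include <map>
+#include <vector>
+
+// Illustrative record; the node's real FusionRecord carries the ROS messages.
+struct Record
+{
+  int64_t traffic_light_id;
+  double confidence;
+};
+
+// Illustrative priority rule; the node also compares timestamps, visibility, and ROI size.
+int compare(const Record & r1, const Record & r2)
+{
+  if (r1.confidence == r2.confidence) return 0;
+  return r1.confidence < r2.confidence ? -1 : 1;
+}
+
+std::map<int64_t, Record> fuse(
+  const std::vector<Record> & records, const std::map<int64_t, int64_t> & light_to_group)
+{
+  // Stage 1 (multi-camera fusion): keep the best record per traffic light id.
+  std::map<int64_t, Record> best_per_light;
+  for (const auto & record : records) {
+    auto it = best_per_light.find(record.traffic_light_id);
+    if (it == best_per_light.end() || compare(record, it->second) >= 0) {
+      best_per_light[record.traffic_light_id] = record;
+    }
+  }
+  // Stage 2 (group fusion): keep the best record per regulatory element id.
+  std::map<int64_t, Record> best_per_group;
+  for (const auto & [light_id, record] : best_per_light) {
+    const int64_t group_id = light_to_group.at(light_id);  // instance id -> group id
+    auto it = best_per_group.find(group_id);
+    if (it == best_per_group.end() || compare(record, it->second) >= 0) {
+      best_per_group[group_id] = record;
+    }
+  }
+  return best_per_group;
+}
+```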
+ +## Input topics + +For every camera, the following three topics are subscribed: +| Name | Type | Description | +| ---------------------------------------| -------------------------------------------------------|----------------------------------------------------| +| `~/<camera namespace>/camera_info` | sensor_msgs::CameraInfo | camera info from traffic_light_map_based_detector | +| `~/<camera namespace>/rois` | tier4_perception_msgs::TrafficLightRoiArray | detection ROI from traffic_light_fine_detector | +| `~/<camera namespace>/traffic_signals` | tier4_perception_msgs::TrafficSignalArray | classification result from traffic_light_classifier | + +You don't need to configure these topics manually. Just provide the `camera_namespaces` parameter and the node will automatically extract the camera namespaces and create the subscribers. + +## Output topics + +| Name | Type | Description | +| -------------------------- | ---------------------------------------------- | ---------------------------------- | +| `~/output/traffic_signals` | tier4_perception_msgs::TrafficSignalArray | traffic light signal fusion result | + +## Node parameters + +| Parameter | Type | Description | +| ---------------------- | --------------- | ------------------------------------------------ | +| `camera_namespaces` | vector\<string\> | Camera namespaces to be fused | +| `message_lifespan` | double | The maximum timestamp span among messages to be fused | +| `approximate_sync` | bool | Whether to work in approximate synchronization mode | +| `perform_group_fusion` | bool | Whether to perform group fusion | diff --git a/perception/traffic_light_multi_camera_fusion/config/traffic_light_multi_camera_fusion.param.yaml b/perception/traffic_light_multi_camera_fusion/config/traffic_light_multi_camera_fusion.param.yaml new file mode 100644 index 0000000000000..4e9459ce4a71c --- /dev/null +++ b/perception/traffic_light_multi_camera_fusion/config/traffic_light_multi_camera_fusion.param.yaml @@ -0,0 +1,6 @@ +/**: + ros__parameters: + camera_namespaces: ["camera6", "camera7"] + message_lifespan: 0.09 + approximate_sync: false + perform_group_fusion: true diff --git a/perception/traffic_light_multi_camera_fusion/include/traffic_light_multi_camera_fusion/node.hpp b/perception/traffic_light_multi_camera_fusion/include/traffic_light_multi_camera_fusion/node.hpp new file mode 100644 index 0000000000000..bc63f34b191e7 --- /dev/null +++ b/perception/traffic_light_multi_camera_fusion/include/traffic_light_multi_camera_fusion/node.hpp @@ -0,0 +1,131 @@ +// Copyright 2023 TIER IV, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
+ +#ifndef TRAFFIC_LIGHT_MULTI_CAMERA_FUSION__NODE_HPP_ +#define TRAFFIC_LIGHT_MULTI_CAMERA_FUSION__NODE_HPP_ + +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +namespace traffic_light +{ + +namespace mf = message_filters; + +struct FusionRecord +{ + std_msgs::msg::Header header; + sensor_msgs::msg::CameraInfo cam_info; + tier4_perception_msgs::msg::TrafficLightRoi roi; + tier4_perception_msgs::msg::TrafficSignal signal; +}; + +struct FusionRecordArr +{ + std_msgs::msg::Header header; + sensor_msgs::msg::CameraInfo cam_info; + tier4_perception_msgs::msg::TrafficLightRoiArray rois; + tier4_perception_msgs::msg::TrafficSignalArray signals; +}; + +bool operator<(const FusionRecordArr & r1, const FusionRecordArr & r2) +{ + return rclcpp::Time(r1.header.stamp) < rclcpp::Time(r2.header.stamp); +} + +class MultiCameraFusion : public rclcpp::Node +{ +public: + typedef sensor_msgs::msg::CameraInfo CamInfoType; + typedef tier4_perception_msgs::msg::TrafficLightRoi RoiType; + typedef tier4_perception_msgs::msg::TrafficSignal SignalType; + typedef tier4_perception_msgs::msg::TrafficSignalArray SignalArrayType; + typedef tier4_perception_msgs::msg::TrafficLightRoiArray RoiArrayType; + typedef tier4_perception_msgs::msg::TrafficLightRoi::_traffic_light_id_type IdType; + typedef autoware_perception_msgs::msg::TrafficSignal NewSignalType; + typedef autoware_perception_msgs::msg::TrafficSignalArray NewSignalArrayType; + + typedef std::pair RecordArrayType; + + explicit MultiCameraFusion(const rclcpp::NodeOptions & node_options); + +private: + void trafficSignalRoiCallback( + const CamInfoType::ConstSharedPtr cam_info_msg, const RoiArrayType::ConstSharedPtr roi_msg, + const SignalArrayType::ConstSharedPtr signal_msg); + + void mapCallback(const autoware_auto_mapping_msgs::msg::HADMapBin::ConstSharedPtr input_msg); + + void multiCameraFusion(std::map & fusioned_record_map); + + void convertOutputMsg( + const std::map & grouped_record_map, NewSignalArrayType & msg_out); + + void groupFusion( + std::map & fusioned_record_map, + std::map & grouped_record_map); + + typedef mf::sync_policies::ExactTime ExactSyncPolicy; + typedef mf::Synchronizer ExactSync; + typedef mf::sync_policies::ApproximateTime + ApproSyncPolicy; + typedef mf::Synchronizer ApproSync; + + std::vector>> signal_subs_; + std::vector>> roi_subs_; + std::vector>> cam_info_subs_; + std::vector> exact_sync_subs_; + std::vector> appro_sync_subs_; + rclcpp::Subscription::SharedPtr map_sub_; + + rclcpp::Publisher::SharedPtr signal_pub_; + /* + the mapping from traffic light id (instance id) to regulatory element id (group id) + */ + std::map traffic_light_id_to_regulatory_ele_id_; + /* + save record arrays in increasing timestamp order. + use a multiset in case there are multiple cameras publishing images at exactly the same time + */ + std::multiset record_arr_set_; + bool is_approximate_sync_; + /* + for every input message input_m, if the timestamp difference between input_m and the latest + message is smaller than message_lifespan_, then input_m would be used for the fusion.
Otherwise, + it would be discarded + */ + double message_lifespan_; +}; +} // namespace traffic_light +#endif // TRAFFIC_LIGHT_MULTI_CAMERA_FUSION__NODE_HPP_ diff --git a/perception/traffic_light_multi_camera_fusion/launch/traffic_light_multi_camera_fusion.launch.xml b/perception/traffic_light_multi_camera_fusion/launch/traffic_light_multi_camera_fusion.launch.xml new file mode 100644 index 0000000000000..9aa4958378c1c --- /dev/null +++ b/perception/traffic_light_multi_camera_fusion/launch/traffic_light_multi_camera_fusion.launch.xml @@ -0,0 +1,12 @@ + + + + + + + + + + + + diff --git a/perception/traffic_light_multi_camera_fusion/package.xml b/perception/traffic_light_multi_camera_fusion/package.xml new file mode 100644 index 0000000000000..6dd0bf89bd716 --- /dev/null +++ b/perception/traffic_light_multi_camera_fusion/package.xml @@ -0,0 +1,28 @@ + + + + traffic_light_multi_camera_fusion + 0.1.0 + The traffic_light_multi_camera_fusion package + Mingyu Li + Apache License 2.0 + + ament_cmake_auto + + autoware_cmake + + autoware_auto_mapping_msgs + autoware_perception_msgs + lanelet2_extension + rclcpp + rclcpp_components + sensor_msgs + tier4_perception_msgs + + ament_lint_auto + autoware_lint_common + + + ament_cmake + + diff --git a/perception/traffic_light_multi_camera_fusion/src/node.cpp b/perception/traffic_light_multi_camera_fusion/src/node.cpp new file mode 100644 index 0000000000000..7500193a3b643 --- /dev/null +++ b/perception/traffic_light_multi_camera_fusion/src/node.cpp @@ -0,0 +1,322 @@ +// Copyright 2023 TIER IV, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "traffic_light_multi_camera_fusion/node.hpp" + +#include +#include +#include +#include + +namespace +{ + +bool isUnknown(const tier4_perception_msgs::msg::TrafficSignal & signal) +{ + return signal.elements.size() == 1 && + signal.elements[0].color == tier4_perception_msgs::msg::TrafficLightElement::UNKNOWN && + signal.elements[0].shape == tier4_perception_msgs::msg::TrafficLightElement::UNKNOWN; +} + +/** + * @brief Currently the visible score only considers the truncation. + * If the detection roi is very close to the image boundary, it would be considered as truncated. 
+ * + * @param record fusion record + * @return 0 if traffic light is truncated, otherwise 1 + */ +int calVisibleScore(const traffic_light::FusionRecord & record) +{ + const uint32_t boundary = 5; + uint32_t x1 = record.roi.roi.x_offset; + uint32_t x2 = record.roi.roi.x_offset + record.roi.roi.width; + uint32_t y1 = record.roi.roi.y_offset; + uint32_t y2 = record.roi.roi.y_offset + record.roi.roi.height; + if ( + x1 <= boundary || (record.cam_info.width - x2) <= boundary || y1 <= boundary || + (record.cam_info.height - y2) <= boundary) { + return 0; + } else { + return 1; + } +} + +int compareRecord(const traffic_light::FusionRecord & r1, const traffic_light::FusionRecord & r2) +{ + /* + if both records are from the same sensor but different stamp, trust the latest one + */ + double t1 = rclcpp::Time(r1.header.stamp).seconds(); + double t2 = rclcpp::Time(r2.header.stamp).seconds(); + const double dt_thres = 1e-3; + if (r1.header.frame_id == r2.header.frame_id && std::abs(t1 - t2) >= dt_thres) { + return t1 < t2 ? -1 : 1; + } + bool r1_is_unknown = isUnknown(r1.signal); + bool r2_is_unknown = isUnknown(r2.signal); + /* + if both are unknown, they are of the same priority + */ + if (r1_is_unknown && r2_is_unknown) { + return 0; + } else if (r1_is_unknown ^ r2_is_unknown) { + /* + if either is unknown, the unknown is of lower priority + */ + return r1_is_unknown ? -1 : 1; + } + int visible_score_1 = calVisibleScore(r1); + int visible_score_2 = calVisibleScore(r2); + if (visible_score_1 == visible_score_2) { + int area_1 = r1.roi.roi.width * r1.roi.roi.height; + int area_2 = r2.roi.roi.width * r2.roi.roi.height; + if (area_1 < area_2) { + return -1; + } else { + return static_cast(area_1 > area_2); + } + } else { + return visible_score_1 < visible_score_2 ? -1 : 1; + } +} + +template +V at_or(const std::unordered_map & map, const K & key, const V & value) +{ + return map.count(key) ? 
map.at(key) : value; +} + +autoware_perception_msgs::msg::TrafficLightElement convert( + const tier4_perception_msgs::msg::TrafficLightElement & input) +{ + typedef tier4_perception_msgs::msg::TrafficLightElement OldElem; + typedef autoware_perception_msgs::msg::TrafficLightElement NewElem; + static const std::unordered_map color_map( + {{OldElem::RED, NewElem::RED}, + {OldElem::AMBER, NewElem::AMBER}, + {OldElem::GREEN, NewElem::GREEN}, + {OldElem::WHITE, NewElem::WHITE}}); + static const std::unordered_map shape_map( + {{OldElem::CIRCLE, NewElem::CIRCLE}, + {OldElem::LEFT_ARROW, NewElem::LEFT_ARROW}, + {OldElem::RIGHT_ARROW, NewElem::RIGHT_ARROW}, + {OldElem::UP_ARROW, NewElem::UP_ARROW}, + {OldElem::UP_LEFT_ARROW, NewElem::UP_LEFT_ARROW}, + {OldElem::UP_RIGHT_ARROW, NewElem::UP_RIGHT_ARROW}, + {OldElem::DOWN_ARROW, NewElem::DOWN_ARROW}, + {OldElem::DOWN_LEFT_ARROW, NewElem::DOWN_LEFT_ARROW}, + {OldElem::DOWN_RIGHT_ARROW, NewElem::DOWN_RIGHT_ARROW}, + {OldElem::CROSS, NewElem::CROSS}}); + static const std::unordered_map status_map( + {{OldElem::SOLID_OFF, NewElem::SOLID_OFF}, + {OldElem::SOLID_ON, NewElem::SOLID_ON}, + {OldElem::FLASHING, NewElem::FLASHING}}); + // clang-format on + + NewElem output; + output.color = at_or(color_map, input.color, NewElem::UNKNOWN); + output.shape = at_or(shape_map, input.shape, NewElem::UNKNOWN); + output.status = at_or(status_map, input.status, NewElem::UNKNOWN); + output.confidence = input.confidence; + return output; +} + +} // namespace + +namespace traffic_light +{ + +MultiCameraFusion::MultiCameraFusion(const rclcpp::NodeOptions & node_options) +: Node("traffic_light_multi_camera_fusion", node_options) +{ + using std::placeholders::_1; + using std::placeholders::_2; + using std::placeholders::_3; + + std::vector camera_namespaces = + this->declare_parameter("camera_namespaces", std::vector{}); + is_approximate_sync_ = this->declare_parameter("approximate_sync", false); + message_lifespan_ = this->declare_parameter("message_lifespan", 0.09); + for (const std::string & camera_ns : camera_namespaces) { + std::string signal_topic = camera_ns + "/traffic_signals"; + std::string roi_topic = camera_ns + "/rois"; + std::string cam_info_topic = camera_ns + "/camera_info"; + roi_subs_.emplace_back( + new mf::Subscriber(this, roi_topic, rclcpp::QoS{1}.get_rmw_qos_profile())); + signal_subs_.emplace_back(new mf::Subscriber( + this, signal_topic, rclcpp::QoS{1}.get_rmw_qos_profile())); + cam_info_subs_.emplace_back(new mf::Subscriber( + this, cam_info_topic, rclcpp::SensorDataQoS().get_rmw_qos_profile())); + if (is_approximate_sync_ == false) { + exact_sync_subs_.emplace_back(new ExactSync( + ExactSyncPolicy(10), *(cam_info_subs_.back()), *(roi_subs_.back()), + *(signal_subs_.back()))); + exact_sync_subs_.back()->registerCallback( + std::bind(&MultiCameraFusion::trafficSignalRoiCallback, this, _1, _2, _3)); + } else { + appro_sync_subs_.emplace_back(new ApproSync( + ApproSyncPolicy(10), *(cam_info_subs_.back()), *(roi_subs_.back()), + *(signal_subs_.back()))); + appro_sync_subs_.back()->registerCallback( + std::bind(&MultiCameraFusion::trafficSignalRoiCallback, this, _1, _2, _3)); + } + } + + map_sub_ = create_subscription( + "~/input/vector_map", rclcpp::QoS{1}.transient_local(), + std::bind(&MultiCameraFusion::mapCallback, this, _1)); + signal_pub_ = create_publisher("~/output/traffic_signals", rclcpp::QoS{1}); +} + +void MultiCameraFusion::trafficSignalRoiCallback( + const CamInfoType::ConstSharedPtr cam_info_msg, const RoiArrayType::ConstSharedPtr roi_msg, 
+ const SignalArrayType::ConstSharedPtr signal_msg) +{ + rclcpp::Time stamp(roi_msg->header.stamp); + /* + Insert the received record array into the table. + Note that this record array might not have the newest timestamp + */ + record_arr_set_.insert( + FusionRecordArr{cam_info_msg->header, *cam_info_msg, *roi_msg, *signal_msg}); + + std::map fusioned_record_map, grouped_record_map; + multiCameraFusion(fusioned_record_map); + groupFusion(fusioned_record_map, grouped_record_map); + + NewSignalArrayType msg_out; + convertOutputMsg(grouped_record_map, msg_out); + msg_out.stamp = cam_info_msg->header.stamp; + signal_pub_->publish(msg_out); +} + +void MultiCameraFusion::mapCallback( + const autoware_auto_mapping_msgs::msg::HADMapBin::ConstSharedPtr input_msg) +{ + lanelet::LaneletMapPtr lanelet_map_ptr = std::make_shared(); + + lanelet::utils::conversion::fromBinMsg(*input_msg, lanelet_map_ptr); + lanelet::ConstLanelets all_lanelets = lanelet::utils::query::laneletLayer(lanelet_map_ptr); + std::vector all_lanelet_traffic_lights = + lanelet::utils::query::autowareTrafficLights(all_lanelets); + for (auto tl_itr = all_lanelet_traffic_lights.begin(); tl_itr != all_lanelet_traffic_lights.end(); + ++tl_itr) { + lanelet::AutowareTrafficLightConstPtr tl = *tl_itr; + + auto lights = tl->trafficLights(); + for (const auto & light : lights) { + traffic_light_id_to_regulatory_ele_id_[light.id()] = tl->id(); + } + } +} + +void MultiCameraFusion::convertOutputMsg( + const std::map & grouped_record_map, NewSignalArrayType & msg_out) +{ + msg_out.signals.clear(); + for (const auto & p : grouped_record_map) { + IdType reg_ele_id = p.first; + const SignalType & signal = p.second.signal; + NewSignalType signal_out; + signal_out.traffic_signal_id = reg_ele_id; + for (const auto & ele : signal.elements) { + signal_out.elements.push_back(convert(ele)); + } + msg_out.signals.push_back(signal_out); + } +} + +void MultiCameraFusion::multiCameraFusion(std::map & fusioned_record_map) +{ + fusioned_record_map.clear(); + /* + this should not happen. Just in case + */ + if (record_arr_set_.empty()) { + RCLCPP_ERROR(get_logger(), "record_arr_set_ is empty! This should not happen"); + return; + } + const rclcpp::Time & newest_stamp(record_arr_set_.rbegin()->header.stamp); + for (auto it = record_arr_set_.begin(); it != record_arr_set_.end();) { + /* + remove all old record arrays whose timestamp difference with the newest record is larger than + the threshold + */ + if ( + (newest_stamp - rclcpp::Time(it->header.stamp)) > + rclcpp::Duration::from_seconds(message_lifespan_)) { + it = record_arr_set_.erase(it); + } else { + /* + generate the fused record result from the saved records + */ + const FusionRecordArr & record_arr = *it; + for (size_t i = 0; i < record_arr.rois.rois.size(); i++) { + const RoiType & roi = record_arr.rois.rois[i]; + auto signal_it = std::find_if( + record_arr.signals.signals.begin(), record_arr.signals.signals.end(), + [roi](const SignalType & s1) { return roi.traffic_light_id == s1.traffic_light_id; }); + /* + failed to find corresponding signal.
skip it + */ + if (signal_it == record_arr.signals.signals.end()) { + continue; + } + FusionRecord record{record_arr.header, record_arr.cam_info, roi, *signal_it}; + /* + if this traffic light is not detected yet or can be updated by higher priority record, + update it + */ + if ( + fusioned_record_map.find(roi.traffic_light_id) == fusioned_record_map.end() || + ::compareRecord(record, fusioned_record_map[roi.traffic_light_id]) >= 0) { + fusioned_record_map[roi.traffic_light_id] = record; + } + } + it++; + } + } +} + +void MultiCameraFusion::groupFusion( + std::map & fusioned_record_map, + std::map & grouped_record_map) +{ + grouped_record_map.clear(); + for (auto & p : fusioned_record_map) { + IdType roi_id = p.second.roi.traffic_light_id; + /* + this should not happen + */ + if (traffic_light_id_to_regulatory_ele_id_.count(roi_id) == 0) { + RCLCPP_WARN_STREAM( + get_logger(), "Found Traffic Light Id = " << roi_id << " which is not defined in Map"); + } else { + /* + keep the best record for every regulatory element id + */ + IdType reg_ele_id = traffic_light_id_to_regulatory_ele_id_[p.second.roi.traffic_light_id]; + if ( + grouped_record_map.count(reg_ele_id) == 0 || + ::compareRecord(p.second, grouped_record_map[reg_ele_id]) >= 0) { + grouped_record_map[reg_ele_id] = p.second; + } + } + } +} + +} // namespace traffic_light + +#include +RCLCPP_COMPONENTS_REGISTER_NODE(traffic_light::MultiCameraFusion) diff --git a/perception/traffic_light_selector/README.md b/perception/traffic_light_selector/README.md index 9e38333c0e345..d20d5dc790e3a 100644 --- a/perception/traffic_light_selector/README.md +++ b/perception/traffic_light_selector/README.md @@ -32,9 +32,9 @@ A temporary node that converts old message type to new message type. #### Input -| Name | Type | Description | -| -------------------- | ---------------------------------------------------- | ------------------------------ | -| ~/sub/traffic_lights | autoware_auto_perception_msgs/msg/TrafficSignalArray | The state in old message type. | +| Name | Type | Description | +| -------------------- | -------------------------------------------- | ------------------------------ | +| ~/sub/traffic_lights | tier4_perception_msgs/msg/TrafficSignalArray | The state in old message type. 
| #### Output diff --git a/perception/traffic_light_selector/package.xml b/perception/traffic_light_selector/package.xml index 75796ac3cd710..fb696dd54dddd 100644 --- a/perception/traffic_light_selector/package.xml +++ b/perception/traffic_light_selector/package.xml @@ -11,12 +11,12 @@ autoware_cmake autoware_auto_mapping_msgs - autoware_auto_perception_msgs autoware_perception_msgs lanelet2_core lanelet2_extension rclcpp rclcpp_components + tier4_perception_msgs ament_lint_auto autoware_lint_common diff --git a/perception/traffic_light_selector/src/traffic_light_converter.cpp b/perception/traffic_light_selector/src/traffic_light_converter.cpp index d67cfaa53de79..afc8dab72e788 100644 --- a/perception/traffic_light_selector/src/traffic_light_converter.cpp +++ b/perception/traffic_light_selector/src/traffic_light_converter.cpp @@ -20,9 +20,9 @@ namespace converter { -using OldList = autoware_auto_perception_msgs::msg::TrafficSignalArray; -using OldData = autoware_auto_perception_msgs::msg::TrafficSignal; -using OldElem = autoware_auto_perception_msgs::msg::TrafficLight; +using OldList = tier4_perception_msgs::msg::TrafficSignalArray; +using OldData = tier4_perception_msgs::msg::TrafficSignal; +using OldElem = tier4_perception_msgs::msg::TrafficLightElement; using NewList = autoware_perception_msgs::msg::TrafficLightArray; using NewData = autoware_perception_msgs::msg::TrafficLight; using NewElem = autoware_perception_msgs::msg::TrafficLightElement; @@ -53,8 +53,8 @@ NewList convert(const OldList & input) NewData convert(const OldData & input) { NewData output; - output.traffic_light_id = input.map_primitive_id; - output.elements = convert_vector(input.lights); + output.traffic_light_id = input.traffic_light_id; + output.elements = convert_vector(input.elements); return output; } diff --git a/perception/traffic_light_selector/src/traffic_light_converter.hpp b/perception/traffic_light_selector/src/traffic_light_converter.hpp index db6d0a76c9c52..8c6c6545d9dc2 100644 --- a/perception/traffic_light_selector/src/traffic_light_converter.hpp +++ b/perception/traffic_light_selector/src/traffic_light_converter.hpp @@ -17,8 +17,8 @@ #include -#include #include +#include class TrafficLightConverter : public rclcpp::Node { @@ -26,7 +26,7 @@ class TrafficLightConverter : public rclcpp::Node explicit TrafficLightConverter(const rclcpp::NodeOptions & options); private: - using OldMessage = autoware_auto_perception_msgs::msg::TrafficSignalArray; + using OldMessage = tier4_perception_msgs::msg::TrafficSignalArray; using NewMessage = autoware_perception_msgs::msg::TrafficLightArray; rclcpp::Subscription::SharedPtr sub_; rclcpp::Publisher::SharedPtr pub_; diff --git a/perception/traffic_light_ssd_fine_detector/README.md b/perception/traffic_light_ssd_fine_detector/README.md index 01a30fac88fe4..1dd05665709f5 100644 --- a/perception/traffic_light_ssd_fine_detector/README.md +++ b/perception/traffic_light_ssd_fine_detector/README.md @@ -98,17 +98,17 @@ Based on the camera image and the global ROI array detected by `map_based_detect ### Input -| Name | Type | Description | -| --------------- | ---------------------------------------------------------- | ------------------------------------------------ | -| `~/input/image` | `sensor_msgs/Image` | The full size camera image | -| `~/input/rois` | `autoware_auto_perception_msgs::msg::TrafficLightRoiArray` | The array of ROIs detected by map_based_detector | +| Name | Type | Description | +| --------------- | -------------------------------------------------- | 
------------------------------------------------ | +| `~/input/image` | `sensor_msgs/Image` | The full size camera image | +| `~/input/rois` | `tier4_perception_msgs::msg::TrafficLightRoiArray` | The array of ROIs detected by map_based_detector | ### Output -| Name | Type | Description | -| --------------------- | ---------------------------------------------------------- | ---------------------------- | -| `~/output/rois` | `autoware_auto_perception_msgs::msg::TrafficLightRoiArray` | The detected accurate rois | -| `~/debug/exe_time_ms` | `tier4_debug_msgs::msg::Float32Stamped` | The time taken for inference | +| Name | Type | Description | +| --------------------- | -------------------------------------------------- | ---------------------------- | +| `~/output/rois` | `tier4_perception_msgs::msg::TrafficLightRoiArray` | The detected accurate rois | +| `~/debug/exe_time_ms` | `tier4_debug_msgs::msg::Float32Stamped` | The time taken for inference | ## Parameters diff --git a/perception/traffic_light_ssd_fine_detector/include/traffic_light_ssd_fine_detector/nodelet.hpp b/perception/traffic_light_ssd_fine_detector/include/traffic_light_ssd_fine_detector/nodelet.hpp index a2fa16de01a25..14aa7fcbd94fe 100644 --- a/perception/traffic_light_ssd_fine_detector/include/traffic_light_ssd_fine_detector/nodelet.hpp +++ b/perception/traffic_light_ssd_fine_detector/include/traffic_light_ssd_fine_detector/nodelet.hpp @@ -22,10 +22,10 @@ #include #include -#include #include #include #include +#include #if __has_include() #include @@ -58,8 +58,7 @@ class TrafficLightSSDFineDetectorNodelet : public rclcpp::Node void connectCb(); void callback( const sensor_msgs::msg::Image::ConstSharedPtr image_msg, - const autoware_auto_perception_msgs::msg::TrafficLightRoiArray::ConstSharedPtr - traffic_light_roi_msg); + const tier4_perception_msgs::msg::TrafficLightRoiArray::ConstSharedPtr traffic_light_roi_msg); private: bool cvMat2CnnInput( @@ -70,29 +69,27 @@ class TrafficLightSSDFineDetectorNodelet : public rclcpp::Node bool rosMsg2CvMat(const sensor_msgs::msg::Image::ConstSharedPtr image_msg, cv::Mat & image); bool fitInFrame(cv::Point & lt, cv::Point & rb, const cv::Size & size); void cvRect2TlRoiMsg( - const cv::Rect & rect, const int32_t id, - autoware_auto_perception_msgs::msg::TrafficLightRoi & tl_roi); + const cv::Rect & rect, const int32_t id, tier4_perception_msgs::msg::TrafficLightRoi & tl_roi); bool readLabelFile(std::string filepath, std::vector & labels); bool getTlrIdFromLabel(const std::vector & labels, int & tlr_id); // variables std::shared_ptr image_transport_; image_transport::SubscriberFilter image_sub_; - message_filters::Subscriber roi_sub_; + message_filters::Subscriber roi_sub_; std::mutex connect_mutex_; - rclcpp::Publisher::SharedPtr - output_roi_pub_; + rclcpp::Publisher::SharedPtr output_roi_pub_; rclcpp::Publisher::SharedPtr exe_time_pub_; rclcpp::TimerBase::SharedPtr timer_; typedef message_filters::sync_policies::ExactTime< - sensor_msgs::msg::Image, autoware_auto_perception_msgs::msg::TrafficLightRoiArray> + sensor_msgs::msg::Image, tier4_perception_msgs::msg::TrafficLightRoiArray> SyncPolicy; typedef message_filters::Synchronizer Sync; std::shared_ptr sync_; typedef message_filters::sync_policies::ApproximateTime< - sensor_msgs::msg::Image, autoware_auto_perception_msgs::msg::TrafficLightRoiArray> + sensor_msgs::msg::Image, tier4_perception_msgs::msg::TrafficLightRoiArray> ApproximateSyncPolicy; typedef message_filters::Synchronizer ApproximateSync; std::shared_ptr approximate_sync_; 
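As context for the `ExactTime`/`ApproximateTime` policies above, here is a minimal, self-contained sketch (not part of this diff) of how such an image/ROI synchronizer is typically wired up; the node, topic, and callback names are illustrative:

```cpp
#include <functional>
#include <memory>

#include <message_filters/subscriber.h>
#include <message_filters/sync_policies/approximate_time.h>
#include <message_filters/synchronizer.h>
#include <rclcpp/rclcpp.hpp>
#include <sensor_msgs/msg/image.hpp>
#include <tier4_perception_msgs/msg/traffic_light_roi_array.hpp>

using SyncPolicy = message_filters::sync_policies::ApproximateTime<
  sensor_msgs::msg::Image, tier4_perception_msgs::msg::TrafficLightRoiArray>;
using Sync = message_filters::Synchronizer<SyncPolicy>;

class SyncExample : public rclcpp::Node
{
public:
  SyncExample() : Node("sync_example")
  {
    image_sub_.subscribe(this, "~/input/image");
    roi_sub_.subscribe(this, "~/input/rois");
    // Queue size 10; ApproximateTime pairs the messages whose stamps are closest.
    sync_ = std::make_shared<Sync>(SyncPolicy(10), image_sub_, roi_sub_);
    sync_->registerCallback(
      std::bind(&SyncExample::callback, this, std::placeholders::_1, std::placeholders::_2));
  }

private:
  void callback(
    const sensor_msgs::msg::Image::ConstSharedPtr & image,
    const tier4_perception_msgs::msg::TrafficLightRoiArray::ConstSharedPtr & rois)
  {
    RCLCPP_INFO(
      get_logger(), "synchronized image %ux%u with %zu rois", image->width, image->height,
      rois->rois.size());
  }

  message_filters::Subscriber<sensor_msgs::msg::Image> image_sub_;
  message_filters::Subscriber<tier4_perception_msgs::msg::TrafficLightRoiArray> roi_sub_;
  std::shared_ptr<Sync> sync_;
};
```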
diff --git a/perception/traffic_light_ssd_fine_detector/package.xml b/perception/traffic_light_ssd_fine_detector/package.xml index 49f67d171295a..994f84bacdae4 100644 --- a/perception/traffic_light_ssd_fine_detector/package.xml +++ b/perception/traffic_light_ssd_fine_detector/package.xml @@ -10,7 +10,6 @@ ament_cmake_auto autoware_cmake - autoware_auto_perception_msgs cv_bridge image_transport message_filters @@ -18,6 +17,7 @@ rclcpp_components sensor_msgs tier4_debug_msgs + tier4_perception_msgs autoware_lint_common diff --git a/perception/traffic_light_ssd_fine_detector/src/nodelet.cpp b/perception/traffic_light_ssd_fine_detector/src/nodelet.cpp index 96014800ce842..ffdca742dc918 100644 --- a/perception/traffic_light_ssd_fine_detector/src/nodelet.cpp +++ b/perception/traffic_light_ssd_fine_detector/src/nodelet.cpp @@ -103,8 +103,7 @@ TrafficLightSSDFineDetectorNodelet::TrafficLightSSDFineDetectorNodelet( std::lock_guard lock(connect_mutex_); output_roi_pub_ = - this->create_publisher( - "~/output/rois", 1); + this->create_publisher("~/output/rois", 1); exe_time_pub_ = this->create_publisher("~/debug/exe_time_ms", 1); if (is_approximate_sync_) { @@ -136,7 +135,7 @@ void TrafficLightSSDFineDetectorNodelet::connectCb() void TrafficLightSSDFineDetectorNodelet::callback( const sensor_msgs::msg::Image::ConstSharedPtr in_image_msg, - const autoware_auto_perception_msgs::msg::TrafficLightRoiArray::ConstSharedPtr in_roi_msg) + const tier4_perception_msgs::msg::TrafficLightRoiArray::ConstSharedPtr in_roi_msg) { if (in_image_msg->width < 2 || in_image_msg->height < 2) { return; @@ -146,7 +145,7 @@ void TrafficLightSSDFineDetectorNodelet::callback( using std::chrono::milliseconds; const auto exe_start_time = high_resolution_clock::now(); cv::Mat original_image; - autoware_auto_perception_msgs::msg::TrafficLightRoiArray out_rois; + tier4_perception_msgs::msg::TrafficLightRoiArray out_rois; rosMsg2CvMat(in_image_msg, original_image); int num_rois = in_roi_msg->rois.size(); @@ -216,9 +215,10 @@ void TrafficLightSSDFineDetectorNodelet::callback( lts.at(i).x + detections.at(i).x + detections.at(i).w, lts.at(i).y + detections.at(i).y + detections.at(i).h); fitInFrame(lt_roi, rb_roi, cv::Size(original_image.size())); - autoware_auto_perception_msgs::msg::TrafficLightRoi tl_roi; + tier4_perception_msgs::msg::TrafficLightRoi tl_roi; cvRect2TlRoiMsg( - cv::Rect(lt_roi, rb_roi), in_roi_msg->rois.at(i + batch_count * batch_size).id, tl_roi); + cv::Rect(lt_roi, rb_roi), + in_roi_msg->rois.at(i + batch_count * batch_size).traffic_light_id, tl_roi); out_rois.rois.push_back(tl_roi); } } @@ -345,10 +345,9 @@ bool TrafficLightSSDFineDetectorNodelet::fitInFrame( } void TrafficLightSSDFineDetectorNodelet::cvRect2TlRoiMsg( - const cv::Rect & rect, const int32_t id, - autoware_auto_perception_msgs::msg::TrafficLightRoi & tl_roi) + const cv::Rect & rect, const int32_t id, tier4_perception_msgs::msg::TrafficLightRoi & tl_roi) { - tl_roi.id = id; + tl_roi.traffic_light_id = id; tl_roi.roi.x_offset = rect.x; tl_roi.roi.y_offset = rect.y; tl_roi.roi.width = rect.width; diff --git a/perception/traffic_light_visualization/README.md b/perception/traffic_light_visualization/README.md index f675d42aaf089..5d99d6614a1ae 100644 --- a/perception/traffic_light_visualization/README.md +++ b/perception/traffic_light_visualization/README.md @@ -17,10 +17,10 @@ The `traffic_light_visualization` is a package that includes two visualizing nod #### Input -| Name | Type | Description | -| -------------------- | 
-------------------------------------------------------- | ------------------------ | -| `~/input/tl_state` | `autoware_auto_perception_msgs::msg::TrafficSignalArray` | status of traffic lights | -| `~/input/vector_map` | `autoware_auto_mapping_msgs::msg::HADMapBin` | vector map | +| Name | Type | Description | +| -------------------- | ------------------------------------------------ | ------------------------ | +| `~/input/tl_state` | `tier4_perception_msgs::msg::TrafficSignalArray` | status of traffic lights | +| `~/input/vector_map` | `autoware_auto_mapping_msgs::msg::HADMapBin` | vector map | #### Output @@ -32,12 +32,12 @@ The `traffic_light_visualization` is a package that includes two visualizing nod #### Input -| Name | Type | Description | -| ----------------------------- | ---------------------------------------------------------- | ------------------------------------------------------- | -| `~/input/tl_state` | `autoware_auto_perception_msgs::msg::TrafficSignalArray` | status of traffic lights | -| `~/input/image` | `sensor_msgs::msg::Image` | the image captured by perception cameras | -| `~/input/rois` | `autoware_auto_perception_msgs::msg::TrafficLightRoiArray` | the ROIs detected by `traffic_light_ssd_fine_detector` | -| `~/input/rough/rois` (option) | `autoware_auto_perception_msgs::msg::TrafficLightRoiArray` | the ROIs detected by `traffic_light_map_based_detector` | +| Name | Type | Description | +| ----------------------------- | -------------------------------------------------- | ------------------------------------------------------- | +| `~/input/tl_state` | `tier4_perception_msgs::msg::TrafficSignalArray` | status of traffic lights | +| `~/input/image` | `sensor_msgs::msg::Image` | the image captured by perception cameras | +| `~/input/rois` | `tier4_perception_msgs::msg::TrafficLightRoiArray` | the ROIs detected by `traffic_light_ssd_fine_detector` | +| `~/input/rough/rois` (option) | `tier4_perception_msgs::msg::TrafficLightRoiArray` | the ROIs detected by `traffic_light_map_based_detector` | #### Output diff --git a/perception/traffic_light_visualization/include/traffic_light_map_visualizer/node.hpp b/perception/traffic_light_visualization/include/traffic_light_map_visualizer/node.hpp index 3f82712538a76..07cb21e60b137 100644 --- a/perception/traffic_light_visualization/include/traffic_light_map_visualizer/node.hpp +++ b/perception/traffic_light_visualization/include/traffic_light_map_visualizer/node.hpp @@ -20,7 +20,7 @@ #include #include -#include +#include #include #include @@ -35,15 +35,14 @@ class TrafficLightMapVisualizerNode : public rclcpp::Node TrafficLightMapVisualizerNode(const std::string & node_name, const rclcpp::NodeOptions & options); ~TrafficLightMapVisualizerNode() = default; void trafficSignalsCallback( - const autoware_auto_perception_msgs::msg::TrafficSignalArray::ConstSharedPtr + const autoware_perception_msgs::msg::TrafficSignalArray::ConstSharedPtr input_traffic_signals_msg); void binMapCallback( const autoware_auto_mapping_msgs::msg::HADMapBin::ConstSharedPtr input_map_msg); private: rclcpp::Publisher::SharedPtr light_marker_pub_; - rclcpp::Subscription::SharedPtr - tl_state_sub_; + rclcpp::Subscription::SharedPtr tl_state_sub_; rclcpp::Subscription::SharedPtr vector_map_sub_; std::vector aw_tl_reg_elems_; diff --git a/perception/traffic_light_visualization/include/traffic_light_roi_visualizer/nodelet.hpp b/perception/traffic_light_visualization/include/traffic_light_roi_visualizer/nodelet.hpp index dfffde9018108..1c64eb2f9e82b 100644 
--- a/perception/traffic_light_visualization/include/traffic_light_roi_visualizer/nodelet.hpp +++ b/perception/traffic_light_visualization/include/traffic_light_roi_visualizer/nodelet.hpp @@ -19,15 +19,11 @@ #include #include -#include -#include #include +#include +#include -#if __has_include() -#include -#else #include -#endif #include #include #include @@ -54,75 +50,70 @@ class TrafficLightRoiVisualizerNodelet : public rclcpp::Node void imageRoiCallback( const sensor_msgs::msg::Image::ConstSharedPtr & input_image_msg, - const autoware_auto_perception_msgs::msg::TrafficLightRoiArray::ConstSharedPtr & - input_tl_roi_msg, - const autoware_auto_perception_msgs::msg::TrafficSignalArray::ConstSharedPtr & + const tier4_perception_msgs::msg::TrafficLightRoiArray::ConstSharedPtr & input_tl_roi_msg, + const tier4_perception_msgs::msg::TrafficSignalArray::ConstSharedPtr & input_traffic_signals_msg); void imageRoughRoiCallback( const sensor_msgs::msg::Image::ConstSharedPtr & input_image_msg, - const autoware_auto_perception_msgs::msg::TrafficLightRoiArray::ConstSharedPtr & - input_tl_roi_msg, - const autoware_auto_perception_msgs::msg::TrafficLightRoiArray::ConstSharedPtr & - input_tl_rough_roi_msg, - const autoware_auto_perception_msgs::msg::TrafficSignalArray::ConstSharedPtr & + const tier4_perception_msgs::msg::TrafficLightRoiArray::ConstSharedPtr & input_tl_roi_msg, + const tier4_perception_msgs::msg::TrafficLightRoiArray::ConstSharedPtr & input_tl_rough_roi_msg, + const tier4_perception_msgs::msg::TrafficSignalArray::ConstSharedPtr & input_traffic_signals_msg); private: std::map state2label_{ // color - {autoware_auto_perception_msgs::msg::TrafficLight::RED, "red"}, - {autoware_auto_perception_msgs::msg::TrafficLight::AMBER, "yellow"}, - {autoware_auto_perception_msgs::msg::TrafficLight::GREEN, "green"}, - {autoware_auto_perception_msgs::msg::TrafficLight::WHITE, "white"}, + {tier4_perception_msgs::msg::TrafficLightElement::RED, "red"}, + {tier4_perception_msgs::msg::TrafficLightElement::AMBER, "yellow"}, + {tier4_perception_msgs::msg::TrafficLightElement::GREEN, "green"}, + {tier4_perception_msgs::msg::TrafficLightElement::WHITE, "white"}, // shape - {autoware_auto_perception_msgs::msg::TrafficLight::CIRCLE, "circle"}, - {autoware_auto_perception_msgs::msg::TrafficLight::LEFT_ARROW, "left"}, - {autoware_auto_perception_msgs::msg::TrafficLight::RIGHT_ARROW, "right"}, - {autoware_auto_perception_msgs::msg::TrafficLight::UP_ARROW, "straight"}, - {autoware_auto_perception_msgs::msg::TrafficLight::DOWN_ARROW, "down"}, - {autoware_auto_perception_msgs::msg::TrafficLight::DOWN_LEFT_ARROW, "down_left"}, - {autoware_auto_perception_msgs::msg::TrafficLight::DOWN_RIGHT_ARROW, "down_right"}, - {autoware_auto_perception_msgs::msg::TrafficLight::CROSS, "cross"}, + {tier4_perception_msgs::msg::TrafficLightElement::CIRCLE, "circle"}, + {tier4_perception_msgs::msg::TrafficLightElement::LEFT_ARROW, "left"}, + {tier4_perception_msgs::msg::TrafficLightElement::RIGHT_ARROW, "right"}, + {tier4_perception_msgs::msg::TrafficLightElement::UP_ARROW, "straight"}, + {tier4_perception_msgs::msg::TrafficLightElement::DOWN_ARROW, "down"}, + {tier4_perception_msgs::msg::TrafficLightElement::DOWN_LEFT_ARROW, "down_left"}, + {tier4_perception_msgs::msg::TrafficLightElement::DOWN_RIGHT_ARROW, "down_right"}, + {tier4_perception_msgs::msg::TrafficLightElement::CROSS, "cross"}, // other - {autoware_auto_perception_msgs::msg::TrafficLight::UNKNOWN, "unknown"}, + {tier4_perception_msgs::msg::TrafficLightElement::UNKNOWN, "unknown"}, 
}; bool createRect( - cv::Mat & image, const autoware_auto_perception_msgs::msg::TrafficLightRoi & tl_roi, + cv::Mat & image, const tier4_perception_msgs::msg::TrafficLightRoi & tl_roi, const cv::Scalar & color); bool createRect( - cv::Mat & image, const autoware_auto_perception_msgs::msg::TrafficLightRoi & tl_roi, + cv::Mat & image, const tier4_perception_msgs::msg::TrafficLightRoi & tl_roi, const ClassificationResult & result); bool getClassificationResult( - int id, const autoware_auto_perception_msgs::msg::TrafficSignalArray & traffic_signals, + int id, const tier4_perception_msgs::msg::TrafficSignalArray & traffic_signals, ClassificationResult & result); bool getRoiFromId( - int id, const autoware_auto_perception_msgs::msg::TrafficLightRoiArray::ConstSharedPtr & rois, - autoware_auto_perception_msgs::msg::TrafficLightRoi & correspond_roi); + int id, const tier4_perception_msgs::msg::TrafficLightRoiArray::ConstSharedPtr & rois, + tier4_perception_msgs::msg::TrafficLightRoi & correspond_roi); rclcpp::TimerBase::SharedPtr timer_; image_transport::SubscriberFilter image_sub_; - message_filters::Subscriber roi_sub_; - message_filters::Subscriber - rough_roi_sub_; - message_filters::Subscriber - traffic_signals_sub_; + message_filters::Subscriber roi_sub_; + message_filters::Subscriber rough_roi_sub_; + message_filters::Subscriber traffic_signals_sub_; image_transport::Publisher image_pub_; typedef message_filters::sync_policies::ApproximateTime< - sensor_msgs::msg::Image, autoware_auto_perception_msgs::msg::TrafficLightRoiArray, - autoware_auto_perception_msgs::msg::TrafficSignalArray> + sensor_msgs::msg::Image, tier4_perception_msgs::msg::TrafficLightRoiArray, + tier4_perception_msgs::msg::TrafficSignalArray> SyncPolicy; typedef message_filters::Synchronizer Sync; std::shared_ptr sync_; typedef message_filters::sync_policies::ApproximateTime< - sensor_msgs::msg::Image, autoware_auto_perception_msgs::msg::TrafficLightRoiArray, - autoware_auto_perception_msgs::msg::TrafficLightRoiArray, - autoware_auto_perception_msgs::msg::TrafficSignalArray> + sensor_msgs::msg::Image, tier4_perception_msgs::msg::TrafficLightRoiArray, + tier4_perception_msgs::msg::TrafficLightRoiArray, + tier4_perception_msgs::msg::TrafficSignalArray> SyncPolicyWithRoughRoi; typedef message_filters::Synchronizer SyncWithRoughRoi; std::shared_ptr sync_with_rough_roi_; diff --git a/perception/traffic_light_visualization/package.xml b/perception/traffic_light_visualization/package.xml index d90cf992b96be..276f04762cdea 100644 --- a/perception/traffic_light_visualization/package.xml +++ b/perception/traffic_light_visualization/package.xml @@ -10,7 +10,7 @@ ament_cmake_auto autoware_cmake - autoware_auto_perception_msgs + autoware_perception_msgs cv_bridge image_transport lanelet2_extension @@ -18,6 +18,7 @@ rclcpp rclcpp_components sensor_msgs + tier4_perception_msgs ament_lint_auto autoware_lint_common diff --git a/perception/traffic_light_visualization/src/traffic_light_map_visualizer/node.cpp b/perception/traffic_light_visualization/src/traffic_light_map_visualizer/node.cpp index 31f7e88a02c7f..ced36476e00ef 100644 --- a/perception/traffic_light_visualization/src/traffic_light_map_visualizer/node.cpp +++ b/perception/traffic_light_visualization/src/traffic_light_map_visualizer/node.cpp @@ -47,16 +47,6 @@ bool isAttributeValue( return false; } -bool isAttributeValue( - const lanelet::ConstLineString3d l, const std::string attr_str, const int value) -{ - lanelet::Attribute attr = l.attribute(attr_str); - if 
(std::stoi(attr.value()) == value) { - return true; - } - return false; -} - void lightAsMarker( const rclcpp::node_interfaces::NodeLoggingInterface::SharedPtr & node_logging, lanelet::ConstPoint3d p, visualization_msgs::msg::Marker * marker, const std::string ns, @@ -121,7 +111,7 @@ TrafficLightMapVisualizerNode::TrafficLightMapVisualizerNode( { light_marker_pub_ = create_publisher("~/output/traffic_light", 1); - tl_state_sub_ = create_subscription( + tl_state_sub_ = create_subscription( "~/input/tl_state", 1, std::bind(&TrafficLightMapVisualizerNode::trafficSignalsCallback, this, _1)); vector_map_sub_ = create_subscription( @@ -129,8 +119,7 @@ std::bind(&TrafficLightMapVisualizerNode::binMapCallback, this, _1)); } void TrafficLightMapVisualizerNode::trafficSignalsCallback( - const autoware_auto_perception_msgs::msg::TrafficSignalArray::ConstSharedPtr - input_traffic_signals) + const autoware_perception_msgs::msg::TrafficSignalArray::ConstSharedPtr input_traffic_signals) { visualization_msgs::msg::MarkerArray output_msg; const auto current_time = now(); @@ -167,28 +156,29 @@ void TrafficLightMapVisualizerNode::trafficSignalsCallback( continue; } for (const auto & input_traffic_signal : input_traffic_signals->signals) { - if (isAttributeValue(ls, "traffic_light_id", input_traffic_signal.map_primitive_id)) { + if ((*tli)->id() == input_traffic_signal.traffic_signal_id) { for (auto pt : ls) { if (!pt.hasAttribute("color")) { continue; } - for (const auto & light : input_traffic_signal.lights) { + for (const auto & elem : input_traffic_signal.elements) { visualization_msgs::msg::Marker marker; if ( isAttributeValue(pt, "color", "red") && - light.color == autoware_auto_perception_msgs::msg::TrafficLight::RED) { + elem.color == autoware_perception_msgs::msg::TrafficLightElement::RED) { lightAsMarker( get_node_logging_interface(), pt, &marker, "traffic_light", current_time); } else if ( // NOLINT isAttributeValue(pt, "color", "green") && - light.color == autoware_auto_perception_msgs::msg::TrafficLight::GREEN) { + elem.color == autoware_perception_msgs::msg::TrafficLightElement::GREEN) { lightAsMarker( get_node_logging_interface(), pt, &marker, "traffic_light", current_time); } else if ( // NOLINT isAttributeValue(pt, "color", "yellow") && - light.color == autoware_auto_perception_msgs::msg::TrafficLight::AMBER) { + elem.color == autoware_perception_msgs::msg::TrafficLightElement::AMBER) { lightAsMarker( get_node_logging_interface(), pt, &marker, "traffic_light", current_time); } else { diff --git a/perception/traffic_light_visualization/src/traffic_light_roi_visualizer/nodelet.cpp b/perception/traffic_light_visualization/src/traffic_light_roi_visualizer/nodelet.cpp index 5ebf4f9a61c50..a3b1a2b16a743 100644 --- a/perception/traffic_light_visualization/src/traffic_light_roi_visualizer/nodelet.cpp +++ b/perception/traffic_light_visualization/src/traffic_light_roi_visualizer/nodelet.cpp @@ -72,7 +72,7 @@ void TrafficLightRoiVisualizerNodelet::connectCb() } bool TrafficLightRoiVisualizerNodelet::createRect( - cv::Mat & image, const autoware_auto_perception_msgs::msg::TrafficLightRoi & tl_roi, + cv::Mat & image, const tier4_perception_msgs::msg::TrafficLightRoi & tl_roi, const cv::Scalar & color) { cv::rectangle( @@ -80,13 +80,14 @@ bool TrafficLightRoiVisualizerNodelet::createRect( cv::Point(tl_roi.roi.x_offset + tl_roi.roi.width,
tl_roi.roi.y_offset + tl_roi.roi.height), color, 3); cv::putText( - image, std::to_string(tl_roi.id), cv::Point(tl_roi.roi.x_offset, tl_roi.roi.y_offset), - cv::FONT_HERSHEY_COMPLEX, 1.0, color, 1, CV_AA); + image, std::to_string(tl_roi.traffic_light_id), + cv::Point(tl_roi.roi.x_offset, tl_roi.roi.y_offset), cv::FONT_HERSHEY_COMPLEX, 1.0, color, 1, + CV_AA); return true; } bool TrafficLightRoiVisualizerNodelet::createRect( - cv::Mat & image, const autoware_auto_perception_msgs::msg::TrafficLightRoi & tl_roi, + cv::Mat & image, const tier4_perception_msgs::msg::TrafficLightRoi & tl_roi, const ClassificationResult & result) { cv::Scalar color; @@ -120,8 +121,8 @@ bool TrafficLightRoiVisualizerNodelet::createRect( void TrafficLightRoiVisualizerNodelet::imageRoiCallback( const sensor_msgs::msg::Image::ConstSharedPtr & input_image_msg, - const autoware_auto_perception_msgs::msg::TrafficLightRoiArray::ConstSharedPtr & input_tl_roi_msg, - [[maybe_unused]] const autoware_auto_perception_msgs::msg::TrafficSignalArray::ConstSharedPtr & + const tier4_perception_msgs::msg::TrafficLightRoiArray::ConstSharedPtr & input_tl_roi_msg, + [[maybe_unused]] const tier4_perception_msgs::msg::TrafficSignalArray::ConstSharedPtr & input_traffic_signals_msg) { cv_bridge::CvImagePtr cv_ptr; @@ -138,21 +139,21 @@ void TrafficLightRoiVisualizerNodelet::imageRoiCallback( } bool TrafficLightRoiVisualizerNodelet::getClassificationResult( - int id, const autoware_auto_perception_msgs::msg::TrafficSignalArray & traffic_signals, + int id, const tier4_perception_msgs::msg::TrafficSignalArray & traffic_signals, ClassificationResult & result) { bool has_correspond_traffic_signal = false; for (const auto & traffic_signal : traffic_signals.signals) { - if (id != traffic_signal.map_primitive_id) { + if (id != traffic_signal.traffic_light_id) { continue; } has_correspond_traffic_signal = true; - for (size_t i = 0; i < traffic_signal.lights.size(); i++) { - auto light = traffic_signal.lights.at(i); + for (size_t i = 0; i < traffic_signal.elements.size(); i++) { + auto element = traffic_signal.elements.at(i); // all lamp confidence are the same - result.prob = light.confidence; - result.label += (state2label_[light.color] + "-" + state2label_[light.shape]); - if (i < traffic_signal.lights.size() - 1) { + result.prob = element.confidence; + result.label += (state2label_[element.color] + "-" + state2label_[element.shape]); + if (i < traffic_signal.elements.size() - 1) { result.label += ","; } } @@ -161,11 +162,11 @@ bool TrafficLightRoiVisualizerNodelet::getClassificationResult( } bool TrafficLightRoiVisualizerNodelet::getRoiFromId( - int id, const autoware_auto_perception_msgs::msg::TrafficLightRoiArray::ConstSharedPtr & rois, - autoware_auto_perception_msgs::msg::TrafficLightRoi & correspond_roi) + int id, const tier4_perception_msgs::msg::TrafficLightRoiArray::ConstSharedPtr & rois, + tier4_perception_msgs::msg::TrafficLightRoi & correspond_roi) { for (const auto roi : rois->rois) { - if (roi.id == id) { + if (roi.traffic_light_id == id) { correspond_roi = roi; return true; } @@ -175,11 +176,9 @@ bool TrafficLightRoiVisualizerNodelet::getRoiFromId( void TrafficLightRoiVisualizerNodelet::imageRoughRoiCallback( const sensor_msgs::msg::Image::ConstSharedPtr & input_image_msg, - const autoware_auto_perception_msgs::msg::TrafficLightRoiArray::ConstSharedPtr & input_tl_roi_msg, - const autoware_auto_perception_msgs::msg::TrafficLightRoiArray::ConstSharedPtr & - input_tl_rough_roi_msg, - const 
autoware_auto_perception_msgs::msg::TrafficSignalArray::ConstSharedPtr & - input_traffic_signals_msg) + const tier4_perception_msgs::msg::TrafficLightRoiArray::ConstSharedPtr & input_tl_roi_msg, + const tier4_perception_msgs::msg::TrafficLightRoiArray::ConstSharedPtr & input_tl_rough_roi_msg, + const tier4_perception_msgs::msg::TrafficSignalArray::ConstSharedPtr & input_traffic_signals_msg) { cv_bridge::CvImagePtr cv_ptr; try { @@ -190,9 +189,10 @@ void TrafficLightRoiVisualizerNodelet::imageRoughRoiCallback( ClassificationResult result; bool has_correspond_traffic_signal = - getClassificationResult(tl_rough_roi.id, *input_traffic_signals_msg, result); - autoware_auto_perception_msgs::msg::TrafficLightRoi tl_roi; - bool has_correspond_roi = getRoiFromId(tl_rough_roi.id, input_tl_roi_msg, tl_roi); + getClassificationResult(tl_rough_roi.traffic_light_id, *input_traffic_signals_msg, result); + tier4_perception_msgs::msg::TrafficLightRoi tl_roi; + bool has_correspond_roi = + getRoiFromId(tl_rough_roi.traffic_light_id, input_tl_roi_msg, tl_roi); if (has_correspond_roi && has_correspond_traffic_signal) { // has fine detection and classification results diff --git a/planning/behavior_path_planner/include/behavior_path_planner/utils/lane_change/lane_change_module_data.hpp b/planning/behavior_path_planner/include/behavior_path_planner/utils/lane_change/lane_change_module_data.hpp index 67a23f23d5827..006040d8eaa4a 100644 --- a/planning/behavior_path_planner/include/behavior_path_planner/utils/lane_change/lane_change_module_data.hpp +++ b/planning/behavior_path_planner/include/behavior_path_planner/utils/lane_change/lane_change_module_data.hpp @@ -94,6 +94,11 @@ struct LaneChangePhaseInfo double lane_changing{0.0}; [[nodiscard]] double sum() const { return prepare + lane_changing; } + + LaneChangePhaseInfo(const double _prepare, const double _lane_changing) + : prepare(_prepare), lane_changing(_lane_changing) + { + } }; struct LaneChangeTargetObjectIndices diff --git a/planning/behavior_path_planner/include/behavior_path_planner/utils/lane_change/lane_change_path.hpp b/planning/behavior_path_planner/include/behavior_path_planner/utils/lane_change/lane_change_path.hpp index 41c2b62ba31e5..7e78ab9045c64 100644 --- a/planning/behavior_path_planner/include/behavior_path_planner/utils/lane_change/lane_change_path.hpp +++ b/planning/behavior_path_planner/include/behavior_path_planner/utils/lane_change/lane_change_path.hpp @@ -36,9 +36,12 @@ struct LaneChangePath Pose lane_changing_end{}; ShiftedPath shifted_path{}; ShiftLine shift_line{}; - double acceleration{0.0}; - LaneChangePhaseInfo length{}; - LaneChangePhaseInfo duration{}; + + // longitudinal acceleration applied on the prepare and lane-changing phase + LaneChangePhaseInfo longitudinal_acceleration{0.0, 0.0}; + + LaneChangePhaseInfo length{0.0, 0.0}; + LaneChangePhaseInfo duration{0.0, 0.0}; PathWithLaneId prev_path{}; }; using LaneChangePaths = std::vector; diff --git a/planning/behavior_path_planner/include/behavior_path_planner/utils/lane_change/utils.hpp b/planning/behavior_path_planner/include/behavior_path_planner/utils/lane_change/utils.hpp index 9570ffec56576..a724410192b59 100644 --- a/planning/behavior_path_planner/include/behavior_path_planner/utils/lane_change/utils.hpp +++ b/planning/behavior_path_planner/include/behavior_path_planner/utils/lane_change/utils.hpp @@ -90,10 +90,10 @@ std::optional constructCandidatePath( const PathWithLaneId & prepare_segment, const PathWithLaneId & target_segment, const PathWithLaneId & 
diff --git a/planning/behavior_path_planner/include/behavior_path_planner/utils/lane_change/utils.hpp b/planning/behavior_path_planner/include/behavior_path_planner/utils/lane_change/utils.hpp
index 9570ffec56576..a724410192b59 100644
--- a/planning/behavior_path_planner/include/behavior_path_planner/utils/lane_change/utils.hpp
+++ b/planning/behavior_path_planner/include/behavior_path_planner/utils/lane_change/utils.hpp
@@ -90,10 +90,10 @@ std::optional<LaneChangePath> constructCandidatePath(
   const PathWithLaneId & prepare_segment, const PathWithLaneId & target_segment,
   const PathWithLaneId & target_lane_reference_path, const ShiftLine & shift_line,
   const lanelet::ConstLanelets & original_lanelets, const lanelet::ConstLanelets & target_lanelets,
-  const std::vector<std::vector<int64_t>> & sorted_lane_ids, const double longitudinal_acceleration,
-  const double lateral_acceleration, const LaneChangePhaseInfo lane_change_length,
-  const LaneChangePhaseInfo lane_change_velocity, const double terminal_lane_changing_velocity,
-  const LaneChangePhaseInfo lane_change_time);
+  const std::vector<std::vector<int64_t>> & sorted_lane_ids,
+  const LaneChangePhaseInfo longitudinal_acceleration, const double lateral_acceleration,
+  const LaneChangePhaseInfo lane_change_length, const LaneChangePhaseInfo lane_change_velocity,
+  const double terminal_lane_changing_velocity, const LaneChangePhaseInfo lane_change_time);
 
 PathSafetyStatus isLaneChangePathSafe(
   const LaneChangePath & lane_change_path, const PredictedObjects::ConstSharedPtr dynamic_objects,
@@ -101,8 +101,8 @@ PathSafetyStatus isLaneChangePathSafe(
   const Twist & current_twist, const BehaviorPathPlannerParameters & common_parameter,
   const behavior_path_planner::LaneChangeParameters & lane_change_parameter,
   const double front_decel, const double rear_decel,
-  std::unordered_map<std::string, CollisionCheckDebug> & debug_data, const double prepare_acc = 0.0,
-  const double lane_changing_acc = 0.0);
+  std::unordered_map<std::string, CollisionCheckDebug> & debug_data, const double prepare_acc,
+  const double lane_changing_acc);
 
 bool isObjectIndexIncluded(
   const size_t & index, const std::vector<size_t> & dynamic_objects_indices);
@@ -150,12 +150,6 @@ lanelet::ConstLanelets getBackwardLanelets(
   const RouteHandler & route_handler, const lanelet::ConstLanelets & target_lanes,
   const Pose & current_pose, const double backward_length);
 
-LaneChangeTargetObjectIndices filterObjectIndices(
-  const LaneChangePaths & lane_change_paths, const PredictedObjects & objects,
-  const lanelet::ConstLanelets & target_backward_lanes, const Pose & current_pose,
-  const double forward_path_length, const LaneChangeParameters & lane_change_parameter,
-  const double filter_width);
-
 bool isTargetObjectType(const PredictedObject & object, const LaneChangeParameters & parameter);
 
 double calcLateralBufferForFiltering(const double vehicle_width, const double lateral_buffer = 0.0);
@@ -186,5 +180,14 @@ boost::optional<size_t> getLeadingStaticObjectIdx(
   const RouteHandler & route_handler, const LaneChangePath & lane_change_path,
   const PredictedObjects & objects, const std::vector<size_t> & obj_indices,
   const double object_check_min_road_shoulder_width, const double object_shiftable_ratio_threshold);
+
+std::optional<lanelet::BasicPolygon2d> createPolygon(
+  const lanelet::ConstLanelets & lanes, const double start_dist, const double end_dist);
+
+LaneChangeTargetObjectIndices filterObject(
+  const PredictedObjects & objects, const lanelet::ConstLanelets & current_lanes,
+  const lanelet::ConstLanelets & target_lanes, const lanelet::ConstLanelets & target_backward_lanes,
+  const Pose & current_pose, const RouteHandler & route_handler,
+  const LaneChangeParameters & lane_change_parameter);
 }  // namespace behavior_path_planner::utils::lane_change
 #endif  // BEHAVIOR_PATH_PLANNER__UTILS__LANE_CHANGE__UTILS_HPP_
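The new `createPolygon` returns an empty `std::optional` for an empty lane sequence, so every intersection test has to be guarded; a usage fragment under that assumption (`target_lanes` and `obj_polygon` are stand-ins, and this is not compilable on its own):

```cpp
// Sketch: guarded intersection test against an optional lane polygon.
const auto target_polygon = behavior_path_planner::utils::lane_change::createPolygon(
  target_lanes, 0.0, std::numeric_limits<double>::max());
if (target_polygon && boost::geometry::intersects(target_polygon.value(), obj_polygon)) {
  // the object overlaps the target lanes
}
```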
diff --git a/planning/behavior_path_planner/src/scene_module/lane_change/interface.cpp b/planning/behavior_path_planner/src/scene_module/lane_change/interface.cpp
index ab2c87c7e1ff9..713188afb7f0b 100644
--- a/planning/behavior_path_planner/src/scene_module/lane_change/interface.cpp
+++ b/planning/behavior_path_planner/src/scene_module/lane_change/interface.cpp
@@ -402,13 +402,13 @@ void LaneChangeInterface::acceptVisitor(const std::shared_ptr<SceneModuleVisitor> & visitor)
-  const auto & target_lanes = module_type_->getLaneChangeStatus().lane_change_lanes;
+  const auto & current_lanes = module_type_->getLaneChangeStatus().current_lanes;
   const auto & is_valid = module_type_->getLaneChangeStatus().is_valid_path;
   const auto & lane_change_path = module_type_->getLaneChangeStatus().lane_change_path;
   const auto & lane_change_param = module_type_->getLaneChangeParam();
 
   if (
-    module_type_->getModuleType() != LaneChangeModuleType::NORMAL || target_lanes.empty() ||
+    module_type_->getModuleType() != LaneChangeModuleType::NORMAL || current_lanes.empty() ||
     !is_valid) {
     return original_turn_signal_info;
   }
@@ -436,13 +436,15 @@
   const auto & route_handler = module_type_->getRouteHandler();
   const auto & common_parameter = module_type_->getCommonParam();
   const auto shift_intervals =
-    route_handler->getLateralIntervalsToPreferredLane(target_lanes.back());
+    route_handler->getLateralIntervalsToPreferredLane(current_lanes.back());
   const double next_lane_change_buffer =
     utils::calcMinimumLaneChangeLength(common_parameter, shift_intervals);
 
   const double & nearest_dist_threshold = common_parameter.ego_nearest_dist_threshold;
   const double & nearest_yaw_threshold = common_parameter.ego_nearest_yaw_threshold;
+  const double & base_to_front = common_parameter.base_link2front;
 
-  const double buffer = next_lane_change_buffer + min_length_for_turn_signal_activation;
+  const double buffer =
+    next_lane_change_buffer + min_length_for_turn_signal_activation + base_to_front;
   const double path_length = motion_utils::calcArcLength(path.points);
   const auto & front_point = path.points.front().point.pose.position;
   const size_t & current_nearest_seg_idx =
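The widened activation window can be sanity-checked with plain numbers (all values hypothetical): the blinker now turns on `base_link2front` meters earlier, i.e. when the vehicle front, rather than the base link, reaches the previous threshold.

```cpp
#include <iostream>

// Worked example of the new turn-signal buffer, hypothetical values.
int main()
{
  const double next_lane_change_buffer = 15.0;                // [m]
  const double min_length_for_turn_signal_activation = 10.0;  // [m]
  const double base_to_front = 4.0;                           // [m] base_link2front
  const double buffer =
    next_lane_change_buffer + min_length_for_turn_signal_activation + base_to_front;
  std::cout << buffer << "\n";  // 29: the signal activates 4 m earlier than before
}
```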
diff --git a/planning/behavior_path_planner/src/scene_module/lane_change/normal.cpp b/planning/behavior_path_planner/src/scene_module/lane_change/normal.cpp
index c004a0b3aec26..41561493e08d6 100644
--- a/planning/behavior_path_planner/src/scene_module/lane_change/normal.cpp
+++ b/planning/behavior_path_planner/src/scene_module/lane_change/normal.cpp
@@ -284,7 +284,7 @@ TurnSignalInfo NormalLaneChange::updateOutputTurnSignal()
 
 lanelet::ConstLanelets NormalLaneChange::getCurrentLanes() const
 {
-  return utils::getCurrentLanesFromPath(prev_module_reference_path_, planner_data_);
+  return utils::getCurrentLanesFromPath(prev_module_path_, planner_data_);
 }
 
 lanelet::ConstLanelets NormalLaneChange::getLaneChangeLanes(
@@ -506,6 +506,7 @@ bool NormalLaneChange::getLaneChangePaths(
   const auto & route_handler = *getRouteHandler();
   const auto & dynamic_objects = planner_data_->dynamic_object;
   const auto & common_parameter = planner_data_->parameters;
+  const auto & current_pose = getEgoPose();
 
   const auto backward_path_length = common_parameter.backward_path_length;
   const auto forward_path_length = common_parameter.forward_path_length;
@@ -559,8 +560,6 @@ bool NormalLaneChange::getLaneChangePaths(
   const auto sorted_lane_ids = utils::lane_change::getSortedLaneIds(
     route_handler, original_lanelets, target_lanelets, arc_position_from_target.distance);
-  const auto lateral_buffer =
-    utils::lane_change::calcLateralBufferForFiltering(common_parameter.vehicle_width, 0.5);
 
   const auto target_preferred_lanelets = utils::lane_change::getTargetPreferredLanes(
     route_handler, original_lanelets, target_lanelets, direction, type_);
@@ -569,7 +568,13 @@ bool NormalLaneChange::getLaneChangePaths(
   const auto target_preferred_lane_poly_2d =
     lanelet::utils::to2D(target_preferred_lane_poly).basicPolygon();
 
-  LaneChangeTargetObjectIndices dynamic_object_indices;
+  const auto backward_length = lane_change_parameters_->backward_lane_length;
+  const auto backward_target_lanes_for_object_filtering = utils::lane_change::getBackwardLanelets(
+    route_handler, target_lanelets, getEgoPose(), backward_length);
+  const auto dynamic_object_indices = utils::lane_change::filterObject(
+    *dynamic_objects, original_lanelets, target_lanelets,
+    backward_target_lanes_for_object_filtering, current_pose, route_handler,
+    *lane_change_parameters_);
 
   candidate_paths->reserve(longitudinal_acc_sampling_values.size() * lateral_acc_sampling_num);
   for (const auto & sampled_longitudinal_acc : longitudinal_acc_sampling_values) {
@@ -578,11 +583,13 @@ bool NormalLaneChange::getLaneChangePaths(
       minimum_lane_changing_velocity);
 
     // compute actual longitudinal acceleration
-    const double longitudinal_acc = (prepare_velocity - current_velocity) / prepare_duration;
+    const double longitudinal_acc_on_prepare =
+      (prepare_velocity - current_velocity) / prepare_duration;
 
     // get path on original lanes
     const double prepare_length = std::max(
-      current_velocity * prepare_duration + 0.5 * longitudinal_acc * std::pow(prepare_duration, 2),
+      current_velocity * prepare_duration +
+        0.5 * longitudinal_acc_on_prepare * std::pow(prepare_duration, 2),
       minimum_prepare_length);
 
     if (prepare_length < target_length) {
@@ -629,14 +636,15 @@ bool NormalLaneChange::getLaneChangePaths(
          lateral_acc += lateral_acc_resolution) {
       const auto lane_changing_time = PathShifter::calcShiftTimeFromJerk(
         shift_length, common_parameter.lane_changing_lateral_jerk, lateral_acc);
-      const double lane_changing_lon_acc = utils::lane_change::calcLaneChangingAcceleration(
-        initial_lane_changing_velocity, max_path_velocity, lane_changing_time,
-        sampled_longitudinal_acc);
+      const double longitudinal_acc_on_lane_changing =
+        utils::lane_change::calcLaneChangingAcceleration(
+          initial_lane_changing_velocity, max_path_velocity, lane_changing_time,
+          sampled_longitudinal_acc);
       const auto lane_changing_length = initial_lane_changing_velocity * lane_changing_time +
-        0.5 * lane_changing_lon_acc * lane_changing_time * lane_changing_time;
+        0.5 * longitudinal_acc_on_lane_changing * lane_changing_time * lane_changing_time;
       const auto terminal_lane_changing_velocity =
-        initial_lane_changing_velocity + lane_changing_lon_acc * lane_changing_time;
+        initial_lane_changing_velocity + longitudinal_acc_on_lane_changing * lane_changing_time;
       utils::lane_change::setPrepareVelocity(
         prepare_segment, current_velocity, terminal_lane_changing_velocity);
@@ -701,7 +709,8 @@ bool NormalLaneChange::getLaneChangePaths(
       const auto candidate_path = utils::lane_change::constructCandidatePath(
         prepare_segment, target_segment, target_lane_reference_path, shift_line, original_lanelets,
-        target_lanelets, sorted_lane_ids, lane_changing_lon_acc, lateral_acc, lc_length,
+        target_lanelets, sorted_lane_ids,
+        {longitudinal_acc_on_prepare, longitudinal_acc_on_lane_changing}, lateral_acc, lc_length,
         lc_velocity, terminal_lane_changing_velocity, lc_time);
 
       if (!candidate_path) {
@@ -719,16 +728,6 @@
       }
 
       if (candidate_paths->empty()) {
-        // only compute dynamic object indices once
-        const auto backward_length = lane_change_parameters_->backward_lane_length;
-        const auto backward_target_lanes_for_object_filtering =
-          utils::lane_change::getBackwardLanelets(
-            route_handler, target_lanelets, getEgoPose(), backward_length);
-        dynamic_object_indices = utils::lane_change::filterObjectIndices(
-          {*candidate_path}, *dynamic_objects, backward_target_lanes_for_object_filtering,
-          getEgoPose(), common_parameter.forward_path_length, *lane_change_parameters_,
-          lateral_buffer);
-
         const double object_check_min_road_shoulder_width =
           lane_change_parameters_->object_check_min_road_shoulder_width;
         const double object_shiftable_ratio_threshold =
@@ -752,8 +751,8 @@ bool NormalLaneChange::getLaneChangePaths(
       const auto [is_safe, is_object_coming_from_rear] = utils::lane_change::isLaneChangePathSafe(
         *candidate_path, dynamic_objects, dynamic_object_indices, getEgoPose(), getEgoTwist(),
         common_parameter, *lane_change_parameters_, common_parameter.expected_front_deceleration,
-        common_parameter.expected_rear_deceleration, object_debug_, longitudinal_acc,
-        lane_changing_lon_acc);
+        common_parameter.expected_rear_deceleration, object_debug_, longitudinal_acc_on_prepare,
+        longitudinal_acc_on_lane_changing);
 
       if (is_safe) {
         return true;
@@ -771,26 +770,26 @@ PathSafetyStatus NormalLaneChange::isApprovedPathSafe() const
   const auto & dynamic_objects = planner_data_->dynamic_object;
   const auto & common_parameters = getCommonParam();
   const auto & lane_change_parameters = *lane_change_parameters_;
-  const auto & route_handler = getRouteHandler();
+  const auto & route_handler = *getRouteHandler();
   const auto & path = status_.lane_change_path;
+  const auto & current_lanes = status_.current_lanes;
+  const auto & target_lanes = status_.lane_change_lanes;
 
   // get lanes used for detection
   const auto backward_target_lanes_for_object_filtering = utils::lane_change::getBackwardLanelets(
-    *route_handler, path.target_lanelets, current_pose,
-    lane_change_parameters.backward_lane_length);
+    route_handler, path.target_lanelets, current_pose, lane_change_parameters.backward_lane_length);
 
-  CollisionCheckDebugMap debug_data;
-  const auto lateral_buffer =
-    utils::lane_change::calcLateralBufferForFiltering(common_parameters.vehicle_width);
-  const auto dynamic_object_indices = utils::lane_change::filterObjectIndices(
-    {path}, *dynamic_objects, backward_target_lanes_for_object_filtering, current_pose,
-    common_parameters.forward_path_length, lane_change_parameters, lateral_buffer);
+  const auto dynamic_object_indices = utils::lane_change::filterObject(
+    *dynamic_objects, current_lanes, target_lanes, backward_target_lanes_for_object_filtering,
+    current_pose, route_handler, *lane_change_parameters_);
 
+  CollisionCheckDebugMap debug_data;
   const auto safety_status = utils::lane_change::isLaneChangePathSafe(
     path, dynamic_objects, dynamic_object_indices, current_pose, current_twist, common_parameters,
     *lane_change_parameters_, common_parameters.expected_front_deceleration_for_abort,
     common_parameters.expected_rear_deceleration_for_abort, debug_data,
-    status_.lane_change_path.acceleration);
+    status_.lane_change_path.longitudinal_acceleration.prepare,
+    status_.lane_change_path.longitudinal_acceleration.lane_changing);
 
   return safety_status;
 }
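The renames make explicit that two different accelerations are sampled: one for the prepare phase and one for the lane-changing phase. The prepare phase itself is plain constant-acceleration kinematics; a worked example with hypothetical values:

```cpp
#include <algorithm>
#include <cmath>
#include <iostream>

// Worked example of the prepare-phase computation, hypothetical values.
int main()
{
  const double current_velocity = 5.0;         // [m/s]
  const double prepare_velocity = 7.0;         // [m/s]
  const double prepare_duration = 4.0;         // [s]
  const double minimum_prepare_length = 10.0;  // [m]

  const double longitudinal_acc_on_prepare =
    (prepare_velocity - current_velocity) / prepare_duration;  // 0.5 m/s^2
  const double prepare_length = std::max(
    current_velocity * prepare_duration +
      0.5 * longitudinal_acc_on_prepare * std::pow(prepare_duration, 2),
    minimum_prepare_length);
  std::cout << prepare_length << "\n";  // 24 m
}
```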
diff --git a/planning/behavior_path_planner/src/scene_module/start_planner/start_planner_module.cpp b/planning/behavior_path_planner/src/scene_module/start_planner/start_planner_module.cpp
index 533076be8012a..e9e28b8e62f9b 100644
--- a/planning/behavior_path_planner/src/scene_module/start_planner/start_planner_module.cpp
+++ b/planning/behavior_path_planner/src/scene_module/start_planner/start_planner_module.cpp
@@ -112,7 +112,12 @@ bool StartPlannerModule::isExecutionRequested() const
     tier4_autoware_utils::pose2transform(planner_data_->self_odometry->pose.pose));
 
   // Check if ego is not out of lanes
-  const auto current_lanes = utils::getExtendedCurrentLanes(planner_data_);
+  // const auto current_lanes = utils::getExtendedCurrentLanes(planner_data_);
+  const double backward_path_length =
+    planner_data_->parameters.backward_path_length + parameters_->max_back_distance;
+  const auto current_lanes =
+    utils::getCurrentLanes(planner_data_, backward_path_length, std::numeric_limits<double>::max());
+
   const auto pull_out_lanes = start_planner_utils::getPullOutLanes(planner_data_);
   auto lanes = current_lanes;
   lanes.insert(lanes.end(), pull_out_lanes.begin(), pull_out_lanes.end());
@@ -305,7 +310,12 @@ BehaviorModuleOutput StartPlannerModule::planWaitingApproval()
     return output;
   }
 
-  const auto current_lanes = utils::getExtendedCurrentLanes(planner_data_);
+  // const auto current_lanes = utils::getExtendedCurrentLanes(planner_data_);
+  const double backward_path_length =
+    planner_data_->parameters.backward_path_length + parameters_->max_back_distance;
+  const auto current_lanes =
+    utils::getCurrentLanes(planner_data_, backward_path_length, std::numeric_limits<double>::max());
+
   const auto pull_out_lanes = start_planner_utils::getPullOutLanes(planner_data_);
   auto stop_path = status_.back_finished ? getCurrentPath() : status_.backward_path;
   const auto drivable_lanes =
@@ -542,7 +552,10 @@ void StartPlannerModule::updatePullOutStatus()
   const auto & current_pose = planner_data_->self_odometry->pose.pose;
   const auto & goal_pose = planner_data_->route_handler->getGoalPose();
 
-  status_.current_lanes = utils::getExtendedCurrentLanes(planner_data_);
+  const double backward_path_length =
+    planner_data_->parameters.backward_path_length + parameters_->max_back_distance;
+  status_.current_lanes =
+    utils::getCurrentLanes(planner_data_, backward_path_length, std::numeric_limits<double>::max());
   status_.pull_out_lanes = start_planner_utils::getPullOutLanes(planner_data_);
 
   // combine road and shoulder lanes
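The same replacement of `getExtendedCurrentLanes()` is repeated at three call sites here and again in the pull-out planners below; a hypothetical helper that captures the shared pattern (a refactoring sketch, not part of the patch, and not compilable outside the package):

```cpp
#include <limits>
#include <memory>

// Hypothetical helper; names follow the hunks above.
lanelet::ConstLanelets getCurrentLanesWithBackwardMargin(
  const std::shared_ptr<const PlannerData> & planner_data, const double max_back_distance)
{
  const double backward_path_length =
    planner_data->parameters.backward_path_length + max_back_distance;
  return utils::getCurrentLanes(
    planner_data, backward_path_length, std::numeric_limits<double>::max());
}
```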
diff --git a/planning/behavior_path_planner/src/utils/lane_change/utils.cpp b/planning/behavior_path_planner/src/utils/lane_change/utils.cpp
index 04224b380414e..6d1b4486856e1 100644
--- a/planning/behavior_path_planner/src/utils/lane_change/utils.cpp
+++ b/planning/behavior_path_planner/src/utils/lane_change/utils.cpp
@@ -210,15 +210,15 @@ std::optional<LaneChangePath> constructCandidatePath(
   const PathWithLaneId & prepare_segment, const PathWithLaneId & target_segment,
   const PathWithLaneId & target_lane_reference_path, const ShiftLine & shift_line,
   const lanelet::ConstLanelets & original_lanelets, const lanelet::ConstLanelets & target_lanelets,
-  const std::vector<std::vector<int64_t>> & sorted_lane_ids, const double longitudinal_acceleration,
-  const double lateral_acceleration, const LaneChangePhaseInfo lane_change_length,
-  const LaneChangePhaseInfo lane_change_velocity, const double terminal_lane_changing_velocity,
-  const LaneChangePhaseInfo lane_change_time)
+  const std::vector<std::vector<int64_t>> & sorted_lane_ids,
+  const LaneChangePhaseInfo longitudinal_acceleration, const double lateral_acceleration,
+  const LaneChangePhaseInfo lane_change_length, const LaneChangePhaseInfo lane_change_velocity,
+  const double terminal_lane_changing_velocity, const LaneChangePhaseInfo lane_change_time)
 {
   PathShifter path_shifter;
   path_shifter.setPath(target_lane_reference_path);
   path_shifter.addShiftLine(shift_line);
-  path_shifter.setLongitudinalAcceleration(longitudinal_acceleration);
+  path_shifter.setLongitudinalAcceleration(longitudinal_acceleration.lane_changing);
   ShiftedPath shifted_path;
 
   // offset front side
@@ -238,7 +238,7 @@ std::optional<LaneChangePath> constructCandidatePath(
   const auto & lane_changing_length = lane_change_length.lane_changing;
 
   LaneChangePath candidate_path;
-  candidate_path.acceleration = longitudinal_acceleration;
+  candidate_path.longitudinal_acceleration = longitudinal_acceleration;
   candidate_path.length.prepare = prepare_length;
   candidate_path.length.lane_changing = lane_changing_length;
   candidate_path.duration.prepare = lane_change_time.prepare;
@@ -822,86 +822,6 @@ lanelet::ConstLanelets getBackwardLanelets(
   return backward_lanes;
 }
 
-LaneChangeTargetObjectIndices filterObjectIndices(
-  const LaneChangePaths & lane_change_paths, const PredictedObjects & objects,
-  const lanelet::ConstLanelets & target_backward_lanes, const Pose & current_pose,
-  const double forward_path_length, const LaneChangeParameters & lane_change_parameter,
-  const double filter_width)
-{
-  // Reserve maximum amount possible
-
-  std::vector<size_t> current_lane_obj_indices{};
-  std::vector<size_t> target_lane_obj_indices{};
-  std::vector<size_t> others_obj_indices{};
-  current_lane_obj_indices.reserve(objects.objects.size());
-  target_lane_obj_indices.reserve(objects.objects.size());
-  others_obj_indices.reserve(objects.objects.size());
-
-  const auto & longest_path = lane_change_paths.front();
-  const auto & current_lanes = longest_path.reference_lanelets;
-  const auto & target_lanes = longest_path.target_lanelets;
-  const auto & ego_path = longest_path.path;
-
-  const auto get_basic_polygon =
-    [](const lanelet::ConstLanelets & lanes, const double start_dist, const double end_dist) {
-      const auto polygon_3d = lanelet::utils::getPolygonFromArcLength(lanes, start_dist, end_dist);
-      return lanelet::utils::to2D(polygon_3d).basicPolygon();
-    };
-  const auto arc = lanelet::utils::getArcCoordinates(current_lanes, current_pose);
-  const auto current_polygon =
-    get_basic_polygon(current_lanes, arc.length, arc.length + forward_path_length);
-  const auto target_polygon =
-    get_basic_polygon(target_lanes, 0.0, std::numeric_limits<double>::max());
-  LineString2d ego_path_linestring;
-  ego_path_linestring.reserve(ego_path.points.size());
-  for (const auto & pt : ego_path.points) {
-    const auto & position = pt.point.pose.position;
-    boost::geometry::append(ego_path_linestring, Point2d(position.x, position.y));
-  }
-
-  for (size_t i = 0; i < objects.objects.size(); ++i) {
-    const auto & obj = objects.objects.at(i);
-
-    if (!isTargetObjectType(obj, lane_change_parameter)) {
-      continue;
-    }
-
-    const auto obj_polygon = tier4_autoware_utils::toPolygon2d(obj);
-    if (boost::geometry::intersects(current_polygon, obj_polygon)) {
-      const double distance = boost::geometry::distance(obj_polygon, ego_path_linestring);
-
-      if (distance < filter_width) {
-        current_lane_obj_indices.push_back(i);
-        continue;
-      }
-    }
-
-    const bool is_intersect_with_target = boost::geometry::intersects(target_polygon, obj_polygon);
-    if (is_intersect_with_target) {
-      target_lane_obj_indices.push_back(i);
-      continue;
-    }
-
-    const bool is_intersect_with_backward = std::invoke([&]() {
-      for (const auto & ll : target_backward_lanes) {
-        const bool is_intersect_with_backward =
-          boost::geometry::intersects(ll.polygon2d().basicPolygon(), obj_polygon);
-        if (is_intersect_with_backward) {
-          target_lane_obj_indices.push_back(i);
-          return true;
-        }
-      }
-      return false;
-    });
-
-    if (!is_intersect_with_backward) {
-      others_obj_indices.push_back(i);
-    }
-  }
-
-  return {current_lane_obj_indices, target_lane_obj_indices, others_obj_indices};
-}
-
 bool isTargetObjectType(const PredictedObject & object, const LaneChangeParameters & parameter)
 {
   using autoware_auto_perception_msgs::msg::ObjectClassification;
@@ -1163,4 +1083,87 @@ boost::optional<size_t> getLeadingStaticObjectIdx(
 
   return leading_obj_idx;
 }
+
+std::optional<lanelet::BasicPolygon2d> createPolygon(
+  const lanelet::ConstLanelets & lanes, const double start_dist, const double end_dist)
+{
+  if (lanes.empty()) {
+    return {};
+  }
+  const auto polygon_3d = lanelet::utils::getPolygonFromArcLength(lanes, start_dist, end_dist);
+  return lanelet::utils::to2D(polygon_3d).basicPolygon();
+}
+
+LaneChangeTargetObjectIndices filterObject(
+  const PredictedObjects & objects, const lanelet::ConstLanelets & current_lanes,
+  const lanelet::ConstLanelets & target_lanes, const lanelet::ConstLanelets & target_backward_lanes,
+  const Pose & current_pose, const RouteHandler & route_handler,
+  const LaneChangeParameters & lane_change_parameter)
+{
+  // Guard
+  if (objects.objects.empty()) {
+    return {};
+  }
+
+  // Get path
+  const auto path =
+    route_handler.getCenterLinePath(current_lanes, 0.0, std::numeric_limits<double>::max());
+
+  const auto current_polygon =
+    createPolygon(current_lanes, 0.0, std::numeric_limits<double>::max());
+  const auto target_polygon = createPolygon(target_lanes, 0.0, std::numeric_limits<double>::max());
+  const auto target_backward_polygon =
+    createPolygon(target_backward_lanes, 0.0, std::numeric_limits<double>::max());
+
+  LaneChangeTargetObjectIndices filtered_obj_indices;
+  for (size_t i = 0; i < objects.objects.size(); ++i) {
+    const auto & object = objects.objects.at(i);
+    const auto & obj_velocity = object.kinematics.initial_twist_with_covariance.twist.linear.x;
+
+    // ignore specific object types
+    if (!isTargetObjectType(object, lane_change_parameter)) {
+      continue;
+    }
+
+    const auto obj_polygon = tier4_autoware_utils::toPolygon2d(object);
+
+    // calc distance from the current ego position
+    double max_dist_ego_to_obj = std::numeric_limits<double>::lowest();
+    for (const auto & polygon_p : obj_polygon.outer()) {
+      const auto obj_p = tier4_autoware_utils::createPoint(polygon_p.x(), polygon_p.y(), 0.0);
+      const double dist_ego_to_obj =
+        motion_utils::calcSignedArcLength(path.points, current_pose.position, obj_p);
+      max_dist_ego_to_obj = std::max(dist_ego_to_obj, max_dist_ego_to_obj);
+    }
+
+    // ignore static object that are behind the ego vehicle
+    if (obj_velocity < 1.0 && max_dist_ego_to_obj < 0.0) {
+      continue;
+    }
+
+    // check if the object intersects with target lanes
+    if (target_polygon && boost::geometry::intersects(target_polygon.value(), obj_polygon)) {
+      filtered_obj_indices.target_lane.push_back(i);
+      continue;
+    }
+
+    // check if the object intersects with target backward lanes
+    if (
+      target_backward_polygon &&
+      boost::geometry::intersects(target_backward_polygon.value(), obj_polygon)) {
+      filtered_obj_indices.target_lane.push_back(i);
+      continue;
+    }
+
+    // check if the object intersects with current lanes
+    if (current_polygon && boost::geometry::intersects(current_polygon.value(), obj_polygon)) {
+      filtered_obj_indices.current_lane.push_back(i);
+      continue;
+    }
+
+    filtered_obj_indices.other_lane.push_back(i);
+  }
+
+  return filtered_obj_indices;
+}
 }  // namespace behavior_path_planner::utils::lane_change
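`filterObject` classifies each object exactly once, in priority order: target lanes, then target backward lanes, then current lanes, then everything else, and it drops slow objects whose most-forward corner is behind the ego before any polygon test. That pre-filter can be checked by hand (hypothetical values):

```cpp
#include <iostream>

// Worked example of the static-object pre-filter, hypothetical values.
int main()
{
  const double obj_velocity = 0.3;          // [m/s] e.g. a parked vehicle
  const double max_dist_ego_to_obj = -8.0;  // [m] signed arc length to the most-forward corner
  const bool ignored = obj_velocity < 1.0 && max_dist_ego_to_obj < 0.0;
  std::cout << std::boolalpha << ignored << "\n";  // true: skipped before any polygon check
}
```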
diff --git a/planning/behavior_path_planner/src/utils/start_planner/geometric_pull_out.cpp b/planning/behavior_path_planner/src/utils/start_planner/geometric_pull_out.cpp
index 261a297f859bf..e4f0e13953924 100644
--- a/planning/behavior_path_planner/src/utils/start_planner/geometric_pull_out.cpp
+++ b/planning/behavior_path_planner/src/utils/start_planner/geometric_pull_out.cpp
@@ -40,7 +40,11 @@ boost::optional<PullOutPath> GeometricPullOut::plan(Pose start_pose, Pose goal_pose)
   PullOutPath output;
 
   // combine road lane and shoulder lane
-  const auto road_lanes = utils::getExtendedCurrentLanes(planner_data_);
+  const double backward_path_length =
+    planner_data_->parameters.backward_path_length + parameters_.max_back_distance;
+  const auto road_lanes =
+    utils::getCurrentLanes(planner_data_, backward_path_length, std::numeric_limits<double>::max());
+
   const auto shoulder_lanes = getPullOutLanes(planner_data_);
   auto lanes = road_lanes;
   lanes.insert(lanes.end(), shoulder_lanes.begin(), shoulder_lanes.end());
diff --git a/planning/behavior_path_planner/src/utils/start_planner/shift_pull_out.cpp b/planning/behavior_path_planner/src/utils/start_planner/shift_pull_out.cpp
index 7d8448802b847..b11bb7731b826 100644
--- a/planning/behavior_path_planner/src/utils/start_planner/shift_pull_out.cpp
+++ b/planning/behavior_path_planner/src/utils/start_planner/shift_pull_out.cpp
@@ -44,12 +44,16 @@ boost::optional<PullOutPath> ShiftPullOut::plan(Pose start_pose, Pose goal_pose)
   const auto & route_handler = planner_data_->route_handler;
   const auto & common_parameters = planner_data_->parameters;
   const auto & dynamic_objects = planner_data_->dynamic_object;
-  const auto & road_lanes = utils::getExtendedCurrentLanes(planner_data_);
-  const auto & shoulder_lanes = getPullOutLanes(planner_data_);
+  const auto shoulder_lanes = getPullOutLanes(planner_data_);
   if (shoulder_lanes.empty()) {
     return boost::none;
   }
 
+  const double backward_path_length =
+    planner_data_->parameters.backward_path_length + parameters_.max_back_distance;
+  const auto road_lanes =
+    utils::getCurrentLanes(planner_data_, backward_path_length, std::numeric_limits<double>::max());
+
   // find candidate paths
   auto pull_out_paths = calcPullOutPaths(
     *route_handler, road_lanes, shoulder_lanes, start_pose, goal_pose, common_parameters,
@@ -135,6 +139,7 @@ std::vector<PullOutPath> ShiftPullOut::calcPullOutPaths(
   }
 
   // rename parameter
+  const double forward_path_length = common_parameter.forward_path_length;
   const double backward_path_length = common_parameter.backward_path_length;
   const double minimum_shift_pull_out_distance = parameter.minimum_shift_pull_out_distance;
   const double lateral_jerk = parameter.lateral_jerk;
@@ -150,13 +155,13 @@ std::vector<PullOutPath> ShiftPullOut::calcPullOutPaths(
   const auto arc_position_start = getArcCoordinates(road_lanes, start_pose);
   const double s_start = std::max(arc_position_start.length - backward_path_length, 0.0);
   const auto arc_position_goal = getArcCoordinates(road_lanes, goal_pose);
-  const double road_lanes_length = std::accumulate(
-    road_lanes.begin(), road_lanes.end(), 0.0, [](const double sum, const auto & lane) {
-      return sum + lanelet::utils::getLaneletLength2d(lane);
-    });
-  // if goal is behind start pose,
+
+  // if goal is behind start pose, use path with forward_path_length
   const bool goal_is_behind = arc_position_goal.length < s_start;
-  const double s_end = goal_is_behind ? road_lanes_length : arc_position_goal.length;
+  const double s_forward_length = s_start + forward_path_length;
+  const double s_end =
+    goal_is_behind ? s_forward_length : std::min(arc_position_goal.length, s_forward_length);
+
   constexpr double RESAMPLE_INTERVAL = 1.0;
   PathWithLaneId road_lane_reference_path = utils::resamplePathWithSpline(
     route_handler.getCenterLinePath(road_lanes, s_start, s_end), RESAMPLE_INTERVAL);
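Previously a goal behind the start pose made the reference path span the entire accumulated lane length; now `s_end` is capped at `s_start + forward_path_length` in both branches. A worked example with hypothetical values:

```cpp
#include <algorithm>
#include <iostream>

// Worked example of the new s_end clipping, hypothetical values.
int main()
{
  const double s_start = 30.0;               // [m] start-pose arc length minus backward margin
  const double forward_path_length = 100.0;  // [m]
  const double s_forward_length = s_start + forward_path_length;  // 130 m

  const double arc_position_goal_length = 80.0;                    // [m] goal ahead of s_start
  const bool goal_is_behind = arc_position_goal_length < s_start;  // false
  const double s_end =
    goal_is_behind ? s_forward_length : std::min(arc_position_goal_length, s_forward_length);
  std::cout << s_end << "\n";  // 80: ends at the goal, never beyond 130
}
```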
diff --git a/planning/motion_velocity_smoother/src/motion_velocity_smoother_node.cpp b/planning/motion_velocity_smoother/src/motion_velocity_smoother_node.cpp
index 057308e4329d1..b77af629ad7c3 100644
--- a/planning/motion_velocity_smoother/src/motion_velocity_smoother_node.cpp
+++ b/planning/motion_velocity_smoother/src/motion_velocity_smoother_node.cpp
@@ -786,8 +786,10 @@ MotionVelocitySmootherNode::calcInitialMotion(
     // We should plan from the current vehicle speed, but if the initial value is greater than the
     // velocity limit, the current planning algorithm decelerates with a very high deceleration.
     // To avoid this, we set the initial value of the vehicle speed to be below the speed limit.
+    const auto p = smoother_->getBaseParam();
     const auto v0 = std::min(target_vel, vehicle_speed);
-    const Motion initial_motion = {v0, vehicle_acceleration};
+    const auto a0 = std::clamp(vehicle_acceleration, p.min_decel, p.max_accel);
+    const Motion initial_motion = {v0, a0};
     return {initial_motion, InitializeType::EGO_VELOCITY};
   }
 }
diff --git a/system/default_ad_api_helpers/ad_api_adaptors/README.md b/system/default_ad_api_helpers/ad_api_adaptors/README.md
index 6cb43c5475828..121befb68c88a 100644
--- a/system/default_ad_api_helpers/ad_api_adaptors/README.md
+++ b/system/default_ad_api_helpers/ad_api_adaptors/README.md
@@ -7,11 +7,10 @@
 When an initial pose topic is received, call the localization initialize API.
 This node depends on the map height fitter library. [See here for more details.](../../../map/map_height_fitter/README.md)
 
-| Interface    | Local Name     | Global Name                       | Description                               |
-| ------------ | -------------- | --------------------------------- | ----------------------------------------- |
-| Subscription | initialpose    | /initialpose                      | The pose for localization initialization. |
-| Client       | fit_map_height | /localization/util/fit_map_height | To fix initial pose to map height         |
-| Client       | -              | /api/localization/initialize      | The localization initialize API.          |
+| Interface    | Local Name  | Global Name                  | Description                               |
+| ------------ | ----------- | ---------------------------- | ----------------------------------------- |
+| Subscription | initialpose | /initialpose                 | The pose for localization initialization. |
+| Client       | -           | /api/localization/initialize | The localization initialize API.          |
 
 ## routing_adaptor