
ci(pre-commit): autofix
pre-commit-ci[bot] authored and kaancolak committed Apr 21, 2022
1 parent 63bafd9 commit 4368544
Showing 8 changed files with 196 additions and 119 deletions.
---- changed file ----
@@ -27,7 +27,7 @@
<group>
<include file="$(find-pkg-share tensorrt_yolo)/launch/yolo.launch.xml">
<arg name="image_raw0" value="/front_image"/>
<!-- <arg name="image_raw0" value="$(var image_raw0)"/>-->
<!-- <arg name="image_raw0" value="$(var image_raw0)"/>-->
<arg name="image_raw1" value="$(var image_raw1)"/>
<arg name="image_raw2" value="$(var image_raw2)"/>
<arg name="image_raw3" value="$(var image_raw3)"/>
@@ -38,11 +38,9 @@
<arg name="image_number" value="$(var image_number)"/>
</include>
</group>
<!-- <group>-->
<!-- <include file="$(find-pkg-share tensorrt_yolo)/launch/tensorrt_yolo.launch.xml"></include>-->
<!-- </group>-->


<!-- <group>-->
<!-- <include file="$(find-pkg-share tensorrt_yolo)/launch/tensorrt_yolo.launch.xml"></include>-->
<!-- </group>-->

<!-- Pointcloud map filter -->
<group>
---- changed file ----
@@ -45,4 +45,4 @@
</group>
<!-- camera based detection-->
<group if="$(eval '&quot;$(var mode)&quot;==&quot;camera&quot;')"/>
</launch>
</launch>
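The guard on the last group compares the `mode` argument against the literal `camera` via `$(eval ...)`. As a point of reference only, a rough equivalent of that condition in the Python launch API (a sketch using standard `launch` classes; not part of this commit):

```python
# Sketch: the same "mode == camera" guard expressed with the Python launch
# API. IfCondition/PythonExpression are standard launch classes; the group
# content itself is omitted here.
from launch.actions import GroupAction
from launch.conditions import IfCondition
from launch.substitutions import LaunchConfiguration, PythonExpression

camera_only_group = GroupAction(
    actions=[],  # camera-based detection nodes would go here
    condition=IfCondition(
        PythonExpression(["'", LaunchConfiguration("mode"), "' == 'camera'"])
    ),
)
```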
---- changed file ----
@@ -42,9 +42,7 @@ def __init__(self, context):
with open(ground_segmentation_param_path, "r") as f:
self.ground_segmentation_param = yaml.safe_load(f)["/**"]["ros__parameters"]

self.single_frame_obstacle_seg_output = (
"/perception/obstacle_segmentation/pointcloud"
)
self.single_frame_obstacle_seg_output = "/perception/obstacle_segmentation/pointcloud"
self.output_topic = "/perception/obstacle_segmentation/pointcloud"
self.use_single_frame_filter = self.ground_segmentation_param["use_single_frame_filter"]
self.use_time_series_filter = self.ground_segmentation_param["use_time_series_filter"]
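For context, the `["/**"]["ros__parameters"]` lookup above assumes the standard ROS 2 parameter-file layout. A minimal, self-contained sketch with the two flags this launcher reads (hypothetical values):

```python
# Minimal sketch of the lookup above, assuming the standard ROS 2
# parameter-file layout; the two keys mirror the fields read in the diff.
import yaml

example_param_file = """
/**:
  ros__parameters:
    use_single_frame_filter: true
    use_time_series_filter: false
"""

params = yaml.safe_load(example_param_file)["/**"]["ros__parameters"]
assert params["use_single_frame_filter"] is True
```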
---- changed file ----
@@ -93,6 +93,5 @@
</include>
</group>
</group>

</group>
</launch>
29 changes: 12 additions & 17 deletions benchmarking/benchmarking_launch/launch/waymo.launch.xml
@@ -1,13 +1,11 @@
<?xml version="1.0" encoding="UTF-8"?>
<launch>
<arg name="vehicle_param_file" default="$(find-pkg-share vehicle_info_util)/config/vehicle_info.param.yaml"
description="path to the file of vehicle info yaml"/>
<arg name="vehicle_param_file" default="$(find-pkg-share vehicle_info_util)/config/vehicle_info.param.yaml" description="path to the file of vehicle info yaml"/>
<arg name="use_pointcloud_container" default="false" description="launch pointcloud container"/>
<arg name="pointcloud_container_name" default="pointcloud_container"/>
<arg name="use_concat_filter" default="true"/>
<!-- common parameters -->
<arg name="input/pointcloud" default="/sensing/lidar/concatenated/pointcloud"
description="The topic will be used in the detection module"/>
<arg name="input/pointcloud" default="/sensing/lidar/concatenated/pointcloud" description="The topic will be used in the detection module"/>
<arg name="mode" default="camera_lidar_fusion" description="options: `camera_lidar_fusion`, `lidar` or `camera`"/>
<arg name="image_raw0" default="/front_image" description="image raw topic name"/>
<arg name="camera_info0" default="/front_cam_info" description="camera info topic name"/>
@@ -23,14 +21,14 @@
<arg name="use_vector_map" default="true" description="use vector map in prediction"/>
<arg name="use_pointcloud_map" default="false" description="use pointcloud map in detection"/>

<!-- &lt;!&ndash; Pointcloud container &ndash;&gt;-->
<!-- <group>-->
<!-- <include file="$(find-pkg-share autoware_launch)/launch/pointcloud_container.launch.py"-->
<!-- if="$(var use_pointcloud_container)">-->
<!-- <arg name="use_multithread" value="true"/>-->
<!-- <arg name="container_name" value="$(var pointcloud_container_name)"/>-->
<!-- </include>-->
<!-- </group>-->
<!-- &lt;!&ndash; Pointcloud container &ndash;&gt;-->
<!-- <group>-->
<!-- <include file="$(find-pkg-share autoware_launch)/launch/pointcloud_container.launch.py"-->
<!-- if="$(var use_pointcloud_container)">-->
<!-- <arg name="use_multithread" value="true"/>-->
<!-- <arg name="container_name" value="$(var pointcloud_container_name)"/>-->
<!-- </include>-->
<!-- </group>-->

<include file="$(find-pkg-share benchmarking_launch)/launch/pointcloud_container.launch.py">
<arg name="base_frame" value="base_link"/>
@@ -45,8 +43,7 @@
<!-- Object segmentation module-->
<group>
<push-ros-namespace namespace="obstacle_segmentation"/>
<include
file="$(find-pkg-share benchmarking_launch)/launch/obstacle_segmentation/ground_segmentation/ground_segmentation.launch.py">
<include file="$(find-pkg-share benchmarking_launch)/launch/obstacle_segmentation/ground_segmentation/ground_segmentation.launch.py">
<arg name="base_frame" value="base_link"/>
<arg name="vehicle_param_file" value="$(var vehicle_param_file)"/>
<arg name="use_intra_process" value="true"/>
@@ -102,12 +99,10 @@
<!-- prediction module -->
<group>
<push-ros-namespace namespace="prediction"/>
<include
file="$(find-pkg-share benchmarking_launch)/launch/object_recognition/prediction/prediction.launch.xml">
<include file="$(find-pkg-share benchmarking_launch)/launch/object_recognition/prediction/prediction.launch.xml">
<arg name="use_vector_map" value="$(var use_vector_map)"/>
</include>
</group>
</group>

</group>
</launch>
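For anyone consuming `waymo.launch.xml` from Python rather than XML, an include would look roughly like the sketch below (standard `launch`/`launch_ros` API; the argument values are illustrative, not taken from this commit):

```python
# Sketch: including waymo.launch.xml from a Python launch file.
from launch import LaunchDescription
from launch.actions import IncludeLaunchDescription
from launch.launch_description_sources import AnyLaunchDescriptionSource
from launch.substitutions import PathJoinSubstitution
from launch_ros.substitutions import FindPackageShare


def generate_launch_description():
    waymo_launch = IncludeLaunchDescription(
        AnyLaunchDescriptionSource(
            PathJoinSubstitution(
                [FindPackageShare("benchmarking_launch"), "launch", "waymo.launch.xml"]
            )
        ),
        # Illustrative arguments; see the arg declarations in the XML above.
        launch_arguments={"mode": "lidar", "use_pointcloud_map": "false"}.items(),
    )
    return LaunchDescription([waymo_launch])
```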
---- changed file ----
@@ -44,6 +44,4 @@
<arg name="pointcloud_container_name" value="$(var pointcloud_container_name)"/>
</include>
</group>


</launch>
---- changed file ----
@@ -1,15 +1,14 @@
import os
import time

from numba import cuda
import numpy as np
import tensorflow as tf
from waymo_open_dataset import dataset_pb2
from waymo_open_dataset import dataset_pb2 as open_dataset
from waymo_open_dataset.utils import frame_utils
from waymo_open_dataset.utils import transform_utils
from waymo_open_dataset.utils import range_image_utils
from waymo_open_dataset import dataset_pb2

from numba import cuda
from waymo_open_dataset.utils import transform_utils


class WaymoHandler:
@@ -61,15 +60,15 @@ def extract_dataset_from_tfrecord(self, path):

for image in frame.images:
cam_name_str = dataset_pb2.CameraName.Name.Name(image.name)
data_dict[f'{cam_name_str}_IMAGE'] = tf.io.decode_jpeg(image.image).numpy()
data_dict[f"{cam_name_str}_IMAGE"] = tf.io.decode_jpeg(image.image).numpy()

for laser_name, point in self.get_decoded_point_cloud(frame):
data_dict[f'{laser_name}_LASER'] = point
data_dict[f"{laser_name}_LASER"] = point

for camera_name, camera_calibration in self.decode_camera_calibration(frame):
data_dict[f'{camera_name}_CAM_INFO'] = camera_calibration
data_dict[f"{camera_name}_CAM_INFO"] = camera_calibration

data_dict['VEHICLE_POSE'] = self.decode_vehicle_pose(frame, frame_id)
data_dict["VEHICLE_POSE"] = self.decode_vehicle_pose(frame, frame_id)

extracted_dataset.append(data_dict)
return extracted_dataset
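The `frame` objects consumed above come from the usual waymo-open-dataset TFRecord loop; a minimal sketch of that surrounding read pattern (not visible in this diff):

```python
# Standard waymo-open-dataset read loop: each TFRecord entry is a
# serialized Frame proto.
import tensorflow as tf
from waymo_open_dataset import dataset_pb2 as open_dataset


def iterate_frames(tfrecord_path):
    dataset = tf.data.TFRecordDataset(tfrecord_path, compression_type="")
    for record in dataset:
        frame = open_dataset.Frame()
        frame.ParseFromString(bytearray(record.numpy()))
        yield frame
```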
@@ -83,31 +82,40 @@ def get_decoded_point_cloud(self, frame):
for laser in frame.lasers:

if len(laser.ri_return1.range_image_compressed) > 0:
range_image_str_tensor = tf.io.decode_compressed(laser.ri_return1.range_image_compressed, 'ZLIB')
range_image_str_tensor = tf.io.decode_compressed(
laser.ri_return1.range_image_compressed, "ZLIB"
)
ri = open_dataset.MatrixFloat()
ri.ParseFromString(bytearray(range_image_str_tensor.numpy()))
range_images[laser.name] = [ri]

if laser.name == open_dataset.LaserName.TOP:
range_image_top_pose_str_tensor = tf.io.decode_compressed(
laser.ri_return1.range_image_pose_compressed, 'ZLIB')
laser.ri_return1.range_image_pose_compressed, "ZLIB"
)
range_image_top_pose = open_dataset.MatrixFloat()
range_image_top_pose.ParseFromString(bytearray(range_image_top_pose_str_tensor.numpy()))
range_image_top_pose.ParseFromString(
bytearray(range_image_top_pose_str_tensor.numpy())
)

camera_projection_str_tensor = tf.io.decode_compressed(laser.ri_return1.camera_projection_compressed,
'ZLIB')
camera_projection_str_tensor = tf.io.decode_compressed(
laser.ri_return1.camera_projection_compressed, "ZLIB"
)
cp = open_dataset.MatrixInt32()
cp.ParseFromString(bytearray(camera_projection_str_tensor.numpy()))
camera_projections[laser.name] = [cp]

if len(laser.ri_return2.range_image_compressed) > 0:
range_image_str_tensor = tf.io.decode_compressed(laser.ri_return2.range_image_compressed, 'ZLIB')
range_image_str_tensor = tf.io.decode_compressed(
laser.ri_return2.range_image_compressed, "ZLIB"
)
ri = open_dataset.MatrixFloat()
ri.ParseFromString(bytearray(range_image_str_tensor.numpy()))
range_images[laser.name].append(ri)

camera_projection_str_tensor = tf.io.decode_compressed(laser.ri_return2.camera_projection_compressed,
'ZLIB')
camera_projection_str_tensor = tf.io.decode_compressed(
laser.ri_return2.camera_projection_compressed, "ZLIB"
)
cp = open_dataset.MatrixInt32()
cp.ParseFromString(bytearray(camera_projection_str_tensor.numpy()))
camera_projections[laser.name].append(cp)
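The repeated decompress-and-parse steps above all follow one pattern: each range image or camera projection is a ZLIB-compressed, serialized protobuf matrix. Isolated as a helper (hypothetical, for illustration only):

```python
# Hypothetical helper capturing the repeated pattern above: inflate the
# ZLIB payload, then parse it into the given Waymo matrix proto.
import tensorflow as tf
from waymo_open_dataset import dataset_pb2 as open_dataset


def decode_compressed_matrix(compressed_bytes, proto_cls=open_dataset.MatrixFloat):
    raw = tf.io.decode_compressed(compressed_bytes, "ZLIB")
    matrix = proto_cls()
    matrix.ParseFromString(bytearray(raw.numpy()))
    return matrix
```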
@@ -118,28 +126,36 @@ def get_decoded_point_cloud(self, frame):

frame_pose = tf.convert_to_tensor(np.reshape(np.array(frame.pose.transform), [4, 4]))
# [H, W, 6]
range_image_top_pose_tensor = tf.reshape(tf.convert_to_tensor(range_image_top_pose.data),
range_image_top_pose.shape.dims)
range_image_top_pose_tensor = tf.reshape(
tf.convert_to_tensor(range_image_top_pose.data), range_image_top_pose.shape.dims
)
# [H, W, 3, 3]
range_image_top_pose_tensor_rotation = transform_utils.get_rotation_matrix(range_image_top_pose_tensor[..., 0],
range_image_top_pose_tensor[..., 1],
range_image_top_pose_tensor[..., 2])
range_image_top_pose_tensor_rotation = transform_utils.get_rotation_matrix(
range_image_top_pose_tensor[..., 0],
range_image_top_pose_tensor[..., 1],
range_image_top_pose_tensor[..., 2],
)
range_image_top_pose_tensor_translation = range_image_top_pose_tensor[..., 3:]
range_image_top_pose_tensor = transform_utils.get_transform(range_image_top_pose_tensor_rotation,
range_image_top_pose_tensor_translation)
range_image_top_pose_tensor = transform_utils.get_transform(
range_image_top_pose_tensor_rotation, range_image_top_pose_tensor_translation
)
for c in calibrations:
range_image = range_images[c.name][0]

if len(c.beam_inclinations) == 0:
beam_inclinations = range_image_utils.compute_inclination(
tf.constant([c.beam_inclination_min, c.beam_inclination_max]), height=range_image.shape.dims[0])
tf.constant([c.beam_inclination_min, c.beam_inclination_max]),
height=range_image.shape.dims[0],
)
else:
beam_inclinations = tf.constant(c.beam_inclinations)

beam_inclinations = tf.reverse(beam_inclinations, axis=[-1])
extrinsic = np.reshape(np.array(c.extrinsic.transform), [4, 4])

range_image_tensor = tf.reshape(tf.convert_to_tensor(range_image.data), range_image.shape.dims)
range_image_tensor = tf.reshape(
tf.convert_to_tensor(range_image.data), range_image.shape.dims
)
pixel_pose_local = None
frame_pose_local = None

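For readers unfamiliar with `transform_utils`: `get_rotation_matrix` builds a rotation from per-pixel roll/pitch/yaw, and `get_transform` appends the translation into a 4x4 matrix. Written out in NumPy for a single pose (assuming the ZYX yaw-pitch-roll convention; the real utils operate on batched tensors):

```python
# What get_rotation_matrix/get_transform compose, for one pose:
# T = [Rz(yaw) @ Ry(pitch) @ Rx(roll) | t].
import numpy as np


def transform_from_rpy_xyz(roll, pitch, yaw, tx, ty, tz):
    cr, sr = np.cos(roll), np.sin(roll)
    cp, sp = np.cos(pitch), np.sin(pitch)
    cy, sy = np.cos(yaw), np.sin(yaw)
    rot = np.array([
        [cy * cp, cy * sp * sr - sy * cr, cy * sp * cr + sy * sr],
        [sy * cp, sy * sp * sr + cy * cr, sy * sp * cr - cy * sr],
        [-sp, cp * sr, cp * cr],
    ])
    transform = np.eye(4)
    transform[:3, :3] = rot
    transform[:3, 3] = [tx, ty, tz]
    return transform
```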
@@ -149,9 +165,12 @@ def get_decoded_point_cloud(self, frame):
frame_pose_local = tf.expand_dims(frame_pose, axis=0)
range_image_mask = range_image_tensor[..., 0] > 0
range_image_cartesian = range_image_utils.extract_point_cloud_from_range_image(
tf.expand_dims(range_image_tensor[..., 0], axis=0), tf.expand_dims(extrinsic, axis=0),
tf.expand_dims(tf.convert_to_tensor(beam_inclinations), axis=0), pixel_pose=pixel_pose_local,
frame_pose=frame_pose_local)
tf.expand_dims(range_image_tensor[..., 0], axis=0),
tf.expand_dims(extrinsic, axis=0),
tf.expand_dims(tf.convert_to_tensor(beam_inclinations), axis=0),
pixel_pose=pixel_pose_local,
frame_pose=frame_pose_local,
)

range_image_cartesian = tf.squeeze(range_image_cartesian, axis=0)
points_tensor = tf.gather_nd(range_image_cartesian, tf.where(range_image_mask))
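The mask/gather idiom at the end is worth seeing in isolation: pixels whose range (channel 0) is positive are kept, and their 3-D coordinates are pulled out of the dense image. A toy 2x2 example (illustrative values):

```python
# Toy version of the mask/gather step above.
import tensorflow as tf

range_image = tf.constant([[[5.0], [-1.0]], [[0.0], [2.0]]])  # [H, W, 1]
cartesian = tf.constant(
    [[[1.0, 0.0, 0.0], [9.0, 9.0, 9.0]], [[9.0, 9.0, 9.0], [0.0, 2.0, 0.0]]]
)  # [H, W, 3]
mask = range_image[..., 0] > 0
points = tf.gather_nd(cartesian, tf.where(mask))  # -> [[1,0,0], [0,2,0]]
```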
@@ -168,9 +187,7 @@ def decode_camera_calibration(self, frame):
for camera_calibration in frame.context.camera_calibrations:
camera_name = dataset_pb2.CameraName.Name.Name(camera_calibration.name)
extrinsic = np.array(camera_calibration.extrinsic.transform).reshape(4, 4)
cam_intrinsic = np.array([[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]])
cam_intrinsic = np.array([[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]])
cam_intrinsic[0, 0] = camera_calibration.intrinsic[0]
cam_intrinsic[1, 1] = camera_calibration.intrinsic[1]
cam_intrinsic[0, 2] = camera_calibration.intrinsic[2]
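Waymo stores `camera_calibration.intrinsic` as a flat vector, `[f_u, f_v, c_u, c_v, k1, k2, p1, p2, k3]`; the assignments above pack the pinhole terms into a 3x4 matrix and drop the distortion coefficients. The same thing as a small helper (illustrative):

```python
# Pinhole part of Waymo's flat intrinsic vector as a 3x4 matrix
# (distortion terms k1, k2, p1, p2, k3 ignored).
import numpy as np


def pinhole_matrix(intrinsic):
    f_u, f_v, c_u, c_v = intrinsic[:4]
    return np.array([
        [f_u, 0.0, c_u, 0.0],
        [0.0, f_v, c_v, 0.0],
        [0.0, 0.0, 1.0, 0.0],
    ])
```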
@@ -180,13 +197,13 @@ def decode_camera_calibration(self, frame):
height = camera_calibration.height

# Swap the axes around
axes_transformation = np.array([
[0, -1, 0, 0],
[0, 0, -1, 0],
[1, 0, 0, 0],
[0, 0, 0, 1]])
axes_transformation = np.array(
[[0, -1, 0, 0], [0, 0, -1, 0], [1, 0, 0, 0], [0, 0, 0, 1]]
)

vehicle_to_image = np.matmul(cam_intrinsic, np.matmul(axes_transformation, np.linalg.inv(extrinsic)))
vehicle_to_image = np.matmul(
cam_intrinsic, np.matmul(axes_transformation, np.linalg.inv(extrinsic))
)

camera_info = {
"cam_to_vehicle": extrinsic,
@@ -202,13 +219,29 @@ def decode_camera_calibration(self, frame):
return camera_calibrations
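Once assembled, `vehicle_to_image` (3x4, after the axis swap and the inverted extrinsic) maps homogeneous vehicle-frame points straight to pixels. A hypothetical usage helper, not part of this diff:

```python
# Hypothetical: project a vehicle-frame point through vehicle_to_image.
import numpy as np


def project_to_image(vehicle_to_image, point_xyz):
    p = vehicle_to_image @ np.array([*point_xyz, 1.0])
    if p[2] <= 0:  # behind the camera
        return None
    return p[0] / p[2], p[1] / p[2]  # pixel (u, v)
```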

def decode_vehicle_pose(self, frame, frame_id):
mat_trans_frame = np.array([(frame.pose.transform[0], frame.pose.transform[1], frame.pose.transform[2],
frame.pose.transform[3]),
(frame.pose.transform[4], frame.pose.transform[5], frame.pose.transform[6],
frame.pose.transform[7]),
(frame.pose.transform[8], frame.pose.transform[9], frame.pose.transform[10],
frame.pose.transform[11]),
(0, 0, 0, 1)])
mat_trans_frame = np.array(
[
(
frame.pose.transform[0],
frame.pose.transform[1],
frame.pose.transform[2],
frame.pose.transform[3],
),
(
frame.pose.transform[4],
frame.pose.transform[5],
frame.pose.transform[6],
frame.pose.transform[7],
),
(
frame.pose.transform[8],
frame.pose.transform[9],
frame.pose.transform[10],
frame.pose.transform[11],
),
(0, 0, 0, 1),
]
)
# Define T inverse of odom:
if frame_id == 0:
self.trans_t_zero_inverse = np.linalg.inv(mat_trans_frame)
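`trans_t_zero_inverse` sets up an odometry-style convention: later frame poses are presumably left-multiplied by it so that frame 0 becomes the identity (the multiplication itself falls outside the visible hunk). In short:

```python
# Sketch of the assumed convention: pose of frame k relative to frame 0.
import numpy as np


def odom_relative_pose(trans_t_zero_inverse, mat_trans_frame):
    return trans_t_zero_inverse @ mat_trans_frame  # == inv(T_0) @ T_k
```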
@@ -221,8 +254,8 @@ def decode_static_tf(self, frame):
for laser_calibration in frame.context.laser_calibrations:
laser_name = dataset_pb2.LaserName.Name.Name(laser_calibration.name)
extrinsic = np.array(laser_calibration.extrinsic.transform).reshape((4, 4))
self.lidars_tf[f'{laser_name}_LASER_EXTRINSIC'] = extrinsic
self.lidars_tf[f"{laser_name}_LASER_EXTRINSIC"] = extrinsic
for camera_calibration in frame.context.camera_calibrations:
camera_name = dataset_pb2.CameraName.Name.Name(camera_calibration.name)
extrinsic = (np.array(camera_calibration.extrinsic.transform)).reshape((4, 4))
self.cameras_tf[f'{camera_name}_CAM_EXTRINSIC'] = extrinsic
self.cameras_tf[f"{camera_name}_CAM_EXTRINSIC"] = extrinsic
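The extrinsics collected here are 4x4 homogeneous matrices; if they are later republished as static TFs, the conversion to a `TransformStamped` would look roughly like this (hypothetical helper; assumes `scipy` and `geometry_msgs` are available, and the frame names are placeholders):

```python
# Hypothetical conversion of a 4x4 extrinsic into a ROS 2 TransformStamped.
from geometry_msgs.msg import TransformStamped
from scipy.spatial.transform import Rotation


def matrix_to_transform(matrix, parent_frame, child_frame):
    msg = TransformStamped()
    msg.header.frame_id = parent_frame
    msg.child_frame_id = child_frame
    msg.transform.translation.x = float(matrix[0, 3])
    msg.transform.translation.y = float(matrix[1, 3])
    msg.transform.translation.z = float(matrix[2, 3])
    qx, qy, qz, qw = Rotation.from_matrix(matrix[:3, :3]).as_quat()
    msg.transform.rotation.x = float(qx)
    msg.transform.rotation.y = float(qy)
    msg.transform.rotation.z = float(qz)
    msg.transform.rotation.w = float(qw)
    return msg
```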
---- changed file (diff not loaded) ----