Merge pull request #193 from una-auxme/187-fix-vision-node-delay
187 - Optimize pytorch weight loading on startup + dependencies update
samuelkuehnel committed Mar 5, 2024
2 parents 884da96 + 9d9e4d2 commit fa7b8b6
Showing 4 changed files with 73 additions and 59 deletions.
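The core of the vision_node.py diff below is that model_dict no longer constructs every torchvision model when the node starts; it stores the constructor and the weight enum, and only the entry selected by the "model" parameter is instantiated. A minimal sketch of that pattern, with a single entry and illustrative variable names rather than the node's full dictionary:

from torchvision.models.detection import (
    fasterrcnn_resnet50_fpn_v2, FasterRCNN_ResNet50_FPN_V2_Weights)

# Keep callables and weight enums in the dict instead of constructed models,
# so models that are never selected are never downloaded or built.
model_dict = {
    "fasterrcnn_resnet50_fpn_v2": (
        fasterrcnn_resnet50_fpn_v2,                   # constructor, not an instance
        FasterRCNN_ResNet50_FPN_V2_Weights.DEFAULT,   # weight enum, resolved later
        "detection",
        "pyTorch"),
}

selected = "fasterrcnn_resnet50_fpn_v2"   # the node reads this from the ROS param "model"
constructor, weights, task, framework = model_dict[selected]
if framework == "pyTorch":
    model = constructor(weights=weights)  # built only now, and only for the chosen entry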
build/docker-compose.yml (2 changes: 1 addition & 1 deletion)
@@ -62,7 +62,7 @@ services:
tty: true
shm_size: 2gb
#command: bash -c "sleep 10 && python3 /opt/leaderboard/leaderboard/leaderboard_evaluator.py --debug=0 --routes=/opt/leaderboard/data/routes_devtest.xml --agent=/opt/leaderboard/leaderboard/autoagents/npc_agent.py --host=carla-simulator --track=SENSORS"
- # command: bash -c "sleep 10 && roslaunch agent/launch/dev.launch"
+ #command: bash -c "sleep 10 && roslaunch agent/launch/dev.launch"
command: bash -c "sleep 10 && sudo chown -R carla:carla ../code/ && sudo chmod -R a+w ../code/ && python3 /opt/leaderboard/leaderboard/leaderboard_evaluator.py --debug=0 --routes=/opt/leaderboard/data/routes_devtest.xml --agent=/workspace/code/agent/src/agent/agent.py --host=paf23-carla-simulator-1 --track=MAP"

logging:
code/perception/launch/perception.launch (14 changes: 7 additions & 7 deletions)
@@ -12,12 +12,12 @@
<node pkg="rqt_plot" type="rqt_plot" output="screen" name="x_plot" args="/paf/hero/ideal_x/data[0] /paf/hero/ideal_x/data[1]" />
<node pkg="rqt_plot" type="rqt_plot" output="screen" name="y_plot" args="/paf/hero/ideal_y/data[0] /paf/hero/ideal_y/data[1]" /-->
<!--node pkg="rqt_plot" type="rqt_plot" output="screen" name="error_plot" args="/paf/hero/ideal_x/data[2]" /-->

<node pkg="perception" type="kalman_filter.py" name="kalman_filter_node" output="screen">
<param name="control_loop_rate" value="0.05" />
<param name="role_name" value="$(arg role_name)" />
</node>

<node pkg="perception" type="Position_Publisher_Node.py" name="Position_Publisher_Node" output="screen">
<param name="control_loop_rate" value="0.1" />
<param name="role_name" value="$(arg role_name)" />
@@ -38,10 +38,10 @@
<node pkg="perception" type="vision_node.py" name="VisionNode" output="screen">
<param name="role_name" value="$(arg role_name)" />
<param name="side" value="Center" />

<!--
- Object-Detection:
- - fasterrcnn_resnet50_fpn_v2
+ Object-Detection:
+ - fasterrcnn_resnet50_fpn_v2
- fasterrcnn_mobilenet_v3_large_320_fpn
- yolov8n
- yolov8s
@@ -58,10 +58,10 @@
Image-Segmentation:
- deeplabv3_resnet101
- - yolov8x-seg
+ - yolov8x-seg
-->

- <param name="model" value="rtdetr-x" />
+ <param name="model" value="rtdetr-l" />
</node>

<node pkg="perception" type="traffic_light_node.py" name="TrafficLightNode" output="screen">
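Besides the lazy weight loading, the vision_node.py diff below adds perf_counter timing around the ultralytics inference path, gated behind a DEBUG flag so the per-frame log stays quiet by default. A minimal standalone sketch of that pattern, with stub functions standing in for the node's inference and post-processing:

from time import perf_counter, sleep

DEBUG = True  # the node keeps this False by default

def run_inference(frame):
    sleep(0.010)   # stand-in for self.model.predict(...)
    return frame

def postprocess(detections):
    sleep(0.005)   # stand-in for traffic-light handling and distance/box drawing
    return detections

def timed_pipeline(frame):
    start = perf_counter()
    detections = run_inference(frame)
    s1 = perf_counter()
    result = postprocess(detections)
    now = perf_counter()
    if DEBUG:
        print("S1: {}, S2: {}, Total: {}".format(
            round(s1 - start, 4), round(now - s1, 4), round(now - start, 4)))
    return result

timed_pipeline("frame")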
code/perception/src/vision_node.py (95 changes: 54 additions & 41 deletions)
@@ -1,5 +1,6 @@
#!/usr/bin/env python3

+ from time import perf_counter
from ros_compatibility.node import CompatibleNode
import ros_compatibility as roscomp
import torch
@@ -40,20 +41,17 @@ def __init__(self, name, **kwargs):
super().__init__(name, **kwargs)
self.model_dict = {
"fasterrcnn_resnet50_fpn_v2":
- (fasterrcnn_resnet50_fpn_v2(
-     weights=FasterRCNN_ResNet50_FPN_V2_Weights.DEFAULT),
+ (fasterrcnn_resnet50_fpn_v2,
+  FasterRCNN_ResNet50_FPN_V2_Weights.DEFAULT,
"detection",
"pyTorch"),
"fasterrcnn_mobilenet_v3_large_320_fpn":
- (fasterrcnn_mobilenet_v3_large_320_fpn(
-     weights=FasterRCNN_MobileNet_V3_Large_320_FPN_Weights.DEFAULT),
+ (fasterrcnn_mobilenet_v3_large_320_fpn,
+  FasterRCNN_MobileNet_V3_Large_320_FPN_Weights.DEFAULT,
"detection",
"pyTorch"),
"deeplabv3_resnet101":
- (deeplabv3_resnet101(
-     weights=DeepLabV3_ResNet101_Weights.DEFAULT),
+ (deeplabv3_resnet101,
+  DeepLabV3_ResNet101_Weights.DEFAULT,
"segmentation",
"pyTorch"),
@@ -83,6 +81,7 @@ def __init__(self, name, **kwargs):
self.device = torch.device("cuda"
if torch.cuda.is_available() else "cpu")
self.depth_images = []
+ self.DEBUG = False

# publish / subscribe setup
self.setup_camera_subscriptions()
@@ -95,7 +94,10 @@ def __init__(self, name, **kwargs):

# model setup
model_info = self.model_dict[self.get_param("model")]
- self.model = model_info[0]
+ if model_info[3] == "pyTorch":
+     self.model = model_info[0](weights=model_info[1])
+ else:
+     self.model = model_info[0]
self.weights = model_info[1]
self.type = model_info[2]
self.framework = model_info[3]
@@ -111,13 +113,10 @@ def __init__(self, name, **kwargs):
for param in self.model.parameters():
param.requires_grad = False
self.model.to(self.device)

# ultralytics setup
if self.framework == "ultralytics":
self.model = self.model(self.weights)

# tensorflow setup

def setup_camera_subscriptions(self):
self.new_subscription(
msg_type=numpy_msg(ImageMsg),
@@ -214,47 +213,63 @@ def predict_torch(self, image):
return vision_result

def predict_ultralytics(self, image):
+ start = perf_counter()

cv_image = self.bridge.imgmsg_to_cv2(img_msg=image,
desired_encoding='passthrough')
cv_image = cv2.cvtColor(cv_image, cv2.COLOR_RGB2BGR)

- output = self.model(cv_image, half=True, verbose=False)
+ output = self.model.predict(cv_image, half=True, verbose=False)[0]

+ s1 = perf_counter()

+ if 9 in output.boxes.cls:
+     self.process_traffic_lights(output, cv_image, image.header)

+ s2 = perf_counter()

+ box_img = self.calc_draw_distance(output.boxes, cv_image)

+ now = perf_counter()
+ if self.DEBUG:
+     self.loginfo("S1: {}, S2: {}, S3: {}, Total: {}".format(
+         round(s1 - start, 4), round(s2 - s1, 4), round(now - s2, 4),
+         round(now - start, 4)))

+ return box_img

+ def calc_draw_distance(self, boxes, cv_image):
distance_output = []
c_boxes = []
c_labels = []
- for r in output:
-     boxes = r.boxes
for box in boxes:

for box in boxes:
if len(self.depth_images) > 0:
cls = box.cls.item()
pixels = box.xyxy[0]
if len(self.depth_images) > 0:
distances = np.asarray(
[self.depth_images[i][int(pixels[1]):int(pixels[3]):1,
int(pixels[0]):int(pixels[2]):1]
for i in range(len(self.depth_images))])
non_zero_filter = distances[distances != 0]

if len(non_zero_filter) > 0:
obj_dist = np.min(non_zero_filter)
else:
obj_dist = np.inf

c_boxes.append(torch.tensor(pixels))
c_labels.append(f"Class: {cls}, Meters: {obj_dist}")
distance_output.append([cls, obj_dist])

# print(distance_output)
# self.logerr(distance_output)

distances = np.asarray(
[self.depth_images[i][int(pixels[1]):int(pixels[3]):1,
int(pixels[0]):int(pixels[2]):1]
for i in range(len(self.depth_images))])
non_zero_filter = distances[distances != 0]

if len(non_zero_filter) > 0:
obj_dist = np.min(non_zero_filter)
else:
obj_dist = np.inf

c_boxes.append(torch.tensor(pixels))
c_labels.append(f"Class: {cls}, Meters: {obj_dist}")
distance_output.append([cls, obj_dist])

self.distance_publisher.publish(
Float32MultiArray(data=distance_output))

transposed_image = np.transpose(cv_image, (2, 0, 1))
image_np_with_detections = torch.tensor(transposed_image,
dtype=torch.uint8)

- if 9 in output[0].boxes.cls:
-     self.process_traffic_lights(output[0], cv_image, image.header)

c_boxes = torch.stack(c_boxes)
print(image_np_with_detections.shape, c_boxes.shape, c_labels)
box = draw_bounding_boxes(image_np_with_detections,
Expand All @@ -263,15 +278,13 @@ def predict_ultralytics(self, image):
colors='blue',
width=3,
font_size=12)
# print(box.shape)
np_box_img = np.transpose(box.detach().numpy(),
(1, 2, 0))

np_box_img = np.transpose(box.detach().numpy(), (1, 2, 0))
box_img = cv2.cvtColor(np_box_img, cv2.COLOR_BGR2RGB)
return box_img

# return output[0].plot()
return box_img

def process_traffic_lights(self, prediction, cv_image, image_header):
async def process_traffic_lights(self, prediction, cv_image, image_header):
indices = (prediction.boxes.cls == 9).nonzero().squeeze().cpu().numpy()
indices = np.asarray([indices]) if indices.size == 1 else indices

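The new calc_draw_distance helper shown above estimates how far away each detected object is by cropping the buffered depth images to the bounding box and keeping the smallest non-zero depth value (zero meaning no measurement for that pixel), falling back to infinity when the box contains no depth data. A minimal standalone sketch of that estimate, using a synthetic depth map and an illustrative function name instead of the node's depth_images buffer:

import numpy as np

def min_depth_in_box(depth_images, box_xyxy):
    # Crop every buffered depth image to the box and keep the closest
    # non-zero reading; zeros mean "no depth measured" for that pixel.
    x1, y1, x2, y2 = (int(v) for v in box_xyxy)
    crops = np.asarray([d[y1:y2, x1:x2] for d in depth_images])
    non_zero = crops[crops != 0]
    return float(np.min(non_zero)) if non_zero.size > 0 else np.inf

depth_map = np.random.rand(720, 1280) * 50.0          # synthetic depth image in meters
print(min_depth_in_box([depth_map], (100, 200, 180, 260)))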
code/requirements.txt (21 changes: 11 additions & 10 deletions)
@@ -1,17 +1,18 @@
- dvc==3.30.3
- dvc-gdrive==2.20.0
+ dvc==3.42.0
+ dvc-gdrive==3.0.1
networkx==3.1
--extra-index-url https://download.pytorch.org/whl/cu118
- torch==2.1.1
- torchvision==0.16.1
- torchaudio==2.1.1
- pytorch-lightning==2.1.2
- opencv-python==4.8.1.78
- dvclive==3.3.1
+ torch==2.1.2
+ torchvision==0.16.2
+ torchaudio==2.1.2
+ pytorch-lightning==2.1.4
+ opencv-python==4.9.0.80
+ dvclive==3.41.1
ruamel.yaml==0.18.5
scipy==1.10.1
xmltodict==0.13.0
py-trees==2.2.3
numpy==1.23.5
- ultralytics==8.0.220
+ ultralytics==8.1.11
scikit-learn>=0.18
- pandas==2.0.0
+ pandas==2.0.3

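With the updated pins installed against the cu118 extra index, a quick sanity check that the CUDA build is actually picked up (mirroring the torch.device selection vision_node.py already does) might look like this:

import torch
import torchvision

print(torch.__version__)         # expected to report 2.1.2 (+cu118) per the pins above
print(torchvision.__version__)   # expected to report 0.16.2
print(torch.cuda.is_available())
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("using", device)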
