From bda01122a77319b52a9f1cca7dff33d3a4e9b094 Mon Sep 17 00:00:00 2001
From: MaxJa4 <74194322+MaxJa4@users.noreply.github.com>
Date: Sat, 13 Jan 2024 15:35:18 +0100
Subject: [PATCH] 110 - Implement traffic light detection (#148)
* Add publisher
* Publish segmented traffic lights
* Implement TrafficLightNode
* Add TrafficLightState msg. WIP
* Added side view classification
* Finish traffic light node
* Add manual control launch file
* Make linter happy
* Add documentation
* Add missing traffic light detection model
* Fix color issues in rviz
* Limit simulator's max. RAM usage to prevent system crash
* fix: Linter fixes for other team's code
---
.gitignore | 2 +
build/docker-compose.yml | 4 ++
code/agent/launch/agent_manual.launch | 31 ++++++++++
code/perception/CMakeLists.txt | 1 +
code/perception/launch/perception.launch | 18 ++++--
code/perception/msg/TrafficLightState.msg | 5 ++
.../src/traffic_light_detection/.gitignore | 1 -
.../src/traffic_light_detection/dataset.dvc | 2 +-
.../src/traffic_light_detection/dvc.lock | 23 +++----
.../dvclive/metrics.json | 11 ++++
.../models/model_acc_92.48_val_91.88.pt | Bin 0 -> 11071 bytes
.../src/traffic_light_detection/params.yaml | 2 +-
.../src/traffic_light_config.py | 2 +-
.../traffic_light_inference.py | 12 ++--
.../traffic_light_training.py | 4 +-
.../transforms.py | 0
code/perception/src/traffic_light_node.py | 57 ++++++++++++++++++
code/perception/src/vision_node.py | 39 ++++++++++--
.../src/behavior_agent/behavior_tree.py | 8 +--
.../src/behavior_agent/behaviours/__init__.py | 2 -
.../13_traffic_light_detection.md | 37 ++++++++++++
.../object-detection-model_evaluation/yolo.py | 8 ++-
22 files changed, 228 insertions(+), 41 deletions(-)
create mode 100644 code/agent/launch/agent_manual.launch
create mode 100644 code/perception/msg/TrafficLightState.msg
create mode 100644 code/perception/src/traffic_light_detection/dvclive/metrics.json
create mode 100644 code/perception/src/traffic_light_detection/models/model_acc_92.48_val_91.88.pt
rename code/perception/src/traffic_light_detection/src/{data_generation => traffic_light_detection}/transforms.py (100%)
create mode 100755 code/perception/src/traffic_light_node.py
create mode 100644 doc/06_perception/13_traffic_light_detection.md
diff --git a/.gitignore b/.gitignore
index bf3c7859..b667dead 100644
--- a/.gitignore
+++ b/.gitignore
@@ -11,3 +11,5 @@ code/output
# Byte-compiled / optimized / DLL files
__pycache__/
+
+*.tsv
diff --git a/build/docker-compose.yml b/build/docker-compose.yml
index 92f54ef1..70123763 100644
--- a/build/docker-compose.yml
+++ b/build/docker-compose.yml
@@ -26,6 +26,10 @@ services:
# image: carlasim/carla:0.9.14
image: ghcr.io/una-auxme/paf23:leaderboard-2.0
init: true
+ deploy:
+ resources:
+ limits:
+ memory: 16G
expose:
- 2000
- 2001
diff --git a/code/agent/launch/agent_manual.launch b/code/agent/launch/agent_manual.launch
new file mode 100644
index 00000000..32e33eb6
--- /dev/null
+++ b/code/agent/launch/agent_manual.launch
@@ -0,0 +1,31 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/code/perception/CMakeLists.txt b/code/perception/CMakeLists.txt
index 564502a6..70e340c4 100644
--- a/code/perception/CMakeLists.txt
+++ b/code/perception/CMakeLists.txt
@@ -52,6 +52,7 @@ add_message_files(
FILES
Waypoint.msg
LaneChange.msg
+ TrafficLightState.msg
MinDistance.msg
)
diff --git a/code/perception/launch/perception.launch b/code/perception/launch/perception.launch
index be3c7d27..300976a3 100644
--- a/code/perception/launch/perception.launch
+++ b/code/perception/launch/perception.launch
@@ -2,7 +2,7 @@
-
+
-
-
+ - yolov8x-seg
+ -->
+
+
+
+
+
+
+
+
diff --git a/code/perception/msg/TrafficLightState.msg b/code/perception/msg/TrafficLightState.msg
new file mode 100644
index 00000000..9da503af
--- /dev/null
+++ b/code/perception/msg/TrafficLightState.msg
@@ -0,0 +1,5 @@
+int8 state
+int8 GREEN=1
+int8 YELLOW=4
+int8 RED=2
+int8 UNKNOWN=0
diff --git a/code/perception/src/traffic_light_detection/.gitignore b/code/perception/src/traffic_light_detection/.gitignore
index af0e7a09..d55dcb8e 100644
--- a/code/perception/src/traffic_light_detection/.gitignore
+++ b/code/perception/src/traffic_light_detection/.gitignore
@@ -1,2 +1 @@
/dataset
-/models
diff --git a/code/perception/src/traffic_light_detection/dataset.dvc b/code/perception/src/traffic_light_detection/dataset.dvc
index 73aa6bd3..2f10a027 100644
--- a/code/perception/src/traffic_light_detection/dataset.dvc
+++ b/code/perception/src/traffic_light_detection/dataset.dvc
@@ -1,5 +1,5 @@
outs:
-- md5: 3a559397ebc58c1ecf142dea18d03367.dir
+- md5: 86f14bb96e1ac5735051b8e873f07a9f.dir
size: 13745063
nfiles: 2723
hash: md5
diff --git a/code/perception/src/traffic_light_detection/dvc.lock b/code/perception/src/traffic_light_detection/dvc.lock
index d9f625ce..6438d251 100644
--- a/code/perception/src/traffic_light_detection/dvc.lock
+++ b/code/perception/src/traffic_light_detection/dvc.lock
@@ -4,31 +4,32 @@ stages:
cmd: python src/traffic_light_detection/traffic_light_training.py
deps:
- path: dataset
- md5: 3a559397ebc58c1ecf142dea18d03367.dir
+ hash: md5
+ md5: 86f14bb96e1ac5735051b8e873f07a9f.dir
size: 13745063
nfiles: 2723
- path: src
hash: md5
- md5: b6c9cb867c89ad6e86403d9c33538136.dir
- size: 23777
- nfiles: 10
+ md5: 34c981a61e886a858d135daee17a82ba.dir
+ size: 35849
+ nfiles: 17
params:
params.yaml:
train:
- epochs: 100
+ epochs: 20
batch_size: 32
outs:
- path: dvclive/metrics.json
hash: md5
- md5: af33de699558fbfd3edee1607ba88f81
- size: 218
+ md5: 8566265bcdc76cb55d17230f82fc1517
+ size: 219
- path: dvclive/plots
hash: md5
- md5: 774919de9e9d6820ac6821d0819829c1.dir
- size: 8900
+ md5: f8f99f42fc42e0ed3c80c8e8f05c1528.dir
+ size: 8870
nfiles: 4
- path: models
hash: md5
- md5: ee67bac2f189d2cc5a199d91ba3295ac.dir
- size: 10815
+ md5: 16f96ecc475d20123051719908af9d4d.dir
+ size: 11071
nfiles: 1
diff --git a/code/perception/src/traffic_light_detection/dvclive/metrics.json b/code/perception/src/traffic_light_detection/dvclive/metrics.json
new file mode 100644
index 00000000..52d49ebd
--- /dev/null
+++ b/code/perception/src/traffic_light_detection/dvclive/metrics.json
@@ -0,0 +1,11 @@
+{
+ "train": {
+ "accuracy": 92.26495726495727,
+ "loss": 0.03128106306251298
+ },
+ "validation": {
+ "accuracy": 91.36125654450262,
+ "loss": 0.031184170882739323
+ },
+ "step": 99
+}
diff --git a/code/perception/src/traffic_light_detection/models/model_acc_92.48_val_91.88.pt b/code/perception/src/traffic_light_detection/models/model_acc_92.48_val_91.88.pt
new file mode 100644
index 0000000000000000000000000000000000000000..584f75f5cc2f038cf86088583d5d9ed96f7d9c55
GIT binary patch
literal 11071
zcmb`N2Ut``*T7uxVG?mcQ=!o>Vgs4zabW&P!YG{U?3M*%37NjKkUYew4cuX`a-|-a{uY}}qQ9#E@
z6;>fgMpDOyRSZ&;+DMNIceP=a{07U2Smkj7o|!;iASX}|D2Q0TnLr@uFZnFFq*4?s
zRi-Lp)r9OdmOdhM*0n-w(
zy@hO_APosDB3u+18=91yn)vCC`*wlavHdzj`%9tPAE7!@sBQ;zfQZ!-vIB#9eg{?
z*bze3{gc<){pqzHU9=BnM|Q?|`jtvJUJ{Pf>Et_{&f827^{3N~>Y)3G*wI2(@X6`y
ztlK-&w+q{u_3O+YBW;(lAKS%W+AiZdU_ucaAY{jXl4}2_)Cpa1E^J_D+(bdChzKqZ9EoA2gss4xB-u3gkG9B2Q&dgkC@XBjP=CS!wkp&&NLJ?ae
zWap=6u?vFu5~O70io&Hqoh@#UZY5?xlCwN^VHUfnJ(6Zbr>03lG+Qc=NefbtTxt)=
z?BXo8EVlieR9L6b#g=EW6_P8m|EX=qF3DmmJ8Da9CWe0#nv$HHV9PGeVyimN|4FuI
ztFzc;f30iBew)SCbgk>aF3)0X|613ct;=HTJI+g!!L)>Mkt9d34O#4p|D26VO-@Np
z7qQ=Eu`4^yN|QlH$FZxj*wyVP)6*iX{RK+O>n*2}Mo9%}@&DW0X~^^VLhg*n6h$XX
z6P=g)u>UD}O7d<(oJf=qEmf8WH~UMKOUvIY)8`HTf-8PfU9KQGvl*O$Z|(2P9whJ);V|3%cUHDzF{Jc
z+i?{g8$ICWoIy~eCr2{W8X$f5b2rCe4sGV~h`gEunnl==5d()eZ4flUy*rMi?*b>H
zH`fjW%a^((9Md6gzou|z>HW!YD|5G}Yiu~5G6kaG`wZUJwQ?($R^XYo>3HF2H9R`|
zmRa|B1*f@r1q^SkWMn~z%JUY&?D+4wM>$c9mHZ7Z>Utu2^|yw~fYss)k8X%>2o-75
z=oRA8DNQ&bcnMB>70X2(|C-q&Z~(cWT>K;E2{&HzG)6|Ab}8LogCk?lf!&~dW`45|
zcsm2$xPPswy60ui*k~*~xcm|(FVdjvuOC8hhwI3*^mUv0Tn+Qo4KboB7J{|1F`()O
zH`~4(Pv#ZD&Z|pt`_ao#(C;{EFV$kEHJ*k+VeX7yT`pePh+ISTDiqCBrPbda=58r@
zLS%NEcy^FH1}2@yfFr$`qhdYUQ~xVE)B$@!^O-PlMPHoP&7=J>xs2&gWGB6cnE*?4CZ$y|K7_y|{VFac@HUq}b1X0u
zJh_LfZ^6&~Wk{M|DlQ+m4~{8q0;~AB;OV2}`s?rOK-O{{9FS+x*W(Fiug-_%#re=j
z_7wE#`
zoC=CddeD&VLtq&H9ye_7E;#S<3a##z;8nMW+`+tk=n%7gX_Vc$t|5y0E>-eXuZ-h
zurQkfUbk)0Z^U&bzrjT8dm$Jc+}b!9?#JEdwiSR)>U=KLVLi(9@?^BVS3*I(s@sFY
zbEr7(gu~YKNTy<(4n8#4#r=?;An}`X@N5}}0fU-x@;rWzGsfAcrs
zrtlVw_ti7@mKzyalYTH;s{uFDEKof$2TYA$qRdfyocB@}d;hW$?`W~GTZo+g@2k1s
zlzbeh#lu+&iugUw!@1|ypira=*?uc9NV5cwyc!E{Qsn3Zlh&qhZcM=g)!Mjyxfcj^
z?{Z0dwXv7DnMv`y2n_0hX5I<7SENn+VroGeDW@H*J-9_O>yef(MN0SXBBiaDJNd_v
z^1n}#Dwp%=_zicM)`U=+>wkuk>Ki#k#Fsv^OUvKu8^jybtv*R8AMj;?qh1|{a<4~>
zn$1lVXz_{Ee&FbcmVd{-q=CAz?`S%M9E{{+^feP&KkYRantu!CKeyp(eZuLoiAnUz
zZ>7Y!*A5s~Uy6seomlS6AsS&GAC%Js47~eS&IwG%%{&v2GUVnN9jver)
za=(~R6{Zn9zkUVk1w~lII|Ebq`I09i_ro!-v811^Avqb~zqg^q=0nh2_
zGVveq2TZXPqQ}BLC^v93=i4@rUdxZ7XPzF%%CPA)?74tO4St1Ee--RJa(uXCIw37z
z8ZWlpju$&GcdIXtm(NG>s+WA~xa4OvP*%m~$3^zJv9o-q5&)1nc$;aFl=F~TUxIi
z-2Gxc*^(LotJh1OTLuJ>?cc}}?adeQ^*SE4ycACFrklYEph%swF{0ox?)d2xbEN-I
zVDM`avupDpn3QKp>aLj(pEZ^6BrTEnEii@ZHp8Y_y#8SQav|q=_82Ps77D@Jc+mQ?
zC-wWK2RfQ7a2rqP5|`{{VDjuG=QLg!@8<;4=C|W;^jUxxrgNG1lJVPQ|HxI?<21U%
zN_?@p3G~X{Nb9cyI6+1ntf;SoO{&*%;tVx9;`~KC(W?;0G$dj}iU!qKd;>?FTf0YV
zff7AFv6$GIAB0nNi||my9QtgU4CKZ&1CF={ep=P|kiE
zHi)vOqp8^ddsGc5!<&OcxPU4VoK|-Rw@DggibGE_d;eVcP*V!4ldEuDjWHTNt3&pF
ze_XfnIv#pbfODp*(15Zaj2!JjC-v-BNivsHOxB+8iOr-;|Wpod88eWw5QP
z!1-l=V6wIr%xP-|xk5#%ej@?Y8hP0NW&?O$)&rGh9w{%qjH_NwBJ8D}WaR@bqLMtA
z>>ThKW0&rL4GuOm=CBjx`Nq=4v+StW>MO8do&hO3?}mwo2Ga3Yyy-NzvDCMu0e8Jz
zjHJ$(>M1@&EA2is?_Dpn^B7L1FLQ!#)YszUx-2LP;lXq+3{@8wbB=8x82WCWIQPwQ
z)Tqm29^O=g%s3S5So=}C&3t;*dkoz)#}qXWPJrgwCvjpgReJTe9L8(PKJom$qv?~H
z-l%$T0_VZ=qmliTu%?tpFaP50s$1^M{dDy?R9|o>5l?Li^R)$iAH5rT>6jCvtWo62
zQ$7+MXU41_<&K|Q43*n1BZtvkY?=mrq|tzOQ)bYWGw0(^F>3V1;5!(){~9RI8%SP`
za%6m3dr)4u2Zk7zp^?`-lHT?lrfjl+*{`!9@V*P#GHVtdyYPr}c+mpQJA2XxS@k%f
ztO2~Axsq|Fdr>Pi7Q76$fU?VD*ejz7%U%Cy>ic~iaTLAwiU*jRzg~ZfYiyI$F0Mbix0fo
z&O9jKlSLkD@k&r2zMsR8aM4jFo_mfzTwlRNeLoZKrQ|m`e|rG<+ujY`Y*9vLm})Td
zyVg}?Rf!(H)g<0{Tk;P0&AMn*K?
zi&<%~em2M4X)2|CHdJs${Z`-&^|RP-$#*ovPJ;^GPj^!dIE--U2_Dy-M@6?5fUd6q
z>H=+vw;+Og9x^6OtpaJydCEi_=t0A5J*ZXr02IsR(dw<|aONpBe6%1Pb0%2O$@W!X
zxN|O47LB8el$4qqGs~HRVSP!#l$khr;A*P9bUQwb@?owXD*{%bCzZzk>$gZFPx|6O
zQvSF2@9_VQ|D)RqNYmi8+zh4`uko|RmV1}A=cG&<#YioQBx(73P5L~&?sAeOC24;l
zC0!u3!)gN;5Boi-df1sqD~3tiLuJ=x>+bW9KF^>#+miI7_XXS1Txglwhp95zhOguK
z^cQW@_UG%xY*O&QVqel7-Pz9_Rm&{RT1qT-X1TSE&}_F~xYd2jzhd99d;3%R8!`8T
zH9&Z(*mdtbXwUzbdLQ2Uuh@6$-hNbXiG#KKn3D2{{n7H$g&b-5lFwOm*Y88U+)cXa
z_dlP5&iEaL72BI(j-?riuTdcnJ*JS&IR<3hPFWJTLznnmyaqAuHst>4wc_*=W9Cqr
z1;p9D1cx$1GW}jXw{L2y+WDJKEbmG%SBwKxHnV~$j^*xR%+ct3b&Q74V
z%@e7Ok|#A&{0<+n?)25`{doMfD$ZWegFaZY3T>j5@nCXk(*wV9WL)>7$y!^w?D4N?
zbt{lA3Twe*oE2T|B1;c*4cNBBj+QMu%Dwo`h!&MTMAPA_bXA@XPE_%yx<5{)r&hY+
zrOBBnlkqhjW~)n8$175&L_IoN{u+*c8b-GU4rof{oWXmOI_dpkGKnhJfqNI^$ihL!
zz@?d!@++2PN$DEcoYcbX-Dn7JYh6h14ZFcR|94mAR5P&a-+=Lc-vYfVN0J$D*Y(Zn
zFy@jOefA<2iswax-sx$u^UzM%v&DqG2|LZG-E`uXD6fFcYvZ8)Ts1RihZ^xSTF+$8
zJ_hcW_rU($?&LcC7D9e{3{&Dvh|?-{;!%GE#%npiubbY(erGFEwC6H_XQo(7(gN1Y
z!;v4ViTo4k%v2q9=0~&PcW!hwyhDI)z)ORQQqa|EbzsKqAZ;1V*(*hv+=HI9Lc|z
zM#Nx8g1q%e@unki0DBV`m`V1Qh@tVUJ)y5Al1V<1#7ucADanx`TOP<6Y0C_T=+yvMMCThVA-Sb
zjy_wN+4YOE!c^6!v6XJ%=$9@3akTjB0^b9o2639Mf
zCo<`V9@)20nN*+jCj%|d!$=1+qO>I(CZ8>FPMGipo_gdG{yqzGQ?vw6r~HUdv<8#P
z;CIk?WeD*#EOlE@lR(4l-SBHm7ra@u3>-Xu=EjHpfVLl=;)jBKJp9}Uwiq|zjN<9E
z^0zx~RVLrzp!f@Dm=(eCn~ZVzN>93RT^w#tInNm1=0l$sQKSdm6{h4ss9t^=Rs(YP|p5e9XI{17Gd?z#T3Rq*oY!dOdh8r&OU$+iu@QuS6@VG}f8^
z@RKEJeDDN(RAhL{eRx21cx$U4Wo?Nq^(z?C;LFXe
z-NQtPP3f~!#xAY1Jcz?7J$iCnJ-*p?7x#va0yg*ohTC3;jJE^H=zZ2=U2zOkw`?NG
zonc26cKA`fg_-oQM-+D})}8V<>eCSiOX0xjUod}vHka=Ho=Y6yL8hHCWWof5?MeU3F3_a}F8ZQ1O{tF{Cd$nq>@2v8Y&eOwklf*8xZgIxoA(aFk6q%1iA54!Nc*CL8k
z>L-xo-P6cXy+ZO1%ZYiNGX1Il2QFPOn7*5L2=cO3-4ZU3L@uiaBQ_2qs!N73@f&lv
zj3Lpu@#+>RU&RB{1q;ZSRRz#{W+1s=$uOmowXdtXlj-5$Y4n7r2W=6IBELOtfjvWw
zaO0i>_;9Ezj#k%f_gC5R%RK(q_g^2EcmLn>AA1$)jN^1|%mnFrzO?-P2)5zre_;e$
zw=X`XMW@DvC&bMNm+l7&jf?Vf*Zty%ws9J2>tyF*XEW5n#?isW*~ZDn$=1o)(caO{
z-pOUCvxCdfp|%drPEwU;uCF>_CQ&FYl6??Tv7dH6bzavskY4C=biWVkb6)4wSi>$X
zMzR$B_qI`W>2DrSMpFC*3euf5JWa`uw0=k3{{uXIDZu~$
literal 0
HcmV?d00001
diff --git a/code/perception/src/traffic_light_detection/params.yaml b/code/perception/src/traffic_light_detection/params.yaml
index 7b90494c..efce6997 100644
--- a/code/perception/src/traffic_light_detection/params.yaml
+++ b/code/perception/src/traffic_light_detection/params.yaml
@@ -1,3 +1,3 @@
train:
- epochs: 100
+ epochs: 20
batch_size: 32
diff --git a/code/perception/src/traffic_light_detection/src/traffic_light_config.py b/code/perception/src/traffic_light_detection/src/traffic_light_config.py
index 6187e901..e1c720c2 100644
--- a/code/perception/src/traffic_light_detection/src/traffic_light_config.py
+++ b/code/perception/src/traffic_light_detection/src/traffic_light_config.py
@@ -17,7 +17,7 @@ def __init__(self):
# Amount of epochs to train
# One epoch: Training with all images from training dataset once
self.NUM_WORKERS = 4
- self.NUM_CLASSES = 4 # Traffic light states: green, yellow, red, back
+ self.NUM_CLASSES = 5 # States: green, yellow, red, back, side
self.NUM_CHANNELS = 3 # RGB encoded images
# Inference
diff --git a/code/perception/src/traffic_light_detection/src/traffic_light_detection/traffic_light_inference.py b/code/perception/src/traffic_light_detection/src/traffic_light_detection/traffic_light_inference.py
index 4631660b..ada59c5f 100644
--- a/code/perception/src/traffic_light_detection/src/traffic_light_detection/traffic_light_inference.py
+++ b/code/perception/src/traffic_light_detection/src/traffic_light_detection/traffic_light_inference.py
@@ -2,11 +2,12 @@
import torch.cuda
import torchvision.transforms as t
-from data_generation.transforms import Normalize, ResizeAndPadToSquare, \
- load_image
-from traffic_light_detection.classification_model import ClassificationModel
+from traffic_light_detection.src.traffic_light_detection.transforms \
+ import Normalize, ResizeAndPadToSquare, load_image
+from traffic_light_detection.src.traffic_light_detection.classification_model \
+ import ClassificationModel
from torchvision.transforms import ToTensor
-from traffic_light_config import TrafficLightConfig
+from traffic_light_detection.src.traffic_light_config import TrafficLightConfig
def parse_args():
@@ -49,7 +50,8 @@ def __init__(self, model_path):
self.class_dict = {0: 'Backside',
1: 'Green',
2: 'Red',
- 3: 'Yellow'}
+ 3: 'Side',
+ 4: 'Yellow'}
def __call__(self, img):
"""
diff --git a/code/perception/src/traffic_light_detection/src/traffic_light_detection/traffic_light_training.py b/code/perception/src/traffic_light_detection/src/traffic_light_detection/traffic_light_training.py
index 7f15e3c9..1322d024 100644
--- a/code/perception/src/traffic_light_detection/src/traffic_light_detection/traffic_light_training.py
+++ b/code/perception/src/traffic_light_detection/src/traffic_light_detection/traffic_light_training.py
@@ -11,8 +11,8 @@
import sys
import os
sys.path.append(os.path.abspath(sys.path[0] + '/..'))
-from data_generation.transforms import Normalize, ResizeAndPadToSquare, \
- load_image # noqa: E402
+from traffic_light_detection.transforms import Normalize, \
+ ResizeAndPadToSquare, load_image # noqa: E402
from data_generation.weights_organizer import WeightsOrganizer # noqa: E402
from traffic_light_detection.classification_model import ClassificationModel \
# noqa: E402
diff --git a/code/perception/src/traffic_light_detection/src/data_generation/transforms.py b/code/perception/src/traffic_light_detection/src/traffic_light_detection/transforms.py
similarity index 100%
rename from code/perception/src/traffic_light_detection/src/data_generation/transforms.py
rename to code/perception/src/traffic_light_detection/src/traffic_light_detection/transforms.py
diff --git a/code/perception/src/traffic_light_node.py b/code/perception/src/traffic_light_node.py
new file mode 100755
index 00000000..6f67b5b1
--- /dev/null
+++ b/code/perception/src/traffic_light_node.py
@@ -0,0 +1,57 @@
+#!/usr/bin/env python3
+
+from ros_compatibility.node import CompatibleNode
+import ros_compatibility as roscomp
+from rospy.numpy_msg import numpy_msg
+from sensor_msgs.msg import Image as ImageMsg
+from perception.msg import TrafficLightState
+from cv_bridge import CvBridge
+from traffic_light_detection.src.traffic_light_detection.traffic_light_inference \
+ import TrafficLightInference # noqa: E501
+
+
+class TrafficLightNode(CompatibleNode):
+ def __init__(self, name, **kwargs):
+ super().__init__(name, **kwargs)
+ # general setup
+ self.bridge = CvBridge()
+ self.role_name = self.get_param("role_name", "hero")
+ self.side = self.get_param("side", "Center")
+ self.classifier = TrafficLightInference(self.get_param("model", ""))
+
+ # publish / subscribe setup
+ self.setup_camera_subscriptions()
+ self.setup_traffic_light_publishers()
+
+ def setup_camera_subscriptions(self):
+ self.new_subscription(
+ msg_type=numpy_msg(ImageMsg),
+ callback=self.handle_camera_image,
+ topic=f"/paf/{self.role_name}/{self.side}/segmented_traffic_light",
+ qos_profile=1
+ )
+
+ def setup_traffic_light_publishers(self):
+ self.traffic_light_publisher = self.new_publisher(
+ msg_type=TrafficLightState,
+ topic=f"/paf/{self.role_name}/{self.side}/traffic_light_state",
+ qos_profile=1
+ )
+
+ def handle_camera_image(self, image):
+ result = self.classifier(self.bridge.imgmsg_to_cv2(image))
+
+ # 1: Green, 2: Red, 4: Yellow, 0: Unknown
+ msg = TrafficLightState()
+ msg.state = result if result in [1, 2, 4] else 0
+
+ self.traffic_light_publisher.publish(msg)
+
+ def run(self):
+ self.spin()
+
+
+if __name__ == "__main__":
+ roscomp.init("TrafficLightNode")
+ node = TrafficLightNode("TrafficLightNode")
+ node.run()
diff --git a/code/perception/src/vision_node.py b/code/perception/src/vision_node.py
index 06dab72a..e9681a4c 100755
--- a/code/perception/src/vision_node.py
+++ b/code/perception/src/vision_node.py
@@ -85,6 +85,7 @@ def __init__(self, name, **kwargs):
# publish / subscribe setup
self.setup_camera_subscriptions()
self.setup_camera_publishers()
+ self.setup_traffic_light_publishers()
self.image_msg_header = Header()
self.image_msg_header.frame_id = "segmented_image_frame"
@@ -127,6 +128,13 @@ def setup_camera_publishers(self):
qos_profile=1
)
+ def setup_traffic_light_publishers(self):
+ self.traffic_light_publisher = self.new_publisher(
+ msg_type=numpy_msg(ImageMsg),
+ topic=f"/paf/{self.role_name}/{self.side}/segmented_traffic_light",
+ qos_profile=1
+ )
+
def handle_camera_image(self, image):
# free up cuda memory
if self.device == "cuda":
@@ -140,12 +148,10 @@ def handle_camera_image(self, image):
# publish image to rviz
img_msg = self.bridge.cv2_to_imgmsg(vision_result,
- encoding="passthrough")
+ encoding="rgb8")
img_msg.header = image.header
self.publisher.publish(img_msg)
- pass
-
def predict_torch(self, image):
self.model.eval()
cv_image = self.bridge.imgmsg_to_cv2(img_msg=image,
@@ -174,10 +180,35 @@ def predict_ultralytics(self, image):
desired_encoding='passthrough')
cv_image = cv2.cvtColor(cv_image, cv2.COLOR_RGB2BGR)
- output = self.model(cv_image)
+ output = self.model(cv_image, half=True, verbose=False)
+
+ if 9 in output[0].boxes.cls:
+ self.process_traffic_lights(output[0], cv_image, image.header)
return output[0].plot()
+ def process_traffic_lights(self, prediction, cv_image, image_header):
+ indices = (prediction.boxes.cls == 9).nonzero().squeeze().cpu().numpy()
+ indices = np.asarray([indices]) if indices.size == 1 else indices
+
+ min_x = 550
+ max_x = 700
+ min_prob = 0.35
+
+ for index in indices:
+ box = prediction.boxes.cpu().data.numpy()[index]
+
+ if box[0] < min_x or box[2] > max_x or box[4] < min_prob:
+ continue
+
+ box = box[0:4].astype(int)
+ segmented = cv_image[box[1]:box[3], box[0]:box[2]]
+
+ traffic_light_image = self.bridge.cv2_to_imgmsg(segmented,
+ encoding="rgb8")
+ traffic_light_image.header = image_header
+ self.traffic_light_publisher.publish(traffic_light_image)
+
def create_mask(self, input_image, model_output):
output_predictions = torch.argmax(model_output, dim=0)
for i in range(21):
diff --git a/code/planning/src/behavior_agent/behavior_tree.py b/code/planning/src/behavior_agent/behavior_tree.py
index 990b9e1e..cacf8619 100755
--- a/code/planning/src/behavior_agent/behavior_tree.py
+++ b/code/planning/src/behavior_agent/behavior_tree.py
@@ -1,21 +1,19 @@
#!/usr/bin/env python
import functools
-# import behavior_agent
-import py_trees
from py_trees.behaviours import Running
import py_trees_ros
-import py_trees.console as console
import rospy
import sys
import behaviours
from py_trees.composites import Parallel, Selector, Sequence
-from py_trees.decorators import Inverter
"""
Source: https://github.com/ll7/psaf2
"""
+# flake8: noqa: E501
+
def grow_a_tree(role_name):
@@ -61,7 +59,7 @@ def grow_a_tree(role_name):
("Leave Change")
])
]),
-
+
]),
behaviours.maneuvers.Cruise("Cruise")
])
diff --git a/code/planning/src/behavior_agent/behaviours/__init__.py b/code/planning/src/behavior_agent/behaviours/__init__.py
index f8984b65..e69de29b 100755
--- a/code/planning/src/behavior_agent/behaviours/__init__.py
+++ b/code/planning/src/behavior_agent/behaviours/__init__.py
@@ -1,2 +0,0 @@
-from . import topics2blackboard, road_features
-from . import intersection, traffic_objects, maneuvers, meta, lane_change
\ No newline at end of file
diff --git a/doc/06_perception/13_traffic_light_detection.md b/doc/06_perception/13_traffic_light_detection.md
new file mode 100644
index 00000000..c627e3cf
--- /dev/null
+++ b/doc/06_perception/13_traffic_light_detection.md
@@ -0,0 +1,37 @@
+# Traffic Light Detection
+
+## Vision Node
+
+For each analyzed image, the Vision Node checks whether an object with class ID 9 (traffic light) was detected.
+If so, `process_traffic_lights()` is called, which uses the bounding box of the prediction to crop the detected object (i.e. the traffic light) out of the image.
+
+The cropped image is only published to `"/paf/{self.role_name}/{self.side}/segmented_traffic_light"` if the bounding box lies between `min_x` and `max_x` and the detection confidence exceeds `min_prob`.
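+
+As a minimal sketch of this filter (not the exact `vision_node.py` code; it assumes YOLO-style box rows `[x1, y1, x2, y2, confidence, class_id]` and uses illustrative names):
+
+```python
+import numpy as np
+
+MIN_X, MAX_X, MIN_PROB = 550, 700, 0.35  # thresholds as used in vision_node.py
+
+def crop_traffic_lights(boxes, cv_image):
+    """Return crops of all traffic lights that pass the position/confidence filter."""
+    crops = []
+    for box in np.asarray(boxes):
+        if int(box[5]) != 9:  # keep only traffic lights (class ID 9)
+            continue
+        if box[0] < MIN_X or box[2] > MAX_X or box[4] < MIN_PROB:
+            continue  # outside the horizontal window or too uncertain
+        x1, y1, x2, y2 = box[0:4].astype(int)
+        crops.append(cv_image[y1:y2, x1:x2])  # cut out the traffic light region
+    return crops
+```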
+
+## TrafficLightNode
+
+The `traffic_light_node.py` file is part of a larger system that handles traffic light detection. It contains the class `TrafficLightNode`, which inherits from `CompatibleNode`.
+
+This class is responsible for setting up the traffic light detection system and handling the incoming camera images.
+
+### Initialization
+
+The `TrafficLightNode` class is initialized with a name and additional keyword arguments. During initialization, it sets up the following:
+
+- A `CvBridge` instance for converting between ROS image messages and OpenCV images.
+- The role name and side, which are parameters that can be set externally.
+- A `TrafficLightInference` instance for performing traffic light detection.
+
+### Methods
+
+#### `setup_camera_subscriptions()`
+
+This method sets up a subscription to the camera images. It subscribes to the topic `"/paf/{self.role_name}/{self.side}/segmented_traffic_light"` and calls the `handle_camera_image` method whenever a new image message is received.
+
+#### `setup_traffic_light_publishers()`
+
+This method sets up a publisher for the traffic light state. It publishes to the topic `"/paf/{self.role_name}/{self.side}/traffic_light_state"` using the `TrafficLightState.msg` format, which encodes the traffic light state as an int8-based enum.
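+
+For illustration, a downstream consumer could subscribe to this topic and map the int8 state back to a label. This is only a sketch; the node name is hypothetical and the topic assumes `role_name="hero"` and `side="Center"`:
+
+```python
+#!/usr/bin/env python3
+import rospy
+from perception.msg import TrafficLightState
+
+# Constants (GREEN, RED, YELLOW, UNKNOWN) are generated from TrafficLightState.msg
+LABELS = {TrafficLightState.GREEN: "green",
+          TrafficLightState.RED: "red",
+          TrafficLightState.YELLOW: "yellow",
+          TrafficLightState.UNKNOWN: "unknown"}
+
+def on_state(msg):
+    rospy.loginfo("Traffic light state: %s", LABELS.get(msg.state, "unknown"))
+
+rospy.init_node("traffic_light_state_listener")  # hypothetical node name
+rospy.Subscriber("/paf/hero/Center/traffic_light_state",
+                 TrafficLightState, on_state, queue_size=1)
+rospy.spin()
+```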
+
+#### `handle_camera_image(image)`
+
+This method is called whenever a new image message is received. It runs the `TrafficLightInference` classifier (from `traffic_light_inference.py`) on the image and publishes the result.
+The result is a `TrafficLightState` message where the state is set to the detected traffic light state (1 for green, 2 for red, 4 for yellow, 0 for unknown).
diff --git a/doc/06_perception/experiments/object-detection-model_evaluation/yolo.py b/doc/06_perception/experiments/object-detection-model_evaluation/yolo.py
index f7ff342d..39d727b7 100644
--- a/doc/06_perception/experiments/object-detection-model_evaluation/yolo.py
+++ b/doc/06_perception/experiments/object-detection-model_evaluation/yolo.py
@@ -1,5 +1,8 @@
'''
-Docs: https://docs.ultralytics.com/modes/predict/, https://docs.ultralytics.com/tasks/detect/#models, https://docs.ultralytics.com/models/yolo-nas
+Docs:
+https://docs.ultralytics.com/modes/predict/
+https://docs.ultralytics.com/tasks/detect/#models
+https://docs.ultralytics.com/models/yolo-nas
'''
import os
@@ -35,6 +38,7 @@
image_path = os.path.join(IMAGE_BASE_FOLDER, IMAGES_FOR_TEST[p])
img = Image.open(image_path)
- _ = model.predict(source=img, save=True, save_conf=True, line_width=1, half=True)
+ _ = model.predict(source=img, save=True, save_conf=True,
+ line_width=1, half=True)
del model