diff --git a/trail_detection_node/trail_detection_node/__pycache__/v2_trailDetectionNode.cpython-310.pyc b/trail_detection_node/trail_detection_node/__pycache__/v2_trailDetectionNode.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..da38997b960fee7a3d0a12059864382d75072af4
GIT binary patch
diff --git a/trail_detection_node/trail_detection_node/__pycache__/vgg.cpython-39.pyc b/trail_detection_node/trail_detection_node/__pycache__/vgg.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7ff37c2ace5ee8be1c9f6edf0d90753bd6111841
GIT binary patch
diff --git a/trail_detection_node/trail_detection_node/__pycache__/visualizer_v1_trailDetectionNode.cpython-310.pyc b/trail_detection_node/trail_detection_node/__pycache__/visualizer_v1_trailDetectionNode.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cd4e32d62bc9b3ba483c8792aec0f75c18dbad32
GIT binary patch
diff --git a/trail_detection_node/trail_detection_node/__pycache__/visualizer_v2_trailDetectionNode.cpython-310.pyc b/trail_detection_node/trail_detection_node/__pycache__/visualizer_v2_trailDetectionNode.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1e2bdc9339a34b12ccf04eccec095a727c47c602
GIT binary patch
diff --git a/trail_detection_node/trail_detection_node/model_loader.py b/trail_detection_node/trail_detection_node/model_loader.py
new file mode 100644
index 00000000..908c74f6
--- /dev/null
+++ b/trail_detection_node/trail_detection_node/model_loader.py
@@ -0,0 +1,99 @@
+import torch.nn as nn
+import torch.nn.functional as F
+from .vgg import vgg16
+
+class FCN8s(nn.Module):
+ def __init__(self, nclass, backbone='vgg16', aux=False, pretrained_base=True, norm_layer=nn.BatchNorm2d, **kwargs):
+ super(FCN8s, self).__init__()
+ self.aux = aux
+ if backbone == 'vgg16':
+ self.pretrained = vgg16(pretrained=pretrained_base).features
+ else:
+ raise RuntimeError('unknown backbone: {}'.format(backbone))
+ self.pool3 = nn.Sequential(*self.pretrained[:17])
+ self.pool4 = nn.Sequential(*self.pretrained[17:24])
+ self.pool5 = nn.Sequential(*self.pretrained[24:])
+ self.head = _FCNHead(512, nclass, norm_layer)
+ self.score_pool3 = nn.Conv2d(256, nclass, 1)
+ self.score_pool4 = nn.Conv2d(512, nclass, 1)
+ if aux:
+ self.auxlayer = _FCNHead(512, nclass, norm_layer)
+
+ self.__setattr__('exclusive',
+ ['head', 'score_pool3', 'score_pool4', 'auxlayer'] if aux else ['head', 'score_pool3',
+ 'score_pool4'])
+
+ def forward(self, x):
+ pool3 = self.pool3(x)
+ pool4 = self.pool4(pool3)
+ pool5 = self.pool5(pool4)
+
+ outputs = []
+ score_fr = self.head(pool5)
+
+ score_pool4 = self.score_pool4(pool4)
+ score_pool3 = self.score_pool3(pool3)
+
+ upscore2 = F.interpolate(score_fr, score_pool4.size()[2:], mode='bilinear', align_corners=True)
+ fuse_pool4 = upscore2 + score_pool4
+
+ upscore_pool4 = F.interpolate(fuse_pool4, score_pool3.size()[2:], mode='bilinear', align_corners=True)
+ fuse_pool3 = upscore_pool4 + score_pool3
+
+ out = F.interpolate(fuse_pool3, x.size()[2:], mode='bilinear', align_corners=True)
+ outputs.append(out)
+
+ if self.aux:
+ auxout = self.auxlayer(pool5)
+ auxout = F.interpolate(auxout, x.size()[2:], mode='bilinear', align_corners=True)
+ outputs.append(auxout)
+
+ return tuple(outputs)
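+
+# Minimal usage sketch (illustration only; assumes `import torch` and untrained weights):
+#   model = FCN8s(nclass=6, backbone='vgg16', pretrained_base=False)
+#   out = model(torch.randn(1, 3, 480, 480))[0]  # shape (1, nclass, 480, 480)
+# The detection nodes then take argmax over dim 1 to obtain a per-pixel class map.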
+
+class FCN32s(nn.Module):
+ """There are some difference from original fcn"""
+
+ def __init__(self, nclass, backbone='vgg16', aux=False, pretrained_base=True,
+ norm_layer=nn.BatchNorm2d, **kwargs):
+ super(FCN32s, self).__init__()
+ self.aux = aux
+ if backbone == 'vgg16':
+ self.pretrained = vgg16(pretrained=pretrained_base).features
+ else:
+ raise RuntimeError('unknown backbone: {}'.format(backbone))
+ self.head = _FCNHead(512, nclass, norm_layer)
+ if aux:
+ self.auxlayer = _FCNHead(512, nclass, norm_layer)
+
+ self.__setattr__('exclusive', ['head', 'auxlayer'] if aux else ['head'])
+
+ def forward(self, x):
+ size = x.size()[2:]
+ pool5 = self.pretrained(x)
+
+ outputs = []
+ out = self.head(pool5)
+ out = F.interpolate(out, size, mode='bilinear', align_corners=True)
+ outputs.append(out)
+
+ if self.aux:
+ auxout = self.auxlayer(pool5)
+ auxout = F.interpolate(auxout, size, mode='bilinear', align_corners=True)
+ outputs.append(auxout)
+
+ return tuple(outputs)
+
+class _FCNHead(nn.Module):
+ def __init__(self, in_channels, channels, norm_layer=nn.BatchNorm2d, **kwargs):
+ super(_FCNHead, self).__init__()
+ inter_channels = in_channels // 4
+ self.block = nn.Sequential(
+ nn.Conv2d(in_channels, inter_channels, 3, padding=1, bias=False),
+ norm_layer(inter_channels),
+ nn.ReLU(inplace=True),
+ nn.Dropout(0.1),
+ nn.Conv2d(inter_channels, channels, 1)
+ )
+
+ def forward(self, x):
+ return self.block(x)
\ No newline at end of file
diff --git a/trail_detection_node/trail_detection_node/setup.py b/trail_detection_node/trail_detection_node/setup.py
new file mode 100644
index 00000000..ff6a39fd
--- /dev/null
+++ b/trail_detection_node/trail_detection_node/setup.py
@@ -0,0 +1,125 @@
+from setuptools import find_packages, setup
+
+
+def readme():
+ with open('README.md', encoding='utf-8') as f:
+ content = f.read()
+ return content
+
+
+version_file = 'mmseg/version.py'
+
+
+def get_version():
+ with open(version_file, 'r') as f:
+ exec(compile(f.read(), version_file, 'exec'))
+ return locals()['__version__']
+
+
+def parse_requirements(fname='requirements.txt', with_version=True):
+ """Parse the package dependencies listed in a requirements file but strips
+ specific versioning information.
+
+ Args:
+ fname (str): path to requirements file
+        with_version (bool, default=True): if True include version specs
+
+ Returns:
+ List[str]: list of requirements items
+
+ CommandLine:
+ python -c "import setup; print(setup.parse_requirements())"
+ """
+ import sys
+ from os.path import exists
+ import re
+ require_fpath = fname
+
+ def parse_line(line):
+ """Parse information from a line in a requirements text file."""
+ if line.startswith('-r '):
+ # Allow specifying requirements in other files
+ target = line.split(' ')[1]
+ for info in parse_require_file(target):
+ yield info
+ else:
+ info = {'line': line}
+ if line.startswith('-e '):
+ info['package'] = line.split('#egg=')[1]
+ else:
+ # Remove versioning from the package
+ pat = '(' + '|'.join(['>=', '==', '>']) + ')'
+ parts = re.split(pat, line, maxsplit=1)
+ parts = [p.strip() for p in parts]
+
+ info['package'] = parts[0]
+ if len(parts) > 1:
+ op, rest = parts[1:]
+ if ';' in rest:
+ # Handle platform specific dependencies
+ # http://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-platform-specific-dependencies
+ version, platform_deps = map(str.strip,
+ rest.split(';'))
+ info['platform_deps'] = platform_deps
+ else:
+ version = rest # NOQA
+ info['version'] = (op, version)
+ yield info
+
+ def parse_require_file(fpath):
+ with open(fpath, 'r') as f:
+ for line in f.readlines():
+ line = line.strip()
+ if line and not line.startswith('#'):
+ for info in parse_line(line):
+ yield info
+
+ def gen_packages_items():
+ if exists(require_fpath):
+ for info in parse_require_file(require_fpath):
+ parts = [info['package']]
+ if with_version and 'version' in info:
+ parts.extend(info['version'])
+ if not sys.version.startswith('3.4'):
+ # apparently package_deps are broken in 3.4
+ platform_deps = info.get('platform_deps')
+ if platform_deps is not None:
+ parts.append(';' + platform_deps)
+ item = ''.join(parts)
+ yield item
+
+ packages = list(gen_packages_items())
+ return packages
+
+
+if __name__ == '__main__':
+ setup(
+ name='mmsegmentation',
+ version=get_version(),
+ description='Open MMLab Semantic Segmentation Toolbox and Benchmark',
+        long_description=readme(),
+        long_description_content_type='text/markdown',
+ author='MMSegmentation Authors',
+ author_email='openmmlab@gmail.com',
+ keywords='computer vision, semantic segmentation',
+ url='http://github.com/open-mmlab/mmsegmentation',
+ packages=find_packages(exclude=('configs', 'tools', 'demo')),
+ classifiers=[
+ 'Development Status :: 4 - Beta',
+ 'License :: OSI Approved :: Apache Software License',
+ 'Operating System :: OS Independent',
+ 'Programming Language :: Python :: 3.6',
+ 'Programming Language :: Python :: 3.7',
+ 'Programming Language :: Python :: 3.8',
+ ],
+ license='Apache License 2.0',
+ setup_requires=parse_requirements('requirements/build.txt'),
+ tests_require=parse_requirements('requirements/tests.txt'),
+ install_requires=parse_requirements('requirements/runtime.txt'),
+ extras_require={
+ 'all': parse_requirements('requirements.txt'),
+ 'tests': parse_requirements('requirements/tests.txt'),
+ 'build': parse_requirements('requirements/build.txt'),
+ 'optional': parse_requirements('requirements/optional.txt'),
+ },
+ ext_modules=[],
+ zip_safe=False)
diff --git a/trail_detection_node/trail_detection_node/v1_trailDetectionNode.py b/trail_detection_node/trail_detection_node/v1_trailDetectionNode.py
new file mode 100644
index 00000000..c20f7df5
--- /dev/null
+++ b/trail_detection_node/trail_detection_node/v1_trailDetectionNode.py
@@ -0,0 +1,278 @@
+import torch
+from .model_loader import FCN8s
+from PIL import Image as ImagePIL
+from torchvision import transforms
+import cv2
+
+import rclpy
+from rclpy.node import Node
+from geometry_msgs.msg import PoseStamped
+from sensor_msgs.msg import Image, PointCloud2
+import sensor_msgs_py.point_cloud2 as pc2
+import numpy as np
+import math
+from cv_bridge import CvBridge
+
+'''
+ The transformation matrix as well as the coordinate conversion and depth estimation functions are copied from human_detection_node
+'''
+camera_transformation_k = np.array([
+ [628.5359544,0,676.9575694],
+ [0,627.7249542,532.7206716],
+ [0,0,1]])
+
+rotation_matrix = np.array([
+ [-0.007495781893,-0.0006277316155,0.9999717092],
+ [-0.9999516401,-0.006361853422,-0.007499625104],
+ [0.006366381192,-0.9999795662,-0.0005800141927]])
+
+rotation_matrix = rotation_matrix.T
+
+translation_vector = np.array([-0.06024059837, -0.08180891509, -0.3117851288])
+image_width=1280
+image_height=1024
+
+def convert_to_lidar_frame(uv_coordinate):
+ """
+ convert 2d camera coordinate + depth into 3d lidar frame
+ """
+ point_cloud = np.empty( (3,) , dtype=float)
+ point_cloud[2] = uv_coordinate[2]
+ point_cloud[1] = ( image_height - uv_coordinate[1] )*point_cloud[2]
+ point_cloud[0] = uv_coordinate[0]*point_cloud[2]
+
+ inverse_camera_transformation_k = np.linalg.inv(camera_transformation_k)
+ inverse_rotation_matrix = np.linalg.inv(rotation_matrix)
+ point_cloud = inverse_camera_transformation_k @ point_cloud
+ point_cloud = inverse_rotation_matrix @ (point_cloud-translation_vector)
+ return point_cloud
+
+def convert_to_camera_frame(point_cloud):
+ """
+ convert 3d lidar data into 2d coordinate of the camera frame + depth
+ """
+ length = point_cloud.shape[0]
+ translation = np.tile(translation_vector, (length, 1)).T
+
+ point_cloud = point_cloud.T
+ point_cloud = rotation_matrix@point_cloud + translation
+ point_cloud = camera_transformation_k @ point_cloud
+
+ uv_coordinate = np.empty_like(point_cloud)
+
+ """
+    uv = [x/z, y/z, z]; the image y-axis points the opposite way, so the projected y is subtracted from image_height
+ """
+ uv_coordinate[0] = point_cloud[0] / point_cloud[2]
+ uv_coordinate[1] = image_height - point_cloud[1] / point_cloud[2]
+ uv_coordinate[2] = point_cloud[2]
+
+ uv_depth = uv_coordinate[2, :]
+ filtered_uv_coordinate = uv_coordinate[:, uv_depth >= 0]
+ return filtered_uv_coordinate
+
+def estimate_depth(x, y, np_2d_array):
+ """
+    estimate the depth by finding points closest to x,y in the 2d array
+ """
+ # Calculate the distance between each point and the target coordinates (x, y)
+ distances_sq = (np_2d_array[0,:] - x) ** 2 + (np_2d_array[1,:] - y) ** 2
+
+ # Find the indices of the k nearest points
+ k = 5 # Number of nearest neighbors we want
+ closest_indices = np.argpartition(distances_sq, k)[:k]
+ pixel_distance_threshold = 2000
+
+ valid_indices = [idx for idx in closest_indices if distances_sq[idx]<=pixel_distance_threshold]
+ if len(valid_indices) == 0:
+        # lidar points usually disappear around 0.4m
+ distance_where_lidar_stops_working = -1
+ return distance_where_lidar_stops_working
+
+ filtered_indices = np.array(valid_indices)
+ # Get the depth value of the closest point
+ closest_depths = np_2d_array[2,filtered_indices]
+
+ return np.mean(closest_depths)
+
+def load_model(device):
+ model = FCN8s(nclass=6, backbone='vgg16', pretrained_base=True, pretrained=True)
+    model.load_state_dict(torch.load('src/trail_detection_node/trail_detection_node/model/fcn8s_vgg16_pascal_voc.pth', map_location=device))
+ model = model.to(device)
+ print('Finished loading model!')
+
+ return model
+
+def find_route(model, device, cv_image):
+ PIL_image = ImagePIL.fromarray(cv2.cvtColor(cv_image, cv2.COLOR_BGR2RGB))
+ transform = transforms.Compose([
+ transforms.ToTensor(),
+ transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
+ ])
+ image = transform(PIL_image).unsqueeze(0).to(device)
+ with torch.no_grad():
+ output = model(image)
+ # pred is prediction generated by the model, route is the variable for the center line
+ pred = torch.argmax(output[0], 1).squeeze(0).cpu().data.numpy()
+ pred[pred != 1] = 0 # only see the trail
+ pred[pred == 1] = 255
+ pred = np.array(pred, dtype=np.uint8)
+ route = np.zeros_like(pred)
+ # calculate the center line by taking the average
+ row_num = 0
+ for row in pred:
+ white_pixels = list(np.nonzero(row)[0])
+ if white_pixels:
+ average = (white_pixels[0] + white_pixels[-1]) / 2
+ route[row_num][round(average)] = 255
+ row_num = row_num + 1
+ return route
+
+'''
+    The traildetector node has two subscriptions (lidar and camera) and one publisher (trail position). After it receives msgs from both lidar and camera,
+    it detects the trail in the image and publishes the corresponding lidar position as the trail location.
+    The msgs are synchronized before processing, using buffers and the sync function.
+    To find the path, the node processes the image, finds a line to follow (by taking the average of the left and right edges of the path), estimates the depth from nearby lidar points,
+    and chooses the closest point as the goal.
+    V1_traildetection assumes that the path points forward and that there is only one path in front.
+'''
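+# Topic contract (taken from the code below): subscribes to sensor_msgs/Image on
+# 'camera' and sensor_msgs/PointCloud2 on 'velodyne_points', and publishes a
+# geometry_msgs/PoseStamped goal on 'trail_location' in the "velodyne" frame.
+# A typical invocation might look like the following (the executable name is an
+# assumption; the real entry point lives in the package's setup.py, not shown here):
+#   ros2 run trail_detection_node v1_trailDetectionNode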
+class trailDetector(Node):
+ def __init__(self, model, device):
+ super().__init__('trail_detector')
+        # define trail publisher
+ self.trail_publisher = self.create_publisher(
+ PoseStamped,
+ 'trail_location',
+ 10)
+
+ # define camera subscription
+ self.camera_subscription = self.create_subscription(
+ Image,
+ 'camera',
+ self.camera_callback,
+ 10)
+        self.camera_subscription  # prevent unused variable warning
+
+ # define lidar subscription
+ self.lidar_subscription = self.create_subscription(
+ PointCloud2,
+ 'velodyne_points',
+ self.lidar_callback,
+ 10)
+        self.lidar_subscription  # prevent unused variable warning
+
+ self.bridge = CvBridge()
+
+ # load model and device
+ self.model = model
+ self.device = device
+
+ # buffers to hold msgs for sync
+ self.buffer_size = 30 # 30 is a random choice, to be discussed
+ self.lidar_buffer = []
+ self.camera_buffer = []
+
+ def camera_callback(self, msg):
+ if len(self.camera_buffer) >= self.buffer_size:
+ self.camera_buffer.pop(0)
+ self.camera_buffer.append(msg)
+ self.sync()
+
+ def lidar_callback(self, msg):
+ if len(self.lidar_buffer) >= self.buffer_size:
+ self.lidar_buffer.pop(0)
+ self.lidar_buffer.append(msg)
+ self.sync()
+
+ def sync(self):
+        # if either sensor has no msg yet, return
+ if not self.lidar_buffer or not self.camera_buffer:
+ return
+
+ while self.lidar_buffer and self.camera_buffer:
+ lidar_msg = self.lidar_buffer[0]
+ camera_msg = self.camera_buffer[0]
+
+            time_tolerance = 5000000 # nanosec, random choice to be discussed
+            # compare full timestamps (sec + nanosec); the nanosec field alone wraps every second
+            lidar_time = lidar_msg.header.stamp.sec * 1000000000 + lidar_msg.header.stamp.nanosec
+            camera_time = camera_msg.header.stamp.sec * 1000000000 + camera_msg.header.stamp.nanosec
+            time_difference = abs(lidar_time - camera_time)
+            print(time_difference)
+
+            # if the difference is within tolerance, pass the pair to trail_callback, otherwise discard the older msg
+            if time_difference <= time_tolerance:
+                self.lidar_buffer.pop(0)
+                self.camera_buffer.pop(0)
+                self.get_logger().info("msgs received!")
+                self.trail_callback(lidar_msg, camera_msg)
+            elif lidar_time > camera_time:
+                self.camera_buffer.pop(0)
+            else:
+                self.lidar_buffer.pop(0)
+
+ def trail_callback(self, lidar_msg, camera_msg):
+ # process lidar msg
+ point_gen = pc2.read_points(
+ lidar_msg, field_names=(
+ "x", "y", "z"), skip_nans=True)
+ points = [[x, y, z] for x, y, z in point_gen]
+ points = np.array(points)
+ points2d = convert_to_camera_frame(points)
+
+ # process camera msg
+ cv_image = self.bridge.imgmsg_to_cv2(camera_msg, desired_encoding='passthrough')
+ route = find_route(self.model, self.device, cv_image)
+ route_indices = list(zip(*np.nonzero(route)))
+
+ if not route_indices:
+ print("No centerline found!")
+ return
+
+ # find the corresponding lidar points using the center line pixels
+ filtered_3dPoints = []
+ for index in route_indices:
+ point = []
+ point.append(index[0])
+ point.append(index[1])
+ point.append(estimate_depth(index[0], index[1], points2d))
+ point_3d = convert_to_lidar_frame(point)
+ filtered_3dPoints.append(point_3d)
+
+ filtered_3dPoints = np.array(filtered_3dPoints)
+ # find the nearest 3d point and set that as goal
+ distances_sq = filtered_3dPoints[:,0]**2 + filtered_3dPoints[:,1]**2 + filtered_3dPoints[:,2]**2
+ smallest_index = np.argmin(distances_sq)
+
+        # publish message
+ trail_location_msg = PoseStamped()
+ trail_location_msg.header.stamp = lidar_msg.header.stamp
+ trail_location_msg.header.frame_id = "velodyne"
+ #position
+ trail_location_msg.pose.position.x = filtered_3dPoints[smallest_index][0]
+ trail_location_msg.pose.position.y = filtered_3dPoints[smallest_index][1]
+ trail_location_msg.pose.position.z = filtered_3dPoints[smallest_index][2]
+ #orientation
+ yaw = math.atan2(filtered_3dPoints[smallest_index][1], filtered_3dPoints[smallest_index][0])
+ trail_location_msg.pose.orientation.x = 0.0
+ trail_location_msg.pose.orientation.y = 0.0
+ trail_location_msg.pose.orientation.z = math.sin(yaw/2)
+ trail_location_msg.pose.orientation.w = math.cos(yaw / 2)
+ self.get_logger().info("location published!")
+ self.trail_publisher.publish(trail_location_msg)
+
+
+
+
+
+def main(args=None):
+ print(torch.cuda.is_available())
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+ model = load_model(device)
+
+ rclpy.init(args=args)
+ trailDetectorNode = trailDetector(model, device)
+ rclpy.spin(trailDetectorNode)
+
+ trailDetectorNode.destroy_node()
+ rclpy.shutdown()
+
+if __name__ == '__main__':
+ main()
\ No newline at end of file
diff --git a/trail_detection_node/trail_detection_node/v2_trailDetectionNode.py b/trail_detection_node/trail_detection_node/v2_trailDetectionNode.py
new file mode 100644
index 00000000..309ab765
--- /dev/null
+++ b/trail_detection_node/trail_detection_node/v2_trailDetectionNode.py
@@ -0,0 +1,233 @@
+import torch
+from .model_loader import FCN8s
+from PIL import Image as ImagePIL
+from torchvision import transforms
+import cv2
+
+import rclpy
+from rclpy.node import Node
+from geometry_msgs.msg import PoseStamped
+from sensor_msgs.msg import Image, PointCloud2
+import sensor_msgs_py.point_cloud2 as pc2
+import numpy as np
+import math
+from cv_bridge import CvBridge
+
+import message_filters
+
+'''
+ The transformation matrix as well as the coordinate conversion and depth estimation functions are copied from human_detection_node
+'''
+camera_transformation_k = np.array([
+ [628.5359544,0,676.9575694],
+ [0,627.7249542,532.7206716],
+ [0,0,1]])
+
+rotation_matrix = np.array([
+ [-0.007495781893,-0.0006277316155,0.9999717092],
+ [-0.9999516401,-0.006361853422,-0.007499625104],
+ [0.006366381192,-0.9999795662,-0.0005800141927]])
+
+rotation_matrix = rotation_matrix.T
+
+translation_vector = np.array([-0.06024059837, -0.08180891509, -0.3117851288])
+image_width=1280
+image_height=1024
+
+def convert_to_lidar_frame(uv_coordinate):
+ """
+ convert 2d camera coordinate + depth into 3d lidar frame
+ """
+ point_cloud = np.empty( (3,) , dtype=float)
+ point_cloud[2] = uv_coordinate[2]
+ point_cloud[1] = ( image_height - uv_coordinate[1] )*point_cloud[2]
+ point_cloud[0] = uv_coordinate[0]*point_cloud[2]
+
+ inverse_camera_transformation_k = np.linalg.inv(camera_transformation_k)
+ inverse_rotation_matrix = np.linalg.inv(rotation_matrix)
+ point_cloud = inverse_camera_transformation_k @ point_cloud
+ point_cloud = inverse_rotation_matrix @ (point_cloud-translation_vector)
+ return point_cloud
+
+def convert_to_camera_frame(point_cloud):
+ """
+ convert 3d lidar data into 2d coordinate of the camera frame + depth
+ """
+ length = point_cloud.shape[0]
+ translation = np.tile(translation_vector, (length, 1)).T
+
+ point_cloud = point_cloud.T
+ point_cloud = rotation_matrix@point_cloud + translation
+ point_cloud = camera_transformation_k @ point_cloud
+
+ uv_coordinate = np.empty_like(point_cloud)
+
+ """
+    uv = [x/z, y/z, z]; the image y-axis points the opposite way, so the projected y is subtracted from image_height
+ """
+ uv_coordinate[0] = point_cloud[0] / point_cloud[2]
+ uv_coordinate[1] = image_height - point_cloud[1] / point_cloud[2]
+ uv_coordinate[2] = point_cloud[2]
+
+ uv_depth = uv_coordinate[2, :]
+ filtered_uv_coordinate = uv_coordinate[:, uv_depth >= 0]
+ return filtered_uv_coordinate
+
+def estimate_depth(x, y, np_2d_array):
+ """
+    estimate the depth by finding points closest to x,y in the 2d array
+ """
+ # Calculate the distance between each point and the target coordinates (x, y)
+ distances_sq = (np_2d_array[0,:] - x) ** 2 + (np_2d_array[1,:] - y) ** 2
+
+ # Find the indices of the k nearest points
+ k = 5 # Number of nearest neighbors we want
+ closest_indices = np.argpartition(distances_sq, k)[:k]
+ pixel_distance_threshold = 2000
+
+ valid_indices = [idx for idx in closest_indices if distances_sq[idx]<=pixel_distance_threshold]
+ if len(valid_indices) == 0:
+        # lidar points usually disappear around 0.4m
+ distance_where_lidar_stops_working = -1
+ return distance_where_lidar_stops_working
+
+ filtered_indices = np.array(valid_indices)
+ # Get the depth value of the closest point
+ closest_depths = np_2d_array[2,filtered_indices]
+
+ return np.mean(closest_depths)
+
+def load_model(device):
+ model = FCN8s(nclass=6, backbone='vgg16', pretrained_base=True, pretrained=True)
+    model.load_state_dict(torch.load('src/trail_detection_node/trail_detection_node/model/fcn8s_vgg16_pascal_voc.pth', map_location=device))
+ model = model.to(device)
+ print('Finished loading model!')
+
+ return model
+
+def find_route(model, device, cv_image):
+ PIL_image = ImagePIL.fromarray(cv2.cvtColor(cv_image, cv2.COLOR_BGR2RGB))
+ transform = transforms.Compose([
+ transforms.ToTensor(),
+ transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
+ ])
+ image = transform(PIL_image).unsqueeze(0).to(device)
+ with torch.no_grad():
+ output = model(image)
+ # pred is prediction generated by the model, route is the variable for the center line
+ pred = torch.argmax(output[0], 1).squeeze(0).cpu().data.numpy()
+ pred[pred != 1] = 0 # only see the trail
+ pred[pred == 1] = 255
+ pred = np.array(pred, dtype=np.uint8)
+ route = np.zeros_like(pred)
+ # calculate the center line by taking the average
+ row_num = 0
+ for row in pred:
+ white_pixels = list(np.nonzero(row)[0])
+ if white_pixels:
+ average = (white_pixels[0] + white_pixels[-1]) / 2
+ route[row_num][round(average)] = 255
+ row_num = row_num + 1
+ return route
+
+'''
+    The traildetector node has two subscriptions (lidar and camera) and one publisher (trail position). After it receives msgs from both lidar and camera,
+    it detects the trail in the image and publishes the corresponding lidar position as the trail location.
+    The msgs are synchronized before processing by message_filters.ApproximateTimeSynchronizer.
+    To find the path, the node processes the image, finds a line to follow (by taking the average of the left and right edges of the path), estimates the depth from nearby lidar points,
+    and chooses the closest point as the goal.
+    V2_traildetection assumes that the path points forward and that there is only one path in front.
+'''
+class trailDetector(Node):
+ def __init__(self, model, device):
+ super().__init__('trail_detector')
+ # define trail publisher
+ self.trail_publisher = self.create_publisher(
+ PoseStamped,
+ 'trail_location',
+ 10)
+
+ # create subscribers
+ self.image_sub = message_filters.Subscriber(self, Image, 'camera')
+ self.lidar_sub = message_filters.Subscriber(self, PointCloud2, 'velodyne_points')
+
+ self.bridge = CvBridge()
+
+ # load model and device
+ self.model = model
+ self.device = device
+
+ # create callback
+ queue_size = 30
+ ts = message_filters.ApproximateTimeSynchronizer([self.image_sub, self.lidar_sub], queue_size, 0.5)
+ ts.registerCallback(self.trail_callback)
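+        # ApproximateTimeSynchronizer pairs one camera msg with one lidar msg whose header
+        # stamps differ by at most the slop (0.5 s here); trail_callback then receives the
+        # matched (camera_msg, lidar_msg) pair in that order.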
+
+ def trail_callback(self, camera_msg, lidar_msg):
+ print("Message received!")
+ # process lidar msg
+ point_gen = pc2.read_points(
+ lidar_msg, field_names=(
+ "x", "y", "z"), skip_nans=True)
+ points = [[x, y, z] for x, y, z in point_gen]
+ points = np.array(points)
+ points2d = convert_to_camera_frame(points)
+
+ # process camera msg
+ cv_image = self.bridge.imgmsg_to_cv2(camera_msg, desired_encoding='passthrough')
+ route = find_route(self.model, self.device, cv_image)
+ route_indices = list(zip(*np.nonzero(route)))
+
+ if not route_indices:
+ print("No centerline found!")
+ return
+
+ # find the corresponding lidar points using the center line pixels
+ filtered_3dPoints = []
+ for index in route_indices:
+ point = []
+ point.append(index[0])
+ point.append(index[1])
+ point.append(estimate_depth(index[0], index[1], points2d))
+ point_3d = convert_to_lidar_frame(point)
+ filtered_3dPoints.append(point_3d)
+
+ filtered_3dPoints = np.array(filtered_3dPoints)
+ # find the nearest 3d point and set that as goal
+ distances_sq = filtered_3dPoints[:,0]**2 + filtered_3dPoints[:,1]**2 + filtered_3dPoints[:,2]**2
+ smallest_index = np.argmin(distances_sq)
+
+        # publish message
+ trail_location_msg = PoseStamped()
+ trail_location_msg.header.stamp = lidar_msg.header.stamp
+ trail_location_msg.header.frame_id = "velodyne"
+ #position
+ trail_location_msg.pose.position.x = filtered_3dPoints[smallest_index][0]
+ trail_location_msg.pose.position.y = filtered_3dPoints[smallest_index][1]
+ trail_location_msg.pose.position.z = filtered_3dPoints[smallest_index][2]
+ #orientation
+ yaw = math.atan2(filtered_3dPoints[smallest_index][1], filtered_3dPoints[smallest_index][0])
+ trail_location_msg.pose.orientation.x = 0.0
+ trail_location_msg.pose.orientation.y = 0.0
+ trail_location_msg.pose.orientation.z = math.sin(yaw/2)
+ trail_location_msg.pose.orientation.w = math.cos(yaw / 2)
+ self.get_logger().info("location published!")
+ self.trail_publisher.publish(trail_location_msg)
+
+
+
+
+
+def main(args=None):
+ print(torch.cuda.is_available())
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+ model = load_model(device)
+
+ rclpy.init(args=args)
+ trailDetectorNode = trailDetector(model, device)
+ rclpy.spin(trailDetectorNode)
+
+ trailDetectorNode.destroy_node()
+ rclpy.shutdown()
+
+if __name__ == '__main__':
+ main()
\ No newline at end of file
diff --git a/trail_detection_node/trail_detection_node/vgg.py b/trail_detection_node/trail_detection_node/vgg.py
new file mode 100644
index 00000000..fe5c163c
--- /dev/null
+++ b/trail_detection_node/trail_detection_node/vgg.py
@@ -0,0 +1,191 @@
+import torch
+import torch.nn as nn
+import torch.utils.model_zoo as model_zoo
+
+__all__ = [
+ 'VGG', 'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn',
+ 'vgg19_bn', 'vgg19',
+]
+
+model_urls = {
+ 'vgg11': 'https://download.pytorch.org/models/vgg11-bbd30ac9.pth',
+ 'vgg13': 'https://download.pytorch.org/models/vgg13-c768596a.pth',
+ 'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth',
+ 'vgg19': 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth',
+ 'vgg11_bn': 'https://download.pytorch.org/models/vgg11_bn-6002323d.pth',
+ 'vgg13_bn': 'https://download.pytorch.org/models/vgg13_bn-abd245e5.pth',
+ 'vgg16_bn': 'https://download.pytorch.org/models/vgg16_bn-6c64b313.pth',
+ 'vgg19_bn': 'https://download.pytorch.org/models/vgg19_bn-c79401a0.pth',
+}
+
+
+class VGG(nn.Module):
+ def __init__(self, features, num_classes=1000, init_weights=True):
+ super(VGG, self).__init__()
+ self.features = features
+ self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
+ self.classifier = nn.Sequential(
+ nn.Linear(512 * 7 * 7, 4096),
+ nn.ReLU(True),
+ nn.Dropout(),
+ nn.Linear(4096, 4096),
+ nn.ReLU(True),
+ nn.Dropout(),
+ nn.Linear(4096, num_classes)
+ )
+ if init_weights:
+ self._initialize_weights()
+
+ def forward(self, x):
+ x = self.features(x)
+ x = self.avgpool(x)
+ x = x.view(x.size(0), -1)
+ x = self.classifier(x)
+ return x
+
+ def _initialize_weights(self):
+ for m in self.modules():
+ if isinstance(m, nn.Conv2d):
+ nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
+ if m.bias is not None:
+ nn.init.constant_(m.bias, 0)
+ elif isinstance(m, nn.BatchNorm2d):
+ nn.init.constant_(m.weight, 1)
+ nn.init.constant_(m.bias, 0)
+ elif isinstance(m, nn.Linear):
+ nn.init.normal_(m.weight, 0, 0.01)
+ nn.init.constant_(m.bias, 0)
+
+
+def make_layers(cfg, batch_norm=False):
+ layers = []
+ in_channels = 3
+ for v in cfg:
+ if v == 'M':
+ layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
+ else:
+ conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
+ if batch_norm:
+                layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
+ else:
+ layers += [conv2d, nn.ReLU(inplace=True)]
+ in_channels = v
+ return nn.Sequential(*layers)
+
+
+cfg = {
+ 'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
+ 'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
+ 'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
+ 'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
+}
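+# cfg keys follow the column labels of the original VGG paper:
+# 'A' -> VGG-11, 'B' -> VGG-13, 'D' -> VGG-16, 'E' -> VGG-19; 'M' marks a max-pool layer.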
+
+
+def vgg11(pretrained=False, **kwargs):
+ """VGG 11-layer model (configuration "A")
+ Args:
+ pretrained (bool): If True, returns a model pre-trained on ImageNet
+ """
+ if pretrained:
+ kwargs['init_weights'] = False
+ model = VGG(make_layers(cfg['A']), **kwargs)
+ if pretrained:
+ model.load_state_dict(model_zoo.load_url(model_urls['vgg11']))
+ return model
+
+
+def vgg11_bn(pretrained=False, **kwargs):
+ """VGG 11-layer model (configuration "A") with batch normalization
+ Args:
+ pretrained (bool): If True, returns a model pre-trained on ImageNet
+ """
+ if pretrained:
+ kwargs['init_weights'] = False
+ model = VGG(make_layers(cfg['A'], batch_norm=True), **kwargs)
+ if pretrained:
+ model.load_state_dict(model_zoo.load_url(model_urls['vgg11_bn']))
+ return model
+
+
+def vgg13(pretrained=False, **kwargs):
+ """VGG 13-layer model (configuration "B")
+ Args:
+ pretrained (bool): If True, returns a model pre-trained on ImageNet
+ """
+ if pretrained:
+ kwargs['init_weights'] = False
+ model = VGG(make_layers(cfg['B']), **kwargs)
+ if pretrained:
+ model.load_state_dict(model_zoo.load_url(model_urls['vgg13']))
+ return model
+
+
+def vgg13_bn(pretrained=False, **kwargs):
+ """VGG 13-layer model (configuration "B") with batch normalization
+ Args:
+ pretrained (bool): If True, returns a model pre-trained on ImageNet
+ """
+ if pretrained:
+ kwargs['init_weights'] = False
+ model = VGG(make_layers(cfg['B'], batch_norm=True), **kwargs)
+ if pretrained:
+ model.load_state_dict(model_zoo.load_url(model_urls['vgg13_bn']))
+ return model
+
+
+def vgg16(pretrained=False, **kwargs):
+ """VGG 16-layer model (configuration "D")
+ Args:
+ pretrained (bool): If True, returns a model pre-trained on ImageNet
+ """
+ if pretrained:
+ kwargs['init_weights'] = False
+ model = VGG(make_layers(cfg['D']), **kwargs)
+ if pretrained:
+ model.load_state_dict(model_zoo.load_url(model_urls['vgg16']))
+ return model
+
+
+def vgg16_bn(pretrained=False, **kwargs):
+ """VGG 16-layer model (configuration "D") with batch normalization
+ Args:
+ pretrained (bool): If True, returns a model pre-trained on ImageNet
+ """
+ if pretrained:
+ kwargs['init_weights'] = False
+ model = VGG(make_layers(cfg['D'], batch_norm=True), **kwargs)
+ if pretrained:
+ model.load_state_dict(model_zoo.load_url(model_urls['vgg16_bn']))
+ return model
+
+
+def vgg19(pretrained=False, **kwargs):
+ """VGG 19-layer model (configuration "E")
+ Args:
+ pretrained (bool): If True, returns a model pre-trained on ImageNet
+ """
+ if pretrained:
+ kwargs['init_weights'] = False
+ model = VGG(make_layers(cfg['E']), **kwargs)
+ if pretrained:
+ model.load_state_dict(model_zoo.load_url(model_urls['vgg19']))
+ return model
+
+
+def vgg19_bn(pretrained=False, **kwargs):
+ """VGG 19-layer model (configuration 'E') with batch normalization
+ Args:
+ pretrained (bool): If True, returns a model pre-trained on ImageNet
+ """
+ if pretrained:
+ kwargs['init_weights'] = False
+ model = VGG(make_layers(cfg['E'], batch_norm=True), **kwargs)
+ if pretrained:
+ model.load_state_dict(model_zoo.load_url(model_urls['vgg19_bn']))
+ return model
+
+
+if __name__ == '__main__':
+ img = torch.randn((4, 3, 480, 480))
+ model = vgg16(pretrained=False)
+ out = model(img)
diff --git a/trail_detection_node/trail_detection_node/visualizer_v1_trailDetectionNode.py b/trail_detection_node/trail_detection_node/visualizer_v1_trailDetectionNode.py
new file mode 100644
index 00000000..616b689d
--- /dev/null
+++ b/trail_detection_node/trail_detection_node/visualizer_v1_trailDetectionNode.py
@@ -0,0 +1,431 @@
+import torch
+from .model_loader import FCN8s, FCN32s
+from PIL import Image as ImagePIL
+from torchvision import transforms
+import cv2
+import os
+
+import rclpy
+from rclpy.node import Node
+from geometry_msgs.msg import PoseStamped
+from sensor_msgs.msg import Image, PointCloud2
+import sensor_msgs_py.point_cloud2 as pc2
+import numpy as np
+import math
+from cv_bridge import CvBridge
+
+'''
+ The transformation matrix as well as the coordinate conversion and depth estimation functions are copied from human_detection_node
+'''
+camera_transformation_k = np.array([
+ [628.5359544,0,676.9575694],
+ [0,627.7249542,532.7206716],
+ [0,0,1]])
+
+rotation_matrix = np.array([
+ [-0.007495781893,-0.0006277316155,0.9999717092],
+ [-0.9999516401,-0.006361853422,-0.007499625104],
+ [0.006366381192,-0.9999795662,-0.0005800141927]])
+
+rotation_matrix = rotation_matrix.T
+
+translation_vector = np.array([-0.06024059837, -0.08180891509, -0.3117851288])
+image_width=1280
+image_height=1024
+
+def convert_to_lidar_frame(uv_coordinate):
+ """
+ convert 2d camera coordinate + depth into 3d lidar frame
+ """
+ point_cloud = np.empty( (3,) , dtype=float)
+ point_cloud[2] = uv_coordinate[2]
+ point_cloud[1] = ( image_height - uv_coordinate[1] )*point_cloud[2]
+ point_cloud[0] = uv_coordinate[0]*point_cloud[2]
+
+ inverse_camera_transformation_k = np.linalg.inv(camera_transformation_k)
+ inverse_rotation_matrix = np.linalg.inv(rotation_matrix)
+ point_cloud = inverse_camera_transformation_k @ point_cloud
+ point_cloud = inverse_rotation_matrix @ (point_cloud-translation_vector)
+ return point_cloud
+
+def convert_to_camera_frame(point_cloud):
+ """
+ convert 3d lidar data into 2d coordinate of the camera frame + depth
+ """
+ length = point_cloud.shape[0]
+ translation = np.tile(translation_vector, (length, 1)).T
+
+ point_cloud = point_cloud.T
+ point_cloud = rotation_matrix@point_cloud + translation
+ point_cloud = camera_transformation_k @ point_cloud
+
+ uv_coordinate = np.empty_like(point_cloud)
+
+ """
+    uv = [x/z, y/z, z]; the image y-axis points the opposite way, so the projected y is subtracted from image_height
+ """
+ uv_coordinate[0] = point_cloud[0] / point_cloud[2]
+ uv_coordinate[1] = image_height - point_cloud[1] / point_cloud[2]
+ uv_coordinate[2] = point_cloud[2]
+
+ uv_depth = uv_coordinate[2, :]
+ filtered_uv_coordinate = uv_coordinate[:, uv_depth >= 0]
+ return filtered_uv_coordinate
+
+def estimate_depth(x, y, np_2d_array):
+ """
+    estimate the depth by finding points closest to x,y in the 2d array
+ """
+ # Calculate the distance between each point and the target coordinates (x, y)
+ distances_sq = (np_2d_array[0,:] - x) ** 2 + (np_2d_array[1,:] - y) ** 2
+
+ # Find the indices of the k nearest points
+ k = 5 # Number of nearest neighbors we want
+ closest_indices = np.argpartition(distances_sq, k)[:k]
+ pixel_distance_threshold = 2000
+
+ valid_indices = [idx for idx in closest_indices if distances_sq[idx]<=pixel_distance_threshold]
+ if len(valid_indices) == 0:
+        # lidar points usually disappear around 0.4m
+ distance_where_lidar_stops_working = -1
+ return distance_where_lidar_stops_working
+
+ filtered_indices = np.array(valid_indices)
+ # Get the depth value of the closest point
+ closest_depths = np_2d_array[2,filtered_indices]
+
+ return np.mean(closest_depths)
+
+def load_model(device):
+ model = FCN32s(nclass=2, backbone='vgg16', pretrained_base=True, pretrained=True)
+ # model_location = '32s_batch8/fcn32s_vgg16_pascal_voc_best_model.pth'
+ model_location = '500epoch/fcn32s_vgg16_pascal_voc.pth'
+ if os.path.isfile(f'src/trail_detection_node/trail_detection_node/model/{model_location}'):
+        model.load_state_dict(torch.load(f'src/trail_detection_node/trail_detection_node/model/{model_location}', map_location=device))
+    else:
+        model.load_state_dict(torch.load(f'trail_detection_node/model/{model_location}', map_location=device))
+ model = model.to(device)
+ print('Finished loading model!')
+
+ return model
+
+def find_route(model, device, cv_image):
+ PIL_image = ImagePIL.fromarray(cv2.cvtColor(cv_image, cv2.COLOR_BGR2RGB))
+ transform = transforms.Compose([
+ transforms.ToTensor(),
+ transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
+ ])
+ image = transform(PIL_image).unsqueeze(0).to(device)
+ with torch.no_grad():
+ output = model(image)
+ # pred is prediction generated by the model, route is the variable for the center line
+ pred = torch.argmax(output[0], 1).squeeze(0).cpu().data.numpy()
+ pred[pred == 0] = 255 # only see the trail
+ pred[pred == 1] = 0
+ pred[pred == 2] = 0
+ pred[pred == 3] = 0
+ pred[pred == 4] = 0
+ pred[pred == 5] = 0
+ pred = np.array(pred, dtype=np.uint8)
+
+ # # prediction visualize
+ # cv2.imshow('prediction',pred)
+ # cv2.waitKey(25)
+
+ route = np.zeros_like(pred)
+ # calculate the center line by taking the average
+ row_num = 0
+ for row in pred:
+ white_pixels = list(np.nonzero(row)[0])
+ if white_pixels:
+ average = (white_pixels[0] + white_pixels[-1]) / 2
+ route[row_num][round(average)] = 255
+ row_num = row_num + 1
+ return route, pred
+
+def equalize_hist_rgb(img):
+ # Split the image into its color channels
+ r, g, b = cv2.split(img)
+
+ # Equalize the histograms for each channel
+ r_eq = cv2.equalizeHist(r)
+ g_eq = cv2.equalizeHist(g)
+ b_eq = cv2.equalizeHist(b)
+
+ # Merge the channels
+ img_eq = cv2.merge((r_eq, g_eq, b_eq))
+ # img_eq = cv2.fastNlMeansDenoisingColored(img_eq,None,10,20,7,42)
+ # img_eq = cv2.GaussianBlur(img_eq, (25,25), 0)
+ return img_eq
+
+'''
+    The traildetector node has two subscriptions (lidar and camera) and one publisher (trail position). After it receives msgs from both lidar and camera,
+    it detects the trail in the image and publishes the corresponding lidar position as the trail location.
+    The msgs are synchronized before processing, using buffers and the sync function.
+    To find the path, the node processes the image, finds a line to follow (by taking the average of the left and right edges of the path), estimates the depth from nearby lidar points,
+    and chooses the closest point as the goal.
+    V1_traildetection assumes that the path points forward and that there is only one path in front.
+'''
+class trailDetector(Node):
+ def __init__(self, model, device):
+ super().__init__('trail_detector')
+        # define trail publisher
+ self.trail_publisher = self.create_publisher(
+ PoseStamped,
+ 'trail_location',
+ 10)
+
+ # define camera subscription
+ self.camera_subscription = self.create_subscription(
+ Image,
+ 'camera',
+ self.camera_callback,
+ 10)
+        self.camera_subscription  # prevent unused variable warning
+
+ # define lidar subscription
+ self.lidar_subscription = self.create_subscription(
+ PointCloud2,
+ 'velodyne_points',
+ self.lidar_callback,
+ 10)
+        self.lidar_subscription  # prevent unused variable warning
+
+ self.bridge = CvBridge()
+
+ # load model and device
+ self.model = model
+ self.device = device
+
+ # buffers to hold msgs for sync
+        self.buffer_size = 50 # 50 is an arbitrary choice, to be discussed
+ self.lidar_buffer = []
+ self.camera_buffer = []
+ self.only_camera_mode = False
+
+ def camera_callback(self, msg):
+ if len(self.camera_buffer) >= self.buffer_size:
+ self.camera_buffer.pop(0)
+ self.camera_buffer.append(msg)
+ self.sync()
+
+ def lidar_callback(self, msg):
+ if len(self.lidar_buffer) >= self.buffer_size:
+ self.lidar_buffer.pop(0)
+ self.lidar_buffer.append(msg)
+ self.sync()
+
+ def sync(self):
+        # in camera-only mode, process camera msgs directly; otherwise return until both sensors have msgs
+ if self.only_camera_mode and self.camera_buffer:
+ camera_msg = self.camera_buffer[0]
+ self.camera_buffer.pop(0)
+ self.only_camera_callback(camera_msg)
+ elif not self.lidar_buffer or not self.camera_buffer:
+ return
+
+ while self.lidar_buffer and self.camera_buffer:
+ lidar_msg = self.lidar_buffer[0]
+ camera_msg = self.camera_buffer[0]
+
+            time_tolerance = 20000000 # nanosec, random choice to be discussed
+            # compare full timestamps (sec + nanosec); the nanosec field alone wraps every second
+            lidar_time = lidar_msg.header.stamp.sec * 1000000000 + lidar_msg.header.stamp.nanosec
+            camera_time = camera_msg.header.stamp.sec * 1000000000 + camera_msg.header.stamp.nanosec
+            time_difference = abs(lidar_time - camera_time)
+            # print(time_difference)
+
+            # if the difference is within tolerance, pass the pair to trail_callback, otherwise discard the older msg
+            if time_difference <= time_tolerance:
+                self.lidar_buffer.pop(0)
+                self.camera_buffer.pop(0)
+                self.get_logger().info("msgs received!")
+                self.trail_callback(lidar_msg, camera_msg)
+            elif lidar_time > camera_time:
+                self.camera_buffer.pop(0)
+            else:
+                self.lidar_buffer.pop(0)
+
+ def trail_callback(self, lidar_msg, camera_msg):
+ # process lidar msg
+ point_gen = pc2.read_points(
+ lidar_msg, field_names=(
+ "x", "y", "z"), skip_nans=True)
+ points = [[x, y, z] for x, y, z in point_gen]
+ points = np.array(points)
+ points2d = convert_to_camera_frame(points)
+
+ # process camera msg
+ cv_image = self.bridge.imgmsg_to_cv2(camera_msg, desired_encoding='passthrough')
+
+ # increase brightness
+ M = np.ones(cv_image.shape, dtype = 'uint8') * 50 # increase brightness by 50
+ cv_image = cv2.add(cv_image, M)
+
+ # # image visualizer
+ # cv2.imshow('raw image',cv_image)
+ # cv2.waitKey(25)
+
+ route = find_route(self.model, self.device, cv_image)
+
+ # # visualize route purely
+ # cv2.imshow("white_route",route)
+ # cv2.waitKey(25)
+
+ route_indices = list(zip(*np.nonzero(route)))
+ if not route_indices:
+ print("No centerline found!")
+ return
+
+ # #filter points that have no lidar points near it
+ # filtered_route_indices = []
+ # for index in route_indices:
+ # point = []
+ # point.append(index[0])
+ # point.append(index[1])
+ # point.append(estimate_depth(index[0], index[1], points2d))
+ # if point[2] == -1:
+ # continue
+ # else:
+ # filtered_route_indices.append(point)
+
+ # find the corresponding lidar points using the center line pixels
+ filtered_3dPoints = []
+ # for index in filtered_route_indices:
+ for index in route_indices:
+ point = []
+ # point.append(index[0])
+ # point.append(index[1])
+ # point.append(index[2])
+
+ point.append(index[0])
+ point.append(index[1])
+ point.append(estimate_depth(index[0], index[1], points2d))
+
+ point_3d = convert_to_lidar_frame(point)
+ filtered_3dPoints.append(point_3d)
+
+ filtered_3dPoints = np.array(filtered_3dPoints)
+ # find the nearest 3d point and set that as goal
+ distances_sq = filtered_3dPoints[:,0]**2 + filtered_3dPoints[:,1]**2 + filtered_3dPoints[:,2]**2
+ smallest_index = np.argmin(distances_sq)
+ # print(math.sqrt(distances_sq[smallest_index]))
+ # smallest_index = np.argmin(filtered_3dPoints[:,2])
+
+        # visualize after-processed image
+ visualize_cv_image = cv_image
+ print(f"{visualize_cv_image.shape[0]}")
+ circle_x = route_indices[smallest_index][0]
+ circle_y = route_indices[smallest_index][1]
+
+ # # visualize the lidar points in image
+ # uv_x, uv_y, uv_z = points2d
+ # for index_lidar in range(points2d.shape[1]):
+ # # print(f"{int(uv_x[index_lidar])},{int(uv_y[index_lidar])}")
+ # cv2.circle(visualize_cv_image, (int(uv_x[index_lidar]), int(image_height - uv_y[index_lidar])), radius=5, color=(255, 0, 0), thickness=-1)
+
+ # visualize center line in image
+ for index_circle in range(len(route_indices)):
+ if index_circle == smallest_index:
+ continue
+ else:
+ red_circle_x = route_indices[index_circle][0]
+ red_circle_y = route_indices[index_circle][1]
+ cv2.circle(visualize_cv_image, (red_circle_y, red_circle_x), radius=5, color=(0, 0, 255), thickness=-1)
+
+ # visualize the chosen point in image
+ cv2.circle(visualize_cv_image, (circle_y, circle_x), radius=7, color=(0, 255, 0), thickness=-1)
+ cv2.circle(visualize_cv_image, (0, 0), radius=12, color=(0, 255, 0), thickness=-1)
+
+ cv2.imshow('circled image',visualize_cv_image)
+ cv2.waitKey(25)
+
+        # publish message
+ trail_location_msg = PoseStamped()
+ trail_location_msg.header.stamp = lidar_msg.header.stamp
+ trail_location_msg.header.frame_id = "velodyne"
+ #position
+ trail_location_msg.pose.position.x = filtered_3dPoints[smallest_index][0]
+ trail_location_msg.pose.position.y = filtered_3dPoints[smallest_index][1]
+ trail_location_msg.pose.position.z = filtered_3dPoints[smallest_index][2]
+ #orientation
+ yaw = math.atan2(filtered_3dPoints[smallest_index][1], filtered_3dPoints[smallest_index][0])
+ trail_location_msg.pose.orientation.x = 0.0
+ trail_location_msg.pose.orientation.y = 0.0
+ trail_location_msg.pose.orientation.z = math.sin(yaw/2)
+ trail_location_msg.pose.orientation.w = math.cos(yaw / 2)
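+        # (x, y, z, w) = (0, 0, sin(yaw/2), cos(yaw/2)) is the quaternion for a pure rotation of yaw about the z axis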
+ self.get_logger().info("location published!")
+ self.trail_publisher.publish(trail_location_msg)
+
+ def only_camera_callback(self, camera_msg):
+ # process camera msg
+ cv_image = self.bridge.imgmsg_to_cv2(camera_msg, desired_encoding='passthrough')
+ # cv_image = cv2.resize(cv_image,(480,480))
+
+ # cv_image = cv2.fastNlMeansDenoisingColored(cv_image,None,5,5,5,11)
+
+ # # histogram equalization method 1
+ # cv_image = cv2.cvtColor(cv_image, cv2.COLOR_BGR2YCrCb)
+ # cv_image[:,:,0] = cv2.equalizeHist(cv_image[:,:,0])
+ # cv_image = cv2.cvtColor(cv_image, cv2.COLOR_YCrCb2BGR)
+
+ # histogram equalization method 2
+ # cv_image = equalize_hist_rgb(cv_image)
+
+ # # histogram equalization method 3 CLAHE
+ # image_bw = cv2.cvtColor(cv_image, cv2.COLOR_BGR2GRAY)
+ # clahe = cv2.createCLAHE(clipLimit=5)
+ # cv_image = clahe.apply(image_bw) + 30
+
+ # # histogram equalization method 4 CLAHE
+ # cla = cv2.createCLAHE(clipLimit=4.0)
+ # H, S, V = cv2.split(cv2.cvtColor(cv_image, cv2.COLOR_BGR2HSV))
+ # eq_V = cla.apply(V)
+ # cv_image = cv2.cvtColor(cv2.merge([H, S, eq_V]), cv2.COLOR_HSV2BGR)
+
+ # # increase brightness
+ # M = np.ones(cv_image.shape, dtype = 'uint8') * 50 # increase brightness by 50
+ # cv_image = cv2.add(cv_image, M)
+
+ # # image visualizer
+ # cv2.imshow('raw image',cv_image)
+ # cv2.waitKey(25)
+
+ route, pred = find_route(self.model, self.device, cv_image)
+
+ # # visualize route purely
+ # cv2.imshow("white_route",route)
+ # cv2.waitKey(25)
+
+        # increase the brightness of the image where the model predicts road
+ sign = cv2.cvtColor(pred, cv2.COLOR_GRAY2BGR)
+ cv_image = cv2.add(cv_image, sign)
+
+ route_indices = list(zip(*np.nonzero(route)))
+ if not route_indices:
+ print("No centerline found!")
+ return
+
+ # for index_circle in range(len(route_indices)):
+ # red_circle_x = route_indices[index_circle][0]
+ # red_circle_y = route_indices[index_circle][1]
+ # cv2.circle(cv_image, (red_circle_y, red_circle_x), radius=5, color=(0, 0, 255), thickness=-1)
+
+ cv2.imshow('circled image',cv_image)
+ cv2.waitKey(25)
+
+
+
+
+def main(args=None):
+ print(torch.cuda.is_available())
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+ model = load_model(device)
+
+ rclpy.init(args=args)
+ trailDetectorNode = trailDetector(model, device)
+ rclpy.spin(trailDetectorNode)
+
+ trailDetectorNode.destroy_node()
+ rclpy.shutdown()
+ cv2.destroyAllWindows()
+
+if __name__ == '__main__':
+ main()
diff --git a/trail_detection_node/trail_detection_node/visualizer_v2_trailDetectionNode.py b/trail_detection_node/trail_detection_node/visualizer_v2_trailDetectionNode.py
new file mode 100644
index 00000000..5055d3f4
--- /dev/null
+++ b/trail_detection_node/trail_detection_node/visualizer_v2_trailDetectionNode.py
@@ -0,0 +1,392 @@
+import torch
+from .model_loader import FCN8s, FCN32s
+from PIL import Image as ImagePIL
+from torchvision import transforms
+import cv2
+import os
+
+import rclpy
+from rclpy.node import Node
+from geometry_msgs.msg import PoseStamped
+from sensor_msgs.msg import Image, PointCloud2
+import sensor_msgs_py.point_cloud2 as pc2
+import numpy as np
+import math
+from cv_bridge import CvBridge
+
+import message_filters
+from scipy.ndimage import grey_erosion
+'''
+ The transformation matrix as well as the coordinate conversion and depth estimation functions are copied from human_detection_node
+'''
+camera_transformation_k = np.array([
+ [628.5359544,0,676.9575694],
+ [0,627.7249542,532.7206716],
+ [0,0,1]])
+
+rotation_matrix = np.array([
+ [-0.007495781893,-0.0006277316155,0.9999717092],
+ [-0.9999516401,-0.006361853422,-0.007499625104],
+ [0.006366381192,-0.9999795662,-0.0005800141927]])
+
+rotation_matrix = rotation_matrix.T
+
+translation_vector = np.array([-0.06024059837, -0.08180891509, -0.3117851288])
+image_width=1280
+image_height=1024
+
+def convert_to_lidar_frame(uv_coordinate):
+ """
+ convert 2d camera coordinate + depth into 3d lidar frame
+ """
+ point_cloud = np.empty( (3,) , dtype=float)
+ point_cloud[2] = uv_coordinate[2]
+ point_cloud[1] = ( image_height - uv_coordinate[1] )*point_cloud[2]
+ point_cloud[0] = uv_coordinate[0]*point_cloud[2]
+
+ inverse_camera_transformation_k = np.linalg.inv(camera_transformation_k)
+ inverse_rotation_matrix = np.linalg.inv(rotation_matrix)
+ point_cloud = inverse_camera_transformation_k @ point_cloud
+ point_cloud = inverse_rotation_matrix @ (point_cloud-translation_vector)
+ return point_cloud
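+
+# Illustrative usage (not called by the node): back-project a single [u, v, depth] sample, in the
+# same convention that convert_to_camera_frame below produces, assuming the calibration constants above.
+def _example_back_projection():
+    # a pixel at the camera principal point, 5 m away
+    uv_depth = np.array([676.9575694, image_height - 532.7206716, 5.0])
+    print(convert_to_lidar_frame(uv_depth))   # roughly 5 m ahead along the lidar x axis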
+
+def convert_to_camera_frame(point_cloud):
+ """
+ convert 3d lidar data into 2d coordinate of the camera frame + depth
+ """
+ length = point_cloud.shape[0]
+ translation = np.tile(translation_vector, (length, 1)).T
+
+ point_cloud = point_cloud.T
+ point_cloud = rotation_matrix@point_cloud + translation
+ point_cloud = camera_transformation_k @ point_cloud
+
+ uv_coordinate = np.empty_like(point_cloud)
+
+ """
+    uv = [x/z, y/z, z]; the image v axis is flipped relative to the camera y axis, hence image_height minus y/z
+ """
+ uv_coordinate[0] = point_cloud[0] / point_cloud[2]
+ uv_coordinate[1] = image_height - point_cloud[1] / point_cloud[2]
+ uv_coordinate[2] = point_cloud[2]
+
+ uv_depth = uv_coordinate[2, :]
+ filtered_uv_coordinate = uv_coordinate[:, uv_depth >= 0]
+ return filtered_uv_coordinate
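+
+# Illustrative usage (not called by the node): project a few lidar-frame points into the image,
+# assuming the calibration above; points that end up behind the camera (depth < 0) are dropped.
+def _example_projection():
+    toy_points = np.array([[5.0, 0.0, -0.5],    # ahead of the robot, slightly below the sensor
+                           [4.0, 1.0, 0.0],     # ahead and to the left
+                           [-2.0, 0.0, 0.0]])   # behind the camera, filtered out
+    print(convert_to_camera_frame(toy_points))  # 3 x 2 array of (u, v, depth) columns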
+
+def estimate_depth(x, y, np_2d_array):
+ """
+    estimate the depth by finding points closest to x,y in the 2d array
+ """
+ # Calculate the distance between each point and the target coordinates (x, y)
+ distances_sq = (np_2d_array[0,:] - x) ** 2 + (np_2d_array[1,:] - y) ** 2
+
+ # Find the indices of the k nearest points
+ k = 5 # Number of nearest neighbors we want
+ closest_indices = np.argpartition(distances_sq, k)[:k]
+    pixel_distance_threshold = 2000   # squared pixel distance, i.e. a radius of roughly 45 px
+
+ valid_indices = [idx for idx in closest_indices if distances_sq[idx]<=pixel_distance_threshold]
+ if len(valid_indices) == 0:
+        # lidar points usually disappear within about 0.4 m of the sensor
+ distance_where_lidar_stops_working = -1
+ return distance_where_lidar_stops_working
+
+ filtered_indices = np.array(valid_indices)
+    # Get the depth values of the nearest valid points
+ closest_depths = np_2d_array[2,filtered_indices]
+
+ return np.mean(closest_depths)
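+
+# Illustrative check (not called by the node): estimate_depth averages the depths of the nearest
+# projected lidar points, and returns -1 when nothing lies within the pixel distance threshold.
+def _example_estimate_depth():
+    # columns are (u, v, depth) for six projected points
+    toy_points2d = np.array([[100.0, 101.0, 102.0, 103.0, 104.0, 500.0],
+                             [200.0, 200.0, 201.0, 199.0, 202.0, 900.0],
+                             [4.0, 4.2, 4.1, 3.9, 4.0, 9.0]])
+    print(estimate_depth(100, 200, toy_points2d))    # ~4.0, averaged over the 5 nearest points
+    print(estimate_depth(1200, 50, toy_points2d))    # -1, no lidar point near that pixel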
+
+def load_model(device):
+ model = FCN32s(nclass=2, backbone='vgg16', pretrained_base=True, pretrained=True)
+ # model_location = '32s_batch8/fcn32s_vgg16_pascal_voc_best_model.pth'
+ model_location = '500epoch/fcn32s_vgg16_pascal_voc.pth'
+ if os.path.isfile(f'src/trail_detection_node/trail_detection_node/model/{model_location}'):
+        model.load_state_dict(torch.load(f'src/trail_detection_node/trail_detection_node/model/{model_location}',map_location=device))
+    else:
+        model.load_state_dict(torch.load(f'trail_detection_node/model/{model_location}',map_location=device))
+ model = model.to(device)
+ print('Finished loading model!')
+
+ return model
+
+def find_route(model, device, cv_image):
+ PIL_image = ImagePIL.fromarray(cv2.cvtColor(cv_image, cv2.COLOR_BGR2RGB))
+ transform = transforms.Compose([
+ transforms.ToTensor(),
+ transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
+ ])
+ image = transform(PIL_image).unsqueeze(0).to(device)
+ with torch.no_grad():
+ output = model(image)
+    # pred is the segmentation mask predicted by the model; route holds the extracted center line
+ pred = torch.argmax(output[0], 1).squeeze(0).cpu().data.numpy()
+ pred[pred == 0] = 255 # only see the trail
+ pred[pred == 1] = 0
+ pred = np.array(pred, dtype=np.uint8)
+
+ pred = grey_erosion(pred, size=(4,4))
+
+ # # prediction visualize
+ # cv2.imshow('prediction',pred)
+ # cv2.waitKey(25)
+
+ route = np.zeros_like(pred)
+    # calculate the center line by averaging the leftmost and rightmost trail pixels in each row
+ row_num = 0
+ for row in pred:
+ white_pixels = list(np.nonzero(row)[0])
+ if white_pixels:
+ average = (white_pixels[0] + white_pixels[-1]) / 2
+ route[row_num][round(average)] = 255
+ row_num = row_num + 1
+ return route, pred
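+
+# Small sketch (independent of the model): the same row-wise averaging used in find_route applied to
+# a toy binary mask; every row that contains trail pixels gets exactly one centre-line pixel.
+def _example_centerline():
+    toy_pred = np.array([[0, 255, 255, 255, 0, 0],
+                         [0, 0, 255, 255, 255, 0],
+                         [0, 0, 0, 0, 0, 0],
+                         [255, 255, 255, 255, 255, 255]], dtype=np.uint8)
+    toy_route = np.zeros_like(toy_pred)
+    for row_num, row in enumerate(toy_pred):
+        white_pixels = list(np.nonzero(row)[0])
+        if white_pixels:
+            toy_route[row_num][round((white_pixels[0] + white_pixels[-1]) / 2)] = 255
+    print(np.nonzero(toy_route))   # rows 0, 1 and 3 each contain a single centre pixel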
+
+def equalize_hist_rgb(img):
+ # Split the image into its color channels
+ r, g, b = cv2.split(img)
+
+ # Equalize the histograms for each channel
+ r_eq = cv2.equalizeHist(r)
+ g_eq = cv2.equalizeHist(g)
+ b_eq = cv2.equalizeHist(b)
+
+ # Merge the channels
+ img_eq = cv2.merge((r_eq, g_eq, b_eq))
+ # img_eq = cv2.fastNlMeansDenoisingColored(img_eq,None,10,20,7,42)
+ # img_eq = cv2.GaussianBlur(img_eq, (25,25), 0)
+ return img_eq
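+
+# Illustrative check (not wired into the node): histogram equalization stretches a low-contrast
+# image so that each channel spans the full 0-255 range.
+def _example_equalize():
+    low_contrast = np.random.randint(100, 140, size=(64, 64, 3), dtype=np.uint8)
+    equalized = equalize_hist_rgb(low_contrast)
+    print(low_contrast.min(), low_contrast.max(), "->", equalized.min(), equalized.max())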
+
+'''
+    The trailDetector node has two subscriptions (lidar and camera) and one publisher (trail position). After it receives msgs from both the lidar and the camera,
+    it detects the trail in the image and publishes the corresponding lidar position as the trail location.
+    The msgs are synchronized before processing, using message_filters' approximate time synchronizer.
+    To find the path, the node processes the image, finds a line to follow (by taking the average of the left and right edges of the path), estimates the depth of the lidar points,
+    and chooses the closest point as the goal.
+    V1_traildetection assumes that the path points forward and that there is only one path in front.
+'''
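+# Small illustrative helper (the node below does this inline when publishing the goal pose): a goal
+# that only encodes heading is a pure rotation of yaw radians about the z axis.
+def _yaw_to_quaternion(yaw):
+    # returns (x, y, z, w)
+    return 0.0, 0.0, math.sin(yaw / 2.0), math.cos(yaw / 2.0)
+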
+class trailDetector(Node):
+ def __init__(self, model, device):
+ super().__init__('trail_detector')
+ self.only_camera_mode = True
+ self.bridge = CvBridge()
+
+ # load model and device
+ self.model = model
+ self.device = device
+
+ # define trail publication
+ self.trail_publisher = self.create_publisher(
+ PoseStamped,
+ 'trail_location',
+ 10)
+
+ if self.only_camera_mode:
+ # define camera subscription
+ print("Mode: Only Camera")
+ self.camera_subscription = self.create_subscription(
+ Image,
+ 'camera',
+ self.only_camera_callback,
+ 10)
+            self.camera_subscription  # prevent unused variable warning
+ else:
+ # create subscribers and synchronizer
+ print("Mode: Lidar and Camera")
+ self.image_sub = message_filters.Subscriber(self, Image, 'camera')
+ self.lidar_sub = message_filters.Subscriber(self, PointCloud2, 'velodyne_points')
+ queue_size = 30
+ ts = message_filters.ApproximateTimeSynchronizer([self.image_sub, self.lidar_sub], queue_size, 0.5)
+ ts.registerCallback(self.trail_callback)
+
+
+
+
+ def trail_callback(self, camera_msg, lidar_msg):
+ print("Message received!")
+ # process lidar msg
+ point_gen = pc2.read_points(
+ lidar_msg, field_names=(
+ "x", "y", "z"), skip_nans=True)
+ points = [[x, y, z] for x, y, z in point_gen]
+ points = np.array(points)
+ points2d = convert_to_camera_frame(points)
+
+ # process camera msg
+ cv_image = self.bridge.imgmsg_to_cv2(camera_msg, desired_encoding='passthrough')
+
+ # # increase brightness
+ # M = np.ones(cv_image.shape, dtype = 'uint8') * 50 # increase brightness by 50
+ # cv_image = cv2.add(cv_image, M)
+
+ # # image visualizer
+ # cv2.imshow('raw image',cv_image)
+ # cv2.waitKey(25)
+
+ route, _ = find_route(self.model, self.device, cv_image)
+
+ # # visualize route purely
+ # cv2.imshow("white_route",route)
+ # cv2.waitKey(25)
+
+ route_indices = list(zip(*np.nonzero(route)))
+ if not route_indices:
+ print("No centerline found!")
+ return
+
+        # filter out center line pixels that have no lidar points near them
+ filtered_route_indices = []
+ for index in route_indices:
+ point = []
+ point.append(index[0])
+ point.append(index[1])
+ point.append(estimate_depth(index[0], index[1], points2d))
+ if point[2] == -1:
+ continue
+ else:
+ filtered_route_indices.append(point)
+
+ # find the corresponding lidar points using the center line pixels
+ filtered_3dPoints = []
+ for index in filtered_route_indices:
+ # for index in route_indices:
+ point = []
+ point.append(index[0])
+ point.append(index[1])
+ point.append(index[2])
+
+ # point.append(index[0])
+ # point.append(index[1])
+ # point.append(estimate_depth(index[0], index[1], points2d))
+
+ point_3d = convert_to_lidar_frame(point)
+ filtered_3dPoints.append(point_3d)
+
+ filtered_3dPoints = np.array(filtered_3dPoints)
+ # find the nearest 3d point and set that as goal
+ distances_sq = filtered_3dPoints[:,0]**2 + filtered_3dPoints[:,1]**2 + filtered_3dPoints[:,2]**2
+ smallest_index = np.argmin(distances_sq)
+ # print(math.sqrt(distances_sq[smallest_index]))
+ # smallest_index = np.argmin(filtered_3dPoints[:,2])
+
+        # visualize after-processed image
+ visualize_cv_image = cv_image
+ print(f"{visualize_cv_image.shape[0]}")
+        # smallest_index indexes the filtered list, so look up the chosen pixel there
+        circle_x = filtered_route_indices[smallest_index][0]
+        circle_y = filtered_route_indices[smallest_index][1]
+
+ # # visualize the lidar points in image
+ # uv_x, uv_y, uv_z = points2d
+ # for index_lidar in range(points2d.shape[1]):
+ # # print(f"{int(uv_x[index_lidar])},{int(uv_y[index_lidar])}")
+ # cv2.circle(visualize_cv_image, (int(uv_x[index_lidar]), int(image_height - uv_y[index_lidar])), radius=5, color=(255, 0, 0), thickness=-1)
+
+ # visualize center line in image
+ for index_circle in range(len(route_indices)):
+ if index_circle == smallest_index:
+ continue
+ else:
+ red_circle_x = route_indices[index_circle][0]
+ red_circle_y = route_indices[index_circle][1]
+ cv2.circle(visualize_cv_image, (red_circle_x, red_circle_y), radius=5, color=(0, 0, 255), thickness=-1)
+
+ # visualize the chosen point in image
+ cv2.circle(visualize_cv_image, (circle_x, circle_y), radius=7, color=(0, 255, 0), thickness=-1)
+ cv2.circle(visualize_cv_image, (0, 0), radius=12, color=(0, 255, 0), thickness=-1)
+
+ cv2.imshow('circled image',visualize_cv_image)
+ cv2.waitKey(25)
+
+        # publish message
+ trail_location_msg = PoseStamped()
+ trail_location_msg.header.stamp = lidar_msg.header.stamp
+ trail_location_msg.header.frame_id = "velodyne"
+ #position
+ trail_location_msg.pose.position.x = filtered_3dPoints[smallest_index][0]
+ trail_location_msg.pose.position.y = filtered_3dPoints[smallest_index][1]
+ trail_location_msg.pose.position.z = filtered_3dPoints[smallest_index][2]
+ #orientation
+ yaw = math.atan2(filtered_3dPoints[smallest_index][1], filtered_3dPoints[smallest_index][0])
+ trail_location_msg.pose.orientation.x = 0.0
+ trail_location_msg.pose.orientation.y = 0.0
+ trail_location_msg.pose.orientation.z = math.sin(yaw/2)
+ trail_location_msg.pose.orientation.w = math.cos(yaw / 2)
+ self.get_logger().info("location published!")
+ self.trail_publisher.publish(trail_location_msg)
+
+ def only_camera_callback(self, camera_msg):
+ # process camera msg
+ cv_image = self.bridge.imgmsg_to_cv2(camera_msg, desired_encoding='passthrough')
+ # cv_image = cv2.resize(cv_image,(480,480))
+
+ # cv_image = cv2.fastNlMeansDenoisingColored(cv_image,None,5,5,5,11)
+
+ # # histogram equalization method 1
+ # cv_image = cv2.cvtColor(cv_image, cv2.COLOR_BGR2YCrCb)
+ # cv_image[:,:,0] = cv2.equalizeHist(cv_image[:,:,0])
+ # cv_image = cv2.cvtColor(cv_image, cv2.COLOR_YCrCb2BGR)
+
+ # histogram equalization method 2
+ # cv_image = equalize_hist_rgb(cv_image)
+
+ # # histogram equalization method 3 CLAHE
+ # image_bw = cv2.cvtColor(cv_image, cv2.COLOR_BGR2GRAY)
+ # clahe = cv2.createCLAHE(clipLimit=5)
+ # cv_image = clahe.apply(image_bw) + 30
+
+ # # histogram equalization method 4 CLAHE
+ # cla = cv2.createCLAHE(clipLimit=4.0)
+ # H, S, V = cv2.split(cv2.cvtColor(cv_image, cv2.COLOR_BGR2HSV))
+ # eq_V = cla.apply(V)
+ # cv_image = cv2.cvtColor(cv2.merge([H, S, eq_V]), cv2.COLOR_HSV2BGR)
+
+ # # increase brightness
+ # M = np.ones(cv_image.shape, dtype = 'uint8') * 50 # increase brightness by 50
+ # cv_image = cv2.add(cv_image, M)
+
+ # # image visualizer
+ # cv2.imshow('raw image',cv_image)
+ # cv2.waitKey(25)
+
+ route, pred = find_route(self.model, self.device, cv_image)
+
+ # # visualize route purely
+ # cv2.imshow("white_route",route)
+ # cv2.waitKey(25)
+
+        # increase the brightness of the image where the model predicts road
+ sign = cv2.cvtColor(pred, cv2.COLOR_GRAY2BGR)
+ cv_image = cv2.add(cv_image, sign)
+
+ route_indices = list(zip(*np.nonzero(route)))
+ if not route_indices:
+ print("No centerline found!")
+ return
+
+ # for index_circle in range(len(route_indices)):
+ # red_circle_x = route_indices[index_circle][0]
+ # red_circle_y = route_indices[index_circle][1]
+ # cv2.circle(cv_image, (red_circle_y, red_circle_x), radius=5, color=(0, 0, 255), thickness=-1)
+
+ cv2.imshow('circled image',cv_image)
+ cv2.waitKey(25)
+
+
+
+
+def main(args=None):
+ print(torch.cuda.is_available())
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+ model = load_model(device)
+
+ rclpy.init(args=args)
+ trailDetectorNode = trailDetector(model, device)
+ rclpy.spin(trailDetectorNode)
+
+ trailDetectorNode.destroy_node()
+ rclpy.shutdown()
+ cv2.destroyAllWindows()
+
+if __name__ == '__main__':
+ main()
From efd93a159fb62fd79194cfa2e0d93fe21319a426 Mon Sep 17 00:00:00 2001
From: Frank_ZhaodongJiang <69261064+FrankZhaodong@users.noreply.github.com>
Date: Thu, 31 Aug 2023 16:33:30 -0400
Subject: [PATCH 02/22] add psp model, change the coordinate conversion
---
.../__pycache__/jpu.cpython-310.pyc | Bin 0 -> 2452 bytes
.../__pycache__/jpu.cpython-37.pyc | Bin 0 -> 2441 bytes
.../__pycache__/model_loader.cpython-310.pyc | Bin 3390 -> 6824 bytes
.../__pycache__/model_loader.cpython-37.pyc | Bin 0 -> 6861 bytes
.../__pycache__/model_store.cpython-310.pyc | Bin 0 -> 1425 bytes
.../__pycache__/segbase.cpython-310.pyc | Bin 0 -> 2194 bytes
.../__pycache__/segbase.cpython-37.pyc | Bin 0 -> 2155 bytes
.../v2_trailDetectionNode.cpython-310.pyc | Bin 6580 -> 7265 bytes
.../__pycache__/vgg.cpython-37.pyc | Bin 0 -> 5925 bytes
...izer_v2_trailDetectionNode.cpython-310.pyc | Bin 8399 -> 8830 bytes
...lizer_v2_trailDetectionNode.cpython-37.pyc | Bin 0 -> 8356 bytes
.../__pycache__/__init__.cpython-310.pyc | Bin 0 -> 330 bytes
.../__pycache__/__init__.cpython-39.pyc | Bin 0 -> 328 bytes
.../__pycache__/densenet.cpython-310.pyc | Bin 0 -> 8119 bytes
.../__pycache__/densenet.cpython-39.pyc | Bin 0 -> 8190 bytes
.../__pycache__/eespnet.cpython-310.pyc | Bin 0 -> 6119 bytes
.../__pycache__/eespnet.cpython-39.pyc | Bin 0 -> 6085 bytes
.../__pycache__/resnet.cpython-310.pyc | Bin 0 -> 6189 bytes
.../__pycache__/resnet.cpython-39.pyc | Bin 0 -> 6424 bytes
.../__pycache__/resnetv1b.cpython-310.pyc | Bin 0 -> 7205 bytes
.../__pycache__/resnetv1b.cpython-37.pyc | Bin 0 -> 8293 bytes
.../__pycache__/resnetv1b.cpython-39.pyc | Bin 0 -> 7945 bytes
.../__pycache__/vgg.cpython-310.pyc | Bin 0 -> 5493 bytes
.../__pycache__/vgg.cpython-39.pyc | Bin 0 -> 5965 bytes
.../__pycache__/xception.cpython-310.pyc | Bin 0 -> 9518 bytes
.../__pycache__/xception.cpython-39.pyc | Bin 0 -> 10103 bytes
.../base_models/resnetv1b.py | 264 ++++++++++++++++++
.../trail_detection_node/jpu.py | 68 +++++
.../trail_detection_node/model_loader.py | 96 ++++++-
.../trail_detection_node/model_store.py | 36 +++
.../trail_detection_node/segbase.py | 60 ++++
.../v2_trailDetectionNode.py | 43 ++-
.../visualizer_v2_trailDetectionNode.py | 61 ++--
33 files changed, 597 insertions(+), 31 deletions(-)
create mode 100644 trail_detection_node/trail_detection_node/__pycache__/jpu.cpython-310.pyc
create mode 100644 trail_detection_node/trail_detection_node/__pycache__/jpu.cpython-37.pyc
create mode 100644 trail_detection_node/trail_detection_node/__pycache__/model_loader.cpython-37.pyc
create mode 100644 trail_detection_node/trail_detection_node/__pycache__/model_store.cpython-310.pyc
create mode 100644 trail_detection_node/trail_detection_node/__pycache__/segbase.cpython-310.pyc
create mode 100644 trail_detection_node/trail_detection_node/__pycache__/segbase.cpython-37.pyc
create mode 100644 trail_detection_node/trail_detection_node/__pycache__/vgg.cpython-37.pyc
create mode 100644 trail_detection_node/trail_detection_node/__pycache__/visualizer_v2_trailDetectionNode.cpython-37.pyc
create mode 100644 trail_detection_node/trail_detection_node/base_models/__pycache__/__init__.cpython-310.pyc
create mode 100644 trail_detection_node/trail_detection_node/base_models/__pycache__/__init__.cpython-39.pyc
create mode 100644 trail_detection_node/trail_detection_node/base_models/__pycache__/densenet.cpython-310.pyc
create mode 100644 trail_detection_node/trail_detection_node/base_models/__pycache__/densenet.cpython-39.pyc
create mode 100644 trail_detection_node/trail_detection_node/base_models/__pycache__/eespnet.cpython-310.pyc
create mode 100644 trail_detection_node/trail_detection_node/base_models/__pycache__/eespnet.cpython-39.pyc
create mode 100644 trail_detection_node/trail_detection_node/base_models/__pycache__/resnet.cpython-310.pyc
create mode 100644 trail_detection_node/trail_detection_node/base_models/__pycache__/resnet.cpython-39.pyc
create mode 100644 trail_detection_node/trail_detection_node/base_models/__pycache__/resnetv1b.cpython-310.pyc
create mode 100644 trail_detection_node/trail_detection_node/base_models/__pycache__/resnetv1b.cpython-37.pyc
create mode 100644 trail_detection_node/trail_detection_node/base_models/__pycache__/resnetv1b.cpython-39.pyc
create mode 100644 trail_detection_node/trail_detection_node/base_models/__pycache__/vgg.cpython-310.pyc
create mode 100644 trail_detection_node/trail_detection_node/base_models/__pycache__/vgg.cpython-39.pyc
create mode 100644 trail_detection_node/trail_detection_node/base_models/__pycache__/xception.cpython-310.pyc
create mode 100644 trail_detection_node/trail_detection_node/base_models/__pycache__/xception.cpython-39.pyc
create mode 100644 trail_detection_node/trail_detection_node/base_models/resnetv1b.py
create mode 100644 trail_detection_node/trail_detection_node/jpu.py
create mode 100644 trail_detection_node/trail_detection_node/model_store.py
create mode 100644 trail_detection_node/trail_detection_node/segbase.py
diff --git a/trail_detection_node/trail_detection_node/__pycache__/jpu.cpython-310.pyc b/trail_detection_node/trail_detection_node/__pycache__/jpu.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..96f976eaeff4e2dbe8bba73d606c4b100bf72eb2
GIT binary patch
literal 2452
zcmai0UylFPtD^8@6|daN1IZWkAgD;>RzXGg
z!8Y=0EAS6i*#QTgF9`U-w>pmT`l=WWwc*=otd$FAbg1%?1;*8*p;QV_H$fDod0Ix1
zaq`@FGrtW_z<=Xx=cX~lSnj4;b{d9jne|MN=EE$`r8Zv8n~$W*WftjlERCm2l?rM6
zVJrj$-2@`d;xa9Ad%g+tLJguUej?Q~Y8p|LWUU0V
zM!R}rdz5Bkqf~L4MM9P`!QG;~5b}8C!{KOs_(aXatONMc73Vw+=^7ocy>?0K^9Upk
zn9T>u7@+I$=!-xqG9eW`qr*Y(b6ZFHZVapMdI>ffbEI
ze+FbxG`3hATPzM#C>D#Y-KApj6h=5DK$H0{xD#e7p4@~---pt9y)-5q65p)=GtQC<
zi<9Br2QCzgvrkJl@q~9s4;hvN%;whqTdjj7>)@@{4QmaEQ#q@1KhTrLfp?52%_G*K
z2;XSkI%3@v%{N-Nk65=v>y6ec79!eP(-$xlM&5$M4M@~goFGKppZ*7n)8{}Sp{@G8
z`Pvk(ZHn(rv8p;J6f%36vplou7v7VPM=~$dI8%6&hW{vUKQJ!B%9k^Cb;hpDV5l+2
zh|A`sxVaFoEX1oFcUDr(G>?*7Y4tG-q?r(YJF^$s&!{{Uo#
za_2O~
zEQw3Cj}zCdyr$UGZ7XiB3J5}_yKQme0E$#9gHK+fVD;@~&%=u<#Gp{w*9{8s2C
z>QL{+8XGt9LTkiT?}N?K1`CwLD!|mz__rY-tMudtou=`lNE8VG>ow4tz?5#S$&N!!4@%Mv+?d&c(>&-D=(YM3xZ!`f@`xb@GXW^W5?knpe=fe
Kdd?|!`sDvy&pjjn
literal 0
HcmV?d00001
diff --git a/trail_detection_node/trail_detection_node/__pycache__/jpu.cpython-37.pyc b/trail_detection_node/trail_detection_node/__pycache__/jpu.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ba624d87e89303e64e1f29b1fb1b3dbf84126f4f
GIT binary patch
literal 2441
zcmah~-EZ7P5a0FMKHpbT+K_%wAmssid?X@G5rPV#R!XJPMip%Y@{8qo_bzUI_I1}s
z=*f9X5l{IaE-y$t@L%vZ@UX8u-M@gu$IRN!g|t!E^33e)cy~NI^PAh!R_t7`!nfN<3iJS;NZp!;)Bj6egh8m^x`8i3T3#Hh&x&O!#bZa
z*lR|CY(~v$f8-b*TvGReK*bZGU6sq23*8un
zoI|j+$Kxc-<197j>sFe{VUUE+h3rw?41y>Ll?sAi$zR{B?Pf!rR|lYom{M7URwW?Z~l`D8R+9X*$Ikd+7jNvs8WmA2^Q(y8lNt^Yr=&S(QW~drHHhfO~2M!7$_F1m#W;JR65exgs&8bo+LgN4r}P
z<8^LgATOi3Tg5pNNfHEemmn+NgmsCYnT)`&DJ%q!x^&q!x(KGn1FIK@`WVQ(NKA29
zrZ_C9Ml23frzeWTl^EL?BOKMcU}mD&b02lWZmZN
zGp)NXS$BBnOzQ;$3Ei#Pzc3_5-GIR+r002@AS67V{R4}$zkxs+8})^HZH8Ai!|!HT
zN4-Uf%v@xdT_)*TkHoXFNb@*MB;KU9zZLhlwSzEnvt%13yHUbUv&Mu^^|81<7jMkP
zjh<5$QaetAXg5p&Xq0yLVxI5HB|OW03e_}FbtWGdn(^-nSg0?65V~O5bQzL*fu7=L
z=_|WQ2l~VuClJbUVCG`!v2n2U6k$=IK#|6x^z0zzWV#98&jt{}OF1R+p+J25m)5z5EVTWr7|p}S!c58z5NfPPuG
z7|nP*lo+C3M}B~xUq{lzJ#5n+=%$97%!M3f&=rJs@=Qj%nnhtQKSbM)kRZ0mPk_|7
z??TfV=gN2bBFvS%2}{0>@*K)a3`4a05?*x<2%*>M61@V?0)3x))})PBd5o$OvqQdx
z`viuA`f-OP>Y3)8RIBzPZthi%8PP7y6R@n}|!l)~3LN@hihn}}w
HcK+OdIXN?e
literal 0
HcmV?d00001
diff --git a/trail_detection_node/trail_detection_node/__pycache__/model_loader.cpython-310.pyc b/trail_detection_node/trail_detection_node/__pycache__/model_loader.cpython-310.pyc
index 9b9029ed969ff988e7ccbe4405900b9c4afb469e..40bfbde6c5e6e7fef1fc9362c1c3bcd7a5d67f7a 100644
GIT binary patch
literal 6824
zcmbtZ%X1t@8K0i_KD1h`mBewtOdiB+V#}5+C&39xY$r}gg+;NH2U}DoqwU$%%Dc0h
zo>@P{h)Us5frE46030ZZ;^G5GDEL?A2nR|xisBTC3cs&sc6KGN3016Wdb+>vp6U6%
z@Athln42pq`1Stucm9vBDav1IF#XdpcokRpM>JgFtf%x@ld-g~_L-_MrLQ%0P*kq<
zjK0}4vstTYfu{HDexX@l${PwdxcRBVO`mNS?`X{uC>FOtu`@~;lmah;Qp_lG1%;P*
z`BSA?!Q33LV6KwQ&4V(}tDsafN)?m^UIV4(7x0D!&eoOsu>+i4X&f-Lb(R=6I-TX`
z&?&F`oy%V6e{aD3UW?XD|LDu<7oJ1oD^13grpg(8rLS%4ZT&N)&A85uPYX?a7q@Um
z^D{+Pnik$^C&tw)jTb_pVfWRImPwDweirTqt`e^BFK8kqVzF{Z-Bb6$%~*>SSBvqk
z>#BG-(qo;gciFZ9su@_3#kD;&h+>{YoHGj8wGctMUapN(s=CO$m0<5;ZhE%E~B+Z{;lw`^Y}
z>8~pol_sNlxgj}mk0r&S@FU@M1D}7fI0~)@gPVb~>9wwJ4g&w8^P}B^1+e7cI5mn&
z4df=!QIdjxtJNEY-5Y*gPxLL{<4GxO4TSFw2ZP@E>A`AZd!t)D@3t@Mny6s6n5U+S
zrv6Z3grlJ^1W6&WUAG%_BiBtdY>~%I?6&VkBjJaFgq4)9je@A#_g@!cAQH1Z5PdI7
z)F4QT>;A_hACq1$G161W+iE(xfTu$b-o#PmArkY-Ah@x@Cx;RAwMof!LqGDONVx7j
zE73#0*G|l!)$_tIvB%sIi?nur!a#Qu>YEe;oX3^lnwZybdZH78&jr_&+wnlB_#
z0CWAWzFUY3vGs_uTa>Gcu|0EDDK5n}R+Z(da?EC~nv0FSLOiz*!w5JQ#SNUHB+pfm
z`zjCESCRWFv@a?HgfLh4jK+b~c6+ni14=xxaZgQj;(Rja^}3zFg~|uM2&beimS<7MwQ51otiqIXm}b9F}j7jsMBF(
z^9;S)0<@sO{OcGwVeGK*J;JeOgo<@b87J9WrYw1iG7O^1z6ND6B!
z$60kz(jM$bR$%@3!v3F$3R9F408r)z
z=-(#a*ipxRft#J&N7U`qD+uDDAZ`lsiDHf#X|u^VLd|PLxJr$b5t%{xdEz1Jk%<-$
zQ}YNlq&t({;u|z{sFA|Ubxl6%MbJZfdPN=Eu8Oo+PpLceDbr`0Dtc#bs%~KbfGZOt
zlNl^BT^h`k4sN7I%ZLy=#4t~}88TuW=E}%ShbdmbT1y%-Q?50q)|zsyDb0l)>trs(
zvv_&EAkO2KazZk0nT9LWklIbKE-qkLwKpi7a#%xT+Y>44x%mz1bC}uOH>a@mzA2v%7lS;}^
zt%=SNz%b5Ezzap%o(^Fd1c+3y+_($nCyNfcNf5Y;-X5U6Pj?6$V%7GweG2Pr3YhAJ
z#sR(hKtm(rovX0>*3jd;8*~zVv+IQ$h+3^~Fzk6P|8Kwk75)DF3b-usA@yEzQ!@8a
zddx{QiGIz0``yHPO$>%)dX3HApmn`&rq&(~!fkDNLE!gb-^V@iP1^l5?XE}E(C(>I`Z`e%
zX@^O3eA-M;Dzcqw{=Q5J#jBW@saC|WAK=PNc#Ty{_iF%≥Rh;6-#IL?V&0jfezc
zu}lZ<>aiNLHtQmE=gJ6Z>nZ_@wcoMNv2qV;w$Ju8NZ4qka>Zx8jVEo?^%MuWsKoa{
z2K5?lNg2YLu$=^wlw_orhImt-M2tWPxwgKxd~2Bk%pM4mn>qrp6C{*>3;NO)u0ua!
zJ^iGw9YZAti47-}f&|G95*vfQA(?mAZVRv9Pyz_7T*fZ^=GbM!=AL*hcgVQ4#Rc+A7$r5^F&7hTM(B@{)DiU;N
zL%fMoO1X&B)FT^rK&Bn>ki->fx3p4;1}LO)!@?!*4I?BXmu_^34J#b^#*Gdo0LyuA
zCGVZjd#h3oiP3_TmeboxdOM%qRv~^0!=w&K1d8`*IKz2GpoioOD}dVgZ;{d8akg{(
zh?r*M?~otc!?TY1v51kXBgm%k8vYEjnD{O&o24)uS>lmdJ)5O~+`ezt$Yu+eg=CNH
zHKWotf^C>k@|I@DpxVJ*6$)rIQaPQ$VK>AQS}8nOMvZs-2o|J6oA|#?TJa$@r2WFB
zhEwwqHDo%3hbA$ifoN?dTFZ;>^N)!^`5@#)H^_@_kQd!nkKj=H$xrY=>8z^kWcCM%
zJaSYbV>f9Yag+axPn0&7UPG#CD`i$K3!>czaY7^gLelOr#K{|S)>b9d;fSO^q*}&w
z4Z}nn4VyyAYvYhtH1MR{D+&ybf}BH1U<=i|R7B7tW6z9?Z4@r3TtdE(+whJmFXa~V
z$tjCz?Ag2kFNutb7v=Gpw=7EFmK$BN%XKw%RUhCKT?MI68bK0>Q(G(ELU%(n
z(8BD}sq3W_*L_+e{Uz}&(T3r0mN^z{w2s_R-AIx71QEy{3wmpc(HZdkIVLl&NLg?V
zptL7mF=Om(R37q*gm7R)dc|E>JtV?F3nIUFF@f(7=n!;1@g6lZ{Npb`4@nOwmx6~J
zzxyjaB(dmcv^iC>Me>kDd$5v`D;g^qXYd~dsbXnrqi3m&A-#zSFc%XOhiRbn5#yUP
zzv8U7{J?kCJQ2dZIGDXT5Z8-E2R{&z*de72s#H^&Ies@u#E}loovv-BIkX%(hE4PzU@()
zvWsN?#8{O=sY=64l`;V>J>fia<-N6Q&zyEv&MiML50cnpF`-&Z25$T#)gK$YaEQU!
z*ugK7e^T2T%$?pv=uc4z3OR^zWH2blWB^eTGQ`u7$tg^v_+>mJJtNnUC9jZaoXV2*
zsVo^W%2Rl6mjbJ!xp5Zz4=|tFGHKOh3P^S%O@zj22y0I->H-a?^4D@rG-hD
z!D15`6O5j$u9@N`thHoy&5~=askNy0VXc*=$mY=$IVn@g)E!$s$*5(V@xi&WJ(YV<
zXpt~CQCsJ6tqPejKPXxL0~Tk=at-!BO_K9une9|ECT3?rnVPig>IR~Z!&$Ro?Kb1+JdO{T4KxiW+@2L2=>@`xYQm6vh0frHY#}Iz96n78va+3
zR$4kVv_ii_{1fC%5)=P8L1j&rz0&H8OhGBpkfDfl74r2{PD#2anXuR|rIopt88(C`
c(3F)L;#gZPvzl5vS9@HYxAmG;duXxxAJJ3B_W%F@
delta 1114
zcmZuw&rcIk5Z>4Q*>1a(QlLN)L?Nz31O#IuQU#QFpo#HkysTw+DVSxK%(m#oc#yLZ
z-w7`s^=QJ$gC?HzUvO_8Ie7J`&OAt~#!dFin|brTnfczlx3eGnttgkvNceq!^3{7|
z?OJ8#?T(+hu`GFtr|w9e>a&OXGx@%O9?jFyqaS(F=rO!BdeTRpjK^+DcIJTiss{|w
zW{J98uTPzKcRP#wL+lId%b^=Af({DkPSm)$fHpogGL&^isr$Q&XN
z<+K_UR2R@67r2)Guz4Dnb{eKAQZNdNvt&*ZOcS)UhI44zx=y-aWgA<-Sji(C|tw1x$Omd3D1W}8X0B9tXnC}B*Z8Ir=Zl)|iE
zc?*_M-zqEfy!o8j{IyvgtH0W#?|lXP6p{+5L=us+L`equH?z!!`A>6T;v_jn5fW=7
zZ1~`~>$NcSgZO`1-sQ{L+v7H?+6-3kjGg*Jop$M>YMPr>{y95XnWq%(-F9PC+a3Im
zMy9~>rnp+$OpHbZ?)pR+LNEW(H+qjQDUtDk0u4o?twdfk@Ws#_PL1fZokXm}kQ;OO
X-1ab(1oN~%nHM6tSk~SQ8i0SFBer|PD
zeO1LP^?F6a)Bp3ob=g2qBk!33Wk$||GMiIoK{+AkK$#25SpS3+Yg+T<0Zy*190-(6ky^L9-GvuX
zsjUUwD}EHbJCH%YO=HFn{bqWi3n&7uC8XBUrJ&yz7@KCt{9Nk@X-ey}atpsmmT*4%
zbIsIRCG6*<*0rlEFGtG2>}xmLHZ6sxl)qJcqranwwOAzDU42jA2M-e?(L5u;J~wrB
zHZ~Je>i5K^1*#o3;*vD>oJ8Ljcg4>{T;8lC+NLcvY5rV)OuMgPRJEYuWCgWWP%Wty
z)Oy^Ib|Mn%F6}xKm-l96DG}1yr}hbMqd#xYC5HOz12ax0+TJNy2K{fcLN_GqT{CpD
zde_+3NdmxtL%*+^!5&5G%hI26E@rD7+aCZinu=-@ds%2!l)RPj?SafF%cyqUdQl
zYG8?`P|?zIu-)#DqTa0_H8%oZrq!rDP=Plb4Ejrl8_TKVkGA{%oj^4oO08%#43x5H
z3CHt#VK4T))W8TnQ|fdAKOU(dQglOU?fNK;dt1SqN)1$McLr+9k5fGi)5==#$tXal
z-%qV@-%0JT
z-S?v?b;hhv#D;Wc!YywdGMZLGoX6vzmfAOO`>Gp(sb$aO>7Ms{?Y}q9Zw$7A^E(^<
zKn}w0c{PX@z1z|GNVU%s=lh-v;-DS(2B8;1=8q?Ckv4e!fiD9!KfI%6ahx*V$VH)v
zhOmVrX7z@6nEx7L7JsH_=(ac|b{Z`C^J8we>Hg@M!Ao4eiZ}WX3doO^h<&lEqb5kM
zcOiLudSakvcC}r|{hpDSXxXTlOfR8k@mMFeH%p0~l=hr`*rZIdNk64|&)h91<)rk8
zwp-y*mBcxAR5ht44o1~@R4oz5j;bftUOB1n!{R~XD(as&LzU0f;CYQNnAhNW4Vo9%
zpl4)>td#>6XlK3Gha&juMpI8s;&@v3`@L@HL9W9dzYY45L-zHiHNL;
z3jOIOUiv;MocygomMY>&(01yXJmqs?TA|3HzZF1{E1n0=jrvrtdEO@@zn}G}hiJd*
z6+dootUzm^i6q2oiOBU_W7>^~Htb_Uv6?{=-0r_{TIjWJ}P$#K4MaAP(usYCe1%E|r
zmRP>klSKFy6}*H?wWp{!jbePmg-G&dub?NQZ8Vs)v1x0tLED+h?&qjABP1`Z88IC;
zK%3a1+@_IrGhoR~FfOxeRt)bUfdwlpm6e#VXjX1rY&FE#65BeP$J&R++C0`~i{d0^
zZc)^;*k7GN0b`<`qec@&v#iciotekndY+muP;rh5<`8pm9?dZa&r=sqWUE8|k6NH&
zkqTzrgny(^-^Uw0j6wqpuriA|d`W@n%YXnGpC4;*S&_o1i&VUbA}x8I9JHZ|q_yfL
zDhh2?FH^JFb$o-Yqa-fW9g(6s!o-$ybrz!I^^|`8SWnrB6YRvnsbeP&!0~_+XR(~M
zP0XT)*hpNYH)^4nQZpgN3x|Eb9dv*9TYCQTJzO~}
z5-SMvkXcHXqn<#Kn%9GO-cL(!sKJm7wYAyF^UVFn2!=MjuWL%r5PpiRr
zQ;Zp3#zY)wOJYq#74j5tD-Z$_1xM_h82d;^)rl~Xe(Y7e5g7+40Xqk%Zk
zP0*oOLJN_|>@^8ufH$I*G_f;V>@^lUXG-k;Uuh$r{)l#UEGorH#@WZn3xzzNxgzZer#iP85f^1pjk&ZFOOLfxOc`=CkH#xT9Hka_TKm+IWk{
z$ZHJNW6dda#sSi0I)h*cfsaL}PiMTletUvcrc~F01gV^+h
zb{7^ihDDU{2cZu(*WnSOUgA0!MS8+@SWVQ+aRu6Dbo4~OPwnbdyEfIXPqiDYbM}fN
zBw+4m7WGv$4@m9dJh586P5mU=sR2#MoQxBfr9X@jm|VWqrJuYg5rN$5QjD-r)E0}{
zQc+uGey3I&cd?Lt7PHS%_F2Xyv%J$yGk&Qz=`gvQ!V184kWLiijR+h`>uSXxdFHcX
z=h3N4IjVpU;MpFQnZ~nNkSN0+Cr=!>2F_0~%*cDmV@J$5)obRxUg*ovkc2a$H?vQ*nj*>r`b<6aM8uP^J`)m3(a~A20Yr?2l-M|&(6Hfk
z!*o!9yHX&cg;fef4AGd@XbBmCvD5ESM!}4&Jv+8Gk;S0A3E`l0fQ-7V<_7VN!XR3E
zjw}Nm5qHT7e@oN1UEG_nqPoW%jY#LxUCW){Fm0rMQfWhVp1oVQ2y
zlHiQ2CfNYiZ)gbv<1Q>6qGsqz29dfVaQaF(`72tBWdXPhq#SQPmcOryl*GzkjQ4QF
zsj4E%@sfDhCzAq*Qf4{KA|^ABm@LK@8$Yy?+BoeBawrKnlOv70)(*nJUG-H2z;V%g
zd!TMsDlQ%n5i24}j<|WQc(|EmUffGA%sCTi%F1YKJy7nT<3jg?E<$a={xn-UwXI
zU(qhokCBv#=Xu&VbAy$lSfRUEY*hv4@|+ifIttWfk@9lG?4)jdXBfE8$)Mwp`thYR
z$VZ{);)OG>xRd(XV#%DgaADyL;==Keg^P=4n%QY4c_N%=u)c|ePcfOIVdNQhe98g2
zoJ?{N*h_HW=(&m`xtBP{bm`+ToG*gvk6$TPCGM=<3ELZL5F!g9-K)qvMd#jHec!#}
zXCZj8X?{qg2ynV}z+|z?<gy`0y_!A^o{|WX!Z~r{2IOr
z@;1F8uw;)Q~wA351Ey+#Pb1I{zG!S%uKz5UZZu3yk4Zp?3PCd9_+H_oZ|vqV`8oy7si#?YrR@_5hV59nc+T@xwxakO(NN+F<4h1b&COL&*n1+>2~kp2@oi$Hn~
z0FedIc_59ZV^$dOVs>IUIU57Z}3JWy3R>cZ#ar*&AHTJ
z4`@CNGe`I2xH;b$g`7f0E*x3Gaa|EtS_b~Lk|l25r)DV%y2L+4aFg2jKMS&ToJz}*
zJuH9}cd)a!&ZGHtLXYh5DdeS}@wCI77pQl4wf|HwUieB2$^Ek|PcnZY#c9sCf&tf(<|SwT-0o7r
zTSx@;u<0&jUg|hqmWj?q`OI{WxsK~iahez0>{wrNRl>_oB-ln?H(M9fi@t`g9)QVF
zg??)NNY;2wDpKJ;$