From 332285e5a63aab534d021abb4a99e6b109832e34 Mon Sep 17 00:00:00 2001
From: Keita Watanabe
Date: Fri, 31 May 2024 12:51:54 +0000
Subject: [PATCH] update

---
 3.test_cases/torchtune/README.md              |  6 +-
 3.test_cases/torchtune/docs/LLMOps.png        | Bin 8031 -> 19591 bytes
 .../e2e-llama3-70b-development/README.md      | 39 +++++--
 .../configs/quantize.yaml                     | 63 ++++++------
 .../full_finetune_distributed.sbatch          | 95 ++++++++++++++++++
 .../{7.generate.sbatch => generate.sbatch}    |  0
 .../quantize.sbatch                           | 36 ++++---
 7 files changed, 183 insertions(+), 56 deletions(-)
 rename 3.test_cases/torchtune/slurm/tutorials/e2e-llama3-70b-development/{7.generate.sbatch => generate.sbatch} (100%)

diff --git a/3.test_cases/torchtune/README.md b/3.test_cases/torchtune/README.md
index 6737f64a..4c2a1b85 100644
--- a/3.test_cases/torchtune/README.md
+++ b/3.test_cases/torchtune/README.md
@@ -4,11 +4,11 @@ This guide demonstrates the comprehensive process of developing a Large Language
 
 ![LLMOps](docs/LLMOps.png)
 
-1. **Data Preparation**: The journey begins with the collection and preparation of data for training. This step is crucial as it involves exploring the data's characteristics, performing necessary cleaning, and applying preprocessing techniques to ensure the data is in the right shape for model training.
+1. **(Continuous) Pretraining the Language Model**: The language model is first pretrained on a vast corpus of text data. This step can be bypassed if starting with an already pretrained model. Pretraining is essential for the model to learn the general patterns and structures of language. Refer to the `torchtitan` test case for large-scale pretraining with the latest techniques, such as 3D parallelism and `torch.compile`.
 
-2. **Pretraining the Language Model**: Next, the language model undergoes pretraining on a vast corpus of text data. This step can be bypassed if starting with an already pretrained model. Pretraining is essential for the model to learn the general patterns and structures of language. Refer `torchtitan` test case for the large scale pretraining with the latest techniques such as 3D parallelism and `torch.compile`.
+2. **Instruction Tuning**: The pretrained model is then fine-tuned to cater to specific tasks by updating its parameters with a new dataset. This process involves partially retraining the model with samples that exemplify the desired behavior, thus refining the model weights for the particular application.
 
-3. **Fine-Tuning**: The pretrained model is then fine-tuned to cater to specific tasks by updating its parameters with a new dataset. This process involves partially retraining the model with samples that exemplify the desired behavior, thus refining the model weights for the particular application.
+3. **Alignment**: The instruction-tuned model is then aligned with human preferences, typically by further training on preference data (for example with RLHF or Direct Preference Optimization), so that its responses better match the intended behavior.
 
 4. **Evaluation**: Evaluating the LLM's performance is a critical step. It involves using various metrics to assess the model's accuracy and effectiveness. This step is vital for validating new techniques and objectively comparing different model releases.
 
diff --git a/3.test_cases/torchtune/docs/LLMOps.png b/3.test_cases/torchtune/docs/LLMOps.png
index 977962758a23e5d0f851098b7ba52e42737cf18c..fbdb82cd3bde228e200330386308f9ed9c6384b8 100644
GIT binary patch
[binary image data omitted: docs/LLMOps.png updated, 8031 -> 19591 bytes]

diff --git a/3.test_cases/torchtune/slurm/tutorials/e2e-llama3-70b-development/README.md b/3.test_cases/torchtune/slurm/tutorials/e2e-llama3-70b-development/README.md
index 583759c4..ec65f64e 100644
--- a/3.test_cases/torchtune/slurm/tutorials/e2e-llama3-70b-development/README.md
+++ b/3.test_cases/torchtune/slurm/tutorials/e2e-llama3-70b-development/README.md
@@ -1,10 +1,11 @@
 # End-to-End LLama3-70B model development with Torchtune
 
 In this tutorial, you will see how to:
-* Pretrain
-* Finetune
-* Evaluate
-* Deploy
+* Continuous Pretraining
+* Instruction Finetuning
+* Alignment
+* Evaluation
+* Deployment
 
 ## 1. Prerequisites
 Before starting, ensure you have requested access to Meta-Llama-3-70B by visiting [Meta-Llama-3-70B](https://huggingface.co/meta-llama/Meta-Llama-3-70B) on Hugging Face and following the access request instructions. Additionally, make sure all prerequisites described in the [slurm](..) directory are set up.
@@ -64,16 +65,16 @@ This output confirms that the `torchtune download` command has been executed wit
 
 By following these steps, you ensure that the necessary model components are in place, setting the stage for subsequent tasks such as pretraining, finetuning, evaluation, and deployment.
 
-## 3. Full-parameter finetuning
+## 3. Continuous Pretraining
 
-WIP In this step, you will author Llama3 model using c4 dataset.
+In this step, you will continue pretraining the Llama3 model on the C4 dataset. This is full-parameter training: unlike the parameter-efficient finetuning in the next step, it updates all of the parameters of the original model.
 
 ```bash
 sbatch tutorials/e2e-llama3-70b-development/pretrain.sbatch
 ```
 
-## 4. Lora parameter efficient finetuning
+## 4. Instruction-tuning
 
 In this step, you will fine-tune the LLaMA model using Low-Rank Adaptation (LoRA) with the Alpaca dataset. We will first cover the basic concepts and relevant configurations found in the [config file](configs/lora_finetune_distributed.yaml), followed by a detailed fine-tuning tutorial.
 
@@ -111,6 +112,10 @@ dataset:
   _component_: torchtune.datasets.alpaca_dataset
   train_on_input: True
 ```
 
 As the config suggests, we use a predefined dataset class prepared in torchtune.
 
+## 5. Alignment
+
+
+
 ### Submit Finetuning job
 
 You can submit the finetuning job with the following command:
@@ -226,15 +231,33 @@ quantizer:
   groupsize: 256
 ```
 
-`Int4WeightOnlyQuantizer` performs per-axis group quantization, which means it quantizes weights in groups rather than individually. This helps maintain a balance between compression and model accuracy.
+`Int4WeightOnlyQuantizer` performs per-axis group quantization, which means it quantizes weights in groups rather than individually. By adjusting the `groupsize`, one can control the trade-off between compression ratio and accuracy. Smaller group sizes typically lead to higher accuracy but lower compression, while larger group sizes achieve higher compression at the potential cost of accuracy.
 
 ```bash
 sbatch quantize.sbatch
 ```
 
+```bash
+Executing following command:
+torchtune run quantize --config /fsx/ubuntu/awsome-distributed-training/3.test_cases/torchtune/slurm/tutorials/e2e-llama3-70b-development/configs/quantize.yaml tokenizer.path=/fsx/ubuntu/models/torchtune/meta-llama/Meta-Llama-3-70B/original/tokenizer.model checkpointer.checkpoint_dir=/fsx/ubuntu/models/torchtune/meta-llama/Meta-Llama-3-70B-tuned checkpointer.output_dir=/fsx/ubuntu/models/torchtune/meta-llama/Meta-Llama-3-70B-quantized
+```
+
+The resulting quantized weights are saved as follows:
+
+```bash
+0: 2024-05-31:02:10:46,964 DEBUG [seed.py:60] Setting manual seed to local seed 1234. Local seed is seed + rank = 1234 + 0
+0: 2024-05-31:02:18:17,728 INFO [quantize.py:90] Model is initialized with precision torch.bfloat16.
+0: 2024-05-31:02:20:33,576 INFO [quantize.py:98] Time for quantization: 133.08 sec
+0: 2024-05-31:02:20:33,577 INFO [quantize.py:99] Memory used: 40.03 GB
+0: 2024-05-31:02:21:18,609 INFO [quantize.py:112] Model checkpoint of size 37.94 GB saved to /fsx/ubuntu/models/torchtune/meta-llama/Meta-Llama-3-70B-quantized/hf_model_0001_0-4w.pt
+```
+
+
 ## 7. Generation
 
+Now that you have a production-ready quantized model, this last step tests text generation with it.
+
 ```bash
 sbatch generate.sbatch --config configs/generate_llama3.yaml --prompt "Hello, my name is"
 ```
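
Before moving on to generation, it is worth a quick check that the quantized checkpoint landed where `quantize.sbatch` points `checkpointer.output_dir`. A minimal sketch, assuming the `MODEL_PATH` value from your `.env` and the `meta-llama/Meta-Llama-3-70B` model name used throughout this tutorial:

```bash
# List the quantized output directory written by quantize.sbatch.
# Per the log above, expect a single ~38 GB shard named hf_model_0001_0-4w.pt.
source .env
ls -lh "${MODEL_PATH}/meta-llama/Meta-Llama-3-70B-quantized/"
```
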
diff --git a/3.test_cases/torchtune/slurm/tutorials/e2e-llama3-70b-development/configs/quantize.yaml b/3.test_cases/torchtune/slurm/tutorials/e2e-llama3-70b-development/configs/quantize.yaml
index 61344ca9..1060a081 100644
--- a/3.test_cases/torchtune/slurm/tutorials/e2e-llama3-70b-development/configs/quantize.yaml
+++ b/3.test_cases/torchtune/slurm/tutorials/e2e-llama3-70b-development/configs/quantize.yaml
@@ -12,42 +12,42 @@ checkpointer:
   _component_: torchtune.utils.FullModelHFCheckpointer
   checkpoint_dir: ${MODEL_PATH}/${HF_MODEL}
   checkpoint_files: [
-    model-00001-of-00030.safetensors,
-    model-00002-of-00030.safetensors,
-    model-00003-of-00030.safetensors,
-    model-00004-of-00030.safetensors,
-    model-00005-of-00030.safetensors,
-    model-00006-of-00030.safetensors,
-    model-00007-of-00030.safetensors,
-    model-00008-of-00030.safetensors,
-    model-00009-of-00030.safetensors,
-    model-00010-of-00030.safetensors,
-    model-00011-of-00030.safetensors,
-    model-00012-of-00030.safetensors,
-    model-00013-of-00030.safetensors,
-    model-00014-of-00030.safetensors,
-    model-00015-of-00030.safetensors,
-    model-00016-of-00030.safetensors,
-    model-00017-of-00030.safetensors,
-    model-00018-of-00030.safetensors,
-    model-00019-of-00030.safetensors,
-    model-00020-of-00030.safetensors,
-    model-00021-of-00030.safetensors,
-    model-00022-of-00030.safetensors,
-    model-00023-of-00030.safetensors,
-    model-00024-of-00030.safetensors,
-    model-00025-of-00030.safetensors,
-    model-00026-of-00030.safetensors,
-    model-00027-of-00030.safetensors,
-    model-00028-of-00030.safetensors,
-    model-00029-of-00030.safetensors,
-    model-00030-of-00030.safetensors,
+    hf_model_0001_0.pt,
+    hf_model_0002_0.pt,
+    hf_model_0003_0.pt,
+    hf_model_0004_0.pt,
+    hf_model_0005_0.pt,
+    hf_model_0006_0.pt,
+    hf_model_0007_0.pt,
+    hf_model_0008_0.pt,
+    hf_model_0009_0.pt,
+    hf_model_0010_0.pt,
+    hf_model_0011_0.pt,
+    hf_model_0012_0.pt,
+    hf_model_0013_0.pt,
+    hf_model_0014_0.pt,
+    hf_model_0015_0.pt,
+    hf_model_0016_0.pt,
+    hf_model_0017_0.pt,
+    hf_model_0018_0.pt,
+    hf_model_0019_0.pt,
+    hf_model_0020_0.pt,
+    hf_model_0021_0.pt,
+    hf_model_0022_0.pt,
+    hf_model_0023_0.pt,
+    hf_model_0024_0.pt,
+    hf_model_0025_0.pt,
+    hf_model_0026_0.pt,
+    hf_model_0027_0.pt,
+    hf_model_0028_0.pt,
+    hf_model_0029_0.pt,
+    hf_model_0030_0.pt,
   ]
   recipe_checkpoint: null
   output_dir: ${MODEL_PATH}/${HF_MODEL}-quantized
   model_type: LLAMA3
 
-device: cuda
+device: cpu
 dtype: bf16
 
 seed: 1234
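
The 30-entry `checkpoint_files` list above follows the `hf_model_XXXX_0.pt` naming of the finetuned checkpoints referenced elsewhere in this tutorial. Rather than maintaining the list by hand, a small sketch like the following can regenerate it (the indentation and file-name pattern are taken from the config above):

```bash
# Emit the 30 checkpoint_files entries in the format used by configs/quantize.yaml.
for i in $(seq -f '%04g' 1 30); do
  printf '    hf_model_%s_0.pt,\n' "$i"
done
```
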
diff --git a/3.test_cases/torchtune/slurm/tutorials/e2e-llama3-70b-development/full_finetune_distributed.sbatch b/3.test_cases/torchtune/slurm/tutorials/e2e-llama3-70b-development/full_finetune_distributed.sbatch
index e69de29b..239c8b02 100644
--- a/3.test_cases/torchtune/slurm/tutorials/e2e-llama3-70b-development/full_finetune_distributed.sbatch
+++ b/3.test_cases/torchtune/slurm/tutorials/e2e-llama3-70b-development/full_finetune_distributed.sbatch
@@ -0,0 +1,95 @@
+#!/bin/bash
+
+# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+# SPDX-License-Identifier: MIT-0
+
+#SBATCH --job-name=full-finetuning
+#SBATCH --nodes=2
+#SBATCH --ntasks=2
+#SBATCH --gpus-per-node=8 # Number of GPUs per node
+#SBATCH --output=logs/%x_%j.out # logfile for stdout
+#SBATCH --error=logs/%x_%j.err # logfile for stderr, remove it to merge both outputs
+#SBATCH --wait-all-nodes=1
+#SBATCH --exclusive
+set -euxo pipefail
+
+##################################################################
+########### Check current working directory ######################
+##################################################################
+if [ $(basename $(pwd)) != "slurm" ]
+then
+    echo "Please run this script from the slurm directory"
+    exit 1
+fi
+##################################################################
+############# Load environment variables #########################
+##################################################################
+# Load environment variables
+if [ ! -f .env ]
+then
+    echo "Please create a .env file with the required environment variables"
+    exit 1
+else
+    source .env
+fi
+
+##################################################################
+######### Define EFA/NCCL/Slurm environment variables ############
+##################################################################
+## EFA settings
+export FI_LOG_LEVEL=1
+export FI_PROVIDER=efa # change to eth if you want to use ENA for comparisons
+export FI_EFA_USE_HUGE_PAGE=0
+# https://discuss.pytorch.org/t/nccl-network-is-unreachable-connection-refused-when-initializing-ddp/137352
+# https://github.com/pytorch/pytorch/issues/68893
+export NCCL_SOCKET_IFNAME=en
+export TORCH_NCCL_ASYNC_ERROR_HANDLING=1
+export NCCL_DEBUG=INFO
+export HOSTNAMES=`scontrol show hostnames "$SLURM_JOB_NODELIST"`
+export MASTER_ADDR=$(scontrol show hostnames "$SLURM_JOB_NODELIST" | head -n 1)
+export COUNT_NODE=`scontrol show hostnames "$SLURM_JOB_NODELIST" | wc -l`
+export NODES=( $( scontrol show hostnames $SLURM_JOB_NODELIST ) )
+export NODES_ARRAY=($NODES)
+export HEAD_NODE=${NODES_ARRAY[0]}
+export MASTER_ADDR=$(hostname --ip-address)
+export MASTER_PORT=$RANDOM
+export NNODES=$SLURM_JOB_NUM_NODES
+export NPROC=$SLURM_GPUS_PER_NODE
+export WORLD_SIZE=$(( $NNODES * $NPROC ))
+
+##################################################################
+############# Set training arguments #############################
+##################################################################
+export HF_MODEL="meta-llama/Meta-Llama-3-70B"
+: "${CONTAINER_MOUNT:=$FSX_PATH:$FSX_PATH}"
+declare -a SRUN_ARGS=(
+    --container-image $ENROOT_IMAGE
+    --container-mounts $CONTAINER_MOUNT
+)
+declare -a TORCHRUN_ARGS=(
+    --master_addr $MASTER_ADDR
+    --master_port $MASTER_PORT
+    # change nproc_per_node to match the number of GPUs per node:
+    --nproc_per_node=8
+    --nnodes=$SLURM_JOB_NUM_NODES
+    # c10d rendezvous on the node running this batch script
+    --rdzv_backend=c10d
+    --rdzv_endpoint=$(hostname)
+)
+declare -a TRAIN_ARGS=(
+    --config ${PWD}/tutorials/e2e-llama3-70b-development/configs/lora_finetune_distributed.yaml
+    tokenizer.path=${MODEL_PATH}/${HF_MODEL}/original/tokenizer.model
+    checkpointer.checkpoint_dir=${MODEL_PATH}/${HF_MODEL}
+    checkpointer.output_dir=${MODEL_PATH}/${HF_MODEL}-tuned
+    output_dir=${MODEL_PATH}/${HF_MODEL}-tuned/log
+    metric_logger.log_dir=${MODEL_PATH}/${HF_MODEL}-tuned/log/metrics
+)
+##################################################################
+################# Run torchtune ##################################
+##################################################################
+export PYTHONPATH=${PWD}/torchtune
+export TORCHTUNE=${PWD}/torchtune/torchtune/_cli/tune.py
+export TORCHTUNE_COMMAND="full_finetune_distributed"
+echo "Executing following command:"
+echo "torchtune" "run" "${TORCHRUN_ARGS[@]}" "${TORCHTUNE_COMMAND}" "${TRAIN_ARGS[@]}"
+srun -l "${SRUN_ARGS[@]}" python ${TORCHTUNE} run "${TORCHRUN_ARGS[@]}" "${TORCHTUNE_COMMAND}" "${TRAIN_ARGS[@]}"
diff --git a/3.test_cases/torchtune/slurm/tutorials/e2e-llama3-70b-development/7.generate.sbatch b/3.test_cases/torchtune/slurm/tutorials/e2e-llama3-70b-development/generate.sbatch
similarity index 100%
rename from 3.test_cases/torchtune/slurm/tutorials/e2e-llama3-70b-development/7.generate.sbatch
rename to 3.test_cases/torchtune/slurm/tutorials/e2e-llama3-70b-development/generate.sbatch
diff --git a/3.test_cases/torchtune/slurm/tutorials/e2e-llama3-70b-development/quantize.sbatch b/3.test_cases/torchtune/slurm/tutorials/e2e-llama3-70b-development/quantize.sbatch
index 73e50462..c094e87b 100644
--- a/3.test_cases/torchtune/slurm/tutorials/e2e-llama3-70b-development/quantize.sbatch
+++ b/3.test_cases/torchtune/slurm/tutorials/e2e-llama3-70b-development/quantize.sbatch
@@ -13,6 +13,14 @@
 #SBATCH --exclusive
 set -euxo pipefail
 
+##################################################################
+########### Check current working directory ######################
+##################################################################
+if [ $(basename $(pwd)) != "slurm" ]
+then
+    echo "Please run this script from the slurm directory"
+    exit 1
+fi
 ##################################################################
 ############# Load environment variables #########################
 ##################################################################
@@ -50,26 +58,26 @@ export NPROC=$SLURM_GPUS_PER_NODE
 export WORLD_SIZE=$(( $NNODES * $NPROC ))
 
 ##################################################################
-############### Create train config ##############################
-##################################################################
-if [ !
-d ${FSX_PATH}/tmp ]; then - mkdir -p ${FSX_PATH}/tmp -fi -cat ${PWD}/train_configs/quantize_llama3.yaml | envsubst > ${FSX_PATH}/tmp/quantize_llama3.yaml -################################################################## -################# Set arguments ################################## +############# Set training arguments ############################# ################################################################## +export HF_MODEL="meta-llama/Meta-Llama-3-70B" : "${CONTAINER_MOUNT:=$FSX_PATH:$FSX_PATH}" declare -a SRUN_ARGS=( --container-image $ENROOT_IMAGE --container-mounts $CONTAINER_MOUNT ) declare -a TRAIN_ARGS=( - --config ${FSX_PATH}/tmp/quantize_llama3.yaml + --config ${PWD}/tutorials/e2e-llama3-70b-development/configs/quantize.yaml + tokenizer.path=${MODEL_PATH}/${HF_MODEL}/original/tokenizer.model + checkpointer.checkpoint_dir=${MODEL_PATH}/${HF_MODEL}-tuned + checkpointer.output_dir=${MODEL_PATH}/${HF_MODEL}-quantized ) - -export TORCHTUNE=${PWD}/torchtune/torchtune/_cli/tune.py +################################################################## +################# Run torchtune ################################## +################################################################## export PYTHONPATH=${PWD}/torchtune - -#srun -l "${SRUN_ARGS[@]}" python ${TORCHTUNE} cp generation /fsx/tmp/generate_llama3.yaml -srun -l "${SRUN_ARGS[@]}" python ${TORCHTUNE} run quantize "${TRAIN_ARGS[@]}" +export TORCHTUNE=${PWD}/torchtune/torchtune/_cli/tune.py +export TORCHTUNE_COMMAND="quantize" +echo "Executing following command:" +echo "torchtune" "run" "${TORCHTUNE_COMMAND}" "${TRAIN_ARGS[@]}" +srun -l "${SRUN_ARGS[@]}" python ${TORCHTUNE} run "${TORCHTUNE_COMMAND}" "${TRAIN_ARGS[@]}"
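
Once any of these jobs is submitted, Slurm writes stdout and stderr under `logs/` according to the `#SBATCH --output=logs/%x_%j.out` and `--error=logs/%x_%j.err` directives; a quick way to follow a run (the job ID comes from your own `sbatch` submission):

```bash
# Show your queued and running jobs, then follow the stdout log of the full finetuning job.
squeue -u "$USER"
tail -f logs/full-finetuning_<jobid>.out   # replace <jobid> with the ID printed by sbatch
```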