From a5ef5c9e117105ed72b914301b6475c58c019efe Mon Sep 17 00:00:00 2001
From: "li.zhenye"
Date: Wed, 10 Aug 2022 10:46:30 +0800
Subject: [PATCH 01/12] add requirements.txt

---
 requirements.txt | 17 +++++++++++++++++
 1 file changed, 17 insertions(+)
 create mode 100644 requirements.txt

diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..2f32c41
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,17 @@
+certifi @ file:///opt/conda/conda-bld/certifi_1655968806487/work/certifi
+cycler==0.11.0
+fonttools==4.34.4
+joblib==1.1.0
+kiwisolver==1.4.4
+matplotlib==3.5.2
+numpy==1.23.1
+opencv-python==4.6.0.66
+packaging==21.3
+Pillow==9.2.0
+pyparsing==3.0.9
+python-dateutil==2.8.2
+scikit-learn==1.1.1
+scipy==1.9.0
+six==1.16.0
+threadpoolctl==3.1.0
+tqdm==4.64.0

From fa5f695d2cdb9c6cda20166eb738577ae58ff6a6 Mon Sep 17 00:00:00 2001
From: "li.zhenye"
Date: Wed, 10 Aug 2022 15:30:08 +0800
Subject: [PATCH 02/12] merge two masks

---
 deploy/icon.jpeg                 | Bin 0 -> 134972 bytes
 deploy/main.sh                   |  2 ++
 deploy/tobacco_algorithm.desktop |  7 +++++++
 main.py                          | 11 ++++++++---
 4 files changed, 17 insertions(+), 3 deletions(-)
 create mode 100644 deploy/icon.jpeg
 create mode 100755 deploy/main.sh
 create mode 100755 deploy/tobacco_algorithm.desktop

diff --git a/deploy/icon.jpeg b/deploy/icon.jpeg
new file mode 100644
index 0000000000000000000000000000000000000000..282b47c37dbb2c9929e3f995102e45c9dbac2a09
GIT binary patch
literal 134972
[... base85-encoded JPEG data omitted (truncated in this excerpt) ...]
z6$9M=mkb50#t?nquGIVoQ_k6S5T!et`u{`oi^OH=H+cC!n*d+0srbiP!FwuilK*SO zFArmDzj_RPP7<2`!E6^(A#(!<=Q5=jf}Vy75(M|;uJJLB$g6Ygf&UTu|H06%OXB}^ z;(d?$)qN*q`~O+synBhTJh|v|D_8C=l;=MJ zjdpFm5jyCMg%13o-tE~dCcWLD#ZdbKzw7@s@DC#O50pM5ElYk&S2zj(QLX{SHEJ@r z=2*UxZF~P%we?87sv8@Zc8XtJ;E*J!LN;?@%R`I-cl&0^63^;DZ;UDQ;#8{anv163 zpQ(2DZ7iS%iWQ}@#8r?_e` z$jVH38*c^G&w*|7D9W75p;g)XPkhsQb8C5~%smNHi#Pjf%w~#NjUDMmj!$YStQ%6HF}f>)v2zK4ArGt@iZHkK>zP_Fm6n@-D|g_uum*paS07s zv2q*Bi#x}-RZP5RDkJl=WhlP)zc`#>zNBmXAV5|DN&xdG_D z-Z*-YuxXDtI=~~#wR(YWn3i(3dMskkm7AHfk+|015PE`siv2Ka{ys&RCA;M<{>%!y z@O?*DQXWTf{TvrQ+yNf7F~gJbItGidfuf94eXz((O`)Vxg;N(TaCKN&Juvv~9}M~y zs{R+OO%l^&KEguUFFcW4hJQMBtdg5f^kJ3_K#4Jd3(x2ZiM1z0plZif<*v15!HIe@qkDc10M>!a-*;M5SllsXjfy2Z@;Xl!FhQ zVbSvk%VdbW)(YTOZjHe4rq;7c82XUvae@Rv)Qv4+pEpXGzEtAYSsaX%&#&)>jCMWV z46X{wnjxY2WdC3s5?Zttc5lb*vE2%-A85M7&82EqIZy-=PUT?p0iztb?5l;cSll{@ zpF^d7Ev$ayzJTRsRoP+Wro;~+<-{lCu%z7mohR1RMVQ6hr0>+KA#hk-3I9QHj3RXL z8W;(GC&SHNPw2{NY3I`RDJ*@*f@ef37}|ll5`=*r4=FB>sq8w($Nl8qJLzw4kwM0? z{5%x&82j-b%$i_7v)HOD6fk#gU_Vu~{$Z!2XysVjB)KK+)UcdGoXDgixN$W?I3Wd) zAN4vs?qHSrCyQ?zbw5Ukmn3QX0GXtEI@pI=-Jk_P5Hqm59O^@%T)M`MReJqo>(` zSAw$X%=|UBmIGf~dcXefe1xDCgc@(R>j?BRl!$hH{>P<_HVHTR<#xsO@5)zy^rtanUaVZop`Bd&|I`DlQrS4R;6>TzsS1Ym(d@+ z_$ z;anaUsWZa1h#dJ;W|NuyB7H;2T^n@V{Mkg1XT1%gJhc`m%>3PuiYPYgwT#OaH+Q*z z6w;I@tRS$u{@#jA3`{1CG6fj1|!%1SGONwDYKa3^V z=ySA||IYu|%oP1WU+&6c@~2#BcGP8_xp_NP=4l=+?!OS(qx1Ah*E#cEftvUodto2& zR2Rere9BWvqnvin@HE1kmYg=|1CJeu*|n8`HX~H`)n%JMiiaA*Z>lZ)NR3jVKkU%kH z^Stt^|Bi4OAFj|MJVAfzX|6u)#}>uI2*21C;5r?W+Rv-Aul24&V^wkHy08L??2goI=K z^bCNk@2b=C1o#Kog(bQ*iG~ze>m0A20JT?4{NOLpzZqcd!OoPJkHC9Mndn;&lU*8J<2I+!4H?%KR`LQX;wUSZt8 z#=D}2;-kC49K`~aIW29^Nm7&2y}k$zlpwHB31@sPONSinem`KZ_=Gb$46(mWZ*7k+ z!QWlY?qiB-rFzIC?y z>i6P8Vw@bl+hw249~QgFvLO(#zaOms3Ur?&xe-xkO7a1+= z5NP)3sdHVe3k!0Ujuf*Hxby=v`y$LB?U%XJ285b*1@qIN574qk7B(+dO!cF4mw9p* zmCR~(Zoa)6K*y#zgF#`E1}@fIMiucKpR~jPJYY(lAI68~odeJ&I`+g89GizH($%*c zOZSumD2xB}H4uxce4&u^2olJ27A5MC_YtDKSkVtdU1zd70Sl+7Ny`F=Q^nO2|8#C; z#~*l1_~Hc>NZ1kT;~`(%oa$~836^C5w<}Ad#^$KWK(gRoqEz`L|JwAk591mI+=oMI zsV85*6Z1Kp8hVR&Y&;&5X0WC?{CCxvY%y}8G)EFRV{)_O#W4gY>(u%0L9U>4mzHMVoW48Lt33N2EkxVyU+cPS9u9g4db*W&K(?oMzi?oMzi?(W{3|GD?v zb3SDznRzmknY?@N^{o9{yiw}|YhziZi-DC0IW}3rpASjJxFH$eyuZij&Ea7 zv!r=Kbq`&xhnNb$Gz*gOp{=R9O^zYOR~E zcl+r6gc>eQOBFGyIq#BosffEYGnOy8 zohY6y);L;mJCtQUj_1MgMyVU8YM>arHC492A8=MKq~ccjdp^#CyODWIqk8+lub7f3ii>Yy1X3UB_oKrMdK*kE|(U{dHk!PAPU<@4y<7=lSi95A9AwO_bg-U0#!`Proi4Q{sVk< z5;#;js@%Q(n(3p!zmx6Kr&?UCJEjCdMNsjWqwTtxwPN#3QM9fnKUV@!Vx>UImi&wJ z#z!uP<<{^m=Q;s(n6bXVEky7idr?DpxAgCfp=C(^7x=#hKMhor zOvz=WL{idRZ6} z=;E*Jc;F1Pk`v58wkx334a1_zjDkx;!&)ASa_Tp~-#BESna>KmT=^~8d}ojeSG-kX zfub|);2)IS(7h&{fb-94&$7S*%VN?2Qrj{^X~hxp9kSrONKESt0tQu;N_@h(NprD~ z4l`w4$db-kx=FgUzd|mb1ho+VP>L7we97g!DE0xHAgME^EUrYE`FEx{kJw*N&12=( zxiIQjp(S?@`#MY%m}I#c1fv=~{J-PhD00JNs0tgK4~}WFr=G8rZ0(KCNd}64Oa~eq zcM9sAB%S_PyVUDa_lY=fIXuo9BP~ye@vw~Y$nU+#O>w>od&!r|jQ2eo|LXZf)R*5r zL|CR=U?<|@f6~o z#mlwQF@SuuR|<>#W_~w9fNuYR`M|G+tsEZ47PBI4yZ!FZ7qPQUqdZ(EjgvAmpuK^K zzJA+gf7KQY`A@>P6gy;fbB}=aQ3Jtd^a6g?fW19FmDufbkfmPb5x9XW-O^3Y1ummf zXhTX#Ub#mo!{70mhgHDP6WwyInH)m47J>o`Xe_nD!uJ=b_`AOxE_dF1==YwKf>gDX z$E6*c7zO4q*aYL}?UAsZ2sWV9Vj~Y$nKstX#`4Wplx^gf%&eSLq%0rj#t`t`(#_NE z&T;W_yhSb8>wjN&6(xNXcAR^e}U6Dr=+ zkTZG%A@9B~Zwa-vnLXp@&*Lk2leMl&9(|!B`ZThUqaj>p{|`WVtL6Ng=upq5SXJ8! 
z5_6B_0Ykim97mU(vlJsy2sz*ly@$(5iU(@}19dr~pXD#Y^!B9v70Lwm>GvKtXO-xi z4$N9zj>Mf zre`bs4j+-)q_oCg6K^#89Pow$=obE^; zuU-ucuI>rWL%kOJHe8^Oyk~3MV%)%(fwTo^)k!sHm{x!sKN{TPf0U>?N-dIMo2BPt zpB&KYjDE(sWoyToOw?2M$r#n)%b@Gv7Cw4L=woe@r|T(5-!tMw+p38QlU*v4yb>LX z8jw4ov7va!a{*&fkA#~{t0D5*&zU2+*CuNkysbfdkwP~6#fCroh8H8{LdMB;u2w<0 zLisc)x0JulBSaU6v6>n|nwXVXXfWtV36VMyE7R@TfN(-)@Hgs=Q6x`M0CHZSQ5sf{ zi=crApISRe{lr^9xp;oeGPczZrv%xX5_-Knl}9K*nM|SYwz^{8NZCwq{)b)`ilfR9 zba~4+CFAYqez))4$GmHy`F)0MWrsBu z>dL4cQRI^2fDZ$ehZaqpZ-4hIPf_4{wOzrrRJD|C%1nLLqd`LDxKesz)glILL_pGw zyUk%kn=qZhFm$cvxmkUdl0q;woz3|S!No)sUu!wA5VR69yMNs9Wf}?Ua(8U+7-1Jk1b?Lve7a!(*pY3xmP8&ELPAQzW4nCwvW$);JaKBb|9QV`owd& zeCqGAq*olYU9Zfp-5vXVQ|2+L?=)lDrX?$u(D0&YXi2gDs2$dL>zN6sI5V?98L%(D z!KP|<>DCqbqg0{NDS%e0rIVUL2R()}x;ef^ZJhp|L7dQx<_E<*A222%`IA)&s|2{L zLl=U9J4xKPv7$jWjsG*l!*>4+&+ti)=6s{BSq*ln29(50T4k+)_ z%ILwW6{$!%?arLvs*i9Z|6Ym^VdR`B`PNZ4??1I3(rSbSkawagyfHTI*-rbscUM~LnU zu)m!Z;^}HGGy;OZQ9Y_<4tKJHPYrnSK)i!4p@i3^l}k-WS6sP7Vjpp%y%949 zOY>|i1|m=6{`LY9{Uk(5Rn~s{ai8uv+rd|0$c$}56Pv-G21^U~GpQ^#Aa%;O9?fVh zv2}inYnAXPm_9yheYL-pH_%Ct>O^GwYYWxAx!&LQ3bYSKs!^c6u(Hpv z*&>{Uw+BG^!;lr!1Bz$oIcNR6h9Y=d8dB-kpvzBvVg?C`CRv6+YNI;8pEo4B$6m8l zC|$IjewDWp?4!Uap<8feDU~TKNH^8<@bIEFu5tRpnzn8XM=d-#B9!GuXCAh{#E z5$==uksLj5?C~LUyGgC~SRfeBm{vAxfu+ECndK@q(^MVq(P;1T1dr(YsIqy&CHfp;eXv+xEEui8r%h`H2bwy_6J*d`wVwVq$vS8H} z;ZTcnyAgRE0o#G);I3a)z}1Jb6A(hsY3%$#@wzP{Xb!BJ#7y`3Gs)KTuaU5+^1uqD zOfS>zji7yN?Y|`v@U>@ijmX8Q2@Y=CtqF6CKT)8^R%vr>3GgV&%FXI47SySDSpM{) zDPXNA&Q_wumi4YdDN$qe=KD$oaBxmFczklTaXTT00m-NvT>k4XOiv~ncDHmxAu6&lB;&k%}|G%U#=oJQU9I0%Z7u%LS&l%n7Nk zr+7BD<@{c$llVV|8|zDr=JqzVka0ISM_Qwu_q`hy$I^J4_;*s8Fsse!Ab8_kHt$`3 zCW`ek_x6K*TqqtvN84FL5jq@h@?EIq)ba04mJc2J8EPD<)g2{mlG<=2GLvN_yUmJD z0fI1+-Rh;^8SFoi+8d7d+U(J*;i7lZ;!d1VZQ{{O=M$JS>o7i0)<*m)ULavfo~TP2 zo1zpn&)d^_`oy()9!cLw1sJURvj6Q=yT$ERpXNqz+^hw?(5;iZ-% z?wZg}gWr>%JgJt)EMF~a>Jne_y0cg5* z&eV@PS$|QoTbr7zTjwZGmNCB!SV19|}B_B`BbB-?V=i{Y{-D~# z1$F4Stez9ZEGlC;`L%u`9J_19sCsga@O+?R>%Thwl+sJ z&W0PI1e+xaG2e&po0v^yZ~3rt~V*j zUvv~y{+cO0YQ&T_R`yXr0SeBo;)+F#wW%g&m2w}!L02hleO|WNMOBcMZLApN zKNe3Rp8B>43DXP!-L>e83Zyl)js4Klhz@J#mlc_2#W^ZW)%5Hv@iJTYJq;dpmKwv@ zECy_BA|}c}*j1x;lAk+vUwkZM-g61}4EW;aBj+}|D%n%fn-g!w6LPW)!rAcLnHj6i zE82i;@S~@jj9xZH@0m+fhk4?`9Ovibn%T~>mO`!!%2pGoKg%dwj$yQejrGn}F*g~H zUu!lKs%vrLeSJ#)@|^J?G$zgywZ&oplPbyXn8Ri+G`_0gwJ8J>uNV#g41bade#0v7 zWc-x4KIOl}IE=Phsy>=t(&_ocP9x1&<_-U-(EEe``-w3fnnJ2;bL%-Cqa9;jL%mES z&1I7{=*D5@jSo$#OiEUnj(JSBxy_?o17p5SLxK~O_05+g@P+&^w61B&x(U~H5L5yN<5v=z^E&f{{$Qgk)t*g%;&d>ghzuZ^?l`w4zSG5|-2L zKiqM{(TzmpltQDES&{)eJ>gjv(47~NfvY->tEmQ&+g`^*DwouUsB-6N213xb=!IDu z_n8HJE=}~0(RTWBFVT3~hUKbpZEa0h%2Hy+<e4$8FDDsFn(Q2)nHgA!oBV=$|I&Lm zp#1!c5#k{22>8MyE z5D6vGvJZAf5|>LYE1fI$Th3c2h5-cH!*kokML2!V<8A>OprH5#_|ZNt&)93SO9=2+ zYiIic2XXBO@Yf@;%a|uLWeK?l>9tFgbLi-IDQDJbf45B8>!dACB`hsBPo}ptuce_d zrP#$CLUdg=z>po6Wz%Ml%y0dj`}LZbl4c9hOMsjU8E86qr7LFR%(w+7(8)tJH`f+A zMm5u2sx)+VKc`{a;nZ0hKKw06C#l6u2Vsc7i=dkPpcEr$cNZICnyUyjF)^C#xCroq zFsS!GwreN#+%VpiJr;!cRMMU73enj_JbRn_$}EZk zD?Gkh??qZUZC`dr!ZRwl5>U-8-f#I7zg=(HbZB)?`IM;es@gBIPjo^f_8V8w9G5jOqwlUQY>3~2r1iv3 zG9Su`eHl5Bhvm3D+(G|rv?F-h@oQ=3woksZG+ z{mRjAqO%fu;P($;TD|lnY|&&&y>g71VJHyQBsT~9p0Db@v!$)lJuY=dz3WmN+h8N` z&8y1Ui^!KtDfzZ%bu9;&Y`^J};hgQE#KRDtgd!5B=r@iR9-TGbN)$p%m`5zbWaVX) zV27QQ=B%IzJ2&=e!55{ywsh3-1W2o4QvpT3tj{PY`L>pJdZQF$=JiSS=OMy0p%(F{ zR_&5edME#OfDOcOMq7Wyd9T*|I6wB0rIt$WFl+UTXXplF;;v%ZV1@=UHtd)nYM{!} z%}&zL@)S#HeJ^ODNIt>}*SL-QoA=VV?fL>fXTzjuG9SA_jXU4}SGp z^yit$PhF*ppSh4!Y+sbpuF$05rf@Bcw?d`_%{cjlqi~MU6mP6`{QU29vg_qIF{FXI z44Fu&EpaypFx!tsaEsNetE%NGo!Z_-z2)*0F~QAf>S(^74WUm7rf}lWU1%J#n7){I 
z2+cYyBP8BMJHcS1{36_4p7qnAj~tBkx<_I#jphZFCR3`H$%LNFN72Ai;%bP468DPz zr3LyNpJz9By?;C#p0Slo@wCQ3`zt?yuZy%uXxWfAyn@$%?)W>YzJh?S^QSt#H}cpmJ}J=V8N z(rSNd_*6eHu+yTidE;QrQxh>G3(cH*J|qsZb>t% z*Gh?Z+=DkkHYr{xDl;)Ahj4ZE;b)yY*vd!8MkQ<}%<57t4gtST=}IkZ&E$7>_2kP` z4)#m;sJ{~#9($y_DW5VomxmN%T^#e=^&7;Ps4caE>|-V~SW)^XBN^Cv3JA9CB3 zIWj*yA^*3piot`fI@*F~e_p#0ytdi0t}GL|PoJnDSnL=h)@%*wK;e%`Ech82g*9q zOq3M&N&u6=XsWTF)pKRjZcVDIYf&}3b{*Ck#onsiWfr#YjTs*a$M^D99pAYJKt&mQ zrdSlFnWjTv*sxAC9;M&@o`bz5kGh1Z#p2`8>9(S;I`cl@`P$;7sc2z?uQ>{8D{dRU+ydZ4S~NiSlHSBfO1%T(O-O5vwo$? zPWJXlQ?M8AIYe@+Vsf*c4!)@P-tVGncRV1cdT3@|f^UjQYR|2%xC#-xsb@(50M!1$ zSTt6Yu+ywg(9KpsczD_DI+>)0BLMLUz`*x8_0)2uDe|OZJsK@hsdB2-A%vnhr#lb8}6a~8?RYw0tr=b9TCBd1Bgzv;?EPYUjh7jw>nfd(@0$Z01L29(~wfwfo~o9BWh)dKiDkOXk;90SyO!Mh6o-H z7oBs$94SSBhlMQlW*}o0_ePNQ2SDMO`C891Hl7JFL-5U}{?2kkc4Y5CaV3j4K{;tL zK}=J}I`2j2b5tAil^W8=w>X)tL~pEWqF8L{X2~JS`F}T^)5Sil*jQ*Htz3LY5aSFd zS8{qwGvxDJVnqY7e*XU+g5)89*1*VT5rK|9aIobx3J(>RW@ zr}qds-ds}-WVV(1V@^zYyrY!_8UYH!OYK7w7EX0HLMOZn_XJdsM1JL#K869(pR?ND z_T<(ziK-=_UFmfQZYc=oPu$`@$Kv>q)59w^FO-{Im97nM2o~-n!$H92VsQW6(UASe zmf3P%X?-qfgC7FAY&S~XU2DQB(X*r`ni+C9mE39`OT3RHV$;!Pl(<2!JIFtof$?2$(wgk?=93#@(7Ysu57SX0JaBz!deSQAng9aKj>pl&o&yy7>l^h{fTeM@AHue zcL4L`@U!y~|1BgCi&Go9&@NT}E*&T_?6C%FNzAG{yZL%ZBvJ*elp`MbWK|Gxi6wwu#3;f=aCXU@)x@(89V=V(BZZ2ys1b6tnz*dN zh;QZYNfa;Bf#)lu(cYSI{#T8*vQ$?ptz4SsXN!=SO4*5b13auH+%8m2*g7(_q0tCdAMVG+?`b0$*tb7Ee`BcStXM7;iSk38X`B$ zTS~zLfzmhtC^U}BEgZv!J*=qUFcNp#22qB>yru*@%bx(GIk>trN_s(7h1Q3g66m0$ z7Rh88zplsWXX!9&qQA$=LP_2l6JENGA2awLU*7!G+OHIw)8z~Qa`BJAn}th@NlZ^5 zGn5YPGqlVg3lYj|7w@X-*IuLjp0Liq)>%7II!_JvoDO&)6Cj3T3L~^8D*v12OsDp$?;LOh|b0 zk;FE)srZw{>~1D6n}-4@{v%g2?8g%XLVe-pfYD#7w=CyI_>YL8gFcw`F9wZD8=Tu& z8U}4`C=u^1utT3FW;>oweWRtaItdfpdb84#GQ^b6st9P|lz^S0$Ra~uJk6LYcFUzT zH(;plf~-UU$WvA)4~v#_=w|BrxKSeYB>uc-&Rxk3E!JPU#1gZw^it8II3eZXs6X17 zf<9SnBx>tT+;t|myXRGV=R`R*a!$-fn9%_#rr`7x0_q&vSgjOv*aHj zQ`}Rl;Q0@+-!t?-z}gXd@}}Ut30FO6^j$DF*D1xy>E?25DxZkDLGUl#OhZ9#3DT-lRx5gH8+opG26Gy&ILXK}mSO~B>_G>J3<>|{f~he; z8ROOYCYtJ7?bEri{;JUbK00Uk55Oq&4-j0|jE%W@ZsFB%_|lEwxy|C(TcGd}4q-O# z`ZBmy7We-7;NT;^mU%oo^y|ipeJO>wy@r{w`6LK!#@|f;&Rqtqbk9lrRnFbC{StMz zds?i*IAoBTd?1K#$9>v)3f;oGFEyQqs&3JRuM~~Cz$rq#frRU@Q+l57lY_)vx$OEr z?!AHen(h*gWgZojhlX~9Ou`%&r@GxkVxHz2!L$GCmt4juFFF;~rd}shb0A0mudjAk z>dQ8q)tP9&$?cWa96GgrqLu0jEH;%TGfFOqKR|;tfr+%>HZ2@Im_leBa0@6#^*4sj z=jX|;vol(Ex;>aJH|_{@Q%U@<6<#IY#%JR#s(LL<2$(v{p+bvmMA4X+?$>H>S2LitDaC@vpEVtHJ zTJu&nt|rSRalTE~-MV=lv%a3^Kxc*3>_Y|;VhN4xre$= zPY*Q@T*z;6Y*)>2^O~}(ewApYy{JxKWjIxdW~3(6(bPEN?hR8{H=H47WB+dX*}F?2 zJDmDEvK5iMAu{36$%SKDN@|zQBwe>jpUig;4Xr~3r7;la%@^OwcU02dhh_#^%c1uz z$$Vl=c3YpS^wflMvDu#3Zv=vL$lF%04X+y@e!JG5&0wy~<|fI)+1xiSKKaBr7X+~(mpzWTxF2wo;)q7nQrD!cxw^uzI!E0N znbAxm=Bv8U@+~A^-6|Mej`hcFblQzPojS+`n|O8E(A2mN_A_}-vLL?V-x{*wjzlsD zyz(qZV3O_tgp6bvir zg$8e--}10Fqz-~;)h}dT3(G5gXhFJ^BvUc6cCq`0M7+e9%}U&j7R71<#Tqbgv0Pb$ z-}iz({Eb&9>>+8_IM?;+?@ zC4_gSwxpU7*-VkBIS>8eo`ZI6U;4tS>=msP?wd`(6Fw%nOgEu0NdVjkd3|(s-;qdb zt3in~rkY3qbHHA!DWf*xKG5f(*u(@!ccOoa9^Ml4shB2L7>ea9fo63`-(x0Jy1v=t zecN&MmR;XvS2hGI0y{NT9-#tM_r|4)C8vE(@-Mtrij~o3=vEqNa#O8frIHVdY%2EW<}% zf|KY-EgywNSbBFapH1w_O1ok9ANxQ==e5LI_y*A^bEKJGEK%7+={B#Ghf^Ut^V0y# zo9d^;H=4Hg8hb1if$mf+05)gqIqpZ~vQ#o0FGxaqphJp?&N)tyGQs+~GQ9YGq0^^#|FkB+hYaXIQmpmlS?jY)MeDgmefU z-4PL2SqsiIrL-zn;%16wl@Hwxq`LlI3Wn6GKpK#iJZM z@H~v>JgtVE=-hcW#>RSzArpj&Ql-(P26K19E5g)~Nvur&Vyb56xlyfp)F?$NGRhOo z=6nszW z7-m$ev@TlKHND~aoXdQVv!(Env)9%JNCZu*zLq zsVQ!o?tgwh#>Oe|1mM_E0001}tJp%3Is+RH00Z_BF@gY}#F7R>+bXSz6iW0Uot1sW z_nuv+d1R7;%uC7!kvDDrYod@iyl$uAxSW&M5#rDps`5c{$E6tX1JnIqqi%3lKP@{c 
z!|Hl6Iy{`C2t7e){xME)L@KL`x>YXlAzjRTqm#K-8L}9-u(|<7{@uzFQsnoL{HWMM zK7WJkp+S12Fkht`?P(-F&ZqTMbb9$CH7X}3`>U54y!BN|8K;}$1C;KPj$|KP+_)sAVt<&SxB|WeC3(?tn5CqD%H|GWByMOBZO*N9Oi3lTv|Y zj5Ng{3QCfMoG!pA=K5CySfVAv`nu&a|GGy>m_Bau%Rj1I1^fR1@2I7eW`6*$#C0K- z$4UFw{QBNo$oB1NLt9@WWP7*+d`rmEcYW4$NO&qgut(w(6O0DQYv>)gW8!5BfZn2@ zb3EftnVpPzk#fuM0Wgj{uahiis=Oeqg6pq!aL*6{(9cQp5PA z`rK}AqyB>3DlJK>tm`b`sDdZX-)8_coSA8R%s{+oam)(*lAO*f%wEs ziS;s_Qp3)*CKR;H#YT}koe!&xl@Q9iaPh*SN%4^z#aq&FT_})BHwDsp=rHvP@?&L`t&VxEFV3Ny-)tvx%mW+$z zpzndhW4NUtjzT;AEIoj!{;5qDI0sZsOp2c=>LK#2NS)dZXwVVZk@m3d>!2ss$jNMD zxUZ)11)9$7HiS(fENeH25I!rkJMM4tu3|A{#yOZsV)ySLyAt%pJEY87VWd@uvh1Rv zd~+tRF5S*Qk?R|8FRJx2%GNCDOwaI9*J^7sS6LZaj4R5W5tnBnXJgmieJLcqrZ3nw}3+2CD=!pX}{rWYpDJ@47uT{N= z!yK0H2|nVz@Llk1iMlEFx*~8NclMVz1$o_zOp@+ng|Q3-lE>)E{#UIFNRBxiqfryq z596k@ZS!(oma7A=w;5Ht(rRE_DI;i1JSl`Eo7etGSbk?c^qBPF`)#S;FC7B+n0ra| z&Y#>YHK5r+nf4zSAM@m0aI*+BEXg4Vld#~{dC=GfVb%AM>E$che#CdV%YKdo^dLHI zWBPrCOYEuGEy?1VZkhcoW2qOxax`?wcKKTm&9_FvvG&2!l>hISfY>Im-kZ+*)Lm<9 z)fXN`9Yvjf^M+=b&?mFo93nT=2F$4DhB)uztS>YbERd-jl1_^8fSzK=yF5*17>EAP@o8dB?y z(CQE{gMiDTtfn&4t+C_T86G>nKWm9AxL6!qsBk_`-izLrlo%Uz`Cv38M28)~Sf0 zBv8s1Co~23YmB$#G~(llzj%sTMyhV<`;>6VcVO)4Wl=4!)7CHMkJ>wxU{@{XSyr-_ z&-u{U;3@q1hNpC~2bM_nyBS;18(xSDRBg%`+kg+nMR$Gi%uJ9q4odOl7^9TYG12V~ zKa!I3iV3Xw@;afE4@4}H}LnRjES3vDq+rDP$q%1e}sf4MbV8^v`+=vaMUvPF@j6`3L za|&b@D81p+U8!(KC|eYIP{Ss zSbFAn{P_?9LcOKI8@bNvV-N+gE^@p?eBJ77 z`|Qtd&+KCPD%q#`U&pwmj$>H2aN7QNqp(HlCrcv6jm{z$2C;X7kei1AQ0)imp9cxI z({smjn*nx;G}&WgC>q9>4GN@BgT$Whk#O6}dM(E}0Y7kr7G)1;!~TLrq&;ZDq+| zS#Az;*ljVP@L8c4^jbaW#86Rhp6I@|+c7aW~6*dFRl5B=wMSjt6ZE%H$6Ol-kUV;jkudA>|;2C%q6%DrkczUy9ngIt+AZ z6kyY{SIafUn|ghhZPRhcDP9SF1Vo^{w-Vx|JXp;&q~B9V$W&H$$d7|NlU(F?4@s0S z4|x?=T;dXi+i7YYM^>sd%vmB3^Wq?#?23hUE(l@B!OVcfn2A4>)U@U~Z+}-wHlgwK zY7?sAdG^MY`tygiYk3Qsn6lxF8|4qzA27k}QOPTDh;Oh`@l2bwx_pC50AWUstGSSH zx4{A-Ml)I=$g8fuLt3cdoTAw=BtNnN&QHtHZO|V4o__rOJ1yezLPkPPJ`nPx^-W0d z)3wPIF6GdKJZgz4JA(s{Y>3W^gvL-R_IVl!smqFesj{>}v-ZlAwSci;3Y^L^jHRzQ zh;RACfk*aOzlwxL{w9F!p^b*2(la=O_Xa?r0D;&Az=ZuZiZggX&WVHRpW$6jHAG$6TLEwnCP+Z zPs2$*uw+ut@uyh)do8VYDOQ*`%7voNXSlhP$K9z4R>>p-pbiT&eLWCp%@PNQ76$-$ z0AM`EUV5XDpbCY%!wS0o%Y3P+_$Z6Ym9@bqI^rOoZ7RDL|G zAGXL)rB%1jF^x{^s|L+Y1g09QU8skvE0_^1x2{s*n$MA7a-1*#nGXp^h!b-Dcxa*) zS1s++R&B=E?H2wh-Cu39k&7&%aR@xva3jPMoP9=aNF8c-3;-SXp>{el$EF#&5j!9#iVccU$^$6Cy(hI>|nK(Ct;W@ImHf+s2bu=E2 zRZeqXTP6No?)h_PFULfU=5Ls!H$nhhs&k* zCB2q5ZH1Xgo*yZVB{lIjt>h_@jWNz(rNp1FC{Qo8U*-;Ey(NhL0bD1qh%4GVu}o3v zLG6L%!yJSCY)!dgmO-%4tMtP=Fe!x`E{V;iJ=?%S}-AJvcUoucP@d0mVOSt)6? 
zx+>A3YbE2nDdVkU z+u?AXsY5H0T;D54mV8L^^s1$VWUM+Es8kqE`oJ?h#}fe97H_z973!`Atwhp}{JsbJ z!n}oG`CJ5V_Fv-I^;|8U8|0fU|Lan%X;Pzm2oh}Lx}#f}I8q?h!CUSMMk@`ppxcMJ z!yR_Nm6!eXjhGFAD=x90S3$suL z)=7F%5iQsj_8Q`Q-_|47koV&Uu0+Xly_^*?Xw6_mho85UZUiVt@^UBp_xwe-ER(2h zYFVyKf*3-|h@PCzUp=kZpX}BKYV`+yVk=AxG>iDVL$3HgS6=i7pN|)!aYY}{W2By2 zIW2LIK>5xURrjE+{9u8j17z)f;&sF5vVg#1Y6JI6pxN&c4oyZ{lZmSG{vC!#?i35|4#nMFiv@RgO@QF;P~4?x@#5}o?Y``D?-=(y<}DAA^{@4PbGpqB+uWr+ z!nOc#l;oH-oJxpA>=)~Oc*V>domA^MVm!Jr|NEMArTkY-=xrj4>QO2KNP_K1&Izx_MdGb2d@D`ud^7(8^=8z0 z{-Zx|0jzLN7#0Z_zr@x0{|~A%VQz5B&ll@XO7peMMS4u`V2tI=DqMnEQpGQfhF73B z#=y^9##WWcwIF23Dm`f2jftJGdy6z*Q&&zadbAw=e;%Sg&(xnCgrATuKjmdZgjS~& ztGqnWZ_rjso|QDEQ%7prDtzw z(QBeT3K%!XD5<{o*ib+1L}>tl6# z28#m+6CTJ3V$DqcXZ zl$))!a%AS+;RZ4#Dwx6&E;0LYt|stUEhndPR*X87jb5^#LqAYkFTb!xYfmmfvCv4u z7H$dlYOdbpPYLk?sK}tn16sLoSYLWpM`Q1ZuF-u;7{*_!!+bsEf^N zCi*zcH}iD*=i$N{7*@pM3~z5}>wd5VgX5|-!*|nRv)c+Kut_ zC*(dfFZaqJnC1f~MRo@QUK;JsgBbI|U}!)xH|FLU5?WXb7o*lal75_YF$fcvHedEp zclSS_Tti%a=OK2ywGJJ$8in9xtq=g>7~`;|H!K*sV9fZ_xta4$HFRm`woMu^$=t)| z(2BncA=cgK{yy z6s%aAGWMcV1mEP|$TE6r-H{}N??$r8mU;El$%X27_o+uad^gXQtbxKWh)?;~XHqE< z3Bh#C(i+qvcb<->Y88k)xlJ_6s*4j^$NJm7QQxog2ZKY3cak9`c}oGF163IK%fN*9 znyQ~@lif=}*QfXKz{+l+a}+=TggMiT{)D>-^lYPIJe z_SSX131q5m7GIk`0|rBKvhD2z?*VI)pWL#OxW+)y$rguOikFZkCn9KIbh#u^a-f7R zbC~FCY~U@IYPx5Et<9E&$g27lY4Y!y)~B38Zw}@EauTi_ZE=g^F?XaLW(`ORG!?&| z^$ZUs{c$A?S@nb4dLn6?o#QzuPE?o9nw!h2KaUwhGd$_Q`=AUm4ZVDT1OJ58UF=Xk zIQ_R~oMwGdN2lTK>%mPoWFKrMKS6ScboMn_R>=zT8w5E(%33WXDuU1Kje~f)uc&@& zE+{4}+(1WGu9%|=R;b9MmG^?Sqc4{vMBtlob%k>?CMF|+%_}kOF{=CgiIN$3e6gD| zf>H?8q8pGQV(e8G-GrZ4E7|bc`D>C3j-n;EQxe*dJkFZ02hEdNyh*uHwdSOfqsf+9OxB3bJZK)Fs%k9U)eC^^l;aBGkFkWZyPz6NpRjuG12PAuTo4s`v+gKcGp>N|rBs>&nac zd58F<{aq*acu!zPgy@$z!%GH_YwyY>_xeioGE=+PnnHz6UCv<3M50YO=8zyja3Q3$ z&&i3(p9=ZDN?gzTjalY66_tGv1e z(wyGaD{+zG&3kMZz4)|Qc{Y}gKj`z+1_+Vnes^3^gOlPa8+W@gL++ZMoRR*|M#`>aR(i6zy^2fVs4bwxl$f8~C zwPzh`j`X26qPJ_p(*z>RQhQ)PFGP22 zi(eUwu%?vGjK5)>gMCc#mRPx-`3`D>)OxvoWOy5}t5kGQYwN(2*lO?(N1Gw^pjV1e)ALtH| zkc+xrMh|p?Sn=fJ9S^nTYQt{tE5vx(rUGLT^oC(k=Ml43q?Y72MOIVV0-1W-Krh5g$$OVn0 zKd0vudxoBCgP1eg57O(v(@+o~#tYxT#G^>rESs%SA^UJVNFgifnlw#rtbyi!aKkAB z?fJS&*7_vbcuvaN>TGM_VOQVTk~#aX)^6bV6G(^}zw`QKQaLQ5A%pSJzrnMA+i9vO z;JB`$ruxT7R#-y8cUz;azhzoTlb)A7Wt`d|Uao^7RREOVrV|@310%U_h<}BskW8&# zpF``Y0J08FNnSbENM}mGrpKH}Y8s(N2vn*H)P4MKwYJmVu&Zi+H#b9`$SPhYH0vVO zz`Is%Zkm0ijvy>B4NR~sV^8x)S`u2xwHa&?I2d_ZKI}`8&k2KIFYEGy}(opYEvsrC!x>`hNdC?y%xJJoBpmh8X+aH5RJ=WIWk<8hCE$fVpRO0h298Uj6AA)kvf&SgFBM z49c0dShv(7(h*2*J2-E{-=J>h6iJa?I*XSxyymbOtb0_ej1Kq^aSg=7gZJu-77f~ zEo0C!<`;UX$g!gByEs%Cn(>P^!7NXOj=cKTAz8o2{BAC-bEyv3bFv*Tg?E({%audA z13I`~fPEQLr-HqUNtKra6e@a88OI)xD9Q1}4m$Q#-9ov`p z$;PTDFMnrHJENl(UVB4i#T-A)Qd3w!OR11etD{|&kF%$-54uHCBM+Mo8zOU%t_#F&Wlw6s$0rH!q#yW*oW=vuH4R!>GmFUR*pEd5URx?c zLwX(dG8${x#YXt%LO$$C8XgO5)cH*@CAhHU9i2$j^EcT)ifl_*8)vz9@eHqPPT+FR zOZ#}u?OTjipFxJpPG(xf!z=b}^tOMyuz*4mX3CW0P6<&PeU(=JCw+Vt5jai;%dTCF zlay8DyZl&-)>+4>E7YlZjeQ{eFWTe-w|#cNr!6wuDC@5RmL~x1lZ1NLiMiQQx=B+M zO+e1QAj)A`HY%K~XXFQ^+oe&!&vT1zK%B2i5ezM0TzfG&Ghc1}yOwFbOx~31p`55v zm1m6&DQr^XRz(4zAhN~?+j+++qVD=pQzPK!PeFARSJ-Xc*ES>|R-_t^VA=0r4J&)! 
zvAzA+tx{Fy);u2tF-Z8d6JIh+EL;6RP_CpeHF3;MKT7>)@hv=auhCH7V zaMaskg-d1zAq@fb#GuExls!>=-2r~h&lb5u!GMFn6;X#>-@<}Izh(`|16fb3?mTCl zOadpPsmS z_+ug)1a2@tcBs;$7Z7*eG#jds1U)!X?|RQzeN)}H(c7l)RKf=TQ`YUxVQ2=vH5Ks% zS>bJXKCJwky%q9g;dq=D4;?1dRxwX#~~LnU*mp2SfJ@PfxA8b^FBP_~Nk<$Y8i4N5A67FD|$*#%hz z>9w$<8P!Q9*sSEmVNxZ@zGuI-8MxiuhnW*LqQBHf9oVN>F`*TNI2xS4Zg0KeY_(rj zUg~c!_t;xmfwdTer)O0)+HXR{#C`uo=QpyP02SgeZ=HH{R|w9ER|)*hIr+Aoq8w@v ztCTaHSl|9`0g|(%@`JtIGKr=9GZ+;C5`SsFA+e+OF`FeQz|I5$8PVoHap7Jx7}!?6 zFB|rh&cP#1pI|MXQ>&#SdC{dGIBV1gS)A|tSZu$J?!y@b(DqMomfQS+1*|HF7h(G+ zqR5LcTqoVX3wJB(#&UFb&Q+8ZoCZagl|`>;DL%f={`ltG053RiqclNw4t*1F8?2rKAZKmhvSVKYiq8o`LNz%`d7(1=t z1;EMQ@s$4+USwhJccTgRLDpoFoG9&CPpH5RETz{2Au%gQNbfX|G z-@38clMdcSrKnV@oD`~pWAyJ9!N{GH3IfjE(eH6c12aNj3x2QaDO^ys=_8*y*K!w| zBAaRFveMO9Ws+FV0{Cw2q?^*F(Z9UN65J(IV_I+sZT2%yaI0qKw6{n*T!Q<>s=E}; zGW+GB=o?6(;&-5vO}e32@WMMqM>Y(N=Feu%y;a(26+k??lk&H8`5%#ne@{*Z!bGxKhMv&LQwC+RWh-Y%P^I-mu6}n4iX1)<`5T?aTP}GIut5QAwdNK{Dhz z4PVu51L#qhd2N_&^NI@1Dq`geRa>*t-H3eUnh)S z6NUT2_|H!;gC53PJNTX*!w3}r#=zhwS5iTHml2$Tl_&J7m(L?I@T;(Y2o+rDBUpQ1D*41>FlU4Sj!(xM4kbm9~^*Hv(%g7hX58}S*JZRJ_OIZzG)}#@PpRc)QKyP1w?Z5Sj3Y6+Idx!9`tR?UN3NI{F=GF~ zQkDyw8Vb5Nw{nPoA7|P>Cr39#G~k!BJ|{3bB)V<2UduBY5h>=^RkYcZ-U*)xy}F0N zomvz{U?A!CyWphf7Dor?nREPM4%n=#k%IB_KVmWF`Y2b6(b@7s%OJr`jyP1L2yR~d zs#eQ~Pf0b`LRfc~JJx0BUqr7^rzvip?!j;i85d;u#Ii3mPiBNpCv#q-hW|$3Y7Kn< zyhB*qtd8;|jIM4PY9rq4?V-3jVjJXEPvTeM)#}TecPpR_TIV7@4f`Rgr)mVgh5tHY zGE6;gXroP%&pAbK6u0#t88f9l@sZ~3+t?J#S#UmADId{^(z6tqn#oTu;$iRsEcoLT z7DxB@22~nN8@NYn&BScdfr_?Tc*I zHZRsQMXT#pLV43siv;P#pz_InZM*627J!GpjKh`*8P%X5c1bwi$haeY&5+{xDeK&8 zTZimbD*@4^n;48}mXUW5b1>bKf2oX)KRBSX{?o}exW9bAQ#|QJ=3Rzeni^p)TxqrK z32j9BbtlKo2#uRRXw!WOFCAJdK@i5;;=o8iN_CvM#V7r5;aH4~%J9}1VG$`}Zf0dS zVO})snff=*7sG3El8}3yAIiSEwlg7f@JwgjSPYFLdu3R8X(gn&?6JzA|*r$GSw z6Z_=#ZeD7d1t@Y}c;@r$Ba3~Z7Gv`6RVX8x?ZsPEclo5wrC*5sMZ6_v=y#9w+Ky{g zXmcJjzaiK%%!b(UEyql$?ipaCPC)2A;`B4&*V^2A)#*VFYbqE1x<3sqUZ`fe1zDag z7ZzWmLZMCz^)0m@caMkavOu_1coli6?$w;?;HU#y)a)#*iELXBk3ugG2KcXwu(LEZ ztU$^s!3?tGRKw)f+8zPOoXxvfxN01Fg9Sc5-z9Nd+s*82hOxkaE%uv7M}%?H!r2Fg z9~Y@+s3>us#GxDg^TZdm$H`vmjQ{pDMK4*&E>zVkADBef58)RwOr5_!Q`@ZWrjYK7 z<96Jr{^t3`{`NPN?}@Ok&EB1&x}t?IISc9~vtRtZs(91PJ=a?|#+U>C;2?hyD` zGK`@yrq{~Oa(!p_2^FL(8T_qJwCq%>rq536@id`LZMM z7+&g{Kym{ELHoZFQ*u&>ZhMAYdolN#sC}{}NoCK-_n>Q(PBNG4CUg8)m1ifb^60p) zS&s)KVE3Ta|DoD8ioB$kk3@S>4YeF}viRvb3B{1{-n(v*)=f>B9OOQ1FXr@Jzmejp zBYT@yiw#%Xkxm#ai2ueU0l^Y+e{a{t#2@#3wN2wzJIrP?+7X`6ZB9^WF3tPa^01(= ze+9Q}6QZk;POIFM_IRv&^G!imqSCnyCmiFR>>_GJh1j|#QYus~((`$1mCN=~bU<>G z`2p>()fOoq=pJ6nK^#}10;kAh7!kGoeXoc4Ae`g~rCi9qlq7gs-1jK~@yCE0F-M6| ziJ_&r=(yPHCAp<{T9JDf@$>G6$`;yi+}M)>uVf)ub4v3IR+tqwYWs9!=yYlZYLS|| z@VC$ySCCNn-)hhnE8$^al&>b9@Y)B_v?)l)e2f$H)nOB*(1NFv=h*xCU!0+eIx9+| zGf#^Y2fqPF$XXulUWi{D1(XaK$>BsnzX>|R8`=KX4SP|enQBU`md=xsmLxelR#0gY=D zqjulOdkHFt^OOsmCC*XtxF`(&)NH!{i{qld+T#uLX6bZ&=-T*M<;KJnUcQ9sX`yO@ z5Vvhr&KJyLwi`~dTg;%NmY7PDkX&-vE4-(%u!_9+km4xllxQ*5pB$IqkyEdF-s_Tw z;~iD~XzIM=hV)%5bFNvbYQ(N+UpPp9SXj(6As@kxho6&P&7FVsmy`d-kJs&ATd8q0 z)<|Pgu;dHMOg5S=MYm|!;zF3~qS7;!X8X4HrFBbbsfsOC?@>5)9|BmgP4~v-*^I)m!P@-<%4b@(5vt+4EKD)ev=b+(8KbtHe( zp)L+Jx=DDF`pHQisaW$(9Zj3X?r9&fOqcLW&ec3-P~7aSl4V>|OIueEEFn4g=$>R) zZ_4mB5qjsILk~j;vvg3#4Wv>f)Car1hOZ>jux8>7x#J2--4c77Rmd4^>DxQ!wBqZX zbrUTLEtBgp=`0g%r)9uw)vvFdFD#f*HsNj56bo#y(@N4p?J~{vN4IN$BUOgM{hmy5 zxhor69z+t08PSMYFr`(V<)Gy@R`yyUZMhDnw`2E(?1~kVnkd8E$|>+Bnb-lcrgoI`J7ODz}y%mvS4{ zFaC;{gAW07S@I=C@BHVq6oHlldLNK)}LKVi_+`>=>m836GV|#nhg~L z(AIEWoX4tgO^4$~k@|U8?%&FtEE%10FpyS-maQYAMA4K2S$!ONVA#@y7tws&twf8I zl}s>JKV#dBwTccTc1XLvvsc)XBx 
z=*=C&Mo}HAKu%k`DkAo{P}8)buJ;}9EbaUhs2G-SK2WuH&N#H!I0S(OH9M@+K6OeN zv=KgrRevWjamsGKS|o(6Y1qx%l&>)(>Jt07i4OSNZ)``ad!LtOUS>#q=T&S6%`>d^ zBYQ~%%rY-4>S(Fv(1{$>SD!Xvg1|Y zHP%dh)>A(2*Fr0dZYal|WpS`WlPFDiIHjwd zPo^DHPIrTMjXqLOC;%aMnK^4`aiZ9!hiR z+HX0Nx)A>N(|LR4TR1?QtZH~@qEdm(@dZU{@tS{WfBV@ju?z|il~WnAm`h7nNF_}q zbhKYxK>z zT6Bo+?QHtb+wW;JYo&pQ(=FHjdEw%=Fjw^ zhBRtEx^=$^ph;E&c6wA62A{h{NtIsAkde^_Aur3v_EQ6O7azgUX^=?JqjDu0X3voo zq{*8gUel8_xRkm}Tf}broh-%`cF9|hKxsq|sSnU)L~~B( z+Xahk)hgA@dL_%biZx0cJ=lwkao?$rii|Z>x^{gbd+T|3QtqlXb?jGU}`njlUTUwfJD-tfI|g zBf`T_IAYAJAA~Qbak9qEuOlHsFsSU%?5JE;z{b{j#g288D9LkLYc*4s6PI3-mF+Id zL~T;^v1l{Qh+*K0S9()0K4D--vMc{Z>7pycC^(_I^f=gQ z(xZNgDs!nmd`Ap>ykQ?Gmk(Eo2}L+Q2lDKo#~6y%*E&U_+FNN&(*McqYTP$@<4KSh zWqca5MdQ4_;#HeN^T$LwY>tq$LRnBJYgve`xs$qmS_YR|K#2)JDPoYz zb>nX!zQpT$#ZG;M?Wdl0A#D)5Z(6DQg*74 z$(D`DTou|chJM#%5IA{Y_Boe5IEZ%x>kXsvMMJFq1I66A^W3sl{ZlOwykhk^{X}N8 zDxg!t*xSB?2MSLeaZ&%d_aTT9pvRu1{Ox0SHgw%?o@=n#?(L=-tWV;97#<254N7b$ zRn>!?eqhStdQ zC(p%jb8Jq&zUWFrR zF*zU-Fl8r4JQ-yC05IiqKOpkiG}_|)4-{zwMztc;{%>S)hI3F`6xsowGj0?X+Q-jB z1)SQJ!RT*7`F$rsnR!VtVzfSWH=iO&(Ide0ra^$4sDiugOA^1586!%}%E94mx*G@!qA@p}xJ!lEJW=*i{OeKGc8#s* zVM(hdqqkv(5OwFE*1Nvl+_pe|iK&!Dd;xeyq1)S<(4``JLRQ-VS1R}K#0hOd8T;4N zGZw>tt>yeQt=4}avkOe_u_(^w`U~r$=&nJM_lOivoM8s`EIpEVuyIBLz=M>+oV570 z7~M<(H>A>=+dY2vPMvuNV=*}3Im?Qn8+}G@ljfE@dFl=+An+~agwByUc0)|G(1Ia+ z;15+_&f33Ud3rDapRGiM9umZ_zo&hf-GRWwRdI&P^s27Bl_8Kl8rqqaa}Kp`g%Sw7 z=GUfi?m&)NY=P=$tDbEpW9usNrR!dKLLxWv`*ee7MJPTp^{*>(p`i9&!~7mrh~S$3 zX}Q*r#+4F2?ZHARau0`p{EiPj43Fm~J2f?zHFy9n4)u=Ya=lZC4hFPPQ6NsYKq3=D zw_|E(DkJ#dL@kh*;9Xs^K6S=?Dk@av02z$QeE5ncsK^2{$%8xP(7dm3bku@Mevmv^JuMTI)1^E~>`-fF># zvd;03x_#5ilviG}l1t8$4XuIpH^ypQ%661PGO8G@s#IbIX^RqqemH^__@30%1m*an z0r(qpOxYh>=K-cyckO4D3BX#I`@K$(P_1^Mfi*8)w}G>i;VAWLWqi{`dMY3wm~*(J z`Lh=I)y+QXkp9j7qgR%4mb%Ef9J=cdw#N$icZVLcXP{2qj9TaE9d@oZE#KlaUpp4) zH{Cqjx}TKXwjCSnw5oepVXt{sL(>)s?d418{jCIt;M@^sYMMGPU7n{wP;;K5#*_9FbxFtc3wfv|cY3o1VZ7fAbCcT#PX$oLn@)25QG z`r}*DY&?e7(qpqDAMH*MH9^4jyQuQzIbMnQQMzq%@?B6DNN^EK^Sn91!I>SqRSV(ehl zo47uo^N8?`&!s#}4Dy;-HXPWM!AVWXnWo(zZpCtH#jMFc4{0;W<#k=IZbm*#1}SQt zp|fdZPiiPo(j-TK2`{)qpzgk!;qjvc(5=1o_-ardj!7W#!R{1}p|U3E2>dv)d~7x8 zS@x*%cAR@ra8g##h+@Nu^oyav5=?hZN!ys-r8}m5V8bc?1_BU#Exq3SQ|WE2Z9FWT;E(KygRPA% zz3b7=DmO5!6f*8Wis@$8tEm!MboXKkh;zb+SsLsuIQ43MCN)u$bwv^+7=URrf^~BX zh2cpsx!E7GYa|a~iEhWpV7pRTpDFBp_oassLIbq&_1+jahSbB(iVy*+VxJT15Q)a-EaArtXC?1c(OA;a407G#Zz zOkp)y=ykNZk^Q{z?DDl|GkF?b;=oFk9%<*qk(K7;jbd*KGrfU1C36q9u1)sXBZk|c z&D{dSTawrx#70|AS>3(tXfwUUZGD z>LGNr#%^QTBC)BRT_e?YUdbMCrlPXnrvM!d+~Z+2{yQ>teE2$|D|U=HX9dptn_@fX zi{=LY)h2bzCTM^bpwP*O8Im1!A67DgNF%v`N}tHZ{g(N0M}Di&>1H1dWHh&G*}S*7 zk4BCp(nj;UFaqG%i9X!J-dd-<%B2$WofU^rmjg?9_gQk{1x#KsNrOxVjrX;R{%_e{ zFtn1MrX|&&b=xPsQ13I$Ee^d`4@@XuV&!Vr2X8#Y1`!pp!Ke+FA=$z1xov*(@l$CJ zDW)2v!+y;#v6P<@dA6ieBB+NZkU+uy3s~aqOSo5m0@_f1k=8bBDi5<;`WFbr9d-}$ z0w8APM)%)SOU7{0Fj-RYMthQDb&|Lx6svvLLpzy!XMHJ@@Hbg{3;hC;Le5}AjIEL? zv@ccaN^_j&&v+rv{`u)i}@UYw0`2Z#I8M|#D#)klE! zJQD_H-gv<5E9{@m3WsJNfAt|GC}_VUtHp;ukJbo6%;~+iS0SE2IO@^U9ZL z9BHU^GWy|dOKpfQG@Yoc4ZCTsu2hg*R@ms|V9HbkZ>m$&P|PK>Ovmr;A%5yAlZ^yI-?BWN$qHtp+8v#4%*rH`4g7k8Cxsar%K`^S|AfP$xTX|zb19? 
zL}IaI5l!Ogcld~HTf7X7R$EcKo`fgLR0!x75+|e}0J% z3y;?258L|VIq3kjj#0`|nrRlKzT`FEoBUR~;JO&|sLpgamOb&2o$MV%X4RYsL^X~u zdUO}-s|C*U{ZGhygml|oP zoe@(asZu-x-r_4C`(d~*5pa40E<6aYe{ux~wRAmPW*VrVZqg3|Z5!dInx|1)eiils zsOSGN7*v-nxNC4NF2#JFHL<2NuyLL?tXk-sGxWkuw!k0{ca$Wpb+n5UyEi=DRYgYyprjH!Q;Npt7kGiN-h9D(& zNkQb1a0=dy5RP3UWK&4kNAb5)Zp$GVm`M_SszY;@J2fM5O5t8%oKt~u^{(;TC!V*k zK8dG7>j*>#H=QpvhHm7aBsQ3P)GrXa*lzJno2+I7XKEq$x5#vpc4S2|x`=i2!;z}1 zDoEMa6nhB5T^4gSOq3X8v4_9pi$OL}Cw)R4vU-V69UjGbOO!Zb=G;w}7NlNSp<~yC ztB(|s-pS54XyPRC285xmOn||$&iwDRi5}ONn-I{tl3Sh>HIPHG9ak|6aCliQxTPSvwlAe|8xPe zz(b)*F_#ciY>l7#)c|&{;E*YExZ1f?ZndkGz34f|TC!&qga^0{W%Sfo@~sEd+FxKL zE5wGhJ`$z(#KUrO?W`ni8`~k@a_@mXUDj{4`Bc|y|B950C;=(i@@JC$#kmt>O_d@Z zzsxD+rxnyp1fIs;#{0LZBotmy7Wehq!+WU*$Cv-~!FZ_|7d9A!DBr;pxWay!Xa4Rg@@vdqS$WdfNF{St${<+aZGNj+;T#97xuFXB#m^Gs(Pwst z@F=nc7v`{;?CeUlrHA}lssfIAKfHdwy{Ji?ntv3u@;4i5hf-ZZn>2=154zN{rQ7zzvRKohUDhRZ>TYtOj^WG%3< z5dWxBq?6LASM?3xwf<98!jzq&ewGUQaGTD;J`%$GhLxjTp?gRIwM`^gq5gh^<+(R7 zWF258)R&VBFwI-sNEu;E;@Ru`&C~#`)m(9(R^(aKQxwgaQP&m0S%~9BW#LceTJWS= z$yAPgN{Aj8trHjVG+5p~*D0u=@apr!3~xP6)HE9w$v{p^5%zVK!D1)5>@?rmE6XC> zb-${)q@G-7bJM@m=i_&&KP{e4xMF5%ni)i2fiHCkwI$@gw-#DP*kULs4pC~DH+N#9 zmBszey+%?-LdmZ?5e)ZxtBzXej<$iC)+wsfbSdCA&3;mCer1chFPi~bqL?}(El`1tEa&w9X z3DNqZpr^1J=A>RGH7K_$xW6XC>q$|~Vn4$hb zu3D9o^GO_NN`(qR;!K^-`PI1hgmCbPC5p?n9{Ku6 zZQZ`doar>cvth6MWXxwZ(o3nP5`12JjJQ(*#Gqc`!ec{Eing&!@z=M>b3>F~#~-Qp zKMr#F>8fg!5o4j(bp_wf$WE`6A)0p8Hsutb7P?rc5DNv<1XjS<;eCKzTYIvmXl(^B)ynt08 zs+nkz+ zOQSO00VWhm`~Gj`-fu7}kZiw$iq*rpUCZENi;E+P>D+?^LmlbT^>Ghu<}$fyt)u@` zdwyhGY-5Yf_K))$dbODx8FLVZGvpqI15ZB)BeF5Dz&Ni~xKW_xLl9`UZX!3zV(hBt zMe?^lPQFv7Z1vr_iRbLKYBSx?JWaKBMaL*F->zts_1mEy{gnqvDt3I~YZ6>9lLnlP z91s<;ThnnRR}p;?OlQi^3SS%WNMZA4`e}UcMOaJ>>YUxg&+|v4f{vaDGB#J-b0}In$;A`{Ci6Il)?}uc$G*r;H4gP z%A>$!`IVg(mBX)aaYUPp(eOQYfreexabgZ(x4@DOBDT=bAIB0wdE!5$^SU@d^Vtt>bV{iL~lM_$@Hjaea_sE`-GqwO==`+ zm8!Z2nN!l_Pri1fDsR?oFTLphA&xXRsMVV(aqIyX{7<5EHtW|$FkqDR6ZGUV0(r z3-a>FeFs~V%DJY5aAR#z0hyJp4?3pLRKP*lkvI83ETkxg&DlD^j_13xd1(==me=k) zJ4kipKu+6P(>;<7g0wT%!f24_L(xw3FN;@oth|L=i_j}Ua_zCpV9CbHAkpT?n}9^Q zC0c-L_bFyU)>Or}Sph1$ch4>4jw{+WxDo3*D~%p6R<8BHzn?|Nd2ICALut!BB~qhv zU+CMeH#ze===AZ$K$CYL9*h{KX=i3fzRY9CUAZpEpcPAHCo9j|K~A~}=QQv&P;y+% zTyx)COV@vtOkIMH#t{{xlZamKZ7K|1z{ zz!fN4$`2Y03?)lp`rf1Rl7#a*X7qRA$E`TL_(eC_xA@?~^_uRsRhrf-DXF{QynA~6Ks0Q4??3}dM z+|f%(i}l*~@|AMz+XNXwL2I zs2C?pnU>rw17kdrj}LXwoYHeP_F86NrkE&*gvk8(#nP zS*)z8-Fk$lk}?K-5X)s5L}c(HP9TXcNf7EkC!G<7v-J546C^rU6N9q7jfi+)pGTyj z%(>XS{#zm1{?ZrA>+HT!JvFyjeq>31u!%l3t=LswU}jONp_Pg(FZTUFrVfRLp+0iw ziTLZVeQx1x%e!uS{Sb=M6$;(o0JX{o08ew443%cVBpBd>bc^>2_(`O%`_xLq;102@ zd?W~IoKfGL!vOm=r>G zq)hT1(5Z}PU4xb*SiXNx3oUHa)puQ$l{IiKpRn7L*j?vtPr*|vOPal><-vB==u(%r zmU$a324t2SdexcHsz@Fl%U|GuL2yNEG9;E?%3w11l8hbmlPiXxx+o6If^DK$7C@`s z`&R`G&XLOPjegGyL5y|hks$R;Vf(^MK&NqzQcX=4qBg@L%^M9pO%VlB{K(YN z1R+7Q+Xg$7@$9_6)GFTeNgnaW2-6H0;fM(t#=cSPD{zS(CuO!N;4a=BL^dJ+GKxrC zUwLzgj?1Ln^y_hXEN6dfLdCdr`Pcs~r+pSE{{Ms*hpfsm{8a;;TZAWX+dCyM^LLt~ zJ#YDBe)<6tpc-ojmq}Z0M*l2KRXSx&J*AG)mEaheuYz?^g4}3EJlp-1hT>k+`%&Lk zFId)JVd0CQ(+Y{gUq%frvG37yDW*c0CoYZSNm?td_o^8qC=sI7Kh!dW4Sss|!eiTd zu+1tVTH65|@-FcFF?z+fgPDI8}@v}o+A4mwrHN95(33%NWyJN(ga@jJR zRHb~WY%l*$ZEqD7*Sc*Bo%RZ>Wgi4hi0zYV70Z zp*Oktp#Tt)1zmD!hq4?ari!pL*JeNqe}`zcVBoDv@8@-g78&oadBt-SCXrT9aAT*F z(2X@Cso3C;Z2|<5Qqju({^`66^H^6DiTnyve2%IJpSK!zI#ZIB26V}O$)ESAFhd!B zrH#Fj)BW2EQ7$AmljgamD!bHb`2f%y9d^N9PP6@Lny7Waeb?DP2E>1LQKZGEwhrce zGcg>Z&@KyU4@TQ< zh3KLjgYC=v^6GbdYLZ#yi%dGc$q>gBz>%5?>V{EYf+uYe(=BbC`gJKOd?(p_u?CL* zeBy5tn}cUgFXC0wLXA#7t^S#nfTe_;(cTX4{}w%SsIAZOS}Ukvqqj}Ua5GaPACp%8`l4m&lSd{Xkw|Zz6-zDk 
zS-V6^9N`4rUhbDfB`jFK-Cn)c+OLU!e;&eE3DBa#4kCxUK@hjg&GHzsk^ky!?EH_LMl{9fWrPA#nl?nL{*XL!n*jV*UP@ z4oV;P{{8Ce;ZcR%4dZ^fNuJ|QQ4K`bz|GwP8-8k|Gk7ri;~N!}IRr^%$v(s}*_!o#!x8c;Os@dWfz9A<UVYHFVD5F5gzTqLE6zdAbN0r)L)~04 zS3nD;uPo8YGO`d+m99hI`05 zz#2axQ4f74ifm%!-RkDZ-BF0*AeJ+uKBu~&3j5_@fA=Tqy{_{*F*;wi z!#hi?z8ZLi`^+bGX^Zj<%TGh_Hy>PqjAFIQ_{)2?;?#~j+&lY;lA4FYcdfdgEKi>vg_8kwf5CMoV{;6oXE1hMz=>NH`rZ?Nr{NzGq=eCao#S_Y zfW3!b;=a-J;#yV8&VHv8!{hTjHG}!;3WITSLLRi4ZdZe{ZZ>esD@16SyhF^gFR3B0 zOcMV`-e~n;70z}BH6-)IjV(DfoN3TyBx0+cEj3r6#(y?HQEtijL05dXSWO4gl)M1* zh_O0}%F2*Y;|}81DaC_BV6lp3wU#l9!U;kFmj(=pT%gR?v;qk8zoI}E5%33>Wsb$C z*j+=cax{ON_Yl^3mrK@O`x^x4s6@4YovDaQ{o#WFRFa6A;epnDP0c zb%akY`lU|vEOmJ)b&=_^I!ign;54ngUZ3PZ{}j7oEQV~dHWYhT2%_Ukh}gI*h|?=} zbZHvb9ybf(LxHxcg)H2jWI1ISBvWvJ_n~Qw@NM{hiLr2g`^lp!>vDZbb5 zWM}_;+&kSl8SA=R?hpL!;S)dRT%u{odpMeJd{JZvWz$YkP1#wr>1TRCJ)mw##I_>9 z_#({aU)yDj81@oE)Cz4oGv@xIb;}&VA1|ArKLV~NVgeNZTVDOg3iaPv zmSbwEBi_xtO{YvLY@Ez;-*;{c)j}DzGI`iW6UT{`6Y?X zNppmmQ!9aDz6_+vbd}qY&>Et!*sYQO3(n`4W{O5?k*zj*o@=k~6ZCg)NRSXSjNmd9o}*Uu?xpUiu3-S z+uWzj4k`&NA43X)!_9n?svrc+%K4(id9&-ARudeI$$=~TuPhgPe=PqIf;^i`$y7Qw z9Kh=*yEv$uo|Kdk=;r1+)HwL2ux2#9Bq%j_A^CHmTH2MFFzp~Qy9Tn0H+6sh<(fRy zlFzNlU2AD~H9ase_C3%f^&K)bno8Ed62>TJA2}9!tp955X=SiAot=3R9@RJ5R@jFc znH<@zKjn_ad#VQ+9|AhvV=%KzrYbeG%dRAttG6k7eFgPKSh_rmi_PtxjDE2G1?L^3 z{_!Dj;!_Uu-gN0&oN6h>ts(E}xp!JGmgfM(WA1u+@C&hqY5#ZA$MIOXNL_wSuPAB} zTX)7{-=m)zW;>g8vS|j{sX5ws(>z@4Ptt3iE#XB&Nr_VAXtdwi8WZ1NNxBHv6-lw9 z6}K0?MWRU5HI9@*`oLx_1*zftr8h%nZr8%Si)}?O86qJSW4zSv+$G&G9I+D1xm83c z7OuI`?Goj$;m!Txe3+F8SzYi}onj7h+u!}w2s4!;h9{qPDMn3|Z?49$IwfayBY)e` zR+siHxzZ`Os1=kp1x4xz5O3td@q1i=p;&m{E_;iVW-Ayybu(#tAr zT4va^@P0qr=0ukvhKKu9OW8!~hMtacnZmcy)^&%NOAh8n?Q;bFs4nTzJn5aeV=feq z!xG{3b+0s>`c+*N`bFqHVwfG_pKYIl>;s8NV)qx$YwszQ_a>#OVr0$q6@(jN_?@Cz zTGj>_z~p!<0tyJ-Jzf6wD{mo`1x+ewjn~CQjz5cy8cs#2%`;SL4c01nU*|^^6FP>W zU~G+65r6BWrFJIXJz?x+ad)aF+`g^r+7>X&-W_j~tw^pM;U?_A@fj+=5{X38(NAGG z5o`Zoh`&h=2lW}G(@D|Ii7*L9QVe}^2|#vNfGt-EfZ2@$-TZW83RdwQrqE2rUrcY82EL;(Encte90D@Di^42SErM6|85 zxln4Hzx4>u!v-m&OHSb|6AIRMWD2{*P2~Q(yKLfT`9zWRZ8l)?$jLFY{M~n2ueYR* zpxUToc=G}Wjrr2@Q+ND(<`Mv`!$h{6TakXQn)&>wvVWXTo^unRHJn+dmdttL5t}JV z7h5ytb$fN#g@uv}Sg!OOC%0SxnDPG(&1dUS%aUZS?9886QM#PNUPt}GWK&O4Qjo5~ zFn#q!{czz(p_9i}RD)-6$ekI=z2YR+FyCyhoqY5k4x#@`;NrDO{l`VQ+v%}3G1fix z)<{ENdQ9nL3{c8U2)2u?4k?Rq-;RxSGSPc1ydM|ew(}M)#LH_Y5y~FKl%-M={v>qQ zaf&<($GiRt+iSp03zy9Nfs)0Tb;AE*6#p?`{^m}l9ZM_!I+cKkbaU8k=zqInysm`S zyi&oe7dszA3~$=(PgoLls(1BMD|sWWPO&S~sNbYk<0nf;!96hF$7-;&_TgCV>aMKj z7>?39ymM~xYEO2ds&%-m)BO?0RbiMKrZ&l$ez9e)8cIEcrHfzFFosc2kemLAm^hoWJm4#xFsih_iUftRX+t?ZSy=U%V5!&1a%MX4^EAfbl7Cu&3*^h ze6y@@+fOh}^3TOcuwuG@FMG%Q+Mx2)A|1Fo+%rylueJvMXzk@v1gH*YeAbdS@J#iQ z_J3^P*2CU^wTgUyix-f1*hDV_ zQ5~C{o~+8M9D#W%szixL!x7){u5+Y9@Gdg4qLAY^-_jwjfSM-U=U z>U@zl@7}aHY`lBt!M~WMWgo5$MDh&-=e8BBCuGB~wp zG@F#cQnTy>OXnF`F-kI(kKQXE*;VP)YGQsxdIAR@e%>Q3T2;Y!Dpyi9NU7nR7E^!* z-s)|sdd4p-8Tk?zfp);gyhkW9H}4dEX^%o@?$2Eb4HO$wZst`_=r_rPqgNBRY1^YY z3=a}}WsoE#DP-=X{%PG7YmFA``OnlHgGzjcq(BpZdK^Madh;pSUb#9vWVzlggzD{dN^J3Sg$Q&CvDN%uy(hFJsFu0Jk0ZFg|7 z7`AT|8BOm8A{SS>HA0krXP}8*CTvwCgjqS@Ya5NE0n*lRpR0efjd|bd)+ZM$oe>3f z;OQ~IID}If$tJ1ZB%%b`fOSgbrb0SE0`OA-3oHQT9<{V4lf(+;s1Eb&XZ6Q%T4r=@ zGex>T&y>XV-RzeiY|?yY!ayi1F$q4O31r)A-lx@heV?(hGI6fb$T5zYJ0Uw$88eWQ z9Ce4HoooWgh=}k&v^1IQ6j%Ih$pxNk;jI<6D64E!I#pw>_k^8cB0PWrG9Dev8qqPG z3B?Eaiz_`c=VepyHOqLQM-8%1upCGB=4ZLUT%=<4gr(1--VGu`0)3U0@>-od4zE>))Ju z?s4RnjnkoV^RhYRI;HeSC%03~QBl4TgK^!kU<~DCKOCuk6ij?YHGdTfb}R$1?(kNa zlZ?$W{?x6-q^gd`9QdcP6K&pFz%V6j`Ott-v}&|#mT{~~#KbXUI&&HMTkfp@fCH&u z!k3~}`Rfq_H!>Nn<-hO(St%lB-*QW9&0BWC$*Br9ENVG`bnvkhxzu!Kj4?aGxv$+O 
z$`E7lK(F%s7a9Ip#RU_YB`+(-r=e)TM9DfUAfSVpuOzP=X<|znW|$20^$X6$%^Ywl zenLf?&?&Vf6%yjHbtYY_C3;nI_a?kG*}}KrF-j!|Eka`fc;*-iF13FD3&eKK5Db(Sx2 z$pMtzsIbpF%vG78iA8_bOrE72Ab*x))oaX4u)TG9%X`u2jJ0MJ+rYH+3^0_yWr*$;F#nu*s|AKJ6fUt zQEwuZQoIqBoRe17+;);iA|RBRC#^eC8;K#EtT>_gyC}r`XXh#1kiNBj0I(*?lk$Q zqra@}QQgS3X|(D~?C2HEz(5x)aQtYnan00` z3iN3(OT=Oc*n3GjV}Pn`MF3ec39Yt4;A7)Ugg)?;1G%^MwF7MuCv z7knQx#Mi&zRiK@4IT`%M)B3ZFUe1xN= zvNa|9`u0;I+g9xl@^FsxmonuKOCJY3$N zzmj6#ZM_xIxO1%`D^aC?j<`U}tU_6*U7<@X1eboKxU9hT_@k_&;Y!434l@ZzvhY3YipZ zf?}fp2l9Zt*oT+7XF5tCl7;^-_8+Jpk6ZMMdChETwWli>k3OEvand7^BUPXKN2 z(%-Q#GqbM9IPxb2!BHOE3W>WOeNa%D91MOV6~?->4`;GXjAJnS-xg;7h}lvkA?&Ru zEp`v#y&@_QrAZyT78D{5J{Hau*YY(@Y9*kCG#Z_plzxenC5YG+8RkZH43ykm?r$t_ zIn9&)3vND3C^|n+m92avcc19AU?939(+kaVZaOSd z{qyg0AyB~H!RWY1KI^md!rMWo=wPs)mTdU~={xSjinGO6Wootb40#eI!wws-x_JhH!wJn_TlRlu(BcoUCw@qubxqGMw~0Uf0#;Yw~2fc?XW0t*o-s)c}<$ zUd3l``GZoMGo&1GIP)Qx&&6!Q8%8oE%T_yR?~;grVp>XhMpC(H_q?>k;TG%t1!uEt zl~vxTYe2mV)jix_%~0USP+rgt%e}=-v%JSON}ExgV|Mz|0+spbvXF?N&~|uRdy(n< z7o2AHEa{E8>4co0oyWL=O%Eh_57Ph$OxH`F0Ae#yc`5eOR$FbfYrdKCGtU6VGHZRS z@>#K8`;eYzGHJTCjDxZCYV`tg0`_V+=YiUo<)uIM>2o(}xTi7yOjO|cyhk<;(43GW zj&5l+RaJ?i(cQqH)3V`f6^%MXMgE|mIM!kL`t0?YQPAoG=ES*F;s`YLLK77^so_~1 z8}3^l=eZf2CR4GEPN3fjl99qDKM%fErhc*8yGh=2Qmy@KeQT zMdCodt$^rd^aHbI<|4*=HX)}r)vN9AU2CGk%&;scAw*oTW_U@6Jd4Txd7bYCf|a(O zQL}0a_zNx;Fb##j!O%ir>1^gYb$6%S8g}n8R$f8*&PQ_5RHZLrF`a{??vGo^ypK$9 z>*nrbd7J9~+`Vm)12)8RSiPe=NqW+7Kdo~(IH~iC<;tH;K2!FkV#L*z`rW0%)cJwD3ls@KN$A&!m~h|o#lIP*=>p^;C1wN3-~Rf;VF|P8wN%XaY+h$ zrPIRU<_dD;*$Z;uswsSi>c8$@1=GjFXJn4$$%yLkY}pm){M>ow^?OcF6dDb-+hl=Q zw&VMz?k1<+(UNx_vSNLU;d4iv0)oTV2ZN!Qn0d|+P9fYZmjl$o5Oxer+HPEvP5gDL zv#!OuiH$Y~Z4WiBWDjQbgkdOX0sBJLrsC7My2ZBe5pn*RC0LX^#C&0sfkFNpclWaB zn)!#_eX#tzh z;JTW@R3GICUH+|{{!fkczxrueiW#|^h05whx{ZuXtG0tf93#(iJp4#Jc??A{G}^K^ z1PLE~*j(C~Wv=zNt?!Xx`NhXAVx#*SqKlvvON7dg!jY?j_=s%7 zr<4jpwVSH!)k>ojcaI2&PFV&YoA4e0hyhd5_&WD1B^ZjKaAZ+gdt%CXfgqPuqCEA3 zYpT7=9VxbP7yDDR8f>w48PsB;-QTVpHl|jUAKR@ZKC zn_DGrmpy(xuv(y`e=MT1KqcEE9=grGbRp+dpB6`iVXBZ}>y9Y2o|c>F&>IPeDQE8v zEURa^=_->)Nlz%fX!)?jCSbeKW{o9g{(uvcXz^xqwr|bY;y>WuY0Gm4HNvb74~@Gv zZ|T*lJ)Pu4dw^087G$bT!1SqugQF}iTHe0H{Tq~DIR$+3tIckn zCppF4+9VwT7#=RT&3t)CkQh#Pft%S%pjRDOtOyzvq5(3p({L`k0ImrP)8;kDCOp(0C8Do6~;FdmKFFRnzmJ zVGsG5>g!hak^969u+M%&Az925;%`Ht<#n?s%;@~Xh`vwm?ET+OYQ3ZqR1*@XeYZ0| zjIH9(#n?>Q{i!Q3+-0p%ZSJ(Osjf6KdGAY%bn*&Z1sgS;Tdb#e(vizl0DT0Pajdd2 zEf+rD6daDauT^pCWBO(OKFn(tA{=8~7AYd6nw=_6pp-#wZWU>)j7Ao%gu00;6hy?& z;uLK^yU+YRFTH^VcC6r;#tYkmX_l_*Zhc&wRLk+x8hUOq#E#*fBgmgLlT$%|ELE03 zt2E!KB-M_@eNH3xFS+JF8nUlx(TEvYqj#Kj5vufHj`Hebwre$Ub7vgm8Q--zAq?sg zuu!H7pZB;B7r1I!6yBhO0nH15fdydx7@l)5ep#JMco9HcS+*0OAc5<9p`WL!r32Ah zeORU)=gG{E-`{_IUWDJk#x?n{NtRK+MPzZ??DEjeki|M@1)gho(4bKO^2}%ES`P1| zYxP**FW5V+XGeK${v<>km!^G1BNk%`$l#Zl6m=1KC)%~I#0r+e=RHyNEX(ZD^R;=@ zi&!7s34&)zKVe-o`jL46zaf5O6jLFVu(@VXu>q(xi^=%pZ^}T+g`C%tL3wz<9JQ2; z#PYG`bozr#NB;S@V7OFz+QNS<$71B}NL%uwIT9jD3Uc%khm8FPN8{hH-T!Ac z!Oe9WQksUc>)kKDxe?7+#{$otK2mg3Ick9Zc5G=ecBc=u{bckav}n2;+oxN2LTI=h z>#c&Hz4k`T{*R~W#-_eZ2Q{YNh#e6E@fG?1AZl38t^A4zZ3@)IjOSBO590)q9 zyx!jvMw(PH7OJt6O|_Jp93B4fjV$|Zt^ed|-d}LmYi-a)RE=X+D7$p}L}CVuCjnhM zz0S876{MS6%bNoWxcdE>9aNYyWEtQ2yLn90hK5YU0{I;rf zD2UgPGI9bS4(hN}d1jAs63a)m>1iJ`bZI;#LEudbS;{0-iGeHF?OSk#hIA1SF)r9b z5;54b-TP3NZHKlYfMP3tlf7#9xlC>O2jJe`HF_7i7l&eOH1LtRK3Pb|H~0`$aO_s7 z16E6lvh(C*s7|*&!~XRM=ltoiV)UI@jE19t>pbY#M{O-F=s-G{YZ}_bWUTpC=r1^? 
zE!nE&qT84i=aO!t$ULRoW2r7ICoz+N&x^j~B{T;~|7@s;T+Qr;`yp-bbo`y;fVGR1 zsL3E{gHN4x+7pTD;#_u~84!>lB(c@edHngK&;8s{xrg0BWs8x#XgQ?xJ9ohYPuJev2Gfs*~vvZo`F=f}Ak z($XLCdxQ5mc;u!tmNBvblzVJ=F*EDt*Er){9y;`(niSx2 zxzH)CX|2a7WjrZ56sXg3JpC3l>{d+wo>$Cq?a{h34r?|lZrBCfdY;qr z3$)eY81ahRONz$2bBm%X3Z4d$YEe{jY1gVCi%Whtdvt4}^S|o58GEoycT!R1*KR^T zvSPP?*{Jmn5Dn{PQ5kBXZcBMc`7R_K;YT^b(dIb3%qT}Fu7Np^ZaKKYh2ugZ@UW>P z(QQii!&yb;;BI$i{!Bpq?l4C*>X#CRWdo0cD>_g*&!3v^`IIq@D)*j&!@;+Pz?*<% z)s>M0xqb)H1#X(%3dG4pLPYj=@SuwQO7l|5%KTJaWjigLC#@B{fae$3-ZDs^|8@hn zBn$oK6Nh&0;wFYbfW3M)K3H>+nsebsK|~&8sUo^f^4zb+taN)pEMIjT4PDJ1#HDHI z^?DWan{d=fmi}m&zb*ff`-5QhO{^DXNjGSsYZzXTOV=KX>;3*qdD@g-*FM{`bB2&% zjn2lKiSdb6W6woONzopqV^nRz6swc8)EF)tsi3(dayWI12vme8@!$@PZ+vE1$}flU z3i~G%XN~xt-pB!*XMMk3hLUF9g%TS-S1zNUKarK4um1(t^GPQr=JzdFK1_Z73oeS&@`8DI|I^_1&h>utOU#d(#})MwA;T6^Yv(L2 zCN)qJuTpA7Ysx$oS*ZnMnnlI5ap>2ooB*O2tX%_$Y+y2cyKwWdcFCMGw;yT6nzz3e zpk>@Y&^$Krb)RhvOUJ4}b2MTORZqHMR`DdECzi}yh#96r3M^5|8jIgxTP(jo9CPo~ zG*)YmUh)Y!d*?o!!3^?595Rno13b)y<3evYTue++TxY4^couTwJ`terLbV9%ILVz> zoStcS{TAo9`MRgb)>=0p{EO4Riki-_`+(JQ`f{As8Lgexhy}7yDq^L}qWA`fwd^1K z-X>+^W0|+LWPWAIuoSQ#lhwgPfRRR`9K& z01RFw?=Aec+t=zXMN^eUz;`?ukv_z_5qGRB)alT8&k}uL z7S&l}HIQ66N*GkQrZ5gZ+EDzlkY6VHeK9~Gy#B6*UALIf*#)U9ZxpMPv0~XLfnPYPln$ndj4puODD*a7$Y( z&F>0M#*O3j7oNX->J&2SvJb{ff9asvzuk%G8?<}9X~yy=;=*u=?z-BPR7$#s_x?!C zeroH_#{(mgTo}QQxwc&fevk9PRkqc`ewNiMk+=dlS4?g|SISs{Sb6bvg2cQi`p*oN zhIa9IL%Hv|BV5afVQM2@uDlfUr4AoICxKGR?JmJ5TJ+cNBkiS!0jaaTV#2!!w5&Ktlscm1pAR;?G#?GM`{ z$#d7=LkN>gW8Wl=N%k?4(+nKu>3xW-UkqpnDApn7AL|(PFj^iL=qa=h=+%X-hj)0` z=M_NsR+;Uh1MGbvp~`Y{z8^&FF@!YtHPsdS$v^MmjH`JVjIk#kdtxtxc^!t>omE3}7kxcPHgYnOm=9dpsF6z5nc<}{$SUFPFh>pV!z({; z$eV*$nU9?^!VA5w+CPyRvGL9~Fn?nZma?%6cyX#zJMSu6t2Zfq=8Xfjv7WITe`;+s z$x?Jdw!l<8eU}u1heFGO5y|C|+KmR~*34az)h9Ckgj-28oiPq_fs2hvF!)zMc{O{u zo3-QdJZ)|UhpP7oaqb$~p&?aNB%(U^>@E|eW0E~Yv;lK%-c_|4SsK%@kyFY3V4HLq zQ`V;VRKVVu6j@Hr2qoBmx1Kgac&Kv}V@;wuSbwtounu>2xVM|4Nc=eBQs|ipVWsG4 znxA7*t{?(Yns@Vf(gqUYz3rubKhVga+Q-)7{4+t=AQD z{vk>M4>rSl=NWs7<%ZNxm85!-o5z&?XYGXlDt!LO(2NExgqsWL!&~!E78mU@y7JP} zBf7&JZ4J&F#JJ87P9Qix40qqWO@Hfcot?#S|8{x-4rSAxC}l8Wbsq&h8Yy47m5hXi$z_+^h{e<445`WioMAE%{<%(hR4$kJO? 
zlX#dsYTLqzu29&w)0r{X=|5h+xHqB%l+fd zFQs$9g-&fAZNyPXb2+@18JLj@8~5_`fQ}9x_Lg6t>*9Tp5FwjXA3r`0Tn9AfP{G&O z2kq(s4BB5`_P>ZoqE*T^BHZsj1!j4&Q6Z}{Rczl>q?U-K;`{Gn0jBcI+MPem&HEu* zZ>hI^M*YnY*=f@IN{;)q%ohyI&n_EVpUa2tFVrKCh;0`ZV#_Sj7G2Vk-vT!bbH`7Z zl0Wfd5w@FpdmlACrRl5(5~ng<^3a0zrsp({5!)xbXqgoe%@UUB{tdKFnOMYFHG?{FHm|e zTDG?^&#h`IDob5GA%T76!h>El7ESpJ%9Wy+C6{7S{K@ zsq+u%L{LU#pTg%%O-V+y8{oRTi6(?=dDn9CYuVJE**hRIBC5cVQIUs6FnLCled7j$^7OPESv(17#j@bRJ z-ud>^dM10pxYqp_#R`Xt>9ytO=G}bzgeLy;supy&vZWh5cxC(ygi-a%QyOq3g|SRl zhVyd{PP`CqH#g%j`!_Zbjjp;lO30SsS@lv6*RyL>B%6_Wl=@Za2cO`GITQC`!zH{R z-DOJnISz|dv)JP%+|ms`-7H&HRM#33mq{Ks4Rg(7V%#igS=N57zkxe_T246u$8w)W zBcTpROC;~ja?h!@6E(NS;<^VQdzX+9KPY8F3MelRFF49cPK!%qLP%NLnE4>fX@rww z_%N~hRP(`Ot)g2Xyh=MG?npE$J9cc}E&^V*IYp*PjVfg#>6R`ZoY_(2%feUrYB7|V zEx_34#kB819&6e{XhfJ{$2V2;`p);-%>oFXpr;*%X(_htP1h3HNrxptqELO)o@AZJ zTUS{UuMEK}^cqL(^P9050!Fv9ut86fg`4ih7P_BSIbzq`$1hEj>r&|Vz(~cBqso-G z=d+2gO9BECVRoB7WpVa~I%^EUjLP8G2+7>|mOjhQ02Hdm8N`W~8XQE6m%iN}i5^16 z3HKhZOOW+{T-E)1SZc3?H`$JnTt``T445%cUwfG9qBbpOrI37*AZ0J+g3gCxNPlCd zZPvl#H9SoNfC6ztdHZ&julCaz?*Du*9kv9xb(QGy88$WQAC^P4%jeBB zN=LvSvZy=#o%nVkWP8`X=ldEwy4j?cA8$MMIb3X<))MSvQ7hL?2YCXG26*3sfG%-~?a1`9_h>qqg z@EyK;UJjTF=glXGNhc*#WrroA7Tq_D7P~a z?p06Eyjr=^*1!57oeb0Ti}FBn_aremRZy&bg_gBm)wtfx-DQ7l#`zhq*FYwO>0K2( zB=M!(^0{P|-~Q~XI{PoU9h5Wrmc(W&HvYA6v1n4@uY>}hmf{_+l?>b$BF!{hi7u}z z??SOjhh44)3`)GbUTv^t6`mV`0g*Pa2&5UzxkQ+YPocfohNob$PIo^9zhSzr7Fh(^ zhmCZOg1TNE1(9juHqR3@`Q-vJ$gh=udO)p!x+GRamgO^|Ywn135Je0Ma~{6s52AXU zcL(eoa=dFFq0_3n{yxLTrP(s`PDP`LY&B*qF5s#`B{ALLg>4(KX4MKj&0)Ww zljw?kgwvM7c~^XIZOf{dxX#yF9p>JMMVgopPp6TBGJ`Dqtbp!WL0n((~^J|?ezVHr~K9B&*g_l_7rfz$D zC`9&S8X7en%;S2b6r@gt@xZz_B^!sRLVcI!ZS~`>E_8I(4R40tk9=xpMcS9r{0olE zlmxUihwHrc-d!%U%&;XRgI*^CyXMf++7&vh2vXLpCt6QR3XTmX;#*#{{TUoqs0|yh zWWX74RG=N~V813=(Ek_WRWC0J1#TCgv#|(QeK7{6Mw9b<)#&8wJDUQv(nN0|Vj;l% zGNQ(51XDVjuP)ifdxhvxj$P+JOkuhEc@CiRzu?R~1cZ!#69AgKbTTzQGu_JwJ>D|; zp$(ZcO9wXzW&VuO5{cUvUp~%ZSp6AU-lfp2EmwchA|H&d3^0qNh}%YXkRH*9-&hOEN0 zK{i)a9*TCRY-?9ehU) zexa?(*X3KFJo63zoHQ&|?toVuZ&R2%Jm8HbHnQ9zfb~4)fng{8$)^U1EVJ`W&(a+Y zQv3=z&fP0jk|;$GXJuMNHHSg@Bk@zk6QzUnQ8D0xe(nk zYd-d8wGGj6{6#FDFr_lZ?t!cG)PX$DjM7}CQ+&c!McSf6;#g^akk1lDxOrzRZQY+VB zY?k`#Cs#J-(B#(N@E$u|XJ3c0eD~d39OlX)p-}h7Kn$LHpMuP22|1V}Oh>-DW%Lr^ zbIr982aI9Kn-9@{E`0zeY{GySWEGOE!M*W0BbTvO7$HbpOW%T}PJX`p$-W{;;wKE% z7y1ObZ4sySYipWF%{W&-&F3}HlU|n1GYHJTasR=A936>(XKc5V z&snE0zsWb!4u<_3KgF{%&uNY?`Y5fbAkcm>y%Nf&mY~TT< z4VZHXxr&={Dsy?rtg`n%O!1{jPq(f`*R44c>v6aS&c8_+Yq1h1KE_DKQXJ=s_*FUL zjSAdn8lFF#Wy@(2|JjPDUi#)G`$amv?ZMs8sLCkFm~hog9|u*q^bp4-P&A(D<&Ey{ zjxE0d?~=xF=wnWX>jwdtM8n}lz~P#J2=`gKdCRB9h{Lr9m4y1UpnLMv)XLsPf{*PW zCuz*Wg&iobk#C83{g)kuoa8++u zBb~>H`SvgbvNs-eYMU6{AIN^>#5M*oMxmwIj-Iv!Q01c048$X8rfCxH6|a5nc_477 zNwQoch)MR8rB_PMV39F62laLaUu)so`50jY0GvHv$f__CG*wmX3an4JJ7QZj^e!1H zxTC4;Wh;6;0tpZOzlV07-2?(Gl~xdW#FuhQGr=U~pw9u40`6*Nf-E&FwL{l5Px za`o>u&c9Czs9xENqk~BOz{%>)bBOM(OlBFVa*of?W6qZ%J;#iLcl(Z6lWO1$-xg=L zelQvyH5dxe>j;E@PhE4Gnb%fo|31LL8-N8gIZOd2eDE{zRix}@ej47T#teg;AExoD z6o$UtF!Z4eDG~{P7VTUtg#_Bq2S$CF$4#T>PVi^Fq9n{6(9oDNh#8V2*ZFA$4E9z@ z!ttR*R=rn|`6Py2m4!L5RJ-ObnTRXFum|sR3Dn^%x<(p)UN$D%;J4OYYkI>b_Awkn z;Bgm|5iz0I=569*WMo!%Li!Fk(ZnvC!)I4U=9rQAY=z|G@EtGNNQxGfo`#K$$jA#+ zWZQ=@S~O6OsY@2kOb>}0g$!JslT^+<9!6%*k& zUBw|AZS?6?sdWN^Azhw2!QeEkWg6vmoXzXMs_>a|5+Fdl%6T)_r4{r}@v7BXD^7FYe z0=t%Y%R;L{awIHQUpKHztycZatr@DM3c(?_4tg|KX)ILm$-Z4dv=k@XT|X_NqDCkC zD^>r?po7Pc~mIlAIr4?^c8|{2OtTVd7$VP0T>b&u%i%)ZQOEp~v?d53+ zL-YH~oJILVH=)7Tg1B}pi!8KU+<)Rt!bdAij7`416x4Vu9;i$itInQd7-a%<)h$MH z52jPz+MrVAf`=n$E-Q2RAgTtleG$U5o0dED)$NJ%ru^QIs8L4e|-2_8H4yQdsl{(dx*clts 
zvkP9K@Ud!&9$%m@_9B*ZTeMjW@F7$!B3wycuVHtIc+M#ZU>kyL~Zs8^Me!MmDou{wR^=SN|td*?CLRSg4SY~s7BeynX zd_IC-)!(bce!25Sr6PW(;mj*$QVtXYDq};%P+iZ7P5csuWK2ew&RXr`D+cyvujbgL zcIiF<+v83Z2pR)$I^KPSu#0X4;;Ix+3-k>f*Oyu>NWXr@ZdX_nVQkNNU^WyP>N@1@ z7iXzdF63(U;WbwIr2NMDsA#Y=G2+|wUgKkD+BpTe@KI%qB-qYs|J0al-1XG&AcuB= z`MVZj269{az`g#j!JzY}n$z+go@cS=h+a(PN+rXext;aOr)j31!TI0z&|QwUU2kJl zB|DkbvX?CMha3!GfqEbXr9h}hD2CJIo;Jf_uUIr|hym(0nMc)MaBqy}*lQImuSVQm zUz65jn%&)p5cW-8E7k5C&h?ZU&!5rfup@NwZRQ4KMqr`V%e{pqOj%t zoxp<<6H?`Q(nWh+BMryZdy3p?ApOD@4OY;B56vvtob(%}hKr$`clhDfr5w|g9bdNc zZ2^`d-_`?I#-|QRl~K{R>tJoWlO!Lj%Q?%Z`a}($?ickZYKcM)T$A1I%YI%3Ypb=R zTNB&uyw>35Cz5fSKZ)6Yyhw-5{`AlMc Date: Wed, 10 Aug 2022 16:42:25 +0800 Subject: [PATCH 03/12] delay version for valve debug --- main.py | 22 ++++++++++++++++++---- 1 file changed, 18 insertions(+), 4 deletions(-) diff --git a/main.py b/main.py index c039491..eff38b1 100755 --- a/main.py +++ b/main.py @@ -11,7 +11,7 @@ from models import RgbDetector, SpecDetector import logging -def main(only_spec=False, only_color=False, if_merge=False): +def main(only_spec=False, only_color=False, if_merge=False, interval_time=None, delay_repeat_time=None): spec_detector = SpecDetector(blk_model_path=Config.blk_model_path, pixel_model_path=Config.pixel_model_path) rgb_detector = RgbDetector(tobacco_model_path=Config.rgb_tobacco_model_path, background_model_path=Config.rgb_background_model_path) @@ -28,6 +28,11 @@ def main(only_spec=False, only_color=False, if_merge=False): if not os.access(rgb_mask_fifo_path, os.F_OK): os.mkfifo(rgb_mask_fifo_path, 0o777) logging.info(f"请注意!正在以调试模式运行程序,输出的信息可能较多。") + if (interval_time is not None) and (delay_repeat_time is not None): + interval_time = float(interval_time) / 1000.0 + delay_repeat_time = int(delay_repeat_time) + logging.warning(f'Delay {interval_time*1000:.2f}ms will be added per {delay_repeat_time} frames') + delay_repeat_time_count = 0 while True: fd_img = os.open(img_fifo_path, os.O_RDONLY) fd_rgb = os.open(rgb_fifo_path, os.O_RDONLY) @@ -59,7 +64,7 @@ def main(only_spec=False, only_color=False, if_merge=False): else: rgb_data_total = rgb_data os.close(fd_rgb) - # 识别 + # 识别 read since = time.time() try: img_data = np.frombuffer(data_total, dtype=np.float32).reshape((Config.nRows, Config.nBands, -1)) \ @@ -70,6 +75,7 @@ def main(only_spec=False, only_color=False, if_merge=False): rgb_data = np.frombuffer(rgb_data_total, dtype=np.uint8).reshape((Config.nRgbRows, Config.nRgbCols, -1)) except Exception as e: logging.error(f'毁灭性错误!收到的rgb数据长度为{len(rgb_data)}无法转化成指定形状 {e}') + # predict if only_spec: # 光谱识别 mask_spec = spec_detector.predict(img_data).astype(np.uint8) @@ -92,7 +98,13 @@ def main(only_spec=False, only_color=False, if_merge=False): masks = [cv2.resize(mask.astype(np.uint8), Config.target_size) for mask in masks] # merge the masks if needed if if_merge: - masks = [masks[0] | mask[1], mask[1]] + masks = [masks[0] | masks[1], masks[1]] + if (interval_time is not None) and (delay_repeat_time is not None): + delay_repeat_time_count += 1 + if delay_repeat_time_count > delay_repeat_time: + logging.warning(f"Delay time {interval_time*1000:.2f}ms after {delay_repeat_time} frames") + delay_repeat_time_count = 0 + time.sleep(interval_time) # 写出 output_fifos = [mask_fifo_path, rgb_mask_fifo_path] for fifo, mask in zip(output_fifos, masks): @@ -112,6 +124,8 @@ if __name__ == '__main__': parser.add_argument('-os', default=False, action='store_true', help='只进行光谱预测 only spec', required=False) parser.add_argument('-m', default=False, action='store_true', 
From 5754c741676a4f60daa09726eeee17a094f21589 Mon Sep 17 00:00:00 2001
From: "li.zhenye"
Date: Thu, 11 Aug 2022 17:37:52 +0800
Subject: [PATCH 04/12] =?UTF-8?q?[ext]=20=E8=A1=A5=E5=85=85=E4=BF=AE?=
 =?UTF-8?q?=E6=94=B9=EF=BC=8C=E5=85=B3=E9=97=AD=E5=96=B7=E9=98=80=E9=99=90?=
 =?UTF-8?q?=E5=88=B6=E5=8A=9F=E8=83=BD?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 main.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/main.py b/main.py
index eff38b1..666c6f2 100755
--- a/main.py
+++ b/main.py
@@ -92,8 +92,8 @@ def main(only_spec=False, only_color=False, if_merge=False, interval_time=None,
             mask_rgb = rgb_detector.predict(rgb_data).astype(np.uint8)
         # 进行多个喷阀的合并
         masks = [utils.valve_expend(mask) for mask in [mask_spec, mask_rgb]]
-        # 进行喷阀同时开启限制
-        masks = [utils.valve_limit(mask, Config.max_open_valve_limit) for mask in masks]
+        # 进行喷阀同时开启限制,在8月11日后收到倪超老师的电话,关闭
+        # masks = [utils.valve_limit(mask, Config.max_open_valve_limit) for mask in masks]
         # control the size of the output masks, 在resize前,图像的宽度是和喷阀对应的
         masks = [cv2.resize(mask.astype(np.uint8), Config.target_size) for mask in masks]
         # merge the masks if needed

From 6bdc464ca9ea5af16387bfc64e07092ead84ab4b Mon Sep 17 00:00:00 2001
From: "li.zhenye"
Date: Thu, 11 Aug 2022 22:29:33 +0800
Subject: [PATCH 05/12] =?UTF-8?q?[ext]=20=E6=B7=BB=E5=8A=A0=E4=BA=86?=
 =?UTF-8?q?=E4=BF=A1=E6=81=AF=E8=AF=BB=E5=8F=96=E4=B8=8E=E6=89=93=E5=8D=B0?=
 =?UTF-8?q?=E5=8A=9F=E8=83=BD?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 config.py     | 4 ++--
 valve_test.py | 9 +++++++++
 2 files changed, 11 insertions(+), 2 deletions(-)

diff --git a/config.py b/config.py
index e03fadc..18c4450 100644
--- a/config.py
+++ b/config.py
@@ -28,8 +28,8 @@ class Config:
     rgb_tobacco_model_path = r"models/tobacco_dt_2022-08-05_10-38.model"
     rgb_background_model_path = r"models/background_dt_2022-08-09_16-08.model"
     threshold_low, threshold_high = 10, 230
-    threshold_s = 190
-    rgb_size_threshold = 4
+    threshold_s = 190  # 饱和度的最高允许值
+    rgb_size_threshold = 4  # rgb的尺寸限制

     # mask parameter
     target_size = (1024, 1024)  # (Width, Height) of mask

diff --git a/valve_test.py b/valve_test.py
index 86d05c1..a308ccb 100755
--- a/valve_test.py
+++ b/valve_test.py
@@ -35,7 +35,16 @@ m. 模式切换:测下一个喷阀还是重发?
         print("我在等连接...")
         self.c, addr = self.s.accept()  # 建立客户端连接
         print('和它的链接建立成功了:', addr)
+        self.c.settimeout(0.1)
         while True:
+            data = ''
+            try:
+                data = self.c.recv(1024)
+            except Exception as e:
+                print(f"===================================================================")
+            if len(data) > 0:
+                print("receive data!!!")
+                print(data)
             value = input(self.reminder)
             if value == 'q':
                 print("好的,我退出啦")
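The valve_test.py change above turns the blocking TCP read into a 100 ms poll: recv() now times out instead of hanging, so the interactive prompt keeps running while anything the valve controller sends back gets printed. A standalone sketch of that pattern, with a hypothetical port number (the real script's socket setup sits outside this hunk):

import socket

server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind(("0.0.0.0", 6666))   # port is illustrative
server.listen(1)
conn, addr = server.accept()
conn.settimeout(0.1)             # 100 ms poll, as in the patch

while True:
    data = b''
    try:
        data = conn.recv(1024)   # returns quickly when nothing is queued
    except socket.timeout:
        pass                     # nothing arrived within the timeout window
    if len(data) > 0:
        print("receive data!!!", data)
    cmd = input("command ('q' to quit): ")
    if cmd == 'q':
        break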
print("我在等连接...") self.c, addr = self.s.accept() # 建立客户端连接 print('和它的链接建立成功了:', addr) + self.c.settimeout(0.1) while True: + data = '' + try: + data = self.c.recv(1024) + except Exception as e: + print(f"===================================================================") + if len(data) > 0: + print("receive data!!!") + print(data) value = input(self.reminder) if value == 'q': print("好的,我退出啦") From d01c689c7fafa131303207b2a40a8dba069a5e13 Mon Sep 17 00:00:00 2001 From: "li.zhenye" Date: Sun, 21 Aug 2022 02:42:34 +0800 Subject: [PATCH 06/12] =?UTF-8?q?[ext]=20=E6=B7=BB=E5=8A=A0yolov5=E5=8A=9F?= =?UTF-8?q?=E8=83=BD=E5=AE=8C=E6=AF=95?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .gitignore | 2 +- config.py | 10 +- detector.py | 171 +++++ main.py | 151 +++-- main_test.py | 3 +- models.py => models/__init__.py | 10 +- models/common.py | 677 +++++++++++++++++++ models/experimental.py | 120 ++++ models/tf.py | 464 +++++++++++++ models/yolo.py | 329 ++++++++++ utils.py => utils/__init__.py | 0 utils/activations.py | 101 +++ utils/augmentations.py | 277 ++++++++ utils/autoanchor.py | 165 +++++ utils/autobatch.py | 57 ++ utils/benchmarks.py | 92 +++ utils/callbacks.py | 78 +++ utils/dataloaders.py | 1092 +++++++++++++++++++++++++++++++ utils/datasets.py | 1037 +++++++++++++++++++++++++++++ utils/downloads.py | 153 +++++ utils/general.py | 880 +++++++++++++++++++++++++ utils/loss.py | 222 +++++++ utils/metrics.py | 342 ++++++++++ utils/plots.py | 471 +++++++++++++ utils/torch_utils.py | 329 ++++++++++ 25 files changed, 7161 insertions(+), 72 deletions(-) create mode 100644 detector.py rename models.py => models/__init__.py (97%) create mode 100644 models/common.py create mode 100644 models/experimental.py create mode 100644 models/tf.py create mode 100644 models/yolo.py rename utils.py => utils/__init__.py (100%) create mode 100644 utils/activations.py create mode 100644 utils/augmentations.py create mode 100644 utils/autoanchor.py create mode 100644 utils/autobatch.py create mode 100644 utils/benchmarks.py create mode 100644 utils/callbacks.py create mode 100644 utils/dataloaders.py create mode 100755 utils/datasets.py create mode 100644 utils/downloads.py create mode 100755 utils/general.py create mode 100644 utils/loss.py create mode 100644 utils/metrics.py create mode 100644 utils/plots.py create mode 100644 utils/torch_utils.py diff --git a/.gitignore b/.gitignore index 1200a96..80325ec 100644 --- a/.gitignore +++ b/.gitignore @@ -3,7 +3,7 @@ # Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 data/* -models/* +weights/* .idea/* # User-specific stuff diff --git a/config.py b/config.py index 18c4450..2d3d7ca 100644 --- a/config.py +++ b/config.py @@ -20,16 +20,18 @@ class Config: # 光谱模型参数 blk_size = 4 # 必须是2的倍数,不然会出错 - pixel_model_path = r"./models/pixel_2022-08-02_15-22.model" - blk_model_path = r"./models/rf_4x4_c22_20_sen8_9.model" + pixel_model_path = r"./weights/pixel_2022-08-02_15-22.model" + blk_model_path = r"./weights/rf_4x4_c22_20_sen8_9.model" spec_size_threshold = 3 # rgb模型参数 - rgb_tobacco_model_path = r"models/tobacco_dt_2022-08-05_10-38.model" - rgb_background_model_path = r"models/background_dt_2022-08-09_16-08.model" + rgb_tobacco_model_path = r"weights/tobacco_dt_2022-08-05_10-38.model" + rgb_background_model_path = r"weights/background_dt_2022-08-09_16-08.model" threshold_low, threshold_high = 10, 230 threshold_s = 190 # 饱和度的最高允许值 rgb_size_threshold = 4 # rgb的尺寸限制 + ai_path = 'weights/best.pt' + 
diff --git a/detector.py b/detector.py
new file mode 100644
index 0000000..23f7336
--- /dev/null
+++ b/detector.py
@@ -0,0 +1,171 @@
+import numpy as np
+import torch
+import os
+import cv2
+import json
+
+from models.experimental import attempt_load
+from utils.datasets import letterbox
+from utils.general import check_img_size, non_max_suppression, scale_coords
+from utils.torch_utils import select_device
+
+
+root_dir = os.path.split(__file__)[0]
+
+default_config = {'model_name': 'best.pt',
+                  'model_path': os.path.join(root_dir, 'weights/'),
+                  'conf_thres': 0.5}
+
+cmd_param_dict = {'RL': ['conf_thres', lambda x: (100.0 - int(x)) / 100.0],
+                  'MP': ['model_path', lambda x: str(x)],
+                  'MN': ['model_name', lambda x: str(x)]}
+
+
+class SugarDetect(object):
+    def __init__(self, model_path):
+        self.device = select_device(device='0' if torch.cuda.is_available() else 'cpu')
+        self.half = self.device.type != "cpu"
+        self.model = attempt_load(weights=model_path,
+                                  map_location=self.device)
+        self.stride = int(self.model.stride.max())
+        self.imgsz = check_img_size(640, s=self.stride)  # check img_size
+        self.names = self.model.module.names if hasattr(self.model, 'module') else self.model.names  # get class names
+        if self.half:
+            self.model.half()  # to FP16
+        # run once if on GPU
+        if self.device.type != 'cpu':
+            self.model(torch.zeros(1, 3, self.imgsz, self.imgsz).to(self.device).type_as(next(self.model.parameters())))
+
+    @torch.no_grad()
+    def detect(self, img, conf_thres=0.5, return_mask=True):
+        half, device, model, stride = self.half, self.device, self.model, self.stride
+        iou_thres, classes, agnostic_nms, max_det = 0.45, None, True, 1000
+        names, imgsz = self.names, self.imgsz
+
+        im0_shape = img.shape
+
+        # Padded resize
+        img = letterbox(img, (imgsz, imgsz), stride=stride)[0]
+
+        # Convert
+        img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x416x416
+        img = np.ascontiguousarray(img)
+
+        # Preprocess
+        img = torch.from_numpy(img).to(device)
+        img = img.half() if half else img.float()  # uint8 to fp16/32
+        img /= 255.0  # 0 - 255 to 0.0 - 1.0
+        if img.ndimension() == 3:
+            img = img.unsqueeze(0)
+
+        # Inference
+        pred = model(img, augment=False)[0]
+
+        # Apply NMS
+        pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det)
+
+        # Process detections
+        s, det, boxes = "", pred[0], []
+        s += '%gx%g ' % img.shape[2:]  # print string
+        gn = torch.tensor(im0_shape)[[1, 0, 1, 0]]  # normalization gain whwh
+        if return_mask:
+            mask = np.zeros((im0_shape[0], im0_shape[1]), dtype=np.uint8)
+        if len(det):
+            # Rescale boxes from img_size to im0 size
+            det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0_shape).round()
+            # Print results
+            # for c in det[:, -1].unique():
+            #     n = (det[:, -1] == c).sum()  # detections per class
+            #     s += f"{n} {names[int(c)]}{'s' * (n > 1)}, "  # add to string
+            # Write results
+            for *xyxy, conf, cls in reversed(det):
+                if return_mask:
+                    c1, c2 = (int(xyxy[0]), int(xyxy[1])), (int(xyxy[2]), int(xyxy[3]))
+                    cv2.rectangle(mask, c1, c2, 1, thickness=-1)
+                else:
+                    for i in range(4):
+                        boxes.append((int(xyxy[i])))
+        if return_mask:
+            return mask
+        else:
+            return boxes
+
+
+def read_config(config_file):
+    config = default_config
+    # get config from file
+    if not os.path.exists(config_file):
+        with open(config_file, 'w') as f:
+            json.dump(config, f)
+    else:
+        with open(config_file, 'r') as f:
+            config = json.load(f)
+    return config
+
+
write_config(config_file, config=None): + if config is None: + config = default_config + dir_path, _ = os.path.split(config_file) + if not os.path.exists(dir_path): + print(f"Path '{dir_path}' not exist, try to create.") + os.makedirs(dir_path) + with open(config_file, 'w') as f: + json.dump(config, f) + with open(config['model_path']+"current_model.txt", "w") as f: + f.write(config["model_name"]) + + +def main(height, width, channel): + img_pipe_path = "/tmp/img_fifo.pipe" + result_pipe_path = "/tmp/result_fifo.pipe" + + config_file = os.path.join(root_dir, 'config.json') + config = read_config(config_file) + detect = SugarDetect(model_path=os.path.join(config['model_path'], config['model_name'])) + # 第一次检测太慢,先预测一张 + test_img = np.zeros((height, width, channel), dtype=np.uint8) + detect.detect(test_img) + print("load success") + + if not os.access(img_pipe_path, os.F_OK): # 判断管道是否存在,不存在创建 + os.mkfifo(img_pipe_path) + if not os.access(result_pipe_path, os.F_OK): + os.mkfifo(result_pipe_path) + fd_img = os.open(img_pipe_path, os.O_RDONLY) # 打开管道 + print("Open pipe successful.") + while True: + data = os.read(fd_img, height * width * channel) + if len(data) == 0: + continue + elif len(data) < 128: # 切换分选糖果类型 + cmd = data.decode() + print("to python: ", cmd) + for cmd_pattern, para_f in cmd_param_dict.items(): + if cmd.startswith(cmd_pattern): + para, f = para_f + print(f"modify para {para}") + try: + cmd_value = cmd.split(':')[-1] # split to get command value with ':' + config[para] = f(cmd_value) # convert value with function defined on the top + except Exception as e: + print(f"Convert command Error with '{e}'.") + write_config(config_file, config) + detect = SugarDetect(model_path=config['model_path']+config['model_name']) + else: # 检测缺陷糖果 + img = np.frombuffer(data, dtype=np.uint8).reshape((height, width, channel)) + points = detect.detect(img, config['conf_thres']) + + points_bytes = b'' + if len(points) == 0: + for i in range(4): + points.append(0) + for i in points: + points_bytes = points_bytes + i.to_bytes(2, 'big') # 转为字节流 + fd_result = os.open(result_pipe_path, os.O_WRONLY) + os.write(fd_result, points_bytes) # 返回结果 + os.close(fd_result) + + +if __name__ == '__main__': + main(height=584, width=2376, channel=3) diff --git a/main.py b/main.py index 666c6f2..741d4cf 100755 --- a/main.py +++ b/main.py @@ -5,13 +5,14 @@ import cv2 import time import numpy as np -import utils +import utils as utils_customized from config import Config from models import RgbDetector, SpecDetector import logging -def main(only_spec=False, only_color=False, if_merge=False, interval_time=None, delay_repeat_time=None): +def main(only_spec=False, only_color=False, if_merge=False, interval_time=None, delay_repeat_time=None, + single_spec=False, single_color=False): spec_detector = SpecDetector(blk_model_path=Config.blk_model_path, pixel_model_path=Config.pixel_model_path) rgb_detector = RgbDetector(tobacco_model_path=Config.rgb_tobacco_model_path, background_model_path=Config.rgb_background_model_path) @@ -19,14 +20,16 @@ def main(only_spec=False, only_color=False, if_merge=False, interval_time=None, rgb_detector.predict(np.ones((Config.nRgbRows, Config.nRgbCols, Config.nRgbBands), dtype=np.uint8)*40) total_len = Config.nRows * Config.nCols * Config.nBands * 4 # float型变量, 4个字节 total_rgb = Config.nRgbRows * Config.nRgbCols * Config.nRgbBands * 1 # int型变量 - if not os.access(img_fifo_path, os.F_OK): - os.mkfifo(img_fifo_path, 0o777) - if not os.access(rgb_fifo_path, os.F_OK): - os.mkfifo(rgb_fifo_path, 0o777) - 
if not os.access(mask_fifo_path, os.F_OK): - os.mkfifo(mask_fifo_path, 0o777) - if not os.access(rgb_mask_fifo_path, os.F_OK): - os.mkfifo(rgb_mask_fifo_path, 0o777) + if not single_color: + if not os.access(img_fifo_path, os.F_OK): + os.mkfifo(img_fifo_path, 0o777) + if not os.access(mask_fifo_path, os.F_OK): + os.mkfifo(mask_fifo_path, 0o777) + if not single_spec: + if not os.access(rgb_fifo_path, os.F_OK): + os.mkfifo(rgb_fifo_path, 0o777) + if not os.access(rgb_mask_fifo_path, os.F_OK): + os.mkfifo(rgb_mask_fifo_path, 0o777) logging.info(f"请注意!正在以调试模式运行程序,输出的信息可能较多。") if (interval_time is not None) and (delay_repeat_time is not None): interval_time = float(interval_time) / 1000.0 @@ -34,70 +37,80 @@ def main(only_spec=False, only_color=False, if_merge=False, interval_time=None, logging.warning(f'Delay {interval_time*1000:.2f}ms will be added per {delay_repeat_time} frames') delay_repeat_time_count = 0 while True: - fd_img = os.open(img_fifo_path, os.O_RDONLY) - fd_rgb = os.open(rgb_fifo_path, os.O_RDONLY) + if not single_color: + fd_img = os.open(img_fifo_path, os.O_RDONLY) + # spec data read + data = os.read(fd_img, total_len) + if len(data) < 3: + try: + threshold = int(float(data)) + Config.spec_size_threshold = threshold + logging.info(f'[INFO] Get spec threshold: {threshold}') + except Exception as e: + logging.error( + f'毁灭性错误:收到长度小于3却无法转化为整数spec_size_threshold的网络报文,报文内容为 {data},' + f' 错误为 {e}.') + else: + data_total = data + os.close(fd_img) + try: + img_data = np.frombuffer(data_total, dtype=np.float32).reshape((Config.nRows, Config.nBands, -1)) \ + .transpose(0, 2, 1) + except Exception as e: + logging.error(f'毁灭性错误!收到的光谱数据长度为{len(data_total)}无法转化成指定的形状 {e}') - # spec data read - data = os.read(fd_img, total_len) - if len(data) < 3: + if not single_spec: + fd_rgb = os.open(rgb_fifo_path, os.O_RDONLY) + # rgb data read + rgb_data = os.read(fd_rgb, total_rgb) + if len(rgb_data) < 3: + try: + rgb_threshold = int(float(rgb_data)) + Config.rgb_size_threshold = rgb_threshold + logging.info(f'Get rgb threshold: {rgb_threshold}') + except Exception as e: + logging.error(f'毁灭性错误:收到长度小于3却无法转化为整数spec_size_threshold的网络报文,报文内容为 {total_rgb},' + f' 错误为 {e}.') + continue + else: + rgb_data_total = rgb_data + os.close(fd_rgb) try: - threshold = int(float(data)) - Config.spec_size_threshold = threshold - logging.info(f'[INFO] Get spec threshold: {threshold}') + rgb_data = np.frombuffer(rgb_data_total, dtype=np.uint8).reshape((Config.nRgbRows, Config.nRgbCols, -1)) except Exception as e: - logging.error(f'毁灭性错误:收到长度小于3却无法转化为整数spec_size_threshold的网络报文,报文内容为 {data},' - f' 错误为 {e}.') - else: - data_total = data - os.close(fd_img) - # rgb data read - rgb_data = os.read(fd_rgb, total_rgb) - if len(rgb_data) < 3: - try: - rgb_threshold = int(float(rgb_data)) - Config.rgb_size_threshold = rgb_threshold - logging.info(f'Get rgb threshold: {rgb_threshold}') - except Exception as e: - logging.error(f'毁灭性错误:收到长度小于3却无法转化为整数spec_size_threshold的网络报文,报文内容为 {total_rgb},' - f' 错误为 {e}.') - continue - else: - rgb_data_total = rgb_data - os.close(fd_rgb) + logging.error(f'毁灭性错误!收到的rgb数据长度为{len(rgb_data)}无法转化成指定形状 {e}') + # 识别 read since = time.time() - try: - img_data = np.frombuffer(data_total, dtype=np.float32).reshape((Config.nRows, Config.nBands, -1)) \ - .transpose(0, 2, 1) - except Exception as e: - logging.error(f'毁灭性错误!收到的光谱数据长度为{len(data_total)}无法转化成指定的形状 {e}') - try: - rgb_data = np.frombuffer(rgb_data_total, dtype=np.uint8).reshape((Config.nRgbRows, Config.nRgbCols, -1)) - except Exception as e: - 
logging.error(f'毁灭性错误!收到的rgb数据长度为{len(rgb_data)}无法转化成指定形状 {e}') # predict - if only_spec: - # 光谱识别 - mask_spec = spec_detector.predict(img_data).astype(np.uint8) - _ = rgb_detector.predict(rgb_data) - mask_rgb = np.zeros_like(mask_spec, dtype=np.uint8) - elif only_color: - # rgb识别 - _ = spec_detector.predict(img_data) - mask_rgb = rgb_detector.predict(rgb_data).astype(np.uint8) - # mask_spec = mask_rgb - mask_spec = np.zeros_like(mask_rgb, dtype=np.uint8) + if single_spec or single_color: + if single_spec: + mask_spec = spec_detector.predict(img_data).astype(np.uint8) + masks = [mask_spec, ] + else: + mask_rgb = rgb_detector.predict(rgb_data).astype(np.uint8) + masks = [mask_rgb, ] else: - mask_spec = spec_detector.predict(img_data).astype(np.uint8) - mask_rgb = rgb_detector.predict(rgb_data).astype(np.uint8) + if only_spec: + # 光谱识别 + mask_spec = spec_detector.predict(img_data).astype(np.uint8) + mask_rgb = np.zeros_like(mask_spec, dtype=np.uint8) + elif only_color: + # rgb识别 + mask_rgb = rgb_detector.predict(rgb_data).astype(np.uint8) + mask_spec = np.zeros_like(mask_rgb, dtype=np.uint8) + else: + mask_spec = spec_detector.predict(img_data).astype(np.uint8) + mask_rgb = rgb_detector.predict(rgb_data).astype(np.uint8) + masks = [mask_spec, mask_rgb] # 进行多个喷阀的合并 - masks = [utils.valve_expend(mask) for mask in [mask_spec, mask_rgb]] + masks = [utils_customized.valve_expend(mask) for mask in masks] # 进行喷阀同时开启限制,在8月11日后收到倪超老师的电话,关闭 - # masks = [utils.valve_limit(mask, Config.max_open_valve_limit) for mask in masks] + # masks = [utils_customized.valve_limit(mask, Config.max_open_valve_limit) for mask in masks] # control the size of the output masks, 在resize前,图像的宽度是和喷阀对应的 masks = [cv2.resize(mask.astype(np.uint8), Config.target_size) for mask in masks] # merge the masks if needed - if if_merge: + if if_merge and (len(masks) > 1): masks = [masks[0] | masks[1], masks[1]] if (interval_time is not None) and (delay_repeat_time is not None): delay_repeat_time_count += 1 @@ -106,14 +119,20 @@ def main(only_spec=False, only_color=False, if_merge=False, interval_time=None, delay_repeat_time_count = 0 time.sleep(interval_time) # 写出 - output_fifos = [mask_fifo_path, rgb_mask_fifo_path] + if single_spec: + output_fifos = [mask_fifo_path, ] + elif single_color: + output_fifos = [rgb_fifo_path, ] + else: + output_fifos = [mask_fifo_path, rgb_mask_fifo_path] for fifo, mask in zip(output_fifos, masks): fd_mask = os.open(fifo, os.O_WRONLY) os.write(fd_mask, mask.tobytes()) os.close(fd_mask) time_spent = (time.time() - since) * 1000 - logging.info(f'Total time is: {time_spent:.2f} ms') - if time_spent > 200: + predict_by = 'spec' if single_spec else 'rgb' if single_color else 'spec+rgb' + logging.info(f'Total time is: {time_spent:.2f} ms, predicted by {predict_by}') + if time_spent > Config.max_time_spent: logging.warning(f'警告预测超时,预测耗时超过了200ms,The prediction time is {time_spent:.2f} ms.') @@ -122,6 +141,8 @@ if __name__ == '__main__': parser = argparse.ArgumentParser(description='主程序') parser.add_argument('-oc', default=False, action='store_true', help='只进行RGB彩色预测 only rgb', required=False) parser.add_argument('-os', default=False, action='store_true', help='只进行光谱预测 only spec', required=False) + parser.add_argument('-sc', default=False, action='store_true', help='只进行RGB预测且只返回一个mask', required=False) + parser.add_argument('-ss', default=False, action='store_true', help='只进行光谱预测且只返回一个mask', required=False) parser.add_argument('-m', default=False, action='store_true', help='if merge the two masks', required=False) 
parser.add_argument('-d', default=False, action='store_true', help='是否使用DEBUG模式', required=False) parser.add_argument('-dt', default=None, help='delay time', required=False) diff --git a/main_test.py b/main_test.py index 8db39d1..3e60d98 100644 --- a/main_test.py +++ b/main_test.py @@ -30,7 +30,8 @@ class TestMain: self._spec_detector = SpecDetector(blk_model_path=Config.blk_model_path, pixel_model_path=Config.pixel_model_path) self._rgb_detector = RgbDetector(tobacco_model_path=Config.rgb_tobacco_model_path, - background_model_path=Config.rgb_background_model_path) + background_model_path=Config.rgb_background_model_path, + ai_path=Config.ai_path) def pony_run(self, test_path, test_spectra=False, test_rgb=False, convert_dir=None, get_delta=False, silent=False): diff --git a/models.py b/models/__init__.py similarity index 97% rename from models.py rename to models/__init__.py index da48a54..1fa6a10 100755 --- a/models.py +++ b/models/__init__.py @@ -17,6 +17,7 @@ from sklearn.metrics import classification_report from sklearn.model_selection import train_test_split from config import Config +from detector import SugarDetect from utils import lab_scatter, read_labeled_img, size_threshold @@ -306,10 +307,13 @@ class BlkModel: class RgbDetector(Detector): - def __init__(self, tobacco_model_path, background_model_path): + def __init__(self, tobacco_model_path, background_model_path, ai_path): self.background_detector = None self.tobacco_detector = None self.load(tobacco_model_path, background_model_path) + self.ai_path = ai_path + if ai_path is not None: + self.ai_detector = SugarDetect(model_path=ai_path) def predict(self, rgb_data): rgb_data = self.tobacco_detector.pretreatment(rgb_data) # resize to the required size @@ -320,6 +324,10 @@ class RgbDetector(Detector): non_tobacco_or_background = 1 - (background | tobacco_d) # 既非烟梗也非背景的区域 rgb_predict_result = high_s | non_tobacco_or_background # 高饱和度区域或者是双非区域都是杂质 mask_rgb = size_threshold(rgb_predict_result, Config.blk_size, Config.rgb_size_threshold) # 杂质大小限制,超过大小的才打 + if self.ai_path is not None: + mask_ai = self.ai_detector.detect(rgb_data, Config.ai_conf_threshold) + mask_ai = cv2.resize(mask_ai, dsize=(mask_rgb.shape[1], mask_rgb.shape[0])) + mask_rgb = mask_ai | mask_rgb return mask_rgb def load(self, tobacco_model_path, background_model_path): diff --git a/models/common.py b/models/common.py new file mode 100644 index 0000000..0dae024 --- /dev/null +++ b/models/common.py @@ -0,0 +1,677 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Common modules +""" + +import json +import math +import platform +import warnings +from collections import OrderedDict, namedtuple +from copy import copy +from pathlib import Path + +import cv2 +import numpy as np +import pandas as pd +import requests +import torch +import torch.nn as nn +import yaml +from PIL import Image +from torch.cuda import amp + +from utils.datasets import exif_transpose, letterbox +from utils.general import (LOGGER, check_requirements, check_suffix, check_version, colorstr, increment_path, + make_divisible, non_max_suppression, scale_coords, xywh2xyxy, xyxy2xywh) +from utils.plots import Annotator, colors, save_one_box +from utils.torch_utils import copy_attr, time_sync + + +def autopad(k, p=None): # kernel, padding + # Pad to 'same' + if p is None: + p = k // 2 if isinstance(k, int) else [x // 2 for x in k] # auto-pad + return p + + +class Conv(nn.Module): + # Standard convolution + def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, 
padding, groups + super().__init__() + self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False) + self.bn = nn.BatchNorm2d(c2) + self.act = nn.SiLU() if act is True else (act if isinstance(act, nn.Module) else nn.Identity()) + + def forward(self, x): + return self.act(self.bn(self.conv(x))) + + def forward_fuse(self, x): + return self.act(self.conv(x)) + + +class DWConv(Conv): + # Depth-wise convolution class + def __init__(self, c1, c2, k=1, s=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups + super().__init__(c1, c2, k, s, g=math.gcd(c1, c2), act=act) + + +class TransformerLayer(nn.Module): + # Transformer layer https://arxiv.org/abs/2010.11929 (LayerNorm layers removed for better performance) + def __init__(self, c, num_heads): + super().__init__() + self.q = nn.Linear(c, c, bias=False) + self.k = nn.Linear(c, c, bias=False) + self.v = nn.Linear(c, c, bias=False) + self.ma = nn.MultiheadAttention(embed_dim=c, num_heads=num_heads) + self.fc1 = nn.Linear(c, c, bias=False) + self.fc2 = nn.Linear(c, c, bias=False) + + def forward(self, x): + x = self.ma(self.q(x), self.k(x), self.v(x))[0] + x + x = self.fc2(self.fc1(x)) + x + return x + + +class TransformerBlock(nn.Module): + # Vision Transformer https://arxiv.org/abs/2010.11929 + def __init__(self, c1, c2, num_heads, num_layers): + super().__init__() + self.conv = None + if c1 != c2: + self.conv = Conv(c1, c2) + self.linear = nn.Linear(c2, c2) # learnable position embedding + self.tr = nn.Sequential(*(TransformerLayer(c2, num_heads) for _ in range(num_layers))) + self.c2 = c2 + + def forward(self, x): + if self.conv is not None: + x = self.conv(x) + b, _, w, h = x.shape + p = x.flatten(2).permute(2, 0, 1) + return self.tr(p + self.linear(p)).permute(1, 2, 0).reshape(b, self.c2, w, h) + + +class Bottleneck(nn.Module): + # Standard bottleneck + def __init__(self, c1, c2, shortcut=True, g=1, e=0.5): # ch_in, ch_out, shortcut, groups, expansion + super().__init__() + c_ = int(c2 * e) # hidden channels + self.cv1 = Conv(c1, c_, 1, 1) + self.cv2 = Conv(c_, c2, 3, 1, g=g) + self.add = shortcut and c1 == c2 + + def forward(self, x): + return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x)) + + +class BottleneckCSP(nn.Module): + # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion + super().__init__() + c_ = int(c2 * e) # hidden channels + self.cv1 = Conv(c1, c_, 1, 1) + self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False) + self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False) + self.cv4 = Conv(2 * c_, c2, 1, 1) + self.bn = nn.BatchNorm2d(2 * c_) # applied to cat(cv2, cv3) + self.act = nn.SiLU() + self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n))) + + def forward(self, x): + y1 = self.cv3(self.m(self.cv1(x))) + y2 = self.cv2(x) + return self.cv4(self.act(self.bn(torch.cat((y1, y2), dim=1)))) + + +class C3(nn.Module): + # CSP Bottleneck with 3 convolutions + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion + super().__init__() + c_ = int(c2 * e) # hidden channels + self.cv1 = Conv(c1, c_, 1, 1) + self.cv2 = Conv(c1, c_, 1, 1) + self.cv3 = Conv(2 * c_, c2, 1) # act=FReLU(c2) + self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n))) + # self.m = nn.Sequential(*[CrossConv(c_, c_, 3, 1, g, 1.0, shortcut) for _ in range(n)]) + + def forward(self, x): + return 
self.cv3(torch.cat((self.m(self.cv1(x)), self.cv2(x)), dim=1)) + + +class C3TR(C3): + # C3 module with TransformerBlock() + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): + super().__init__(c1, c2, n, shortcut, g, e) + c_ = int(c2 * e) + self.m = TransformerBlock(c_, c_, 4, n) + + +class C3SPP(C3): + # C3 module with SPP() + def __init__(self, c1, c2, k=(5, 9, 13), n=1, shortcut=True, g=1, e=0.5): + super().__init__(c1, c2, n, shortcut, g, e) + c_ = int(c2 * e) + self.m = SPP(c_, c_, k) + + +class C3Ghost(C3): + # C3 module with GhostBottleneck() + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): + super().__init__(c1, c2, n, shortcut, g, e) + c_ = int(c2 * e) # hidden channels + self.m = nn.Sequential(*(GhostBottleneck(c_, c_) for _ in range(n))) + + +class SPP(nn.Module): + # Spatial Pyramid Pooling (SPP) layer https://arxiv.org/abs/1406.4729 + def __init__(self, c1, c2, k=(5, 9, 13)): + super().__init__() + c_ = c1 // 2 # hidden channels + self.cv1 = Conv(c1, c_, 1, 1) + self.cv2 = Conv(c_ * (len(k) + 1), c2, 1, 1) + self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k]) + + def forward(self, x): + x = self.cv1(x) + with warnings.catch_warnings(): + warnings.simplefilter('ignore') # suppress torch 1.9.0 max_pool2d() warning + return self.cv2(torch.cat([x] + [m(x) for m in self.m], 1)) + + +class SPPF(nn.Module): + # Spatial Pyramid Pooling - Fast (SPPF) layer for YOLOv5 by Glenn Jocher + def __init__(self, c1, c2, k=5): # equivalent to SPP(k=(5, 9, 13)) + super().__init__() + c_ = c1 // 2 # hidden channels + self.cv1 = Conv(c1, c_, 1, 1) + self.cv2 = Conv(c_ * 4, c2, 1, 1) + self.m = nn.MaxPool2d(kernel_size=k, stride=1, padding=k // 2) + + def forward(self, x): + x = self.cv1(x) + with warnings.catch_warnings(): + warnings.simplefilter('ignore') # suppress torch 1.9.0 max_pool2d() warning + y1 = self.m(x) + y2 = self.m(y1) + return self.cv2(torch.cat([x, y1, y2, self.m(y2)], 1)) + + +class Focus(nn.Module): + # Focus wh information into c-space + def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups + super().__init__() + self.conv = Conv(c1 * 4, c2, k, s, p, g, act) + # self.contract = Contract(gain=2) + + def forward(self, x): # x(b,c,w,h) -> y(b,4c,w/2,h/2) + return self.conv(torch.cat([x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]], 1)) + # return self.conv(self.contract(x)) + + +class GhostConv(nn.Module): + # Ghost Convolution https://github.com/huawei-noah/ghostnet + def __init__(self, c1, c2, k=1, s=1, g=1, act=True): # ch_in, ch_out, kernel, stride, groups + super().__init__() + c_ = c2 // 2 # hidden channels + self.cv1 = Conv(c1, c_, k, s, None, g, act) + self.cv2 = Conv(c_, c_, 5, 1, None, c_, act) + + def forward(self, x): + y = self.cv1(x) + return torch.cat([y, self.cv2(y)], 1) + + +class GhostBottleneck(nn.Module): + # Ghost Bottleneck https://github.com/huawei-noah/ghostnet + def __init__(self, c1, c2, k=3, s=1): # ch_in, ch_out, kernel, stride + super().__init__() + c_ = c2 // 2 + self.conv = nn.Sequential(GhostConv(c1, c_, 1, 1), # pw + DWConv(c_, c_, k, s, act=False) if s == 2 else nn.Identity(), # dw + GhostConv(c_, c2, 1, 1, act=False)) # pw-linear + self.shortcut = nn.Sequential(DWConv(c1, c1, k, s, act=False), + Conv(c1, c2, 1, 1, act=False)) if s == 2 else nn.Identity() + + def forward(self, x): + return self.conv(x) + self.shortcut(x) + + +class Contract(nn.Module): + # Contract width-height into channels, i.e. 
x(1,64,80,80) to x(1,256,40,40) + def __init__(self, gain=2): + super().__init__() + self.gain = gain + + def forward(self, x): + b, c, h, w = x.size() # assert (h / s == 0) and (W / s == 0), 'Indivisible gain' + s = self.gain + x = x.view(b, c, h // s, s, w // s, s) # x(1,64,40,2,40,2) + x = x.permute(0, 3, 5, 1, 2, 4).contiguous() # x(1,2,2,64,40,40) + return x.view(b, c * s * s, h // s, w // s) # x(1,256,40,40) + + +class Expand(nn.Module): + # Expand channels into width-height, i.e. x(1,64,80,80) to x(1,16,160,160) + def __init__(self, gain=2): + super().__init__() + self.gain = gain + + def forward(self, x): + b, c, h, w = x.size() # assert C / s ** 2 == 0, 'Indivisible gain' + s = self.gain + x = x.view(b, s, s, c // s ** 2, h, w) # x(1,2,2,16,80,80) + x = x.permute(0, 3, 4, 1, 5, 2).contiguous() # x(1,16,80,2,80,2) + return x.view(b, c // s ** 2, h * s, w * s) # x(1,16,160,160) + + +class Concat(nn.Module): + # Concatenate a list of tensors along dimension + def __init__(self, dimension=1): + super().__init__() + self.d = dimension + + def forward(self, x): + return torch.cat(x, self.d) + + +class DetectMultiBackend(nn.Module): + # YOLOv5 MultiBackend class for python inference on various backends + def __init__(self, weights='yolov5s.pt', device=None, dnn=False, data=None): + # Usage: + # PyTorch: weights = *.pt + # TorchScript: *.torchscript + # ONNX Runtime: *.onnx + # ONNX OpenCV DNN: *.onnx with --dnn + # OpenVINO: *.xml + # CoreML: *.mlmodel + # TensorRT: *.engine + # TensorFlow SavedModel: *_saved_model + # TensorFlow GraphDef: *.pb + # TensorFlow Lite: *.tflite + # TensorFlow Edge TPU: *_edgetpu.tflite + from models.experimental import attempt_download, attempt_load # scoped to avoid circular import + + super().__init__() + w = str(weights[0] if isinstance(weights, list) else weights) + pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs = self.model_type(w) # get backend + stride, names = 64, [f'class{i}' for i in range(1000)] # assign defaults + w = attempt_download(w) # download if not local + if data: # data.yaml path (optional) + with open(data, errors='ignore') as f: + names = yaml.safe_load(f)['names'] # class names + + if pt: # PyTorch + model = attempt_load(weights if isinstance(weights, list) else w, map_location=device) + stride = max(int(model.stride.max()), 32) # model stride + names = model.module.names if hasattr(model, 'module') else model.names # get class names + self.model = model # explicitly assign for to(), cpu(), cuda(), half() + elif jit: # TorchScript + LOGGER.info(f'Loading {w} for TorchScript inference...') + extra_files = {'config.txt': ''} # model metadata + model = torch.jit.load(w, _extra_files=extra_files) + if extra_files['config.txt']: + d = json.loads(extra_files['config.txt']) # extra_files dict + stride, names = int(d['stride']), d['names'] + elif dnn: # ONNX OpenCV DNN + LOGGER.info(f'Loading {w} for ONNX OpenCV DNN inference...') + check_requirements(('opencv-python>=4.5.4',)) + net = cv2.dnn.readNetFromONNX(w) + elif onnx: # ONNX Runtime + LOGGER.info(f'Loading {w} for ONNX Runtime inference...') + cuda = torch.cuda.is_available() + check_requirements(('onnx', 'onnxruntime-gpu' if cuda else 'onnxruntime')) + import onnxruntime + providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider'] + session = onnxruntime.InferenceSession(w, providers=providers) + elif xml: # OpenVINO + LOGGER.info(f'Loading {w} for OpenVINO inference...') + check_requirements(('openvino-dev',)) # 
requires openvino-dev: https://pypi.org/project/openvino-dev/ + import openvino.inference_engine as ie + core = ie.IECore() + if not Path(w).is_file(): # if not *.xml + w = next(Path(w).glob('*.xml')) # get *.xml file from *_openvino_model dir + network = core.read_network(model=w, weights=Path(w).with_suffix('.bin')) # *.xml, *.bin paths + executable_network = core.load_network(network, device_name='CPU', num_requests=1) + elif engine: # TensorRT + LOGGER.info(f'Loading {w} for TensorRT inference...') + import tensorrt as trt # https://developer.nvidia.com/nvidia-tensorrt-download + check_version(trt.__version__, '7.0.0', hard=True) # require tensorrt>=7.0.0 + Binding = namedtuple('Binding', ('name', 'dtype', 'shape', 'data', 'ptr')) + logger = trt.Logger(trt.Logger.INFO) + with open(w, 'rb') as f, trt.Runtime(logger) as runtime: + model = runtime.deserialize_cuda_engine(f.read()) + bindings = OrderedDict() + for index in range(model.num_bindings): + name = model.get_binding_name(index) + dtype = trt.nptype(model.get_binding_dtype(index)) + shape = tuple(model.get_binding_shape(index)) + data = torch.from_numpy(np.empty(shape, dtype=np.dtype(dtype))).to(device) + bindings[name] = Binding(name, dtype, shape, data, int(data.data_ptr())) + binding_addrs = OrderedDict((n, d.ptr) for n, d in bindings.items()) + context = model.create_execution_context() + batch_size = bindings['images'].shape[0] + elif coreml: # CoreML + LOGGER.info(f'Loading {w} for CoreML inference...') + import coremltools as ct + model = ct.models.MLModel(w) + else: # TensorFlow (SavedModel, GraphDef, Lite, Edge TPU) + if saved_model: # SavedModel + LOGGER.info(f'Loading {w} for TensorFlow SavedModel inference...') + import tensorflow as tf + keras = False # assume TF1 saved_model + model = tf.keras.models.load_model(w) if keras else tf.saved_model.load(w) + elif pb: # GraphDef https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt + LOGGER.info(f'Loading {w} for TensorFlow GraphDef inference...') + import tensorflow as tf + + def wrap_frozen_graph(gd, inputs, outputs): + x = tf.compat.v1.wrap_function(lambda: tf.compat.v1.import_graph_def(gd, name=""), []) # wrapped + ge = x.graph.as_graph_element + return x.prune(tf.nest.map_structure(ge, inputs), tf.nest.map_structure(ge, outputs)) + + gd = tf.Graph().as_graph_def() # graph_def + gd.ParseFromString(open(w, 'rb').read()) + frozen_func = wrap_frozen_graph(gd, inputs="x:0", outputs="Identity:0") + elif tflite or edgetpu: # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python + try: # https://coral.ai/docs/edgetpu/tflite-python/#update-existing-tf-lite-code-for-the-edge-tpu + from tflite_runtime.interpreter import Interpreter, load_delegate + except ImportError: + import tensorflow as tf + Interpreter, load_delegate = tf.lite.Interpreter, tf.lite.experimental.load_delegate, + if edgetpu: # Edge TPU https://coral.ai/software/#edgetpu-runtime + LOGGER.info(f'Loading {w} for TensorFlow Lite Edge TPU inference...') + delegate = {'Linux': 'libedgetpu.so.1', + 'Darwin': 'libedgetpu.1.dylib', + 'Windows': 'edgetpu.dll'}[platform.system()] + interpreter = Interpreter(model_path=w, experimental_delegates=[load_delegate(delegate)]) + else: # Lite + LOGGER.info(f'Loading {w} for TensorFlow Lite inference...') + interpreter = Interpreter(model_path=w) # load TFLite model + interpreter.allocate_tensors() # allocate + input_details = interpreter.get_input_details() # inputs + output_details = interpreter.get_output_details() # outputs + elif tfjs: + 
raise Exception('ERROR: YOLOv5 TF.js inference is not supported') + self.__dict__.update(locals()) # assign all variables to self + + def forward(self, im, augment=False, visualize=False, val=False): + # YOLOv5 MultiBackend inference + b, ch, h, w = im.shape # batch, channel, height, width + if self.pt or self.jit: # PyTorch + y = self.model(im) if self.jit else self.model(im, augment=augment, visualize=visualize) + return y if val else y[0] + elif self.dnn: # ONNX OpenCV DNN + im = im.cpu().numpy() # torch to numpy + self.net.setInput(im) + y = self.net.forward() + elif self.onnx: # ONNX Runtime + im = im.cpu().numpy() # torch to numpy + y = self.session.run([self.session.get_outputs()[0].name], {self.session.get_inputs()[0].name: im})[0] + elif self.xml: # OpenVINO + im = im.cpu().numpy() # FP32 + desc = self.ie.TensorDesc(precision='FP32', dims=im.shape, layout='NCHW') # Tensor Description + request = self.executable_network.requests[0] # inference request + request.set_blob(blob_name='images', blob=self.ie.Blob(desc, im)) # name=next(iter(request.input_blobs)) + request.infer() + y = request.output_blobs['output'].buffer # name=next(iter(request.output_blobs)) + elif self.engine: # TensorRT + assert im.shape == self.bindings['images'].shape, (im.shape, self.bindings['images'].shape) + self.binding_addrs['images'] = int(im.data_ptr()) + self.context.execute_v2(list(self.binding_addrs.values())) + y = self.bindings['output'].data + elif self.coreml: # CoreML + im = im.permute(0, 2, 3, 1).cpu().numpy() # torch BCHW to numpy BHWC shape(1,320,192,3) + im = Image.fromarray((im[0] * 255).astype('uint8')) + # im = im.resize((192, 320), Image.ANTIALIAS) + y = self.model.predict({'image': im}) # coordinates are xywh normalized + if 'confidence' in y: + box = xywh2xyxy(y['coordinates'] * [[w, h, w, h]]) # xyxy pixels + conf, cls = y['confidence'].max(1), y['confidence'].argmax(1).astype(np.float) + y = np.concatenate((box, conf.reshape(-1, 1), cls.reshape(-1, 1)), 1) + else: + k = 'var_' + str(sorted(int(k.replace('var_', '')) for k in y)[-1]) # output key + y = y[k] # output + else: # TensorFlow (SavedModel, GraphDef, Lite, Edge TPU) + im = im.permute(0, 2, 3, 1).cpu().numpy() # torch BCHW to numpy BHWC shape(1,320,192,3) + if self.saved_model: # SavedModel + y = (self.model(im, training=False) if self.keras else self.model(im)[0]).numpy() + elif self.pb: # GraphDef + y = self.frozen_func(x=self.tf.constant(im)).numpy() + else: # Lite or Edge TPU + input, output = self.input_details[0], self.output_details[0] + int8 = input['dtype'] == np.uint8 # is TFLite quantized uint8 model + if int8: + scale, zero_point = input['quantization'] + im = (im / scale + zero_point).astype(np.uint8) # de-scale + self.interpreter.set_tensor(input['index'], im) + self.interpreter.invoke() + y = self.interpreter.get_tensor(output['index']) + if int8: + scale, zero_point = output['quantization'] + y = (y.astype(np.float32) - zero_point) * scale # re-scale + y[..., :4] *= [w, h, w, h] # xywh normalized to pixels + + y = torch.tensor(y) if isinstance(y, np.ndarray) else y + return (y, []) if val else y + + def warmup(self, imgsz=(1, 3, 640, 640), half=False): + # Warmup model by running inference once + if self.pt or self.jit or self.onnx or self.engine: # warmup types + if isinstance(self.device, torch.device) and self.device.type != 'cpu': # only warmup GPU models + im = torch.zeros(*imgsz).to(self.device).type(torch.half if half else torch.float) # input image + self.forward(im) # warmup + + @staticmethod + def 
model_type(p='path/to/model.pt'): + # Return model type from model path, i.e. path='path/to/model.onnx' -> type=onnx + from export import export_formats + suffixes = list(export_formats().Suffix) + ['.xml'] # export suffixes + check_suffix(p, suffixes) # checks + p = Path(p).name # eliminate trailing separators + pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, xml2 = (s in p for s in suffixes) + xml |= xml2 # *_openvino_model or *.xml + tflite &= not edgetpu # *.tflite + return pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs + + +class AutoShape(nn.Module): + # YOLOv5 input-robust model wrapper for passing cv2/np/PIL/torch inputs. Includes preprocessing, inference and NMS + conf = 0.25 # NMS confidence threshold + iou = 0.45 # NMS IoU threshold + agnostic = False # NMS class-agnostic + multi_label = False # NMS multiple labels per box + classes = None # (optional list) filter by class, i.e. = [0, 15, 16] for COCO persons, cats and dogs + max_det = 1000 # maximum number of detections per image + amp = False # Automatic Mixed Precision (AMP) inference + + def __init__(self, model): + super().__init__() + LOGGER.info('Adding AutoShape... ') + copy_attr(self, model, include=('yaml', 'nc', 'hyp', 'names', 'stride', 'abc'), exclude=()) # copy attributes + self.dmb = isinstance(model, DetectMultiBackend) # DetectMultiBackend() instance + self.pt = not self.dmb or model.pt # PyTorch model + self.model = model.eval() + + def _apply(self, fn): + # Apply to(), cpu(), cuda(), half() to model tensors that are not parameters or registered buffers + self = super()._apply(fn) + if self.pt: + m = self.model.model.model[-1] if self.dmb else self.model.model[-1] # Detect() + m.stride = fn(m.stride) + m.grid = list(map(fn, m.grid)) + if isinstance(m.anchor_grid, list): + m.anchor_grid = list(map(fn, m.anchor_grid)) + return self + + @torch.no_grad() + def forward(self, imgs, size=640, augment=False, profile=False): + # Inference from various sources. For height=640, width=1280, RGB images example inputs are: + # file: imgs = 'data/images/zidane.jpg' # str or PosixPath + # URI: = 'https://ultralytics.com/images/zidane.jpg' + # OpenCV: = cv2.imread('image.jpg')[:,:,::-1] # HWC BGR to RGB x(640,1280,3) + # PIL: = Image.open('image.jpg') or ImageGrab.grab() # HWC x(640,1280,3) + # numpy: = np.zeros((640,1280,3)) # HWC + # torch: = torch.zeros(16,3,320,640) # BCHW (scaled to size=640, 0-1 values) + # multiple: = [Image.open('image1.jpg'), Image.open('image2.jpg'), ...] 
# list of images + + t = [time_sync()] + p = next(self.model.parameters()) if self.pt else torch.zeros(1) # for device and type + autocast = self.amp and (p.device.type != 'cpu') # Automatic Mixed Precision (AMP) inference + if isinstance(imgs, torch.Tensor): # torch + with amp.autocast(enabled=autocast): + return self.model(imgs.to(p.device).type_as(p), augment, profile) # inference + + # Pre-process + n, imgs = (len(imgs), imgs) if isinstance(imgs, list) else (1, [imgs]) # number of images, list of images + shape0, shape1, files = [], [], [] # image and inference shapes, filenames + for i, im in enumerate(imgs): + f = f'image{i}' # filename + if isinstance(im, (str, Path)): # filename or uri + im, f = Image.open(requests.get(im, stream=True).raw if str(im).startswith('http') else im), im + im = np.asarray(exif_transpose(im)) + elif isinstance(im, Image.Image): # PIL Image + im, f = np.asarray(exif_transpose(im)), getattr(im, 'filename', f) or f + files.append(Path(f).with_suffix('.jpg').name) + if im.shape[0] < 5: # image in CHW + im = im.transpose((1, 2, 0)) # reverse dataloader .transpose(2, 0, 1) + im = im[..., :3] if im.ndim == 3 else np.tile(im[..., None], 3) # enforce 3ch input + s = im.shape[:2] # HWC + shape0.append(s) # image shape + g = (size / max(s)) # gain + shape1.append([y * g for y in s]) + imgs[i] = im if im.data.contiguous else np.ascontiguousarray(im) # update + shape1 = [make_divisible(x, self.stride) for x in np.stack(shape1, 0).max(0)] # inference shape + x = [letterbox(im, new_shape=shape1 if self.pt else size, auto=False)[0] for im in imgs] # pad + x = np.stack(x, 0) if n > 1 else x[0][None] # stack + x = np.ascontiguousarray(x.transpose((0, 3, 1, 2))) # BHWC to BCHW + x = torch.from_numpy(x).to(p.device).type_as(p) / 255 # uint8 to fp16/32 + t.append(time_sync()) + + with amp.autocast(enabled=autocast): + # Inference + y = self.model(x, augment, profile) # forward + t.append(time_sync()) + + # Post-process + y = non_max_suppression(y if self.dmb else y[0], self.conf, iou_thres=self.iou, classes=self.classes, + agnostic=self.agnostic, multi_label=self.multi_label, max_det=self.max_det) # NMS + for i in range(n): + scale_coords(shape1, y[i][:, :4], shape0[i]) + + t.append(time_sync()) + return Detections(imgs, y, files, t, self.names, x.shape) + + +class Detections: + # YOLOv5 detections class for inference results + def __init__(self, imgs, pred, files, times=(0, 0, 0, 0), names=None, shape=None): + super().__init__() + d = pred[0].device # device + gn = [torch.tensor([*(im.shape[i] for i in [1, 0, 1, 0]), 1, 1], device=d) for im in imgs] # normalizations + self.imgs = imgs # list of images as numpy arrays + self.pred = pred # list of tensors pred[0] = (xyxy, conf, cls) + self.names = names # class names + self.files = files # image filenames + self.times = times # profiling times + self.xyxy = pred # xyxy pixels + self.xywh = [xyxy2xywh(x) for x in pred] # xywh pixels + self.xyxyn = [x / g for x, g in zip(self.xyxy, gn)] # xyxy normalized + self.xywhn = [x / g for x, g in zip(self.xywh, gn)] # xywh normalized + self.n = len(self.pred) # number of images (batch size) + self.t = tuple((times[i + 1] - times[i]) * 1000 / self.n for i in range(3)) # timestamps (ms) + self.s = shape # inference BCHW shape + + def display(self, pprint=False, show=False, save=False, crop=False, render=False, save_dir=Path('')): + crops = [] + for i, (im, pred) in enumerate(zip(self.imgs, self.pred)): + s = f'image {i + 1}/{len(self.pred)}: {im.shape[0]}x{im.shape[1]} ' # string + if 
pred.shape[0]: + for c in pred[:, -1].unique(): + n = (pred[:, -1] == c).sum() # detections per class + s += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, " # add to string + if show or save or render or crop: + annotator = Annotator(im, example=str(self.names)) + for *box, conf, cls in reversed(pred): # xyxy, confidence, class + label = f'{self.names[int(cls)]} {conf:.2f}' + if crop: + file = save_dir / 'crops' / self.names[int(cls)] / self.files[i] if save else None + crops.append({'box': box, 'conf': conf, 'cls': cls, 'label': label, + 'im': save_one_box(box, im, file=file, save=save)}) + else: # all others + annotator.box_label(box, label, color=colors(cls)) + im = annotator.im + else: + s += '(no detections)' + + im = Image.fromarray(im.astype(np.uint8)) if isinstance(im, np.ndarray) else im # from np + if pprint: + LOGGER.info(s.rstrip(', ')) + if show: + im.show(self.files[i]) # show + if save: + f = self.files[i] + im.save(save_dir / f) # save + if i == self.n - 1: + LOGGER.info(f"Saved {self.n} image{'s' * (self.n > 1)} to {colorstr('bold', save_dir)}") + if render: + self.imgs[i] = np.asarray(im) + if crop: + if save: + LOGGER.info(f'Saved results to {save_dir}\n') + return crops + + def print(self): + self.display(pprint=True) # print results + LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {tuple(self.s)}' % + self.t) + + def show(self): + self.display(show=True) # show results + + def save(self, save_dir='runs/detect/exp'): + save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/detect/exp', mkdir=True) # increment save_dir + self.display(save=True, save_dir=save_dir) # save results + + def crop(self, save=True, save_dir='runs/detect/exp'): + save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/detect/exp', mkdir=True) if save else None + return self.display(crop=True, save=save, save_dir=save_dir) # crop results + + def render(self): + self.display(render=True) # render results + return self.imgs + + def pandas(self): + # return detections as pandas DataFrames, i.e. print(results.pandas().xyxy[0]) + new = copy(self) # return copy + ca = 'xmin', 'ymin', 'xmax', 'ymax', 'confidence', 'class', 'name' # xyxy columns + cb = 'xcenter', 'ycenter', 'width', 'height', 'confidence', 'class', 'name' # xywh columns + for k, c in zip(['xyxy', 'xyxyn', 'xywh', 'xywhn'], [ca, ca, cb, cb]): + a = [[x[:5] + [int(x[5]), self.names[int(x[5])]] for x in x.tolist()] for x in getattr(self, k)] # update + setattr(new, k, [pd.DataFrame(x, columns=c) for x in a]) + return new + + def tolist(self): + # return a list of Detections objects, i.e. 'for result in results.tolist():' + r = range(self.n) # iterable + x = [Detections([self.imgs[i]], [self.pred[i]], [self.files[i]], self.times, self.names, self.s) for i in r] + # for d in x: + # for k in ['imgs', 'pred', 'xyxy', 'xyxyn', 'xywh', 'xywhn']: + # setattr(d, k, getattr(d, k)[0]) # pop out of list + return x + + def __len__(self): + return self.n + + +class Classify(nn.Module): + # Classification head, i.e. 
x(b,c1,20,20) to x(b,c2) + def __init__(self, c1, c2, k=1, s=1, p=None, g=1): # ch_in, ch_out, kernel, stride, padding, groups + super().__init__() + self.aap = nn.AdaptiveAvgPool2d(1) # to x(b,c1,1,1) + self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g) # to x(b,c2,1,1) + self.flat = nn.Flatten() + + def forward(self, x): + z = torch.cat([self.aap(y) for y in (x if isinstance(x, list) else [x])], 1) # cat if list + return self.flat(self.conv(z)) # flatten to x(b,c2) diff --git a/models/experimental.py b/models/experimental.py new file mode 100644 index 0000000..463e551 --- /dev/null +++ b/models/experimental.py @@ -0,0 +1,120 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Experimental modules +""" +import math + +import numpy as np +import torch +import torch.nn as nn + +from models.common import Conv +from utils.downloads import attempt_download + + +class CrossConv(nn.Module): + # Cross Convolution Downsample + def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False): + # ch_in, ch_out, kernel, stride, groups, expansion, shortcut + super().__init__() + c_ = int(c2 * e) # hidden channels + self.cv1 = Conv(c1, c_, (1, k), (1, s)) + self.cv2 = Conv(c_, c2, (k, 1), (s, 1), g=g) + self.add = shortcut and c1 == c2 + + def forward(self, x): + return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x)) + + +class Sum(nn.Module): + # Weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070 + def __init__(self, n, weight=False): # n: number of inputs + super().__init__() + self.weight = weight # apply weights boolean + self.iter = range(n - 1) # iter object + if weight: + self.w = nn.Parameter(-torch.arange(1.0, n) / 2, requires_grad=True) # layer weights + + def forward(self, x): + y = x[0] # no weight + if self.weight: + w = torch.sigmoid(self.w) * 2 + for i in self.iter: + y = y + x[i + 1] * w[i] + else: + for i in self.iter: + y = y + x[i + 1] + return y + + +class MixConv2d(nn.Module): + # Mixed Depth-wise Conv https://arxiv.org/abs/1907.09595 + def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True): # ch_in, ch_out, kernel, stride, ch_strategy + super().__init__() + n = len(k) # number of convolutions + if equal_ch: # equal c_ per group + i = torch.linspace(0, n - 1E-6, c2).floor() # c2 indices + c_ = [(i == g).sum() for g in range(n)] # intermediate channels + else: # equal weight.numel() per group + b = [c2] + [0] * n + a = np.eye(n + 1, n, k=-1) + a -= np.roll(a, 1, axis=1) + a *= np.array(k) ** 2 + a[0] = 1 + c_ = np.linalg.lstsq(a, b, rcond=None)[0].round() # solve for equal weight indices, ax = b + + self.m = nn.ModuleList( + [nn.Conv2d(c1, int(c_), k, s, k // 2, groups=math.gcd(c1, int(c_)), bias=False) for k, c_ in zip(k, c_)]) + self.bn = nn.BatchNorm2d(c2) + self.act = nn.SiLU() + + def forward(self, x): + return self.act(self.bn(torch.cat([m(x) for m in self.m], 1))) + + +class Ensemble(nn.ModuleList): + # Ensemble of models + def __init__(self): + super().__init__() + + def forward(self, x, augment=False, profile=False, visualize=False): + y = [] + for module in self: + y.append(module(x, augment, profile, visualize)[0]) + # y = torch.stack(y).max(0)[0] # max ensemble + # y = torch.stack(y).mean(0) # mean ensemble + y = torch.cat(y, 1) # nms ensemble + return y, None # inference, train output + + +def attempt_load(weights, map_location=None, inplace=True, fuse=True): + from models.yolo import Detect, Model + + # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a + model = Ensemble() + for w in 
weights if isinstance(weights, list) else [weights]: + ckpt = torch.load(attempt_download(w), map_location=map_location) # load + if fuse: + model.append(ckpt['ema' if ckpt.get('ema') else 'model'].float().fuse().eval()) # FP32 model + else: + model.append(ckpt['ema' if ckpt.get('ema') else 'model'].float().eval()) # without layer fuse + + # Compatibility updates + for m in model.modules(): + if type(m) in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU, Detect, Model]: + m.inplace = inplace # pytorch 1.7.0 compatibility + if type(m) is Detect: + if not isinstance(m.anchor_grid, list): # new Detect Layer compatibility + delattr(m, 'anchor_grid') + setattr(m, 'anchor_grid', [torch.zeros(1)] * m.nl) + elif type(m) is Conv: + m._non_persistent_buffers_set = set() # pytorch 1.6.0 compatibility + + if len(model) == 1: + return model[-1] # return model + else: + print(f'Ensemble created with {weights}\n') + for k in ['names']: + setattr(model, k, getattr(model[-1], k)) + model.stride = model[torch.argmax(torch.tensor([m.stride.max() for m in model])).int()].stride # max stride + return model # return ensemble diff --git a/models/tf.py b/models/tf.py new file mode 100644 index 0000000..74681e4 --- /dev/null +++ b/models/tf.py @@ -0,0 +1,464 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +TensorFlow, Keras and TFLite versions of YOLOv5 +Authored by https://github.com/zldrobit in PR https://github.com/ultralytics/yolov5/pull/1127 + +Usage: + $ python models/tf.py --weights yolov5s.pt + +Export: + $ python path/to/export.py --weights yolov5s.pt --include saved_model pb tflite tfjs +""" + +import argparse +import sys +from copy import deepcopy +from pathlib import Path + +FILE = Path(__file__).resolve() +ROOT = FILE.parents[1] # YOLOv5 root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH +# ROOT = ROOT.relative_to(Path.cwd()) # relative + +import numpy as np +import tensorflow as tf +import torch +import torch.nn as nn +from tensorflow import keras + +from models.common import C3, SPP, SPPF, Bottleneck, BottleneckCSP, Concat, Conv, DWConv, Focus, autopad +from models.experimental import CrossConv, MixConv2d, attempt_load +from models.yolo import Detect +from utils.activations import SiLU +from utils.general import LOGGER, make_divisible, print_args + + +class TFBN(keras.layers.Layer): + # TensorFlow BatchNormalization wrapper + def __init__(self, w=None): + super().__init__() + self.bn = keras.layers.BatchNormalization( + beta_initializer=keras.initializers.Constant(w.bias.numpy()), + gamma_initializer=keras.initializers.Constant(w.weight.numpy()), + moving_mean_initializer=keras.initializers.Constant(w.running_mean.numpy()), + moving_variance_initializer=keras.initializers.Constant(w.running_var.numpy()), + epsilon=w.eps) + + def call(self, inputs): + return self.bn(inputs) + + +class TFPad(keras.layers.Layer): + def __init__(self, pad): + super().__init__() + self.pad = tf.constant([[0, 0], [pad, pad], [pad, pad], [0, 0]]) + + def call(self, inputs): + return tf.pad(inputs, self.pad, mode='constant', constant_values=0) + + +class TFConv(keras.layers.Layer): + # Standard convolution + def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True, w=None): + # ch_in, ch_out, weights, kernel, stride, padding, groups + super().__init__() + assert g == 1, "TF v2.2 Conv2D does not support 'groups' argument" + assert isinstance(k, int), "Convolution with multiple kernels are not allowed." 
+ # TensorFlow convolution padding is inconsistent with PyTorch (e.g. k=3 s=2 'SAME' padding) + # see https://stackoverflow.com/questions/52975843/comparing-conv2d-with-padding-between-tensorflow-and-pytorch + + conv = keras.layers.Conv2D( + c2, k, s, 'SAME' if s == 1 else 'VALID', use_bias=False if hasattr(w, 'bn') else True, + kernel_initializer=keras.initializers.Constant(w.conv.weight.permute(2, 3, 1, 0).numpy()), + bias_initializer='zeros' if hasattr(w, 'bn') else keras.initializers.Constant(w.conv.bias.numpy())) + self.conv = conv if s == 1 else keras.Sequential([TFPad(autopad(k, p)), conv]) + self.bn = TFBN(w.bn) if hasattr(w, 'bn') else tf.identity + + # YOLOv5 activations + if isinstance(w.act, nn.LeakyReLU): + self.act = (lambda x: keras.activations.relu(x, alpha=0.1)) if act else tf.identity + elif isinstance(w.act, nn.Hardswish): + self.act = (lambda x: x * tf.nn.relu6(x + 3) * 0.166666667) if act else tf.identity + elif isinstance(w.act, (nn.SiLU, SiLU)): + self.act = (lambda x: keras.activations.swish(x)) if act else tf.identity + else: + raise Exception(f'no matching TensorFlow activation found for {w.act}') + + def call(self, inputs): + return self.act(self.bn(self.conv(inputs))) + + +class TFFocus(keras.layers.Layer): + # Focus wh information into c-space + def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True, w=None): + # ch_in, ch_out, kernel, stride, padding, groups + super().__init__() + self.conv = TFConv(c1 * 4, c2, k, s, p, g, act, w.conv) + + def call(self, inputs): # x(b,w,h,c) -> y(b,w/2,h/2,4c) + # inputs = inputs / 255 # normalize 0-255 to 0-1 + return self.conv(tf.concat([inputs[:, ::2, ::2, :], + inputs[:, 1::2, ::2, :], + inputs[:, ::2, 1::2, :], + inputs[:, 1::2, 1::2, :]], 3)) + + +class TFBottleneck(keras.layers.Layer): + # Standard bottleneck + def __init__(self, c1, c2, shortcut=True, g=1, e=0.5, w=None): # ch_in, ch_out, shortcut, groups, expansion + super().__init__() + c_ = int(c2 * e) # hidden channels + self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1) + self.cv2 = TFConv(c_, c2, 3, 1, g=g, w=w.cv2) + self.add = shortcut and c1 == c2 + + def call(self, inputs): + return inputs + self.cv2(self.cv1(inputs)) if self.add else self.cv2(self.cv1(inputs)) + + +class TFConv2d(keras.layers.Layer): + # Substitution for PyTorch nn.Conv2D + def __init__(self, c1, c2, k, s=1, g=1, bias=True, w=None): + super().__init__() + assert g == 1, "TF v2.2 Conv2D does not support 'groups' argument" + self.conv = keras.layers.Conv2D( + c2, k, s, 'VALID', use_bias=bias, + kernel_initializer=keras.initializers.Constant(w.weight.permute(2, 3, 1, 0).numpy()), + bias_initializer=keras.initializers.Constant(w.bias.numpy()) if bias else None, ) + + def call(self, inputs): + return self.conv(inputs) + + +class TFBottleneckCSP(keras.layers.Layer): + # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None): + # ch_in, ch_out, number, shortcut, groups, expansion + super().__init__() + c_ = int(c2 * e) # hidden channels + self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1) + self.cv2 = TFConv2d(c1, c_, 1, 1, bias=False, w=w.cv2) + self.cv3 = TFConv2d(c_, c_, 1, 1, bias=False, w=w.cv3) + self.cv4 = TFConv(2 * c_, c2, 1, 1, w=w.cv4) + self.bn = TFBN(w.bn) + self.act = lambda x: keras.activations.relu(x, alpha=0.1) + self.m = keras.Sequential([TFBottleneck(c_, c_, shortcut, g, e=1.0, w=w.m[j]) for j in range(n)]) + + def call(self, inputs): + y1 = self.cv3(self.m(self.cv1(inputs))) + y2 = self.cv2(inputs) + return 
self.cv4(self.act(self.bn(tf.concat((y1, y2), axis=3)))) + + +class TFC3(keras.layers.Layer): + # CSP Bottleneck with 3 convolutions + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None): + # ch_in, ch_out, number, shortcut, groups, expansion + super().__init__() + c_ = int(c2 * e) # hidden channels + self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1) + self.cv2 = TFConv(c1, c_, 1, 1, w=w.cv2) + self.cv3 = TFConv(2 * c_, c2, 1, 1, w=w.cv3) + self.m = keras.Sequential([TFBottleneck(c_, c_, shortcut, g, e=1.0, w=w.m[j]) for j in range(n)]) + + def call(self, inputs): + return self.cv3(tf.concat((self.m(self.cv1(inputs)), self.cv2(inputs)), axis=3)) + + +class TFSPP(keras.layers.Layer): + # Spatial pyramid pooling layer used in YOLOv3-SPP + def __init__(self, c1, c2, k=(5, 9, 13), w=None): + super().__init__() + c_ = c1 // 2 # hidden channels + self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1) + self.cv2 = TFConv(c_ * (len(k) + 1), c2, 1, 1, w=w.cv2) + self.m = [keras.layers.MaxPool2D(pool_size=x, strides=1, padding='SAME') for x in k] + + def call(self, inputs): + x = self.cv1(inputs) + return self.cv2(tf.concat([x] + [m(x) for m in self.m], 3)) + + +class TFSPPF(keras.layers.Layer): + # Spatial pyramid pooling-Fast layer + def __init__(self, c1, c2, k=5, w=None): + super().__init__() + c_ = c1 // 2 # hidden channels + self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1) + self.cv2 = TFConv(c_ * 4, c2, 1, 1, w=w.cv2) + self.m = keras.layers.MaxPool2D(pool_size=k, strides=1, padding='SAME') + + def call(self, inputs): + x = self.cv1(inputs) + y1 = self.m(x) + y2 = self.m(y1) + return self.cv2(tf.concat([x, y1, y2, self.m(y2)], 3)) + + +class TFDetect(keras.layers.Layer): + def __init__(self, nc=80, anchors=(), ch=(), imgsz=(640, 640), w=None): # detection layer + super().__init__() + self.stride = tf.convert_to_tensor(w.stride.numpy(), dtype=tf.float32) + self.nc = nc # number of classes + self.no = nc + 5 # number of outputs per anchor + self.nl = len(anchors) # number of detection layers + self.na = len(anchors[0]) // 2 # number of anchors + self.grid = [tf.zeros(1)] * self.nl # init grid + self.anchors = tf.convert_to_tensor(w.anchors.numpy(), dtype=tf.float32) + self.anchor_grid = tf.reshape(self.anchors * tf.reshape(self.stride, [self.nl, 1, 1]), + [self.nl, 1, -1, 1, 2]) + self.m = [TFConv2d(x, self.no * self.na, 1, w=w.m[i]) for i, x in enumerate(ch)] + self.training = False # set to False after building model + self.imgsz = imgsz + for i in range(self.nl): + ny, nx = self.imgsz[0] // self.stride[i], self.imgsz[1] // self.stride[i] + self.grid[i] = self._make_grid(nx, ny) + + def call(self, inputs): + z = [] # inference output + x = [] + for i in range(self.nl): + x.append(self.m[i](inputs[i])) + # x(bs,20,20,255) to x(bs,3,20,20,85) + ny, nx = self.imgsz[0] // self.stride[i], self.imgsz[1] // self.stride[i] + x[i] = tf.transpose(tf.reshape(x[i], [-1, ny * nx, self.na, self.no]), [0, 2, 1, 3]) + + if not self.training: # inference + y = tf.sigmoid(x[i]) + xy = (y[..., 0:2] * 2 - 0.5 + self.grid[i]) * self.stride[i] # xy + wh = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] + # Normalize xywh to 0-1 to reduce calibration error + xy /= tf.constant([[self.imgsz[1], self.imgsz[0]]], dtype=tf.float32) + wh /= tf.constant([[self.imgsz[1], self.imgsz[0]]], dtype=tf.float32) + y = tf.concat([xy, wh, y[..., 4:]], -1) + z.append(tf.reshape(y, [-1, self.na * ny * nx, self.no])) + + return x if self.training else (tf.concat(z, 1), x) + + @staticmethod + def _make_grid(nx=20, ny=20): + # yv, xv = 
torch.meshgrid([torch.arange(ny), torch.arange(nx)]) + # return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float() + xv, yv = tf.meshgrid(tf.range(nx), tf.range(ny)) + return tf.cast(tf.reshape(tf.stack([xv, yv], 2), [1, 1, ny * nx, 2]), dtype=tf.float32) + + +class TFUpsample(keras.layers.Layer): + def __init__(self, size, scale_factor, mode, w=None): # warning: all arguments needed including 'w' + super().__init__() + assert scale_factor == 2, "scale_factor must be 2" + self.upsample = lambda x: tf.image.resize(x, (x.shape[1] * 2, x.shape[2] * 2), method=mode) + # self.upsample = keras.layers.UpSampling2D(size=scale_factor, interpolation=mode) + # with default arguments: align_corners=False, half_pixel_centers=False + # self.upsample = lambda x: tf.raw_ops.ResizeNearestNeighbor(images=x, + # size=(x.shape[1] * 2, x.shape[2] * 2)) + + def call(self, inputs): + return self.upsample(inputs) + + +class TFConcat(keras.layers.Layer): + def __init__(self, dimension=1, w=None): + super().__init__() + assert dimension == 1, "convert only NCHW to NHWC concat" + self.d = 3 + + def call(self, inputs): + return tf.concat(inputs, self.d) + + +def parse_model(d, ch, model, imgsz): # model_dict, input_channels(3) + LOGGER.info(f"\n{'':>3}{'from':>18}{'n':>3}{'params':>10} {'module':<40}{'arguments':<30}") + anchors, nc, gd, gw = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple'] + na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors # number of anchors + no = na * (nc + 5) # number of outputs = anchors * (classes + 5) + + layers, save, c2 = [], [], ch[-1] # layers, savelist, ch out + for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']): # from, number, module, args + m_str = m + m = eval(m) if isinstance(m, str) else m # eval strings + for j, a in enumerate(args): + try: + args[j] = eval(a) if isinstance(a, str) else a # eval strings + except NameError: + pass + + n = max(round(n * gd), 1) if n > 1 else n # depth gain + if m in [nn.Conv2d, Conv, Bottleneck, SPP, SPPF, DWConv, MixConv2d, Focus, CrossConv, BottleneckCSP, C3]: + c1, c2 = ch[f], args[0] + c2 = make_divisible(c2 * gw, 8) if c2 != no else c2 + + args = [c1, c2, *args[1:]] + if m in [BottleneckCSP, C3]: + args.insert(2, n) + n = 1 + elif m is nn.BatchNorm2d: + args = [ch[f]] + elif m is Concat: + c2 = sum(ch[-1 if x == -1 else x + 1] for x in f) + elif m is Detect: + args.append([ch[x + 1] for x in f]) + if isinstance(args[1], int): # number of anchors + args[1] = [list(range(args[1] * 2))] * len(f) + args.append(imgsz) + else: + c2 = ch[f] + + tf_m = eval('TF' + m_str.replace('nn.', '')) + m_ = keras.Sequential([tf_m(*args, w=model.model[i][j]) for j in range(n)]) if n > 1 \ + else tf_m(*args, w=model.model[i]) # module + + torch_m_ = nn.Sequential(*(m(*args) for _ in range(n))) if n > 1 else m(*args) # module + t = str(m)[8:-2].replace('__main__.', '') # module type + np = sum(x.numel() for x in torch_m_.parameters()) # number params + m_.i, m_.f, m_.type, m_.np = i, f, t, np # attach index, 'from' index, type, number params + LOGGER.info(f'{i:>3}{str(f):>18}{str(n):>3}{np:>10} {t:<40}{str(args):<30}') # print + save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1) # append to savelist + layers.append(m_) + ch.append(c2) + return keras.Sequential(layers), sorted(save) + + +class TFModel: + def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, model=None, imgsz=(640, 640)): # model, channels, classes + super().__init__() + if isinstance(cfg, dict): + self.yaml = cfg # model 
dict + else: # is *.yaml + import yaml # for torch hub + self.yaml_file = Path(cfg).name + with open(cfg) as f: + self.yaml = yaml.load(f, Loader=yaml.FullLoader) # model dict + + # Define model + if nc and nc != self.yaml['nc']: + LOGGER.info(f"Overriding {cfg} nc={self.yaml['nc']} with nc={nc}") + self.yaml['nc'] = nc # override yaml value + self.model, self.savelist = parse_model(deepcopy(self.yaml), ch=[ch], model=model, imgsz=imgsz) + + def predict(self, inputs, tf_nms=False, agnostic_nms=False, topk_per_class=100, topk_all=100, iou_thres=0.45, + conf_thres=0.25): + y = [] # outputs + x = inputs + for i, m in enumerate(self.model.layers): + if m.f != -1: # if not from previous layer + x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers + + x = m(x) # run + y.append(x if m.i in self.savelist else None) # save output + + # Add TensorFlow NMS + if tf_nms: + boxes = self._xywh2xyxy(x[0][..., :4]) + probs = x[0][:, :, 4:5] + classes = x[0][:, :, 5:] + scores = probs * classes + if agnostic_nms: + nms = AgnosticNMS()((boxes, classes, scores), topk_all, iou_thres, conf_thres) + return nms, x[1] + else: + boxes = tf.expand_dims(boxes, 2) + nms = tf.image.combined_non_max_suppression( + boxes, scores, topk_per_class, topk_all, iou_thres, conf_thres, clip_boxes=False) + return nms, x[1] + + return x[0] # output only first tensor [1,6300,85] = [xywh, conf, class0, class1, ...] + # x = x[0][0] # [x(1,6300,85), ...] to x(6300,85) + # xywh = x[..., :4] # x(6300,4) boxes + # conf = x[..., 4:5] # x(6300,1) confidences + # cls = tf.reshape(tf.cast(tf.argmax(x[..., 5:], axis=1), tf.float32), (-1, 1)) # x(6300,1) classes + # return tf.concat([conf, cls, xywh], 1) + + @staticmethod + def _xywh2xyxy(xywh): + # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right + x, y, w, h = tf.split(xywh, num_or_size_splits=4, axis=-1) + return tf.concat([x - w / 2, y - h / 2, x + w / 2, y + h / 2], axis=-1) + + +class AgnosticNMS(keras.layers.Layer): + # TF Agnostic NMS + def call(self, input, topk_all, iou_thres, conf_thres): + # wrap map_fn to avoid TypeSpec related error https://stackoverflow.com/a/65809989/3036450 + return tf.map_fn(lambda x: self._nms(x, topk_all, iou_thres, conf_thres), input, + fn_output_signature=(tf.float32, tf.float32, tf.float32, tf.int32), + name='agnostic_nms') + + @staticmethod + def _nms(x, topk_all=100, iou_thres=0.45, conf_thres=0.25): # agnostic NMS + boxes, classes, scores = x + class_inds = tf.cast(tf.argmax(classes, axis=-1), tf.float32) + scores_inp = tf.reduce_max(scores, -1) + selected_inds = tf.image.non_max_suppression( + boxes, scores_inp, max_output_size=topk_all, iou_threshold=iou_thres, score_threshold=conf_thres) + selected_boxes = tf.gather(boxes, selected_inds) + padded_boxes = tf.pad(selected_boxes, + paddings=[[0, topk_all - tf.shape(selected_boxes)[0]], [0, 0]], + mode="CONSTANT", constant_values=0.0) + selected_scores = tf.gather(scores_inp, selected_inds) + padded_scores = tf.pad(selected_scores, + paddings=[[0, topk_all - tf.shape(selected_boxes)[0]]], + mode="CONSTANT", constant_values=-1.0) + selected_classes = tf.gather(class_inds, selected_inds) + padded_classes = tf.pad(selected_classes, + paddings=[[0, topk_all - tf.shape(selected_boxes)[0]]], + mode="CONSTANT", constant_values=-1.0) + valid_detections = tf.shape(selected_inds)[0] + return padded_boxes, padded_scores, padded_classes, valid_detections + + +def representative_dataset_gen(dataset, ncalib=100): + # 
Representative dataset generator for use with converter.representative_dataset, returns a generator of np arrays + for n, (path, img, im0s, vid_cap, string) in enumerate(dataset): + input = np.transpose(img, [1, 2, 0]) + input = np.expand_dims(input, axis=0).astype(np.float32) + input /= 255 + yield [input] + if n >= ncalib: + break + + +def run(weights=ROOT / 'yolov5s.pt', # weights path + imgsz=(640, 640), # inference size h,w + batch_size=1, # batch size + dynamic=False, # dynamic batch size + ): + # PyTorch model + im = torch.zeros((batch_size, 3, *imgsz)) # BCHW image + model = attempt_load(weights, map_location=torch.device('cpu'), inplace=True, fuse=False) + _ = model(im) # inference + model.info() + + # TensorFlow model + im = tf.zeros((batch_size, *imgsz, 3)) # BHWC image + tf_model = TFModel(cfg=model.yaml, model=model, nc=model.nc, imgsz=imgsz) + _ = tf_model.predict(im) # inference + + # Keras model + im = keras.Input(shape=(*imgsz, 3), batch_size=None if dynamic else batch_size) + keras_model = keras.Model(inputs=im, outputs=tf_model.predict(im)) + keras_model.summary() + + LOGGER.info('PyTorch, TensorFlow and Keras models successfully verified.\nUse export.py for TF model export.') + + +def parse_opt(): + parser = argparse.ArgumentParser() + parser.add_argument('--weights', type=str, default=ROOT / 'yolov5s.pt', help='weights path') + parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w') + parser.add_argument('--batch-size', type=int, default=1, help='batch size') + parser.add_argument('--dynamic', action='store_true', help='dynamic batch size') + opt = parser.parse_args() + opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand + print_args(FILE.stem, opt) + return opt + + +def main(opt): + run(**vars(opt)) + + +if __name__ == "__main__": + opt = parse_opt() + main(opt) diff --git a/models/yolo.py b/models/yolo.py new file mode 100644 index 0000000..f659a04 --- /dev/null +++ b/models/yolo.py @@ -0,0 +1,329 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +YOLO-specific modules + +Usage: + $ python path/to/models/yolo.py --cfg yolov5s.yaml +""" + +import argparse +import sys +from copy import deepcopy +from pathlib import Path + +FILE = Path(__file__).resolve() +ROOT = FILE.parents[1] # YOLOv5 root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH +# ROOT = ROOT.relative_to(Path.cwd()) # relative + +from models.common import * +from models.experimental import * +from utils.autoanchor import check_anchor_order +from utils.general import LOGGER, check_version, check_yaml, make_divisible, print_args +from utils.plots import feature_visualization +from utils.torch_utils import fuse_conv_and_bn, initialize_weights, model_info, scale_img, select_device, time_sync + +try: + import thop # for FLOPs computation +except ImportError: + thop = None + + +class Detect(nn.Module): + stride = None # strides computed during build + onnx_dynamic = False # ONNX export parameter + + def __init__(self, nc=80, anchors=(), ch=(), inplace=True): # detection layer + super().__init__() + self.nc = nc # number of classes + self.no = nc + 5 # number of outputs per anchor + self.nl = len(anchors) # number of detection layers + self.na = len(anchors[0]) // 2 # number of anchors + self.grid = [torch.zeros(1)] * self.nl # init grid + self.anchor_grid = [torch.zeros(1)] * self.nl # init anchor grid + self.register_buffer('anchors', torch.tensor(anchors).float().view(self.nl, -1, 2)) # shape(nl,na,2) + 
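# each detection head below is a single 1x1 Conv2d mapping its input channels to na * (nc + 5) outputs per location, i.e. xywh, objectness and nc class scores for each of the na anchors +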
self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch) # output conv + self.inplace = inplace # use in-place ops (e.g. slice assignment) + + def forward(self, x): + z = [] # inference output + for i in range(self.nl): + x[i] = self.m[i](x[i]) # conv + bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85) + x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous() + + if not self.training: # inference + if self.onnx_dynamic or self.grid[i].shape[2:4] != x[i].shape[2:4]: + self.grid[i], self.anchor_grid[i] = self._make_grid(nx, ny, i) + + y = x[i].sigmoid() + if self.inplace: + y[..., 0:2] = (y[..., 0:2] * 2 - 0.5 + self.grid[i]) * self.stride[i] # xy + y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh + else: # for YOLOv5 on AWS Inferentia https://github.com/ultralytics/yolov5/pull/2953 + xy = (y[..., 0:2] * 2 - 0.5 + self.grid[i]) * self.stride[i] # xy + wh = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh + y = torch.cat((xy, wh, y[..., 4:]), -1) + z.append(y.view(bs, -1, self.no)) + + return x if self.training else (torch.cat(z, 1), x) + + def _make_grid(self, nx=20, ny=20, i=0): + d = self.anchors[i].device + if check_version(torch.__version__, '1.10.0'): # torch>=1.10.0 meshgrid workaround for torch>=0.7 compatibility + yv, xv = torch.meshgrid([torch.arange(ny, device=d), torch.arange(nx, device=d)], indexing='ij') + else: + yv, xv = torch.meshgrid([torch.arange(ny, device=d), torch.arange(nx, device=d)]) + grid = torch.stack((xv, yv), 2).expand((1, self.na, ny, nx, 2)).float() + anchor_grid = (self.anchors[i].clone() * self.stride[i]) \ + .view((1, self.na, 1, 1, 2)).expand((1, self.na, ny, nx, 2)).float() + return grid, anchor_grid + + +class Model(nn.Module): + def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, anchors=None): # model, input channels, number of classes + super().__init__() + if isinstance(cfg, dict): + self.yaml = cfg # model dict + else: # is *.yaml + import yaml # for torch hub + self.yaml_file = Path(cfg).name + with open(cfg, encoding='ascii', errors='ignore') as f: + self.yaml = yaml.safe_load(f) # model dict + + # Define model + ch = self.yaml['ch'] = self.yaml.get('ch', ch) # input channels + if nc and nc != self.yaml['nc']: + LOGGER.info(f"Overriding model.yaml nc={self.yaml['nc']} with nc={nc}") + self.yaml['nc'] = nc # override yaml value + if anchors: + LOGGER.info(f'Overriding model.yaml anchors with anchors={anchors}') + self.yaml['anchors'] = round(anchors) # override yaml value + self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch]) # model, savelist + self.names = [str(i) for i in range(self.yaml['nc'])] # default names + self.inplace = self.yaml.get('inplace', True) + + # Build strides, anchors + m = self.model[-1] # Detect() + if isinstance(m, Detect): + s = 256 # 2x min stride + m.inplace = self.inplace + m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))]) # forward + m.anchors /= m.stride.view(-1, 1, 1) + check_anchor_order(m) + self.stride = m.stride + self._initialize_biases() # only run once + + # Init weights, biases + initialize_weights(self) + self.info() + LOGGER.info('') + + def forward(self, x, augment=False, profile=False, visualize=False): + if augment: + return self._forward_augment(x) # augmented inference, None + return self._forward_once(x, profile, visualize) # single-scale inference, train + + def _forward_augment(self, x): + img_size = x.shape[-2:] # height, width + s = [1, 0.83, 0.67] # scales + f = [None, 3, 
None] # flips (2-ud, 3-lr) + y = [] # outputs + for si, fi in zip(s, f): + xi = scale_img(x.flip(fi) if fi else x, si, gs=int(self.stride.max())) + yi = self._forward_once(xi)[0] # forward + # cv2.imwrite(f'img_{si}.jpg', 255 * xi[0].cpu().numpy().transpose((1, 2, 0))[:, :, ::-1]) # save + yi = self._descale_pred(yi, fi, si, img_size) + y.append(yi) + y = self._clip_augmented(y) # clip augmented tails + return torch.cat(y, 1), None # augmented inference, train + + def _forward_once(self, x, profile=False, visualize=False): + y, dt = [], [] # outputs + for m in self.model: + if m.f != -1: # if not from previous layer + x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers + if profile: + self._profile_one_layer(m, x, dt) + x = m(x) # run + y.append(x if m.i in self.save else None) # save output + if visualize: + feature_visualization(x, m.type, m.i, save_dir=visualize) + return x + + def _descale_pred(self, p, flips, scale, img_size): + # de-scale predictions following augmented inference (inverse operation) + if self.inplace: + p[..., :4] /= scale # de-scale + if flips == 2: + p[..., 1] = img_size[0] - p[..., 1] # de-flip ud + elif flips == 3: + p[..., 0] = img_size[1] - p[..., 0] # de-flip lr + else: + x, y, wh = p[..., 0:1] / scale, p[..., 1:2] / scale, p[..., 2:4] / scale # de-scale + if flips == 2: + y = img_size[0] - y # de-flip ud + elif flips == 3: + x = img_size[1] - x # de-flip lr + p = torch.cat((x, y, wh, p[..., 4:]), -1) + return p + + def _clip_augmented(self, y): + # Clip YOLOv5 augmented inference tails + nl = self.model[-1].nl # number of detection layers (P3-P5) + g = sum(4 ** x for x in range(nl)) # grid points + e = 1 # exclude layer count + i = (y[0].shape[1] // g) * sum(4 ** x for x in range(e)) # indices + y[0] = y[0][:, :-i] # large + i = (y[-1].shape[1] // g) * sum(4 ** (nl - 1 - x) for x in range(e)) # indices + y[-1] = y[-1][:, i:] # small + return y + + def _profile_one_layer(self, m, x, dt): + c = isinstance(m, Detect) # is final layer, copy input as inplace fix + o = thop.profile(m, inputs=(x.copy() if c else x,), verbose=False)[0] / 1E9 * 2 if thop else 0 # FLOPs + t = time_sync() + for _ in range(10): + m(x.copy() if c else x) + dt.append((time_sync() - t) * 100) + if m == self.model[0]: + LOGGER.info(f"{'time (ms)':>10s} {'GFLOPs':>10s} {'params':>10s} {'module'}") + LOGGER.info(f'{dt[-1]:10.2f} {o:10.2f} {m.np:10.0f} {m.type}') + if c: + LOGGER.info(f"{sum(dt):10.2f} {'-':>10s} {'-':>10s} Total") + + def _initialize_biases(self, cf=None): # initialize biases into Detect(), cf is class frequency + # https://arxiv.org/abs/1708.02002 section 3.3 + # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1. 
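+ # the constants below encode weak priors: the obj bias of log(8 / (640 / s) ** 2) assumes roughly 8 objects per 640-pixel image spread over the (640 / s) ** 2 grid cells at stride s, and the cls bias of log(0.6 / (nc - 0.999999)) ~ log(0.6 / (nc - 1)) applies when no class-frequency tensor cf is passed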
+ m = self.model[-1] # Detect() module + for mi, s in zip(m.m, m.stride): # from + b = mi.bias.view(m.na, -1) # conv.bias(255) to (3,85) + b.data[:, 4] += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image) + b.data[:, 5:] += math.log(0.6 / (m.nc - 0.999999)) if cf is None else torch.log(cf / cf.sum()) # cls + mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True) + + def _print_biases(self): + m = self.model[-1] # Detect() module + for mi in m.m: # from + b = mi.bias.detach().view(m.na, -1).T # conv.bias(255) to (3,85) + LOGGER.info( + ('%6g Conv2d.bias:' + '%10.3g' * 6) % (mi.weight.shape[1], *b[:5].mean(1).tolist(), b[5:].mean())) + + # def _print_weights(self): + # for m in self.model.modules(): + # if type(m) is Bottleneck: + # LOGGER.info('%10.3g' % (m.w.detach().sigmoid() * 2)) # shortcut weights + + def fuse(self): # fuse model Conv2d() + BatchNorm2d() layers + LOGGER.info('Fusing layers... ') + for m in self.model.modules(): + if isinstance(m, (Conv, DWConv)) and hasattr(m, 'bn'): + m.conv = fuse_conv_and_bn(m.conv, m.bn) # update conv + delattr(m, 'bn') # remove batchnorm + m.forward = m.forward_fuse # update forward + self.info() + return self + + def info(self, verbose=False, img_size=640): # print model information + model_info(self, verbose, img_size) + + def _apply(self, fn): + # Apply to(), cpu(), cuda(), half() to model tensors that are not parameters or registered buffers + self = super()._apply(fn) + m = self.model[-1] # Detect() + if isinstance(m, Detect): + m.stride = fn(m.stride) + m.grid = list(map(fn, m.grid)) + if isinstance(m.anchor_grid, list): + m.anchor_grid = list(map(fn, m.anchor_grid)) + return self + + +def parse_model(d, ch): # model_dict, input_channels(3) + LOGGER.info(f"\n{'':>3}{'from':>18}{'n':>3}{'params':>10} {'module':<40}{'arguments':<30}") + anchors, nc, gd, gw = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple'] + na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors # number of anchors + no = na * (nc + 5) # number of outputs = anchors * (classes + 5) + + layers, save, c2 = [], [], ch[-1] # layers, savelist, ch out + for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']): # from, number, module, args + m = eval(m) if isinstance(m, str) else m # eval strings + for j, a in enumerate(args): + try: + args[j] = eval(a) if isinstance(a, str) else a # eval strings + except NameError: + pass + + n = n_ = max(round(n * gd), 1) if n > 1 else n # depth gain + if m in [Conv, GhostConv, Bottleneck, GhostBottleneck, SPP, SPPF, DWConv, MixConv2d, Focus, CrossConv, + BottleneckCSP, C3, C3TR, C3SPP, C3Ghost]: + c1, c2 = ch[f], args[0] + if c2 != no: # if not output + c2 = make_divisible(c2 * gw, 8) + + args = [c1, c2, *args[1:]] + if m in [BottleneckCSP, C3, C3TR, C3Ghost]: + args.insert(2, n) # number of repeats + n = 1 + elif m is nn.BatchNorm2d: + args = [ch[f]] + elif m is Concat: + c2 = sum(ch[x] for x in f) + elif m is Detect: + args.append([ch[x] for x in f]) + if isinstance(args[1], int): # number of anchors + args[1] = [list(range(args[1] * 2))] * len(f) + elif m is Contract: + c2 = ch[f] * args[0] ** 2 + elif m is Expand: + c2 = ch[f] // args[0] ** 2 + else: + c2 = ch[f] + + m_ = nn.Sequential(*(m(*args) for _ in range(n))) if n > 1 else m(*args) # module + t = str(m)[8:-2].replace('__main__.', '') # module type + np = sum(x.numel() for x in m_.parameters()) # number params + m_.i, m_.f, m_.type, m_.np = i, f, t, np # attach index, 'from' index, type, number params + 
LOGGER.info(f'{i:>3}{str(f):>18}{n_:>3}{np:10.0f} {t:<40}{str(args):<30}') # print + save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1) # append to savelist + layers.append(m_) + if i == 0: + ch = [] + ch.append(c2) + return nn.Sequential(*layers), sorted(save) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--cfg', type=str, default='yolov5s.yaml', help='model.yaml') + parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') + parser.add_argument('--profile', action='store_true', help='profile model speed') + parser.add_argument('--test', action='store_true', help='test all yolo*.yaml') + opt = parser.parse_args() + opt.cfg = check_yaml(opt.cfg) # check YAML + print_args(FILE.stem, opt) + device = select_device(opt.device) + + # Create model + model = Model(opt.cfg).to(device) + model.train() + + # Profile + if opt.profile: + img = torch.rand(8 if torch.cuda.is_available() else 1, 3, 640, 640).to(device) + y = model(img, profile=True) + + # Test all models + if opt.test: + for cfg in Path(ROOT / 'models').rglob('yolo*.yaml'): + try: + _ = Model(cfg) + except Exception as e: + print(f'Error in {cfg}: {e}') + + # Tensorboard (not working https://github.com/ultralytics/yolov5/issues/2898) + # from torch.utils.tensorboard import SummaryWriter + # tb_writer = SummaryWriter('.') + # LOGGER.info("Run 'tensorboard --logdir=models' to view tensorboard at http://localhost:6006/") + # tb_writer.add_graph(torch.jit.trace(model, img, strict=False), []) # add model graph diff --git a/utils.py b/utils/__init__.py similarity index 100% rename from utils.py rename to utils/__init__.py diff --git a/utils/activations.py b/utils/activations.py new file mode 100644 index 0000000..a4ff789 --- /dev/null +++ b/utils/activations.py @@ -0,0 +1,101 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Activation functions +""" + +import torch +import torch.nn as nn +import torch.nn.functional as F + + +# SiLU https://arxiv.org/pdf/1606.08415.pdf ---------------------------------------------------------------------------- +class SiLU(nn.Module): # export-friendly version of nn.SiLU() + @staticmethod + def forward(x): + return x * torch.sigmoid(x) + + +class Hardswish(nn.Module): # export-friendly version of nn.Hardswish() + @staticmethod + def forward(x): + # return x * F.hardsigmoid(x) # for TorchScript and CoreML + return x * F.hardtanh(x + 3, 0.0, 6.0) / 6.0 # for TorchScript, CoreML and ONNX + + +# Mish https://github.com/digantamisra98/Mish -------------------------------------------------------------------------- +class Mish(nn.Module): + @staticmethod + def forward(x): + return x * F.softplus(x).tanh() + + +class MemoryEfficientMish(nn.Module): + class F(torch.autograd.Function): + @staticmethod + def forward(ctx, x): + ctx.save_for_backward(x) + return x.mul(torch.tanh(F.softplus(x))) # x * tanh(ln(1 + exp(x))) + + @staticmethod + def backward(ctx, grad_output): + x = ctx.saved_tensors[0] + sx = torch.sigmoid(x) + fx = F.softplus(x).tanh() + return grad_output * (fx + x * sx * (1 - fx * fx)) + + def forward(self, x): + return self.F.apply(x) + + +# FReLU https://arxiv.org/abs/2007.11824 ------------------------------------------------------------------------------- +class FReLU(nn.Module): + def __init__(self, c1, k=3): # ch_in, kernel + super().__init__() + self.conv = nn.Conv2d(c1, c1, k, 1, 1, groups=c1, bias=False) + self.bn = nn.BatchNorm2d(c1) + + def forward(self, x): + return torch.max(x, 
self.bn(self.conv(x))) + + +# ACON https://arxiv.org/pdf/2009.04759.pdf ---------------------------------------------------------------------------- +class AconC(nn.Module): + r""" ACON activation (activate or not). + AconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is a learnable parameter + according to "Activate or Not: Learning Customized Activation" . + """ + + def __init__(self, c1): + super().__init__() + self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1)) + self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1)) + self.beta = nn.Parameter(torch.ones(1, c1, 1, 1)) + + def forward(self, x): + dpx = (self.p1 - self.p2) * x + return dpx * torch.sigmoid(self.beta * dpx) + self.p2 * x + + +class MetaAconC(nn.Module): + r""" ACON activation (activate or not). + MetaAconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is generated by a small network + according to "Activate or Not: Learning Customized Activation" . + """ + + def __init__(self, c1, k=1, s=1, r=16): # ch_in, kernel, stride, r + super().__init__() + c2 = max(r, c1 // r) + self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1)) + self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1)) + self.fc1 = nn.Conv2d(c1, c2, k, s, bias=True) + self.fc2 = nn.Conv2d(c2, c1, k, s, bias=True) + # self.bn1 = nn.BatchNorm2d(c2) + # self.bn2 = nn.BatchNorm2d(c1) + + def forward(self, x): + y = x.mean(dim=2, keepdims=True).mean(dim=3, keepdims=True) + # batch-size 1 bug/instabilities https://github.com/ultralytics/yolov5/issues/2891 + # beta = torch.sigmoid(self.bn2(self.fc2(self.bn1(self.fc1(y))))) # bug/unstable + beta = torch.sigmoid(self.fc2(self.fc1(y))) # bug patch BN layers removed + dpx = (self.p1 - self.p2) * x + return dpx * torch.sigmoid(beta * dpx) + self.p2 * x diff --git a/utils/augmentations.py b/utils/augmentations.py new file mode 100644 index 0000000..0311b97 --- /dev/null +++ b/utils/augmentations.py @@ -0,0 +1,277 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Image augmentation functions +""" + +import math +import random + +import cv2 +import numpy as np + +from utils.general import LOGGER, check_version, colorstr, resample_segments, segment2box +from utils.metrics import bbox_ioa + + +class Albumentations: + # YOLOv5 Albumentations class (optional, only used if package is installed) + def __init__(self): + self.transform = None + try: + import albumentations as A + check_version(A.__version__, '1.0.3', hard=True) # version requirement + + self.transform = A.Compose([ + A.Blur(p=0.01), + A.MedianBlur(p=0.01), + A.ToGray(p=0.01), + A.CLAHE(p=0.01), + A.RandomBrightnessContrast(p=0.0), + A.RandomGamma(p=0.0), + A.ImageCompression(quality_lower=75, p=0.0)], + bbox_params=A.BboxParams(format='yolo', label_fields=['class_labels'])) + + LOGGER.info(colorstr('albumentations: ') + ', '.join(f'{x}' for x in self.transform.transforms if x.p)) + except ImportError: # package not installed, skip + pass + except Exception as e: + LOGGER.info(colorstr('albumentations: ') + f'{e}') + + def __call__(self, im, labels, p=1.0): + if self.transform and random.random() < p: + new = self.transform(image=im, bboxes=labels[:, 1:], class_labels=labels[:, 0]) # transformed + im, labels = new['image'], np.array([[c, *b] for c, b in zip(new['class_labels'], new['bboxes'])]) + return im, labels + + +def augment_hsv(im, hgain=0.5, sgain=0.5, vgain=0.5): + # HSV color-space augmentation + if hgain or sgain or vgain: + r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains + hue, sat, val = cv2.split(cv2.cvtColor(im, 
cv2.COLOR_BGR2HSV)) + dtype = im.dtype # uint8 + + x = np.arange(0, 256, dtype=r.dtype) + lut_hue = ((x * r[0]) % 180).astype(dtype) + lut_sat = np.clip(x * r[1], 0, 255).astype(dtype) + lut_val = np.clip(x * r[2], 0, 255).astype(dtype) + + im_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))) + cv2.cvtColor(im_hsv, cv2.COLOR_HSV2BGR, dst=im) # no return needed + + +def hist_equalize(im, clahe=True, bgr=False): + # Equalize histogram on BGR image 'im' with im.shape(n,m,3) and range 0-255 + yuv = cv2.cvtColor(im, cv2.COLOR_BGR2YUV if bgr else cv2.COLOR_RGB2YUV) + if clahe: + c = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8)) + yuv[:, :, 0] = c.apply(yuv[:, :, 0]) + else: + yuv[:, :, 0] = cv2.equalizeHist(yuv[:, :, 0]) # equalize Y channel histogram + return cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR if bgr else cv2.COLOR_YUV2RGB) # convert YUV image to RGB + + +def replicate(im, labels): + # Replicate labels + h, w = im.shape[:2] + boxes = labels[:, 1:].astype(int) + x1, y1, x2, y2 = boxes.T + s = ((x2 - x1) + (y2 - y1)) / 2 # side length (pixels) + for i in s.argsort()[:round(s.size * 0.5)]: # smallest indices + x1b, y1b, x2b, y2b = boxes[i] + bh, bw = y2b - y1b, x2b - x1b + yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw)) # offset x, y + x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh] + im[y1a:y2a, x1a:x2a] = im[y1b:y2b, x1b:x2b] # im4[ymin:ymax, xmin:xmax] + labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0) + + return im, labels + + +def letterbox(im, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32): + # Resize and pad image while meeting stride-multiple constraints + shape = im.shape[:2] # current shape [height, width] + if isinstance(new_shape, int): + new_shape = (new_shape, new_shape) + + # Scale ratio (new / old) + r = min(new_shape[0] / shape[0], new_shape[1] / shape[1]) + if not scaleup: # only scale down, do not scale up (for better val mAP) + r = min(r, 1.0) + + # Compute padding + ratio = r, r # width, height ratios + new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r)) + dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding + if auto: # minimum rectangle + dw, dh = np.mod(dw, stride), np.mod(dh, stride) # wh padding + elif scaleFill: # stretch + dw, dh = 0.0, 0.0 + new_unpad = (new_shape[1], new_shape[0]) + ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios + + dw /= 2 # divide padding into 2 sides + dh /= 2 + + if shape[::-1] != new_unpad: # resize + im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR) + top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1)) + left, right = int(round(dw - 0.1)), int(round(dw + 0.1)) + im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border + return im, ratio, (dw, dh) + + +def random_perspective(im, targets=(), segments=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0, + border=(0, 0)): + # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(0.1, 0.1), scale=(0.9, 1.1), shear=(-10, 10)) + # targets = [cls, xyxy] + + height = im.shape[0] + border[0] * 2 # shape(h,w,c) + width = im.shape[1] + border[1] * 2 + + # Center + C = np.eye(3) + C[0, 2] = -im.shape[1] / 2 # x translation (pixels) + C[1, 2] = -im.shape[0] / 2 # y translation (pixels) + + # Perspective + P = np.eye(3) + P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y) + P[2, 1] = 
random.uniform(-perspective, perspective) # y perspective (about x) + + # Rotation and Scale + R = np.eye(3) + a = random.uniform(-degrees, degrees) + # a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations + s = random.uniform(1 - scale, 1 + scale) + # s = 2 ** random.uniform(-scale, scale) + R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s) + + # Shear + S = np.eye(3) + S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg) + S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg) + + # Translation + T = np.eye(3) + T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width # x translation (pixels) + T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height # y translation (pixels) + + # Combined rotation matrix + M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT + if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed + if perspective: + im = cv2.warpPerspective(im, M, dsize=(width, height), borderValue=(114, 114, 114)) + else: # affine + im = cv2.warpAffine(im, M[:2], dsize=(width, height), borderValue=(114, 114, 114)) + + # Visualize + # import matplotlib.pyplot as plt + # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel() + # ax[0].imshow(im[:, :, ::-1]) # base + # ax[1].imshow(im2[:, :, ::-1]) # warped + + # Transform label coordinates + n = len(targets) + if n: + use_segments = any(x.any() for x in segments) + new = np.zeros((n, 4)) + if use_segments: # warp segments + segments = resample_segments(segments) # upsample + for i, segment in enumerate(segments): + xy = np.ones((len(segment), 3)) + xy[:, :2] = segment + xy = xy @ M.T # transform + xy = xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2] # perspective rescale or affine + + # clip + new[i] = segment2box(xy, width, height) + + else: # warp boxes + xy = np.ones((n * 4, 3)) + xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1 + xy = xy @ M.T # transform + xy = (xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]).reshape(n, 8) # perspective rescale or affine + + # create new boxes + x = xy[:, [0, 2, 4, 6]] + y = xy[:, [1, 3, 5, 7]] + new = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T + + # clip + new[:, [0, 2]] = new[:, [0, 2]].clip(0, width) + new[:, [1, 3]] = new[:, [1, 3]].clip(0, height) + + # filter candidates + i = box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01 if use_segments else 0.10) + targets = targets[i] + targets[:, 1:5] = new[i] + + return im, targets + + +def copy_paste(im, labels, segments, p=0.5): + # Implement Copy-Paste augmentation https://arxiv.org/abs/2012.07177, labels as nx5 np.array(cls, xyxy) + n = len(segments) + if p and n: + h, w, c = im.shape # height, width, channels + im_new = np.zeros(im.shape, np.uint8) + for j in random.sample(range(n), k=round(p * n)): + l, s = labels[j], segments[j] + box = w - l[3], l[2], w - l[1], l[4] + ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area + if (ioa < 0.30).all(): # allow 30% obscuration of existing labels + labels = np.concatenate((labels, [[l[0], *box]]), 0) + segments.append(np.concatenate((w - s[:, 0:1], s[:, 1:2]), 1)) + cv2.drawContours(im_new, [segments[j].astype(np.int32)], -1, (255, 255, 255), cv2.FILLED) + + result = cv2.bitwise_and(src1=im, src2=im_new) + result = cv2.flip(result, 1) # augment segments (flip left-right) + i = result > 0 # pixels to replace + # i[:, :] = 
result.max(2).reshape(h, w, 1) # act over ch + im[i] = result[i] # cv2.imwrite('debug.jpg', im) # debug + + return im, labels, segments + + +def cutout(im, labels, p=0.5): + # Applies image cutout augmentation https://arxiv.org/abs/1708.04552 + if random.random() < p: + h, w = im.shape[:2] + scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction + for s in scales: + mask_h = random.randint(1, int(h * s)) # create random masks + mask_w = random.randint(1, int(w * s)) + + # box + xmin = max(0, random.randint(0, w) - mask_w // 2) + ymin = max(0, random.randint(0, h) - mask_h // 2) + xmax = min(w, xmin + mask_w) + ymax = min(h, ymin + mask_h) + + # apply random color mask + im[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)] + + # return unobscured labels + if len(labels) and s > 0.03: + box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32) + ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area + labels = labels[ioa < 0.60] # remove >60% obscured labels + + return labels + + +def mixup(im, labels, im2, labels2): + # Applies MixUp augmentation https://arxiv.org/pdf/1710.09412.pdf + r = np.random.beta(32.0, 32.0) # mixup ratio, alpha=beta=32.0 + im = (im * r + im2 * (1 - r)).astype(np.uint8) + labels = np.concatenate((labels, labels2), 0) + return im, labels + + +def box_candidates(box1, box2, wh_thr=2, ar_thr=100, area_thr=0.1, eps=1e-16): # box1(4,n), box2(4,n) + # Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio + w1, h1 = box1[2] - box1[0], box1[3] - box1[1] + w2, h2 = box2[2] - box2[0], box2[3] - box2[1] + ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps)) # aspect ratio + return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + eps) > area_thr) & (ar < ar_thr) # candidates diff --git a/utils/autoanchor.py b/utils/autoanchor.py new file mode 100644 index 0000000..27d6fb6 --- /dev/null +++ b/utils/autoanchor.py @@ -0,0 +1,165 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +AutoAnchor utils +""" + +import random + +import numpy as np +import torch +import yaml +from tqdm import tqdm + +from utils.general import LOGGER, colorstr, emojis + +PREFIX = colorstr('AutoAnchor: ') + + +def check_anchor_order(m): + # Check anchor order against stride order for YOLOv5 Detect() module m, and correct if necessary + a = m.anchors.prod(-1).view(-1) # anchor area + da = a[-1] - a[0] # delta a + ds = m.stride[-1] - m.stride[0] # delta s + if da.sign() != ds.sign(): # same order + LOGGER.info(f'{PREFIX}Reversing anchor order') + m.anchors[:] = m.anchors.flip(0) + + +def check_anchors(dataset, model, thr=4.0, imgsz=640): + # Check anchor fit to data, recompute if necessary + m = model.module.model[-1] if hasattr(model, 'module') else model.model[-1] # Detect() + shapes = imgsz * dataset.shapes / dataset.shapes.max(1, keepdims=True) + scale = np.random.uniform(0.9, 1.1, size=(shapes.shape[0], 1)) # augment scale + wh = torch.tensor(np.concatenate([l[:, 3:5] * s for s, l in zip(shapes * scale, dataset.labels)])).float() # wh + + def metric(k): # compute metric + r = wh[:, None] / k[None] + x = torch.min(r, 1 / r).min(2)[0] # ratio metric + best = x.max(1)[0] # best_x + aat = (x > 1 / thr).float().sum(1).mean() # anchors above threshold + bpr = (best > 1 / thr).float().mean() # best possible recall + return bpr, aat + + anchors = m.anchors.clone() * m.stride.to(m.anchors.device).view(-1, 1, 1) # current anchors + bpr, aat = metric(anchors.cpu().view(-1, 2)) + s = 
f'\n{PREFIX}{aat:.2f} anchors/target, {bpr:.3f} Best Possible Recall (BPR). ' + if bpr > 0.98: # threshold to recompute + LOGGER.info(emojis(f'{s}Current anchors are a good fit to dataset ✅')) + else: + LOGGER.info(emojis(f'{s}Anchors are a poor fit to dataset ⚠️, attempting to improve...')) + na = m.anchors.numel() // 2 # number of anchors + try: + anchors = kmean_anchors(dataset, n=na, img_size=imgsz, thr=thr, gen=1000, verbose=False) + except Exception as e: + LOGGER.info(f'{PREFIX}ERROR: {e}') + new_bpr = metric(anchors)[0] + if new_bpr > bpr: # replace anchors + anchors = torch.tensor(anchors, device=m.anchors.device).type_as(m.anchors) + m.anchors[:] = anchors.clone().view_as(m.anchors) / m.stride.to(m.anchors.device).view(-1, 1, 1) # loss + check_anchor_order(m) + LOGGER.info(f'{PREFIX}New anchors saved to model. Update model *.yaml to use these anchors in the future.') + else: + LOGGER.info(f'{PREFIX}Original anchors better than new anchors. Proceeding with original anchors.') + + +def kmean_anchors(dataset='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen=1000, verbose=True): + """ Creates kmeans-evolved anchors from training dataset + + Arguments: + dataset: path to data.yaml, or a loaded dataset + n: number of anchors + img_size: image size used for training + thr: anchor-label wh ratio threshold hyperparameter hyp['anchor_t'] used for training, default=4.0 + gen: generations to evolve anchors using genetic algorithm + verbose: print all results + + Return: + k: kmeans evolved anchors + + Usage: + from utils.autoanchor import *; _ = kmean_anchors() + """ + from scipy.cluster.vq import kmeans + + npr = np.random + thr = 1 / thr + + def metric(k, wh): # compute metrics + r = wh[:, None] / k[None] + x = torch.min(r, 1 / r).min(2)[0] # ratio metric + # x = wh_iou(wh, torch.tensor(k)) # iou metric + return x, x.max(1)[0] # x, best_x + + def anchor_fitness(k): # mutation fitness + _, best = metric(torch.tensor(k, dtype=torch.float32), wh) + return (best * (best > thr).float()).mean() # fitness + + def print_results(k, verbose=True): + k = k[np.argsort(k.prod(1))] # sort small to large + x, best = metric(k, wh0) + bpr, aat = (best > thr).float().mean(), (x > thr).float().mean() * n # best possible recall, anch > thr + s = f'{PREFIX}thr={thr:.2f}: {bpr:.4f} best possible recall, {aat:.2f} anchors past thr\n' \ + f'{PREFIX}n={n}, img_size={img_size}, metric_all={x.mean():.3f}/{best.mean():.3f}-mean/best, ' \ + f'past_thr={x[x > thr].mean():.3f}-mean: ' + for i, x in enumerate(k): + s += '%i,%i, ' % (round(x[0]), round(x[1])) + if verbose: + LOGGER.info(s[:-2]) + return k + + if isinstance(dataset, str): # *.yaml file + with open(dataset, errors='ignore') as f: + data_dict = yaml.safe_load(f) # model dict + from utils.datasets import LoadImagesAndLabels + dataset = LoadImagesAndLabels(data_dict['train'], augment=True, rect=True) + + # Get label wh + shapes = img_size * dataset.shapes / dataset.shapes.max(1, keepdims=True) + wh0 = np.concatenate([l[:, 3:5] * s for s, l in zip(shapes, dataset.labels)]) # wh + + # Filter + i = (wh0 < 3.0).any(1).sum() + if i: + LOGGER.info(f'{PREFIX}WARNING: Extremely small objects found. 
{i} of {len(wh0)} labels are < 3 pixels in size.') + wh = wh0[(wh0 >= 2.0).any(1)] # filter > 2 pixels + # wh = wh * (npr.rand(wh.shape[0], 1) * 0.9 + 0.1) # multiply by random scale 0-1 + + # Kmeans calculation + LOGGER.info(f'{PREFIX}Running kmeans for {n} anchors on {len(wh)} points...') + s = wh.std(0) # sigmas for whitening + k = kmeans(wh / s, n, iter=30)[0] * s # points + if len(k) != n: # kmeans may return fewer points than requested if wh is insufficient or too similar + LOGGER.warning(f'{PREFIX}WARNING: scipy.cluster.vq.kmeans returned only {len(k)} of {n} requested points') + k = np.sort(npr.rand(n * 2)).reshape(n, 2) * img_size # random init + wh = torch.tensor(wh, dtype=torch.float32) # filtered + wh0 = torch.tensor(wh0, dtype=torch.float32) # unfiltered + k = print_results(k, verbose=False) + + # Plot + # k, d = [None] * 20, [None] * 20 + # for i in tqdm(range(1, 21)): + # k[i-1], d[i-1] = kmeans(wh / s, i) # points, mean distance + # fig, ax = plt.subplots(1, 2, figsize=(14, 7), tight_layout=True) + # ax = ax.ravel() + # ax[0].plot(np.arange(1, 21), np.array(d) ** 2, marker='.') + # fig, ax = plt.subplots(1, 2, figsize=(14, 7)) # plot wh + # ax[0].hist(wh[wh[:, 0]<100, 0],400) + # ax[1].hist(wh[wh[:, 1]<100, 1],400) + # fig.savefig('wh.png', dpi=200) + + # Evolve + f, sh, mp, s = anchor_fitness(k), k.shape, 0.9, 0.1 # fitness, generations, mutation prob, sigma + pbar = tqdm(range(gen), desc=f'{PREFIX}Evolving anchors with Genetic Algorithm:') # progress bar + for _ in pbar: + v = np.ones(sh) + while (v == 1).all(): # mutate until a change occurs (prevent duplicates) + v = ((npr.random(sh) < mp) * random.random() * npr.randn(*sh) * s + 1).clip(0.3, 3.0) + kg = (k.copy() * v).clip(min=2.0) + fg = anchor_fitness(kg) + if fg > f: + f, k = fg, kg.copy() + pbar.desc = f'{PREFIX}Evolving anchors with Genetic Algorithm: fitness = {f:.4f}' + if verbose: + print_results(k, verbose) + + return print_results(k) diff --git a/utils/autobatch.py b/utils/autobatch.py new file mode 100644 index 0000000..cb94f04 --- /dev/null +++ b/utils/autobatch.py @@ -0,0 +1,57 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Auto-batch utils +""" + +from copy import deepcopy + +import numpy as np +import torch +from torch.cuda import amp + +from utils.general import LOGGER, colorstr +from utils.torch_utils import profile + + +def check_train_batch_size(model, imgsz=640): + # Check YOLOv5 training batch size + with amp.autocast(): + return autobatch(deepcopy(model).train(), imgsz) # compute optimal batch size + + +def autobatch(model, imgsz=640, fraction=0.9, batch_size=16): + # Automatically estimate best batch size to use `fraction` of available CUDA memory + # Usage: + # import torch + # from utils.autobatch import autobatch + # model = torch.hub.load('ultralytics/yolov5', 'yolov5s', autoshape=False) + # print(autobatch(model)) + + prefix = colorstr('AutoBatch: ') + LOGGER.info(f'{prefix}Computing optimal batch size for --imgsz {imgsz}') + device = next(model.parameters()).device # get model device + if device.type == 'cpu': + LOGGER.info(f'{prefix}CUDA not detected, using default CPU batch-size {batch_size}') + return batch_size + + d = str(device).upper() # 'CUDA:0' + properties = torch.cuda.get_device_properties(device) # device properties + t = properties.total_memory / 1024 ** 3 # (GiB) + r = torch.cuda.memory_reserved(device) / 1024 ** 3 # (GiB) + a = torch.cuda.memory_allocated(device) / 1024 ** 3 # (GiB) + f = t - (r + a) # free inside reserved + LOGGER.info(f'{prefix}{d} ({properties.name}) 
{t:.2f}G total, {r:.2f}G reserved, {a:.2f}G allocated, {f:.2f}G free') + + batch_sizes = [1, 2, 4, 8, 16] + try: + img = [torch.zeros(b, 3, imgsz, imgsz) for b in batch_sizes] + y = profile(img, model, n=3, device=device) + except Exception as e: + LOGGER.warning(f'{prefix}{e}') + + y = [x[2] for x in y if x] # memory [2] + batch_sizes = batch_sizes[:len(y)] + p = np.polyfit(batch_sizes, y, deg=1) # first degree polynomial fit + b = int((f * fraction - p[1]) / p[0]) # y intercept (optimal batch size) + LOGGER.info(f'{prefix}Using batch-size {b} for {d} {t * fraction:.2f}G/{t:.2f}G ({fraction * 100:.0f}%)') + return b diff --git a/utils/benchmarks.py b/utils/benchmarks.py new file mode 100644 index 0000000..962df81 --- /dev/null +++ b/utils/benchmarks.py @@ -0,0 +1,92 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Run YOLOv5 benchmarks on all supported export formats + +Format | `export.py --include` | Model +--- | --- | --- +PyTorch | - | yolov5s.pt +TorchScript | `torchscript` | yolov5s.torchscript +ONNX | `onnx` | yolov5s.onnx +OpenVINO | `openvino` | yolov5s_openvino_model/ +TensorRT | `engine` | yolov5s.engine +CoreML | `coreml` | yolov5s.mlmodel +TensorFlow SavedModel | `saved_model` | yolov5s_saved_model/ +TensorFlow GraphDef | `pb` | yolov5s.pb +TensorFlow Lite | `tflite` | yolov5s.tflite +TensorFlow Edge TPU | `edgetpu` | yolov5s_edgetpu.tflite +TensorFlow.js | `tfjs` | yolov5s_web_model/ + +Requirements: + $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime openvino-dev tensorflow-cpu # CPU + $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime-gpu openvino-dev tensorflow # GPU + +Usage: + $ python utils/benchmarks.py --weights yolov5s.pt --img 640 +""" + +import argparse +import sys +import time +from pathlib import Path + +import pandas as pd + +FILE = Path(__file__).resolve() +ROOT = FILE.parents[1] # YOLOv5 root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH +# ROOT = ROOT.relative_to(Path.cwd()) # relative + +import export +import val +from utils import notebook_init +from utils.general import LOGGER, print_args + + +def run(weights=ROOT / 'yolov5s.pt', # weights path + imgsz=640, # inference size (pixels) + batch_size=1, # batch size + data=ROOT / 'data/coco128.yaml', # dataset.yaml path + ): + y, t = [], time.time() + formats = export.export_formats() + for i, (name, f, suffix) in formats.iterrows(): # index, (name, file, suffix) + try: + w = weights if f == '-' else export.run(weights=weights, imgsz=[imgsz], include=[f], device='cpu')[-1] + assert suffix in str(w), 'export failed' + result = val.run(data, w, batch_size, imgsz=imgsz, plots=False, device='cpu', task='benchmark') + metrics = result[0] # metrics (mp, mr, map50, map, *losses(box, obj, cls)) + speeds = result[2] # times (preprocess, inference, postprocess) + y.append([name, metrics[3], speeds[1]]) # mAP, t_inference + except Exception as e: + LOGGER.warning(f'WARNING: Benchmark failure for {name}: {e}') + y.append([name, None, None]) # mAP, t_inference + + # Print results + LOGGER.info('\n') + parse_opt() + notebook_init() # print system info + py = pd.DataFrame(y, columns=['Format', 'mAP@0.5:0.95', 'Inference time (ms)']) + LOGGER.info(f'\nBenchmarks complete ({time.time() - t:.2f}s)') + LOGGER.info(str(py)) + return py + + +def parse_opt(): + parser = argparse.ArgumentParser() + parser.add_argument('--weights', type=str, default=ROOT / 'yolov5s.pt', help='weights path') + parser.add_argument('--imgsz', '--img', 
'--img-size', type=int, default=640, help='inference size (pixels)') + parser.add_argument('--batch-size', type=int, default=1, help='batch size') + parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path') + opt = parser.parse_args() + print_args(FILE.stem, opt) + return opt + + +def main(opt): + run(**vars(opt)) + + +if __name__ == "__main__": + opt = parse_opt() + main(opt) diff --git a/utils/callbacks.py b/utils/callbacks.py new file mode 100644 index 0000000..c51c268 --- /dev/null +++ b/utils/callbacks.py @@ -0,0 +1,78 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Callback utils +""" + + +class Callbacks: + """" + Handles all registered callbacks for YOLOv5 Hooks + """ + + def __init__(self): + # Define the available callbacks + self._callbacks = { + 'on_pretrain_routine_start': [], + 'on_pretrain_routine_end': [], + + 'on_train_start': [], + 'on_train_epoch_start': [], + 'on_train_batch_start': [], + 'optimizer_step': [], + 'on_before_zero_grad': [], + 'on_train_batch_end': [], + 'on_train_epoch_end': [], + + 'on_val_start': [], + 'on_val_batch_start': [], + 'on_val_image_end': [], + 'on_val_batch_end': [], + 'on_val_end': [], + + 'on_fit_epoch_end': [], # fit = train + val + 'on_model_save': [], + 'on_train_end': [], + 'on_params_update': [], + 'teardown': [], + } + self.stop_training = False # set True to interrupt training + + def register_action(self, hook, name='', callback=None): + """ + Register a new action to a callback hook + + Args: + hook The callback hook name to register the action to + name The name of the action for later reference + callback The callback to fire + """ + assert hook in self._callbacks, f"hook '{hook}' not found in callbacks {self._callbacks}" + assert callable(callback), f"callback '{callback}' is not callable" + self._callbacks[hook].append({'name': name, 'callback': callback}) + + def get_registered_actions(self, hook=None): + """" + Returns all the registered actions by callback hook + + Args: + hook The name of the hook to check, defaults to all + """ + if hook: + return self._callbacks[hook] + else: + return self._callbacks + + def run(self, hook, *args, **kwargs): + """ + Loop through the registered actions and fire all callbacks + + Args: + hook The name of the hook to check, defaults to all + args Arguments to receive from YOLOv5 + kwargs Keyword Arguments to receive from YOLOv5 + """ + + assert hook in self._callbacks, f"hook '{hook}' not found in callbacks {self._callbacks}" + + for logger in self._callbacks[hook]: + logger['callback'](*args, **kwargs) diff --git a/utils/dataloaders.py b/utils/dataloaders.py new file mode 100644 index 0000000..00f6413 --- /dev/null +++ b/utils/dataloaders.py @@ -0,0 +1,1092 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Dataloaders and dataset utils +""" + +import contextlib +import glob +import hashlib +import json +import math +import os +import random +import shutil +import time +from itertools import repeat +from multiprocessing.pool import Pool, ThreadPool +from pathlib import Path +from threading import Thread +from urllib.parse import urlparse +from zipfile import ZipFile + +import numpy as np +import torch +import torch.nn.functional as F +import yaml +from PIL import ExifTags, Image, ImageOps +from torch.utils.data import DataLoader, Dataset, dataloader, distributed +from tqdm import tqdm + +from utils.augmentations import Albumentations, augment_hsv, copy_paste, letterbox, mixup, random_perspective +from utils.general import (DATASETS_DIR, 
LOGGER, NUM_THREADS, check_dataset, check_requirements, check_yaml, clean_str, + cv2, is_colab, is_kaggle, segments2boxes, xyn2xy, xywh2xyxy, xywhn2xyxy, xyxy2xywhn) +from utils.torch_utils import torch_distributed_zero_first + +# Parameters +HELP_URL = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data' +IMG_FORMATS = 'bmp', 'dng', 'jpeg', 'jpg', 'mpo', 'png', 'tif', 'tiff', 'webp' # include image suffixes +VID_FORMATS = 'asf', 'avi', 'gif', 'm4v', 'mkv', 'mov', 'mp4', 'mpeg', 'mpg', 'ts', 'wmv' # include video suffixes +BAR_FORMAT = '{l_bar}{bar:10}{r_bar}{bar:-10b}' # tqdm bar format +LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html + +# Get orientation exif tag +for orientation in ExifTags.TAGS.keys(): + if ExifTags.TAGS[orientation] == 'Orientation': + break + + +def get_hash(paths): + # Returns a single hash value of a list of paths (files or dirs) + size = sum(os.path.getsize(p) for p in paths if os.path.exists(p)) # sizes + h = hashlib.md5(str(size).encode()) # hash sizes + h.update(''.join(paths).encode()) # hash paths + return h.hexdigest() # return hash + + +def exif_size(img): + # Returns exif-corrected PIL size + s = img.size # (width, height) + with contextlib.suppress(Exception): + rotation = dict(img._getexif().items())[orientation] + if rotation in [6, 8]: # rotation 270 or 90 + s = (s[1], s[0]) + return s + + +def exif_transpose(image): + """ + Transpose a PIL image accordingly if it has an EXIF Orientation tag. + Inplace version of https://github.com/python-pillow/Pillow/blob/master/src/PIL/ImageOps.py exif_transpose() + + :param image: The image to transpose. + :return: An image. + """ + exif = image.getexif() + orientation = exif.get(0x0112, 1) # default 1 + if orientation > 1: + method = { + 2: Image.FLIP_LEFT_RIGHT, + 3: Image.ROTATE_180, + 4: Image.FLIP_TOP_BOTTOM, + 5: Image.TRANSPOSE, + 6: Image.ROTATE_270, + 7: Image.TRANSVERSE, + 8: Image.ROTATE_90,}.get(orientation) + if method is not None: + image = image.transpose(method) + del exif[0x0112] + image.info["exif"] = exif.tobytes() + return image + + +def seed_worker(worker_id): + # Set dataloader worker seed https://pytorch.org/docs/stable/notes/randomness.html#dataloader + worker_seed = torch.initial_seed() % 2 ** 32 + np.random.seed(worker_seed) + random.seed(worker_seed) + + +def create_dataloader(path, + imgsz, + batch_size, + stride, + single_cls=False, + hyp=None, + augment=False, + cache=False, + pad=0.0, + rect=False, + rank=-1, + workers=8, + image_weights=False, + quad=False, + prefix='', + shuffle=False): + if rect and shuffle: + LOGGER.warning('WARNING: --rect is incompatible with DataLoader shuffle, setting shuffle=False') + shuffle = False + with torch_distributed_zero_first(rank): # init dataset *.cache only once if DDP + dataset = LoadImagesAndLabels( + path, + imgsz, + batch_size, + augment=augment, # augmentation + hyp=hyp, # hyperparameters + rect=rect, # rectangular batches + cache_images=cache, + single_cls=single_cls, + stride=int(stride), + pad=pad, + image_weights=image_weights, + prefix=prefix) + + batch_size = min(batch_size, len(dataset)) + nd = torch.cuda.device_count() # number of CUDA devices + nw = min([os.cpu_count() // max(nd, 1), batch_size if batch_size > 1 else 0, workers]) # number of workers + sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle) + loader = DataLoader if image_weights else InfiniteDataLoader # only DataLoader allows for attribute updates + generator = 
torch.Generator() + generator.manual_seed(0) + return loader(dataset, + batch_size=batch_size, + shuffle=shuffle and sampler is None, + num_workers=nw, + sampler=sampler, + pin_memory=True, + collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn, + worker_init_fn=seed_worker, + generator=generator), dataset + + +class InfiniteDataLoader(dataloader.DataLoader): + """ Dataloader that reuses workers + + Uses same syntax as vanilla DataLoader + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler)) + self.iterator = super().__iter__() + + def __len__(self): + return len(self.batch_sampler.sampler) + + def __iter__(self): + for _ in range(len(self)): + yield next(self.iterator) + + +class _RepeatSampler: + """ Sampler that repeats forever + + Args: + sampler (Sampler) + """ + + def __init__(self, sampler): + self.sampler = sampler + + def __iter__(self): + while True: + yield from iter(self.sampler) + + +class LoadImages: + # YOLOv5 image/video dataloader, i.e. `python detect.py --source image.jpg/vid.mp4` + def __init__(self, path, img_size=640, stride=32, auto=True): + files = [] + for p in sorted(path) if isinstance(path, (list, tuple)) else [path]: + p = str(Path(p).resolve()) + if '*' in p: + files.extend(sorted(glob.glob(p, recursive=True))) # glob + elif os.path.isdir(p): + files.extend(sorted(glob.glob(os.path.join(p, '*.*')))) # dir + elif os.path.isfile(p): + files.append(p) # files + else: + raise FileNotFoundError(f'{p} does not exist') + + images = [x for x in files if x.split('.')[-1].lower() in IMG_FORMATS] + videos = [x for x in files if x.split('.')[-1].lower() in VID_FORMATS] + ni, nv = len(images), len(videos) + + self.img_size = img_size + self.stride = stride + self.files = images + videos + self.nf = ni + nv # number of files + self.video_flag = [False] * ni + [True] * nv + self.mode = 'image' + self.auto = auto + if any(videos): + self.new_video(videos[0]) # new video + else: + self.cap = None + assert self.nf > 0, f'No images or videos found in {p}. ' \ + f'Supported formats are:\nimages: {IMG_FORMATS}\nvideos: {VID_FORMATS}' + + def __iter__(self): + self.count = 0 + return self + + def __next__(self): + if self.count == self.nf: + raise StopIteration + path = self.files[self.count] + + if self.video_flag[self.count]: + # Read video + self.mode = 'video' + ret_val, img0 = self.cap.read() + while not ret_val: + self.count += 1 + self.cap.release() + if self.count == self.nf: # last video + raise StopIteration + path = self.files[self.count] + self.new_video(path) + ret_val, img0 = self.cap.read() + + self.frame += 1 + s = f'video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: ' + + else: + # Read image + self.count += 1 + img0 = cv2.imread(path) # BGR + assert img0 is not None, f'Image Not Found {path}' + s = f'image {self.count}/{self.nf} {path}: ' + + # Padded resize + img = letterbox(img0, self.img_size, stride=self.stride, auto=self.auto)[0] + + # Convert + img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB + img = np.ascontiguousarray(img) + + return path, img, img0, self.cap, s + + def new_video(self, path): + self.frame = 0 + self.cap = cv2.VideoCapture(path) + self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT)) + + def __len__(self): + return self.nf # number of files + + +class LoadWebcam: # for inference + # YOLOv5 local webcam dataloader, i.e. 
`python detect.py --source 0` + def __init__(self, pipe='0', img_size=640, stride=32): + self.img_size = img_size + self.stride = stride + self.pipe = eval(pipe) if pipe.isnumeric() else pipe + self.cap = cv2.VideoCapture(self.pipe) # video capture object + self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3) # set buffer size + + def __iter__(self): + self.count = -1 + return self + + def __next__(self): + self.count += 1 + if cv2.waitKey(1) == ord('q'): # q to quit + self.cap.release() + cv2.destroyAllWindows() + raise StopIteration + + # Read frame + ret_val, img0 = self.cap.read() + img0 = cv2.flip(img0, 1) # flip left-right + + # Print + assert ret_val, f'Camera Error {self.pipe}' + img_path = 'webcam.jpg' + s = f'webcam {self.count}: ' + + # Padded resize + img = letterbox(img0, self.img_size, stride=self.stride)[0] + + # Convert + img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB + img = np.ascontiguousarray(img) + + return img_path, img, img0, None, s + + def __len__(self): + return 0 + + +class LoadStreams: + # YOLOv5 streamloader, i.e. `python detect.py --source 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP streams` + def __init__(self, sources='streams.txt', img_size=640, stride=32, auto=True): + self.mode = 'stream' + self.img_size = img_size + self.stride = stride + + if os.path.isfile(sources): + with open(sources) as f: + sources = [x.strip() for x in f.read().strip().splitlines() if len(x.strip())] + else: + sources = [sources] + + n = len(sources) + self.imgs, self.fps, self.frames, self.threads = [None] * n, [0] * n, [0] * n, [None] * n + self.sources = [clean_str(x) for x in sources] # clean source names for later + self.auto = auto + for i, s in enumerate(sources): # index, source + # Start thread to read frames from video stream + st = f'{i + 1}/{n}: {s}... ' + if urlparse(s).hostname in ('www.youtube.com', 'youtube.com', 'youtu.be'): # if source is YouTube video + check_requirements(('pafy', 'youtube_dl==2020.12.2')) + import pafy + s = pafy.new(s).getbest(preftype="mp4").url # YouTube URL + s = eval(s) if s.isnumeric() else s # i.e. s = '0' local webcam + if s == 0: + assert not is_colab(), '--source 0 webcam unsupported on Colab. Rerun command in a local environment.' + assert not is_kaggle(), '--source 0 webcam unsupported on Kaggle. Rerun command in a local environment.' + cap = cv2.VideoCapture(s) + assert cap.isOpened(), f'{st}Failed to open {s}' + w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) + h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) + fps = cap.get(cv2.CAP_PROP_FPS) # warning: may return 0 or nan + self.frames[i] = max(int(cap.get(cv2.CAP_PROP_FRAME_COUNT)), 0) or float('inf') # infinite stream fallback + self.fps[i] = max((fps if math.isfinite(fps) else 0) % 100, 0) or 30 # 30 FPS fallback + + _, self.imgs[i] = cap.read() # guarantee first frame + self.threads[i] = Thread(target=self.update, args=([i, cap, s]), daemon=True) + LOGGER.info(f"{st} Success ({self.frames[i]} frames {w}x{h} at {self.fps[i]:.2f} FPS)") + self.threads[i].start() + LOGGER.info('') # newline + + # check for common shapes + s = np.stack([letterbox(x, self.img_size, stride=self.stride, auto=self.auto)[0].shape for x in self.imgs]) + self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal + if not self.rect: + LOGGER.warning('WARNING: Stream shapes differ. 
For optimal performance supply similarly-shaped streams.') + + def update(self, i, cap, stream): + # Read stream `i` frames in daemon thread + n, f, read = 0, self.frames[i], 1 # frame number, frame array, inference every 'read' frame + while cap.isOpened() and n < f: + n += 1 + # _, self.imgs[index] = cap.read() + cap.grab() + if n % read == 0: + success, im = cap.retrieve() + if success: + self.imgs[i] = im + else: + LOGGER.warning('WARNING: Video stream unresponsive, please check your IP camera connection.') + self.imgs[i] = np.zeros_like(self.imgs[i]) + cap.open(stream) # re-open stream if signal was lost + time.sleep(0.0) # wait time + + def __iter__(self): + self.count = -1 + return self + + def __next__(self): + self.count += 1 + if not all(x.is_alive() for x in self.threads) or cv2.waitKey(1) == ord('q'): # q to quit + cv2.destroyAllWindows() + raise StopIteration + + # Letterbox + img0 = self.imgs.copy() + img = [letterbox(x, self.img_size, stride=self.stride, auto=self.rect and self.auto)[0] for x in img0] + + # Stack + img = np.stack(img, 0) + + # Convert + img = img[..., ::-1].transpose((0, 3, 1, 2)) # BGR to RGB, BHWC to BCHW + img = np.ascontiguousarray(img) + + return self.sources, img, img0, None, '' + + def __len__(self): + return len(self.sources) # 1E12 frames = 32 streams at 30 FPS for 30 years + + +def img2label_paths(img_paths): + # Define label paths as a function of image paths + sa, sb = f'{os.sep}images{os.sep}', f'{os.sep}labels{os.sep}' # /images/, /labels/ substrings + return [sb.join(x.rsplit(sa, 1)).rsplit('.', 1)[0] + '.txt' for x in img_paths] + + +class LoadImagesAndLabels(Dataset): + # YOLOv5 train_loader/val_loader, loads images and labels for training and validation + cache_version = 0.6 # dataset labels *.cache version + rand_interp_methods = [cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_CUBIC, cv2.INTER_AREA, cv2.INTER_LANCZOS4] + + def __init__(self, + path, + img_size=640, + batch_size=16, + augment=False, + hyp=None, + rect=False, + image_weights=False, + cache_images=False, + single_cls=False, + stride=32, + pad=0.0, + prefix=''): + self.img_size = img_size + self.augment = augment + self.hyp = hyp + self.image_weights = image_weights + self.rect = False if image_weights else rect + self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training) + self.mosaic_border = [-img_size // 2, -img_size // 2] + self.stride = stride + self.path = path + self.albumentations = Albumentations() if augment else None + + try: + f = [] # image files + for p in path if isinstance(path, list) else [path]: + p = Path(p) # os-agnostic + if p.is_dir(): # dir + f += glob.glob(str(p / '**' / '*.*'), recursive=True) + # f = list(p.rglob('*.*')) # pathlib + elif p.is_file(): # file + with open(p) as t: + t = t.read().strip().splitlines() + parent = str(p.parent) + os.sep + f += [x.replace('./', parent) if x.startswith('./') else x for x in t] # local to global path + # f += [p.parent / x.lstrip(os.sep) for x in t] # local to global path (pathlib) + else: + raise FileNotFoundError(f'{prefix}{p} does not exist') + self.im_files = sorted(x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in IMG_FORMATS) + # self.img_files = sorted([x for x in f if x.suffix[1:].lower() in IMG_FORMATS]) # pathlib + assert self.im_files, f'{prefix}No images found' + except Exception as e: + raise Exception(f'{prefix}Error loading data from {path}: {e}\nSee {HELP_URL}') + + # Check cache + self.label_files = img2label_paths(self.im_files) 
# labels + cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache') + try: + cache, exists = np.load(cache_path, allow_pickle=True).item(), True # load dict + assert cache['version'] == self.cache_version # matches current version + assert cache['hash'] == get_hash(self.label_files + self.im_files) # identical hash + except Exception: + cache, exists = self.cache_labels(cache_path, prefix), False # run cache ops + + # Display cache + nf, nm, ne, nc, n = cache.pop('results') # found, missing, empty, corrupt, total + if exists and LOCAL_RANK in {-1, 0}: + d = f"Scanning '{cache_path}' images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupt" + tqdm(None, desc=prefix + d, total=n, initial=n, bar_format=BAR_FORMAT) # display cache results + if cache['msgs']: + LOGGER.info('\n'.join(cache['msgs'])) # display warnings + assert nf > 0 or not augment, f'{prefix}No labels in {cache_path}. Can not train without labels. See {HELP_URL}' + + # Read cache + [cache.pop(k) for k in ('hash', 'version', 'msgs')] # remove items + labels, shapes, self.segments = zip(*cache.values()) + self.labels = list(labels) + self.shapes = np.array(shapes) + self.im_files = list(cache.keys()) # update + self.label_files = img2label_paths(cache.keys()) # update + n = len(shapes) # number of images + bi = np.floor(np.arange(n) / batch_size).astype(np.int) # batch index + nb = bi[-1] + 1 # number of batches + self.batch = bi # batch index of image + self.n = n + self.indices = range(n) + + # Update labels + include_class = [] # filter labels to include only these classes (optional) + include_class_array = np.array(include_class).reshape(1, -1) + for i, (label, segment) in enumerate(zip(self.labels, self.segments)): + if include_class: + j = (label[:, 0:1] == include_class_array).any(1) + self.labels[i] = label[j] + if segment: + self.segments[i] = segment[j] + if single_cls: # single-class training, merge all classes into 0 + self.labels[i][:, 0] = 0 + if segment: + self.segments[i][:, 0] = 0 + + # Rectangular Training + if self.rect: + # Sort by aspect ratio + s = self.shapes # wh + ar = s[:, 1] / s[:, 0] # aspect ratio + irect = ar.argsort() + self.im_files = [self.im_files[i] for i in irect] + self.label_files = [self.label_files[i] for i in irect] + self.labels = [self.labels[i] for i in irect] + self.shapes = s[irect] # wh + ar = ar[irect] + + # Set training image shapes + shapes = [[1, 1]] * nb + for i in range(nb): + ari = ar[bi == i] + mini, maxi = ari.min(), ari.max() + if maxi < 1: + shapes[i] = [maxi, 1] + elif mini > 1: + shapes[i] = [1, 1 / mini] + + self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(np.int) * stride + + # Cache images into RAM/disk for faster training (WARNING: large datasets may exceed system resources) + self.ims = [None] * n + self.npy_files = [Path(f).with_suffix('.npy') for f in self.im_files] + if cache_images: + gb = 0 # Gigabytes of cached images + self.im_hw0, self.im_hw = [None] * n, [None] * n + fcn = self.cache_images_to_disk if cache_images == 'disk' else self.load_image + results = ThreadPool(NUM_THREADS).imap(fcn, range(n)) + pbar = tqdm(enumerate(results), total=n, bar_format=BAR_FORMAT, disable=LOCAL_RANK > 0) + for i, x in pbar: + if cache_images == 'disk': + gb += self.npy_files[i].stat().st_size + else: # 'ram' + self.ims[i], self.im_hw0[i], self.im_hw[i] = x # im, hw_orig, hw_resized = load_image(self, i) + gb += self.ims[i].nbytes + pbar.desc = f'{prefix}Caching images ({gb / 1E9:.1f}GB 
{cache_images})' + pbar.close() + + def cache_labels(self, path=Path('./labels.cache'), prefix=''): + # Cache dataset labels, check images and read shapes + x = {} # dict + nm, nf, ne, nc, msgs = 0, 0, 0, 0, [] # number missing, found, empty, corrupt, messages + desc = f"{prefix}Scanning '{path.parent / path.stem}' images and labels..." + with Pool(NUM_THREADS) as pool: + pbar = tqdm(pool.imap(verify_image_label, zip(self.im_files, self.label_files, repeat(prefix))), + desc=desc, + total=len(self.im_files), + bar_format=BAR_FORMAT) + for im_file, lb, shape, segments, nm_f, nf_f, ne_f, nc_f, msg in pbar: + nm += nm_f + nf += nf_f + ne += ne_f + nc += nc_f + if im_file: + x[im_file] = [lb, shape, segments] + if msg: + msgs.append(msg) + pbar.desc = f"{desc}{nf} found, {nm} missing, {ne} empty, {nc} corrupt" + + pbar.close() + if msgs: + LOGGER.info('\n'.join(msgs)) + if nf == 0: + LOGGER.warning(f'{prefix}WARNING: No labels found in {path}. See {HELP_URL}') + x['hash'] = get_hash(self.label_files + self.im_files) + x['results'] = nf, nm, ne, nc, len(self.im_files) + x['msgs'] = msgs # warnings + x['version'] = self.cache_version # cache version + try: + np.save(path, x) # save cache for next time + path.with_suffix('.cache.npy').rename(path) # remove .npy suffix + LOGGER.info(f'{prefix}New cache created: {path}') + except Exception as e: + LOGGER.warning(f'{prefix}WARNING: Cache directory {path.parent} is not writeable: {e}') # not writeable + return x + + def __len__(self): + return len(self.im_files) + + # def __iter__(self): + # self.count = -1 + # print('ran dataset iter') + # #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF) + # return self + + def __getitem__(self, index): + index = self.indices[index] # linear, shuffled, or image_weights + + hyp = self.hyp + mosaic = self.mosaic and random.random() < hyp['mosaic'] + if mosaic: + # Load mosaic + img, labels = self.load_mosaic(index) + shapes = None + + # MixUp augmentation + if random.random() < hyp['mixup']: + img, labels = mixup(img, labels, *self.load_mosaic(random.randint(0, self.n - 1))) + + else: + # Load image + img, (h0, w0), (h, w) = self.load_image(index) + + # Letterbox + shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape + img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment) + shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling + + labels = self.labels[index].copy() + if labels.size: # normalized xywh to pixel xyxy format + labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1]) + + if self.augment: + img, labels = random_perspective(img, + labels, + degrees=hyp['degrees'], + translate=hyp['translate'], + scale=hyp['scale'], + shear=hyp['shear'], + perspective=hyp['perspective']) + + nl = len(labels) # number of labels + if nl: + labels[:, 1:5] = xyxy2xywhn(labels[:, 1:5], w=img.shape[1], h=img.shape[0], clip=True, eps=1E-3) + + if self.augment: + # Albumentations + img, labels = self.albumentations(img, labels) + nl = len(labels) # update after albumentations + + # HSV color-space + augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v']) + + # Flip up-down + if random.random() < hyp['flipud']: + img = np.flipud(img) + if nl: + labels[:, 2] = 1 - labels[:, 2] + + # Flip left-right + if random.random() < hyp['fliplr']: + img = np.fliplr(img) + if nl: + labels[:, 1] = 1 - labels[:, 1] + + # Cutouts + # labels = cutout(img, labels, 
p=0.5) + # nl = len(labels) # update after cutout + + labels_out = torch.zeros((nl, 6)) + if nl: + labels_out[:, 1:] = torch.from_numpy(labels) + + # Convert + img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB + img = np.ascontiguousarray(img) + + return torch.from_numpy(img), labels_out, self.im_files[index], shapes + + def load_image(self, i): + # Loads 1 image from dataset index 'i', returns (im, original hw, resized hw) + im, f, fn = self.ims[i], self.im_files[i], self.npy_files[i], + if im is None: # not cached in RAM + if fn.exists(): # load npy + im = np.load(fn) + else: # read image + im = cv2.imread(f) # BGR + assert im is not None, f'Image Not Found {f}' + h0, w0 = im.shape[:2] # orig hw + r = self.img_size / max(h0, w0) # ratio + if r != 1: # if sizes are not equal + interp = cv2.INTER_LINEAR if (self.augment or r > 1) else cv2.INTER_AREA + im = cv2.resize(im, (int(w0 * r), int(h0 * r)), interpolation=interp) + return im, (h0, w0), im.shape[:2] # im, hw_original, hw_resized + return self.ims[i], self.im_hw0[i], self.im_hw[i] # im, hw_original, hw_resized + + def cache_images_to_disk(self, i): + # Saves an image as an *.npy file for faster loading + f = self.npy_files[i] + if not f.exists(): + np.save(f.as_posix(), cv2.imread(self.im_files[i])) + + def load_mosaic(self, index): + # YOLOv5 4-mosaic loader. Loads 1 image + 3 random images into a 4-image mosaic + labels4, segments4 = [], [] + s = self.img_size + yc, xc = (int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border) # mosaic center x, y + indices = [index] + random.choices(self.indices, k=3) # 3 additional image indices + random.shuffle(indices) + for i, index in enumerate(indices): + # Load image + img, _, (h, w) = self.load_image(index) + + # place img in img4 + if i == 0: # top left + img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles + x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image) + x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image) + elif i == 1: # top right + x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc + x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h + elif i == 2: # bottom left + x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h) + x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h) + elif i == 3: # bottom right + x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h) + x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h) + + img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax] + padw = x1a - x1b + padh = y1a - y1b + + # Labels + labels, segments = self.labels[index].copy(), self.segments[index].copy() + if labels.size: + labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh) # normalized xywh to pixel xyxy format + segments = [xyn2xy(x, w, h, padw, padh) for x in segments] + labels4.append(labels) + segments4.extend(segments) + + # Concat/clip labels + labels4 = np.concatenate(labels4, 0) + for x in (labels4[:, 1:], *segments4): + np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective() + # img4, labels4 = replicate(img4, labels4) # replicate + + # Augment + img4, labels4, segments4 = copy_paste(img4, labels4, segments4, p=self.hyp['copy_paste']) + img4, labels4 = random_perspective(img4, + labels4, + segments4, + degrees=self.hyp['degrees'], + translate=self.hyp['translate'], + scale=self.hyp['scale'], + shear=self.hyp['shear'], + 
perspective=self.hyp['perspective'], + border=self.mosaic_border) # border to remove + + return img4, labels4 + + def load_mosaic9(self, index): + # YOLOv5 9-mosaic loader. Loads 1 image + 8 random images into a 9-image mosaic + labels9, segments9 = [], [] + s = self.img_size + indices = [index] + random.choices(self.indices, k=8) # 8 additional image indices + random.shuffle(indices) + hp, wp = -1, -1 # height, width previous + for i, index in enumerate(indices): + # Load image + img, _, (h, w) = self.load_image(index) + + # place img in img9 + if i == 0: # center + img9 = np.full((s * 3, s * 3, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles + h0, w0 = h, w + c = s, s, s + w, s + h # xmin, ymin, xmax, ymax (base) coordinates + elif i == 1: # top + c = s, s - h, s + w, s + elif i == 2: # top right + c = s + wp, s - h, s + wp + w, s + elif i == 3: # right + c = s + w0, s, s + w0 + w, s + h + elif i == 4: # bottom right + c = s + w0, s + hp, s + w0 + w, s + hp + h + elif i == 5: # bottom + c = s + w0 - w, s + h0, s + w0, s + h0 + h + elif i == 6: # bottom left + c = s + w0 - wp - w, s + h0, s + w0 - wp, s + h0 + h + elif i == 7: # left + c = s - w, s + h0 - h, s, s + h0 + elif i == 8: # top left + c = s - w, s + h0 - hp - h, s, s + h0 - hp + + padx, pady = c[:2] + x1, y1, x2, y2 = (max(x, 0) for x in c) # allocate coords + + # Labels + labels, segments = self.labels[index].copy(), self.segments[index].copy() + if labels.size: + labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padx, pady) # normalized xywh to pixel xyxy format + segments = [xyn2xy(x, w, h, padx, pady) for x in segments] + labels9.append(labels) + segments9.extend(segments) + + # Image + img9[y1:y2, x1:x2] = img[y1 - pady:, x1 - padx:] # img9[ymin:ymax, xmin:xmax] + hp, wp = h, w # height, width previous + + # Offset + yc, xc = (int(random.uniform(0, s)) for _ in self.mosaic_border) # mosaic center x, y + img9 = img9[yc:yc + 2 * s, xc:xc + 2 * s] + + # Concat/clip labels + labels9 = np.concatenate(labels9, 0) + labels9[:, [1, 3]] -= xc + labels9[:, [2, 4]] -= yc + c = np.array([xc, yc]) # centers + segments9 = [x - c for x in segments9] + + for x in (labels9[:, 1:], *segments9): + np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective() + # img9, labels9 = replicate(img9, labels9) # replicate + + # Augment + img9, labels9 = random_perspective(img9, + labels9, + segments9, + degrees=self.hyp['degrees'], + translate=self.hyp['translate'], + scale=self.hyp['scale'], + shear=self.hyp['shear'], + perspective=self.hyp['perspective'], + border=self.mosaic_border) # border to remove + + return img9, labels9 + + @staticmethod + def collate_fn(batch): + im, label, path, shapes = zip(*batch) # transposed + for i, lb in enumerate(label): + lb[:, 0] = i # add target image index for build_targets() + return torch.stack(im, 0), torch.cat(label, 0), path, shapes + + @staticmethod + def collate_fn4(batch): + img, label, path, shapes = zip(*batch) # transposed + n = len(shapes) // 4 + im4, label4, path4, shapes4 = [], [], path[:n], shapes[:n] + + ho = torch.tensor([[0.0, 0, 0, 1, 0, 0]]) + wo = torch.tensor([[0.0, 0, 1, 0, 0, 0]]) + s = torch.tensor([[1, 1, 0.5, 0.5, 0.5, 0.5]]) # scale + for i in range(n): # zidane torch.zeros(16,3,720,1280) # BCHW + i *= 4 + if random.random() < 0.5: + im = F.interpolate(img[i].unsqueeze(0).float(), scale_factor=2.0, mode='bilinear', + align_corners=False)[0].type(img[i].type()) + lb = label[i] + else: + im = torch.cat((torch.cat((img[i], img[i + 1]), 1), torch.cat((img[i + 2], img[i + 
3]), 1)), 2) + lb = torch.cat((label[i], label[i + 1] + ho, label[i + 2] + wo, label[i + 3] + ho + wo), 0) * s + im4.append(im) + label4.append(lb) + + for i, lb in enumerate(label4): + lb[:, 0] = i # add target image index for build_targets() + + return torch.stack(im4, 0), torch.cat(label4, 0), path4, shapes4 + + +# Ancillary functions -------------------------------------------------------------------------------------------------- +def flatten_recursive(path=DATASETS_DIR / 'coco128'): + # Flatten a recursive directory by bringing all files to top level + new_path = Path(f'{str(path)}_flat') + if os.path.exists(new_path): + shutil.rmtree(new_path) # delete output folder + os.makedirs(new_path) # make new output folder + for file in tqdm(glob.glob(f'{str(Path(path))}/**/*.*', recursive=True)): + shutil.copyfile(file, new_path / Path(file).name) + + +def extract_boxes(path=DATASETS_DIR / 'coco128'): # from utils.dataloaders import *; extract_boxes() + # Convert detection dataset into classification dataset, with one directory per class + path = Path(path) # images dir + shutil.rmtree(path / 'classifier') if (path / 'classifier').is_dir() else None # remove existing + files = list(path.rglob('*.*')) + n = len(files) # number of files + for im_file in tqdm(files, total=n): + if im_file.suffix[1:] in IMG_FORMATS: + # image + im = cv2.imread(str(im_file))[..., ::-1] # BGR to RGB + h, w = im.shape[:2] + + # labels + lb_file = Path(img2label_paths([str(im_file)])[0]) + if Path(lb_file).exists(): + with open(lb_file) as f: + lb = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32) # labels + + for j, x in enumerate(lb): + c = int(x[0]) # class + f = (path / 'classifier') / f'{c}' / f'{path.stem}_{im_file.stem}_{j}.jpg' # new filename + if not f.parent.is_dir(): + f.parent.mkdir(parents=True) + + b = x[1:] * [w, h, w, h] # box + # b[2:] = b[2:].max() # rectangle to square + b[2:] = b[2:] * 1.2 + 3 # pad + b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(np.int) + + b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image + b[[1, 3]] = np.clip(b[[1, 3]], 0, h) + assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}' + + +def autosplit(path=DATASETS_DIR / 'coco128/images', weights=(0.9, 0.1, 0.0), annotated_only=False): + """ Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files + Usage: from utils.dataloaders import *; autosplit() + Arguments + path: Path to images directory + weights: Train, val, test weights (list, tuple) + annotated_only: Only use images with an annotated txt file + """ + path = Path(path) # images dir + files = sorted(x for x in path.rglob('*.*') if x.suffix[1:].lower() in IMG_FORMATS) # image files only + n = len(files) # number of files + random.seed(0) # for reproducibility + indices = random.choices([0, 1, 2], weights=weights, k=n) # assign each image to a split + + txt = ['autosplit_train.txt', 'autosplit_val.txt', 'autosplit_test.txt'] # 3 txt files + [(path.parent / x).unlink(missing_ok=True) for x in txt] # remove existing + + print(f'Autosplitting images from {path}' + ', using *.txt labeled images only' * annotated_only) + for i, img in tqdm(zip(indices, files), total=n): + if not annotated_only or Path(img2label_paths([str(img)])[0]).exists(): # check label + with open(path.parent / txt[i], 'a') as f: + f.write(f'./{img.relative_to(path.parent).as_posix()}' + '\n') # add image to txt file + + +def verify_image_label(args): + # Verify one image-label pair + im_file, lb_file, prefix = 
args + nm, nf, ne, nc, msg, segments = 0, 0, 0, 0, '', [] # number (missing, found, empty, corrupt), message, segments + try: + # verify images + im = Image.open(im_file) + im.verify() # PIL verify + shape = exif_size(im) # image size + assert (shape[0] > 9) & (shape[1] > 9), f'image size {shape} <10 pixels' + assert im.format.lower() in IMG_FORMATS, f'invalid image format {im.format}' + if im.format.lower() in ('jpg', 'jpeg'): + with open(im_file, 'rb') as f: + f.seek(-2, 2) + if f.read() != b'\xff\xd9': # corrupt JPEG + ImageOps.exif_transpose(Image.open(im_file)).save(im_file, 'JPEG', subsampling=0, quality=100) + msg = f'{prefix}WARNING: {im_file}: corrupt JPEG restored and saved' + + # verify labels + if os.path.isfile(lb_file): + nf = 1 # label found + with open(lb_file) as f: + lb = [x.split() for x in f.read().strip().splitlines() if len(x)] + if any(len(x) > 6 for x in lb): # is segment + classes = np.array([x[0] for x in lb], dtype=np.float32) + segments = [np.array(x[1:], dtype=np.float32).reshape(-1, 2) for x in lb] # (cls, xy1...) + lb = np.concatenate((classes.reshape(-1, 1), segments2boxes(segments)), 1) # (cls, xywh) + lb = np.array(lb, dtype=np.float32) + nl = len(lb) + if nl: + assert lb.shape[1] == 5, f'labels require 5 columns, {lb.shape[1]} columns detected' + assert (lb >= 0).all(), f'negative label values {lb[lb < 0]}' + assert (lb[:, 1:] <= 1).all(), f'non-normalized or out of bounds coordinates {lb[:, 1:][lb[:, 1:] > 1]}' + _, i = np.unique(lb, axis=0, return_index=True) + if len(i) < nl: # duplicate row check + lb = lb[i] # remove duplicates + if segments: + segments = segments[i] + msg = f'{prefix}WARNING: {im_file}: {nl - len(i)} duplicate labels removed' + else: + ne = 1 # label empty + lb = np.zeros((0, 5), dtype=np.float32) + else: + nm = 1 # label missing + lb = np.zeros((0, 5), dtype=np.float32) + return im_file, lb, shape, segments, nm, nf, ne, nc, msg + except Exception as e: + nc = 1 + msg = f'{prefix}WARNING: {im_file}: ignoring corrupt image/label: {e}' + return [None, None, None, None, nm, nf, ne, nc, msg] + + +class HUBDatasetStats(): + """ Return dataset statistics dictionary with images and instances counts per split per class + To run in parent directory: export PYTHONPATH="$PWD/yolov5" + Usage1: from utils.dataloaders import *; HUBDatasetStats('coco128.yaml', autodownload=True) + Usage2: from utils.dataloaders import *; HUBDatasetStats('path/to/coco128_with_yaml.zip') + Arguments + path: Path to data.yaml or data.zip (with data.yaml inside data.zip) + autodownload: Attempt to download dataset if not found locally + """ + + def __init__(self, path='coco128.yaml', autodownload=False): + # Initialize class + zipped, data_dir, yaml_path = self._unzip(Path(path)) + try: + with open(check_yaml(yaml_path), errors='ignore') as f: + data = yaml.safe_load(f) # data dict + if zipped: + data['path'] = data_dir + except Exception as e: + raise Exception("error/HUB/dataset_stats/yaml_load") from e + + check_dataset(data, autodownload) # download dataset if missing + self.hub_dir = Path(data['path'] + '-hub') + self.im_dir = self.hub_dir / 'images' + self.im_dir.mkdir(parents=True, exist_ok=True) # makes /images + self.stats = {'nc': data['nc'], 'names': data['names']} # statistics dictionary + self.data = data + + @staticmethod + def _find_yaml(dir): + # Return data.yaml file + files = list(dir.glob('*.yaml')) or list(dir.rglob('*.yaml')) # try root level first and then recursive + assert files, f'No *.yaml file found in {dir}' + if len(files) > 1: + files = 
[f for f in files if f.stem == dir.stem] # prefer *.yaml files that match dir name + assert files, f'Multiple *.yaml files found in {dir}, only 1 *.yaml file allowed' + assert len(files) == 1, f'Multiple *.yaml files found: {files}, only 1 *.yaml file allowed in {dir}' + return files[0] + + def _unzip(self, path): + # Unzip data.zip + if not str(path).endswith('.zip'): # path is data.yaml + return False, None, path + assert Path(path).is_file(), f'Error unzipping {path}, file not found' + ZipFile(path).extractall(path=path.parent) # unzip + dir = path.with_suffix('') # dataset directory == zip name + assert dir.is_dir(), f'Error unzipping {path}, {dir} not found. path/to/abc.zip MUST unzip to path/to/abc/' + return True, str(dir), self._find_yaml(dir) # zipped, data_dir, yaml_path + + def _hub_ops(self, f, max_dim=1920): + # HUB ops for 1 image 'f': resize and save at reduced quality in /dataset-hub for web/app viewing + f_new = self.im_dir / Path(f).name # dataset-hub image filename + try: # use PIL + im = Image.open(f) + r = max_dim / max(im.height, im.width) # ratio + if r < 1.0: # image too large + im = im.resize((int(im.width * r), int(im.height * r))) + im.save(f_new, 'JPEG', quality=50, optimize=True) # save + except Exception as e: # use OpenCV + print(f'WARNING: HUB ops PIL failure {f}: {e}') + im = cv2.imread(f) + im_height, im_width = im.shape[:2] + r = max_dim / max(im_height, im_width) # ratio + if r < 1.0: # image too large + im = cv2.resize(im, (int(im_width * r), int(im_height * r)), interpolation=cv2.INTER_AREA) + cv2.imwrite(str(f_new), im) + + def get_json(self, save=False, verbose=False): + # Return dataset JSON for Ultralytics HUB + def _round(labels): + # Update labels to integer class and 6 decimal place floats + return [[int(c), *(round(x, 4) for x in points)] for c, *points in labels] + + for split in 'train', 'val', 'test': + if self.data.get(split) is None: + self.stats[split] = None # i.e. no test set + continue + dataset = LoadImagesAndLabels(self.data[split]) # load dataset + x = np.array([ + np.bincount(label[:, 0].astype(int), minlength=self.data['nc']) + for label in tqdm(dataset.labels, total=dataset.n, desc='Statistics')]) # shape(128x80) + self.stats[split] = { + 'instance_stats': { + 'total': int(x.sum()), + 'per_class': x.sum(0).tolist()}, + 'image_stats': { + 'total': dataset.n, + 'unlabelled': int(np.all(x == 0, 1).sum()), + 'per_class': (x > 0).sum(0).tolist()}, + 'labels': [{ + str(Path(k).name): _round(v.tolist())} for k, v in zip(dataset.im_files, dataset.labels)]} + + # Save, print and return + if save: + stats_path = self.hub_dir / 'stats.json' + print(f'Saving {stats_path.resolve()}...') + with open(stats_path, 'w') as f: + json.dump(self.stats, f) # save stats.json + if verbose: + print(json.dumps(self.stats, indent=2, sort_keys=False)) + return self.stats + + def process_images(self): + # Compress images for Ultralytics HUB + for split in 'train', 'val', 'test': + if self.data.get(split) is None: + continue + dataset = LoadImagesAndLabels(self.data[split]) # load dataset + desc = f'{split} images' + for _ in tqdm(ThreadPool(NUM_THREADS).imap(self._hub_ops, dataset.im_files), total=dataset.n, desc=desc): + pass + print(f'Done. 
All images saved to {self.im_dir}') + return self.im_dir diff --git a/utils/datasets.py b/utils/datasets.py new file mode 100755 index 0000000..e132e04 --- /dev/null +++ b/utils/datasets.py @@ -0,0 +1,1037 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Dataloaders and dataset utils +""" + +import glob +import hashlib +import json +import math +import os +import random +import shutil +import time +from itertools import repeat +from multiprocessing.pool import Pool, ThreadPool +from pathlib import Path +from threading import Thread +from zipfile import ZipFile + +import cv2 +import numpy as np +import torch +import torch.nn.functional as F +import yaml +from PIL import ExifTags, Image, ImageOps +from torch.utils.data import DataLoader, Dataset, dataloader, distributed +from tqdm import tqdm + +from utils.augmentations import Albumentations, augment_hsv, copy_paste, letterbox, mixup, random_perspective +from utils.general import (DATASETS_DIR, LOGGER, NUM_THREADS, check_dataset, check_requirements, check_yaml, clean_str, + segments2boxes, xyn2xy, xywh2xyxy, xywhn2xyxy, xyxy2xywhn) +from utils.torch_utils import torch_distributed_zero_first + +# Parameters +HELP_URL = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data' +IMG_FORMATS = ['bmp', 'dng', 'jpeg', 'jpg', 'mpo', 'png', 'tif', 'tiff', 'webp'] # include image suffixes +VID_FORMATS = ['asf', 'avi', 'gif', 'm4v', 'mkv', 'mov', 'mp4', 'mpeg', 'mpg', 'wmv'] # include video suffixes + +# Get orientation exif tag +for orientation in ExifTags.TAGS.keys(): + if ExifTags.TAGS[orientation] == 'Orientation': + break + + +def get_hash(paths): + # Returns a single hash value of a list of paths (files or dirs) + size = sum(os.path.getsize(p) for p in paths if os.path.exists(p)) # sizes + h = hashlib.md5(str(size).encode()) # hash sizes + h.update(''.join(paths).encode()) # hash paths + return h.hexdigest() # return hash + + +def exif_size(img): + # Returns exif-corrected PIL size + s = img.size # (width, height) + try: + rotation = dict(img._getexif().items())[orientation] + if rotation == 6: # rotation 270 + s = (s[1], s[0]) + elif rotation == 8: # rotation 90 + s = (s[1], s[0]) + except Exception: + pass + + return s + + +def exif_transpose(image): + """ + Transpose a PIL image accordingly if it has an EXIF Orientation tag. + Inplace version of https://github.com/python-pillow/Pillow/blob/master/src/PIL/ImageOps.py exif_transpose() + + :param image: The image to transpose. + :return: An image. 
+ """ + exif = image.getexif() + orientation = exif.get(0x0112, 1) # default 1 + if orientation > 1: + method = {2: Image.FLIP_LEFT_RIGHT, + 3: Image.ROTATE_180, + 4: Image.FLIP_TOP_BOTTOM, + 5: Image.TRANSPOSE, + 6: Image.ROTATE_270, + 7: Image.TRANSVERSE, + 8: Image.ROTATE_90, + }.get(orientation) + if method is not None: + image = image.transpose(method) + del exif[0x0112] + image.info["exif"] = exif.tobytes() + return image + + +def create_dataloader(path, imgsz, batch_size, stride, single_cls=False, hyp=None, augment=False, cache=False, pad=0.0, + rect=False, rank=-1, workers=8, image_weights=False, quad=False, prefix='', shuffle=False): + if rect and shuffle: + LOGGER.warning('WARNING: --rect is incompatible with DataLoader shuffle, setting shuffle=False') + shuffle = False + with torch_distributed_zero_first(rank): # init dataset *.cache only once if DDP + dataset = LoadImagesAndLabels(path, imgsz, batch_size, + augment=augment, # augmentation + hyp=hyp, # hyperparameters + rect=rect, # rectangular batches + cache_images=cache, + single_cls=single_cls, + stride=int(stride), + pad=pad, + image_weights=image_weights, + prefix=prefix) + + batch_size = min(batch_size, len(dataset)) + nd = torch.cuda.device_count() # number of CUDA devices + nw = min([os.cpu_count() // max(nd, 1), batch_size if batch_size > 1 else 0, workers]) # number of workers + sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle) + loader = DataLoader if image_weights else InfiniteDataLoader # only DataLoader allows for attribute updates + return loader(dataset, + batch_size=batch_size, + shuffle=shuffle and sampler is None, + num_workers=nw, + sampler=sampler, + pin_memory=True, + collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn), dataset + + +class InfiniteDataLoader(dataloader.DataLoader): + """ Dataloader that reuses workers + + Uses same syntax as vanilla DataLoader + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler)) + self.iterator = super().__iter__() + + def __len__(self): + return len(self.batch_sampler.sampler) + + def __iter__(self): + for i in range(len(self)): + yield next(self.iterator) + + +class _RepeatSampler: + """ Sampler that repeats forever + + Args: + sampler (Sampler) + """ + + def __init__(self, sampler): + self.sampler = sampler + + def __iter__(self): + while True: + yield from iter(self.sampler) + + +class LoadImages: + # YOLOv5 image/video dataloader, i.e. `python detect.py --source image.jpg/vid.mp4` + def __init__(self, path, img_size=640, stride=32, auto=True): + p = str(Path(path).resolve()) # os-agnostic absolute path + if '*' in p: + files = sorted(glob.glob(p, recursive=True)) # glob + elif os.path.isdir(p): + files = sorted(glob.glob(os.path.join(p, '*.*'))) # dir + elif os.path.isfile(p): + files = [p] # files + else: + raise Exception(f'ERROR: {p} does not exist') + + images = [x for x in files if x.split('.')[-1].lower() in IMG_FORMATS] + videos = [x for x in files if x.split('.')[-1].lower() in VID_FORMATS] + ni, nv = len(images), len(videos) + + self.img_size = img_size + self.stride = stride + self.files = images + videos + self.nf = ni + nv # number of files + self.video_flag = [False] * ni + [True] * nv + self.mode = 'image' + self.auto = auto + if any(videos): + self.new_video(videos[0]) # new video + else: + self.cap = None + assert self.nf > 0, f'No images or videos found in {p}. 
' \ + f'Supported formats are:\nimages: {IMG_FORMATS}\nvideos: {VID_FORMATS}' + + def __iter__(self): + self.count = 0 + return self + + def __next__(self): + if self.count == self.nf: + raise StopIteration + path = self.files[self.count] + + if self.video_flag[self.count]: + # Read video + self.mode = 'video' + ret_val, img0 = self.cap.read() + while not ret_val: + self.count += 1 + self.cap.release() + if self.count == self.nf: # last video + raise StopIteration + else: + path = self.files[self.count] + self.new_video(path) + ret_val, img0 = self.cap.read() + + self.frame += 1 + s = f'video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: ' + + else: + # Read image + self.count += 1 + img0 = cv2.imread(path) # BGR + assert img0 is not None, f'Image Not Found {path}' + s = f'image {self.count}/{self.nf} {path}: ' + + # Padded resize + img = letterbox(img0, self.img_size, stride=self.stride, auto=self.auto)[0] + + # Convert + img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB + img = np.ascontiguousarray(img) + + return path, img, img0, self.cap, s + + def new_video(self, path): + self.frame = 0 + self.cap = cv2.VideoCapture(path) + self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT)) + + def __len__(self): + return self.nf # number of files + + +class LoadWebcam: # for inference + # YOLOv5 local webcam dataloader, i.e. `python detect.py --source 0` + def __init__(self, pipe='0', img_size=640, stride=32): + self.img_size = img_size + self.stride = stride + self.pipe = eval(pipe) if pipe.isnumeric() else pipe + self.cap = cv2.VideoCapture(self.pipe) # video capture object + self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3) # set buffer size + + def __iter__(self): + self.count = -1 + return self + + def __next__(self): + self.count += 1 + if cv2.waitKey(1) == ord('q'): # q to quit + self.cap.release() + cv2.destroyAllWindows() + raise StopIteration + + # Read frame + ret_val, img0 = self.cap.read() + img0 = cv2.flip(img0, 1) # flip left-right + + # Print + assert ret_val, f'Camera Error {self.pipe}' + img_path = 'webcam.jpg' + s = f'webcam {self.count}: ' + + # Padded resize + img = letterbox(img0, self.img_size, stride=self.stride)[0] + + # Convert + img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB + img = np.ascontiguousarray(img) + + return img_path, img, img0, None, s + + def __len__(self): + return 0 + + +class LoadStreams: + # YOLOv5 streamloader, i.e. `python detect.py --source 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP streams` + def __init__(self, sources='streams.txt', img_size=640, stride=32, auto=True): + self.mode = 'stream' + self.img_size = img_size + self.stride = stride + + if os.path.isfile(sources): + with open(sources) as f: + sources = [x.strip() for x in f.read().strip().splitlines() if len(x.strip())] + else: + sources = [sources] + + n = len(sources) + self.imgs, self.fps, self.frames, self.threads = [None] * n, [0] * n, [0] * n, [None] * n + self.sources = [clean_str(x) for x in sources] # clean source names for later + self.auto = auto + for i, s in enumerate(sources): # index, source + # Start thread to read frames from video stream + st = f'{i + 1}/{n}: {s}... ' + if 'youtube.com/' in s or 'youtu.be/' in s: # if source is YouTube video + check_requirements(('pafy', 'youtube_dl==2020.12.2')) + import pafy + s = pafy.new(s).getbest(preftype="mp4").url # YouTube URL + s = eval(s) if s.isnumeric() else s # i.e. 
s = '0' local webcam + cap = cv2.VideoCapture(s) + assert cap.isOpened(), f'{st}Failed to open {s}' + w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) + h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) + fps = cap.get(cv2.CAP_PROP_FPS) # warning: may return 0 or nan + self.frames[i] = max(int(cap.get(cv2.CAP_PROP_FRAME_COUNT)), 0) or float('inf') # infinite stream fallback + self.fps[i] = max((fps if math.isfinite(fps) else 0) % 100, 0) or 30 # 30 FPS fallback + + _, self.imgs[i] = cap.read() # guarantee first frame + self.threads[i] = Thread(target=self.update, args=([i, cap, s]), daemon=True) + LOGGER.info(f"{st} Success ({self.frames[i]} frames {w}x{h} at {self.fps[i]:.2f} FPS)") + self.threads[i].start() + LOGGER.info('') # newline + + # check for common shapes + s = np.stack([letterbox(x, self.img_size, stride=self.stride, auto=self.auto)[0].shape for x in self.imgs]) + self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal + if not self.rect: + LOGGER.warning('WARNING: Stream shapes differ. For optimal performance supply similarly-shaped streams.') + + def update(self, i, cap, stream): + # Read stream `i` frames in daemon thread + n, f, read = 0, self.frames[i], 1 # frame number, frame array, inference every 'read' frame + while cap.isOpened() and n < f: + n += 1 + # _, self.imgs[index] = cap.read() + cap.grab() + if n % read == 0: + success, im = cap.retrieve() + if success: + self.imgs[i] = im + else: + LOGGER.warning('WARNING: Video stream unresponsive, please check your IP camera connection.') + self.imgs[i] = np.zeros_like(self.imgs[i]) + cap.open(stream) # re-open stream if signal was lost + time.sleep(1 / self.fps[i]) # wait time + + def __iter__(self): + self.count = -1 + return self + + def __next__(self): + self.count += 1 + if not all(x.is_alive() for x in self.threads) or cv2.waitKey(1) == ord('q'): # q to quit + cv2.destroyAllWindows() + raise StopIteration + + # Letterbox + img0 = self.imgs.copy() + img = [letterbox(x, self.img_size, stride=self.stride, auto=self.rect and self.auto)[0] for x in img0] + + # Stack + img = np.stack(img, 0) + + # Convert + img = img[..., ::-1].transpose((0, 3, 1, 2)) # BGR to RGB, BHWC to BCHW + img = np.ascontiguousarray(img) + + return self.sources, img, img0, None, '' + + def __len__(self): + return len(self.sources) # 1E12 frames = 32 streams at 30 FPS for 30 years + + +def img2label_paths(img_paths): + # Define label paths as a function of image paths + sa, sb = os.sep + 'images' + os.sep, os.sep + 'labels' + os.sep # /images/, /labels/ substrings + return [sb.join(x.rsplit(sa, 1)).rsplit('.', 1)[0] + '.txt' for x in img_paths] + + +class LoadImagesAndLabels(Dataset): + # YOLOv5 train_loader/val_loader, loads images and labels for training and validation + cache_version = 0.6 # dataset labels *.cache version + + def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False, + cache_images=False, single_cls=False, stride=32, pad=0.0, prefix=''): + self.img_size = img_size + self.augment = augment + self.hyp = hyp + self.image_weights = image_weights + self.rect = False if image_weights else rect + self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training) + self.mosaic_border = [-img_size // 2, -img_size // 2] + self.stride = stride + self.path = path + self.albumentations = Albumentations() if augment else None + + try: + f = [] # image files + for p in path if isinstance(path, list) else [path]: + p = Path(p) # 
os-agnostic + if p.is_dir(): # dir + f += glob.glob(str(p / '**' / '*.*'), recursive=True) + # f = list(p.rglob('*.*')) # pathlib + elif p.is_file(): # file + with open(p) as t: + t = t.read().strip().splitlines() + parent = str(p.parent) + os.sep + f += [x.replace('./', parent) if x.startswith('./') else x for x in t] # local to global path + # f += [p.parent / x.lstrip(os.sep) for x in t] # local to global path (pathlib) + else: + raise Exception(f'{prefix}{p} does not exist') + self.img_files = sorted(x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in IMG_FORMATS) + # self.img_files = sorted([x for x in f if x.suffix[1:].lower() in IMG_FORMATS]) # pathlib + assert self.img_files, f'{prefix}No images found' + except Exception as e: + raise Exception(f'{prefix}Error loading data from {path}: {e}\nSee {HELP_URL}') + + # Check cache + self.label_files = img2label_paths(self.img_files) # labels + cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache') + try: + cache, exists = np.load(cache_path, allow_pickle=True).item(), True # load dict + assert cache['version'] == self.cache_version # same version + assert cache['hash'] == get_hash(self.label_files + self.img_files) # same hash + except Exception: + cache, exists = self.cache_labels(cache_path, prefix), False # cache + + # Display cache + nf, nm, ne, nc, n = cache.pop('results') # found, missing, empty, corrupt, total + if exists: + d = f"Scanning '{cache_path}' images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupt" + tqdm(None, desc=prefix + d, total=n, initial=n) # display cache results + if cache['msgs']: + LOGGER.info('\n'.join(cache['msgs'])) # display warnings + assert nf > 0 or not augment, f'{prefix}No labels in {cache_path}. Can not train without labels. 
See {HELP_URL}' + + # Read cache + [cache.pop(k) for k in ('hash', 'version', 'msgs')] # remove items + labels, shapes, self.segments = zip(*cache.values()) + self.labels = list(labels) + self.shapes = np.array(shapes, dtype=np.float64) + self.img_files = list(cache.keys()) # update + self.label_files = img2label_paths(cache.keys()) # update + n = len(shapes) # number of images + bi = np.floor(np.arange(n) / batch_size).astype(np.int) # batch index + nb = bi[-1] + 1 # number of batches + self.batch = bi # batch index of image + self.n = n + self.indices = range(n) + + # Update labels + include_class = [] # filter labels to include only these classes (optional) + include_class_array = np.array(include_class).reshape(1, -1) + for i, (label, segment) in enumerate(zip(self.labels, self.segments)): + if include_class: + j = (label[:, 0:1] == include_class_array).any(1) + self.labels[i] = label[j] + if segment: + self.segments[i] = segment[j] + if single_cls: # single-class training, merge all classes into 0 + self.labels[i][:, 0] = 0 + if segment: + self.segments[i][:, 0] = 0 + + # Rectangular Training + if self.rect: + # Sort by aspect ratio + s = self.shapes # wh + ar = s[:, 1] / s[:, 0] # aspect ratio + irect = ar.argsort() + self.img_files = [self.img_files[i] for i in irect] + self.label_files = [self.label_files[i] for i in irect] + self.labels = [self.labels[i] for i in irect] + self.shapes = s[irect] # wh + ar = ar[irect] + + # Set training image shapes + shapes = [[1, 1]] * nb + for i in range(nb): + ari = ar[bi == i] + mini, maxi = ari.min(), ari.max() + if maxi < 1: + shapes[i] = [maxi, 1] + elif mini > 1: + shapes[i] = [1, 1 / mini] + + self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(np.int) * stride + + # Cache images into RAM/disk for faster training (WARNING: large datasets may exceed system resources) + self.imgs, self.img_npy = [None] * n, [None] * n + if cache_images: + if cache_images == 'disk': + self.im_cache_dir = Path(Path(self.img_files[0]).parent.as_posix() + '_npy') + self.img_npy = [self.im_cache_dir / Path(f).with_suffix('.npy').name for f in self.img_files] + self.im_cache_dir.mkdir(parents=True, exist_ok=True) + gb = 0 # Gigabytes of cached images + self.img_hw0, self.img_hw = [None] * n, [None] * n + results = ThreadPool(NUM_THREADS).imap(self.load_image, range(n)) + pbar = tqdm(enumerate(results), total=n) + for i, x in pbar: + if cache_images == 'disk': + if not self.img_npy[i].exists(): + np.save(self.img_npy[i].as_posix(), x[0]) + gb += self.img_npy[i].stat().st_size + else: # 'ram' + self.imgs[i], self.img_hw0[i], self.img_hw[i] = x # im, hw_orig, hw_resized = load_image(self, i) + gb += self.imgs[i].nbytes + pbar.desc = f'{prefix}Caching images ({gb / 1E9:.1f}GB {cache_images})' + pbar.close() + + def cache_labels(self, path=Path('./labels.cache'), prefix=''): + # Cache dataset labels, check images and read shapes + x = {} # dict + nm, nf, ne, nc, msgs = 0, 0, 0, 0, [] # number missing, found, empty, corrupt, messages + desc = f"{prefix}Scanning '{path.parent / path.stem}' images and labels..." 
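+        # Note: descriptive comment added for clarity. verify_image_label() (defined below) returns the parsed
+        # labels/segments for one (image, label) pair plus per-file missing/found/empty/corrupt flags; the loop
+        # below sums those counts and stores each result in the cache dict keyed by image path.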
+ with Pool(NUM_THREADS) as pool: + pbar = tqdm(pool.imap(verify_image_label, zip(self.img_files, self.label_files, repeat(prefix))), + desc=desc, total=len(self.img_files)) + for im_file, lb, shape, segments, nm_f, nf_f, ne_f, nc_f, msg in pbar: + nm += nm_f + nf += nf_f + ne += ne_f + nc += nc_f + if im_file: + x[im_file] = [lb, shape, segments] + if msg: + msgs.append(msg) + pbar.desc = f"{desc}{nf} found, {nm} missing, {ne} empty, {nc} corrupt" + + pbar.close() + if msgs: + LOGGER.info('\n'.join(msgs)) + if nf == 0: + LOGGER.warning(f'{prefix}WARNING: No labels found in {path}. See {HELP_URL}') + x['hash'] = get_hash(self.label_files + self.img_files) + x['results'] = nf, nm, ne, nc, len(self.img_files) + x['msgs'] = msgs # warnings + x['version'] = self.cache_version # cache version + try: + np.save(path, x) # save cache for next time + path.with_suffix('.cache.npy').rename(path) # remove .npy suffix + LOGGER.info(f'{prefix}New cache created: {path}') + except Exception as e: + LOGGER.warning(f'{prefix}WARNING: Cache directory {path.parent} is not writeable: {e}') # not writeable + return x + + def __len__(self): + return len(self.img_files) + + # def __iter__(self): + # self.count = -1 + # print('ran dataset iter') + # #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF) + # return self + + def __getitem__(self, index): + index = self.indices[index] # linear, shuffled, or image_weights + + hyp = self.hyp + mosaic = self.mosaic and random.random() < hyp['mosaic'] + if mosaic: + # Load mosaic + img, labels = self.load_mosaic(index) + shapes = None + + # MixUp augmentation + if random.random() < hyp['mixup']: + img, labels = mixup(img, labels, *self.load_mosaic(random.randint(0, self.n - 1))) + + else: + # Load image + img, (h0, w0), (h, w) = self.load_image(index) + + # Letterbox + shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape + img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment) + shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling + + labels = self.labels[index].copy() + if labels.size: # normalized xywh to pixel xyxy format + labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1]) + + if self.augment: + img, labels = random_perspective(img, labels, + degrees=hyp['degrees'], + translate=hyp['translate'], + scale=hyp['scale'], + shear=hyp['shear'], + perspective=hyp['perspective']) + + nl = len(labels) # number of labels + if nl: + labels[:, 1:5] = xyxy2xywhn(labels[:, 1:5], w=img.shape[1], h=img.shape[0], clip=True, eps=1E-3) + + if self.augment: + # Albumentations + img, labels = self.albumentations(img, labels) + nl = len(labels) # update after albumentations + + # HSV color-space + augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v']) + + # Flip up-down + if random.random() < hyp['flipud']: + img = np.flipud(img) + if nl: + labels[:, 2] = 1 - labels[:, 2] + + # Flip left-right + if random.random() < hyp['fliplr']: + img = np.fliplr(img) + if nl: + labels[:, 1] = 1 - labels[:, 1] + + # Cutouts + # labels = cutout(img, labels, p=0.5) + # nl = len(labels) # update after cutout + + labels_out = torch.zeros((nl, 6)) + if nl: + labels_out[:, 1:] = torch.from_numpy(labels) + + # Convert + img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB + img = np.ascontiguousarray(img) + + return torch.from_numpy(img), labels_out, self.img_files[index], shapes + + def load_image(self, i): + # 
loads 1 image from dataset index 'i', returns (im, original hw, resized hw) + im = self.imgs[i] + if im is None: # not cached in RAM + npy = self.img_npy[i] + if npy and npy.exists(): # load npy + im = np.load(npy) + else: # read image + f = self.img_files[i] + im = cv2.imread(f) # BGR + assert im is not None, f'Image Not Found {f}' + h0, w0 = im.shape[:2] # orig hw + r = self.img_size / max(h0, w0) # ratio + if r != 1: # if sizes are not equal + im = cv2.resize(im, + (int(w0 * r), int(h0 * r)), + interpolation=cv2.INTER_LINEAR if (self.augment or r > 1) else cv2.INTER_AREA) + return im, (h0, w0), im.shape[:2] # im, hw_original, hw_resized + else: + return self.imgs[i], self.img_hw0[i], self.img_hw[i] # im, hw_original, hw_resized + + def load_mosaic(self, index): + # YOLOv5 4-mosaic loader. Loads 1 image + 3 random images into a 4-image mosaic + labels4, segments4 = [], [] + s = self.img_size + yc, xc = (int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border) # mosaic center x, y + indices = [index] + random.choices(self.indices, k=3) # 3 additional image indices + random.shuffle(indices) + for i, index in enumerate(indices): + # Load image + img, _, (h, w) = self.load_image(index) + + # place img in img4 + if i == 0: # top left + img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles + x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image) + x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image) + elif i == 1: # top right + x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc + x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h + elif i == 2: # bottom left + x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h) + x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h) + elif i == 3: # bottom right + x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h) + x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h) + + img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax] + padw = x1a - x1b + padh = y1a - y1b + + # Labels + labels, segments = self.labels[index].copy(), self.segments[index].copy() + if labels.size: + labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh) # normalized xywh to pixel xyxy format + segments = [xyn2xy(x, w, h, padw, padh) for x in segments] + labels4.append(labels) + segments4.extend(segments) + + # Concat/clip labels + labels4 = np.concatenate(labels4, 0) + for x in (labels4[:, 1:], *segments4): + np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective() + # img4, labels4 = replicate(img4, labels4) # replicate + + # Augment + img4, labels4, segments4 = copy_paste(img4, labels4, segments4, p=self.hyp['copy_paste']) + img4, labels4 = random_perspective(img4, labels4, segments4, + degrees=self.hyp['degrees'], + translate=self.hyp['translate'], + scale=self.hyp['scale'], + shear=self.hyp['shear'], + perspective=self.hyp['perspective'], + border=self.mosaic_border) # border to remove + + return img4, labels4 + + def load_mosaic9(self, index): + # YOLOv5 9-mosaic loader. 
Loads 1 image + 8 random images into a 9-image mosaic + labels9, segments9 = [], [] + s = self.img_size + indices = [index] + random.choices(self.indices, k=8) # 8 additional image indices + random.shuffle(indices) + hp, wp = -1, -1 # height, width previous + for i, index in enumerate(indices): + # Load image + img, _, (h, w) = self.load_image(index) + + # place img in img9 + if i == 0: # center + img9 = np.full((s * 3, s * 3, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles + h0, w0 = h, w + c = s, s, s + w, s + h # xmin, ymin, xmax, ymax (base) coordinates + elif i == 1: # top + c = s, s - h, s + w, s + elif i == 2: # top right + c = s + wp, s - h, s + wp + w, s + elif i == 3: # right + c = s + w0, s, s + w0 + w, s + h + elif i == 4: # bottom right + c = s + w0, s + hp, s + w0 + w, s + hp + h + elif i == 5: # bottom + c = s + w0 - w, s + h0, s + w0, s + h0 + h + elif i == 6: # bottom left + c = s + w0 - wp - w, s + h0, s + w0 - wp, s + h0 + h + elif i == 7: # left + c = s - w, s + h0 - h, s, s + h0 + elif i == 8: # top left + c = s - w, s + h0 - hp - h, s, s + h0 - hp + + padx, pady = c[:2] + x1, y1, x2, y2 = (max(x, 0) for x in c) # allocate coords + + # Labels + labels, segments = self.labels[index].copy(), self.segments[index].copy() + if labels.size: + labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padx, pady) # normalized xywh to pixel xyxy format + segments = [xyn2xy(x, w, h, padx, pady) for x in segments] + labels9.append(labels) + segments9.extend(segments) + + # Image + img9[y1:y2, x1:x2] = img[y1 - pady:, x1 - padx:] # img9[ymin:ymax, xmin:xmax] + hp, wp = h, w # height, width previous + + # Offset + yc, xc = (int(random.uniform(0, s)) for _ in self.mosaic_border) # mosaic center x, y + img9 = img9[yc:yc + 2 * s, xc:xc + 2 * s] + + # Concat/clip labels + labels9 = np.concatenate(labels9, 0) + labels9[:, [1, 3]] -= xc + labels9[:, [2, 4]] -= yc + c = np.array([xc, yc]) # centers + segments9 = [x - c for x in segments9] + + for x in (labels9[:, 1:], *segments9): + np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective() + # img9, labels9 = replicate(img9, labels9) # replicate + + # Augment + img9, labels9 = random_perspective(img9, labels9, segments9, + degrees=self.hyp['degrees'], + translate=self.hyp['translate'], + scale=self.hyp['scale'], + shear=self.hyp['shear'], + perspective=self.hyp['perspective'], + border=self.mosaic_border) # border to remove + + return img9, labels9 + + @staticmethod + def collate_fn(batch): + img, label, path, shapes = zip(*batch) # transposed + for i, lb in enumerate(label): + lb[:, 0] = i # add target image index for build_targets() + return torch.stack(img, 0), torch.cat(label, 0), path, shapes + + @staticmethod + def collate_fn4(batch): + img, label, path, shapes = zip(*batch) # transposed + n = len(shapes) // 4 + img4, label4, path4, shapes4 = [], [], path[:n], shapes[:n] + + ho = torch.tensor([[0.0, 0, 0, 1, 0, 0]]) + wo = torch.tensor([[0.0, 0, 1, 0, 0, 0]]) + s = torch.tensor([[1, 1, 0.5, 0.5, 0.5, 0.5]]) # scale + for i in range(n): # zidane torch.zeros(16,3,720,1280) # BCHW + i *= 4 + if random.random() < 0.5: + im = F.interpolate(img[i].unsqueeze(0).float(), scale_factor=2.0, mode='bilinear', align_corners=False)[ + 0].type(img[i].type()) + lb = label[i] + else: + im = torch.cat((torch.cat((img[i], img[i + 1]), 1), torch.cat((img[i + 2], img[i + 3]), 1)), 2) + lb = torch.cat((label[i], label[i + 1] + ho, label[i + 2] + wo, label[i + 3] + ho + wo), 0) * s + img4.append(im) + label4.append(lb) + + for i, lb in 
enumerate(label4): + lb[:, 0] = i # add target image index for build_targets() + + return torch.stack(img4, 0), torch.cat(label4, 0), path4, shapes4 + + +# Ancillary functions -------------------------------------------------------------------------------------------------- +def create_folder(path='./new'): + # Create folder + if os.path.exists(path): + shutil.rmtree(path) # delete output folder + os.makedirs(path) # make new output folder + + +def flatten_recursive(path=DATASETS_DIR / 'coco128'): + # Flatten a recursive directory by bringing all files to top level + new_path = Path(str(path) + '_flat') + create_folder(new_path) + for file in tqdm(glob.glob(str(Path(path)) + '/**/*.*', recursive=True)): + shutil.copyfile(file, new_path / Path(file).name) + + +def extract_boxes(path=DATASETS_DIR / 'coco128'): # from utils.datasets import *; extract_boxes() + # Convert detection dataset into classification dataset, with one directory per class + path = Path(path) # images dir + shutil.rmtree(path / 'classifier') if (path / 'classifier').is_dir() else None # remove existing + files = list(path.rglob('*.*')) + n = len(files) # number of files + for im_file in tqdm(files, total=n): + if im_file.suffix[1:] in IMG_FORMATS: + # image + im = cv2.imread(str(im_file))[..., ::-1] # BGR to RGB + h, w = im.shape[:2] + + # labels + lb_file = Path(img2label_paths([str(im_file)])[0]) + if Path(lb_file).exists(): + with open(lb_file) as f: + lb = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32) # labels + + for j, x in enumerate(lb): + c = int(x[0]) # class + f = (path / 'classifier') / f'{c}' / f'{path.stem}_{im_file.stem}_{j}.jpg' # new filename + if not f.parent.is_dir(): + f.parent.mkdir(parents=True) + + b = x[1:] * [w, h, w, h] # box + # b[2:] = b[2:].max() # rectangle to square + b[2:] = b[2:] * 1.2 + 3 # pad + b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(np.int) + + b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image + b[[1, 3]] = np.clip(b[[1, 3]], 0, h) + assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}' + + +def autosplit(path=DATASETS_DIR / 'coco128/images', weights=(0.9, 0.1, 0.0), annotated_only=False): + """ Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files + Usage: from utils.datasets import *; autosplit() + Arguments + path: Path to images directory + weights: Train, val, test weights (list, tuple) + annotated_only: Only use images with an annotated txt file + """ + path = Path(path) # images dir + files = sorted(x for x in path.rglob('*.*') if x.suffix[1:].lower() in IMG_FORMATS) # image files only + n = len(files) # number of files + random.seed(0) # for reproducibility + indices = random.choices([0, 1, 2], weights=weights, k=n) # assign each image to a split + + txt = ['autosplit_train.txt', 'autosplit_val.txt', 'autosplit_test.txt'] # 3 txt files + [(path.parent / x).unlink(missing_ok=True) for x in txt] # remove existing + + print(f'Autosplitting images from {path}' + ', using *.txt labeled images only' * annotated_only) + for i, img in tqdm(zip(indices, files), total=n): + if not annotated_only or Path(img2label_paths([str(img)])[0]).exists(): # check label + with open(path.parent / txt[i], 'a') as f: + f.write('./' + img.relative_to(path.parent).as_posix() + '\n') # add image to txt file + + +def verify_image_label(args): + # Verify one image-label pair + im_file, lb_file, prefix = args + nm, nf, ne, nc, msg, segments = 0, 0, 0, 0, '', [] # number (missing, found, empty, 
corrupt), message, segments + try: + # verify images + im = Image.open(im_file) + im.verify() # PIL verify + shape = exif_size(im) # image size + assert (shape[0] > 9) & (shape[1] > 9), f'image size {shape} <10 pixels' + assert im.format.lower() in IMG_FORMATS, f'invalid image format {im.format}' + if im.format.lower() in ('jpg', 'jpeg'): + with open(im_file, 'rb') as f: + f.seek(-2, 2) + if f.read() != b'\xff\xd9': # corrupt JPEG + ImageOps.exif_transpose(Image.open(im_file)).save(im_file, 'JPEG', subsampling=0, quality=100) + msg = f'{prefix}WARNING: {im_file}: corrupt JPEG restored and saved' + + # verify labels + if os.path.isfile(lb_file): + nf = 1 # label found + with open(lb_file) as f: + lb = [x.split() for x in f.read().strip().splitlines() if len(x)] + if any([len(x) > 8 for x in lb]): # is segment + classes = np.array([x[0] for x in lb], dtype=np.float32) + segments = [np.array(x[1:], dtype=np.float32).reshape(-1, 2) for x in lb] # (cls, xy1...) + lb = np.concatenate((classes.reshape(-1, 1), segments2boxes(segments)), 1) # (cls, xywh) + lb = np.array(lb, dtype=np.float32) + nl = len(lb) + if nl: + assert lb.shape[1] == 5, f'labels require 5 columns, {lb.shape[1]} columns detected' + assert (lb >= 0).all(), f'negative label values {lb[lb < 0]}' + assert (lb[:, 1:] <= 1).all(), f'non-normalized or out of bounds coordinates {lb[:, 1:][lb[:, 1:] > 1]}' + _, i = np.unique(lb, axis=0, return_index=True) + if len(i) < nl: # duplicate row check + lb = lb[i] # remove duplicates + if segments: + segments = segments[i] + msg = f'{prefix}WARNING: {im_file}: {nl - len(i)} duplicate labels removed' + else: + ne = 1 # label empty + lb = np.zeros((0, 5), dtype=np.float32) + else: + nm = 1 # label missing + lb = np.zeros((0, 5), dtype=np.float32) + return im_file, lb, shape, segments, nm, nf, ne, nc, msg + except Exception as e: + nc = 1 + msg = f'{prefix}WARNING: {im_file}: ignoring corrupt image/label: {e}' + return [None, None, None, None, nm, nf, ne, nc, msg] + + +def dataset_stats(path='coco128.yaml', autodownload=False, verbose=False, profile=False, hub=False): + """ Return dataset statistics dictionary with images and instances counts per split per class + To run in parent directory: export PYTHONPATH="$PWD/yolov5" + Usage1: from utils.datasets import *; dataset_stats('coco128.yaml', autodownload=True) + Usage2: from utils.datasets import *; dataset_stats('path/to/coco128_with_yaml.zip') + Arguments + path: Path to data.yaml or data.zip (with data.yaml inside data.zip) + autodownload: Attempt to download dataset if not found locally + verbose: Print stats dictionary + """ + + def round_labels(labels): + # Update labels to integer class and 6 decimal place floats + return [[int(c), *(round(x, 4) for x in points)] for c, *points in labels] + + def unzip(path): + # Unzip data.zip TODO: CONSTRAINT: path/to/abc.zip MUST unzip to 'path/to/abc/' + if str(path).endswith('.zip'): # path is data.zip + assert Path(path).is_file(), f'Error unzipping {path}, file not found' + ZipFile(path).extractall(path=path.parent) # unzip + dir = path.with_suffix('') # dataset directory == zip name + return True, str(dir), next(dir.rglob('*.yaml')) # zipped, data_dir, yaml_path + else: # path is data.yaml + return False, None, path + + def hub_ops(f, max_dim=1920): + # HUB ops for 1 image 'f': resize and save at reduced quality in /dataset-hub for web/app viewing + f_new = im_dir / Path(f).name # dataset-hub image filename + try: # use PIL + im = Image.open(f) + r = max_dim / max(im.height, im.width) # ratio + if 
r < 1.0: # image too large + im = im.resize((int(im.width * r), int(im.height * r))) + im.save(f_new, 'JPEG', quality=75, optimize=True) # save + except Exception as e: # use OpenCV + print(f'WARNING: HUB ops PIL failure {f}: {e}') + im = cv2.imread(f) + im_height, im_width = im.shape[:2] + r = max_dim / max(im_height, im_width) # ratio + if r < 1.0: # image too large + im = cv2.resize(im, (int(im_width * r), int(im_height * r)), interpolation=cv2.INTER_AREA) + cv2.imwrite(str(f_new), im) + + zipped, data_dir, yaml_path = unzip(Path(path)) + with open(check_yaml(yaml_path), errors='ignore') as f: + data = yaml.safe_load(f) # data dict + if zipped: + data['path'] = data_dir # TODO: should this be dir.resolve()? + check_dataset(data, autodownload) # download dataset if missing + hub_dir = Path(data['path'] + ('-hub' if hub else '')) + stats = {'nc': data['nc'], 'names': data['names']} # statistics dictionary + for split in 'train', 'val', 'test': + if data.get(split) is None: + stats[split] = None # i.e. no test set + continue + x = [] + dataset = LoadImagesAndLabels(data[split]) # load dataset + for label in tqdm(dataset.labels, total=dataset.n, desc='Statistics'): + x.append(np.bincount(label[:, 0].astype(int), minlength=data['nc'])) + x = np.array(x) # shape(128x80) + stats[split] = {'instance_stats': {'total': int(x.sum()), 'per_class': x.sum(0).tolist()}, + 'image_stats': {'total': dataset.n, 'unlabelled': int(np.all(x == 0, 1).sum()), + 'per_class': (x > 0).sum(0).tolist()}, + 'labels': [{str(Path(k).name): round_labels(v.tolist())} for k, v in + zip(dataset.img_files, dataset.labels)]} + + if hub: + im_dir = hub_dir / 'images' + im_dir.mkdir(parents=True, exist_ok=True) + for _ in tqdm(ThreadPool(NUM_THREADS).imap(hub_ops, dataset.img_files), total=dataset.n, desc='HUB Ops'): + pass + + # Profile + stats_path = hub_dir / 'stats.json' + if profile: + for _ in range(1): + file = stats_path.with_suffix('.npy') + t1 = time.time() + np.save(file, stats) + t2 = time.time() + x = np.load(file, allow_pickle=True) + print(f'stats.npy times: {time.time() - t2:.3f}s read, {t2 - t1:.3f}s write') + + file = stats_path.with_suffix('.json') + t1 = time.time() + with open(file, 'w') as f: + json.dump(stats, f) # save stats *.json + t2 = time.time() + with open(file) as f: + x = json.load(f) # load hyps dict + print(f'stats.json times: {time.time() - t2:.3f}s read, {t2 - t1:.3f}s write') + + # Save, print and return + if hub: + print(f'Saving {stats_path.resolve()}...') + with open(stats_path, 'w') as f: + json.dump(stats, f) # save stats.json + if verbose: + print(json.dumps(stats, indent=2, sort_keys=False)) + return stats diff --git a/utils/downloads.py b/utils/downloads.py new file mode 100644 index 0000000..d7b87cb --- /dev/null +++ b/utils/downloads.py @@ -0,0 +1,153 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Download utils +""" + +import os +import platform +import subprocess +import time +import urllib +from pathlib import Path +from zipfile import ZipFile + +import requests +import torch + + +def gsutil_getsize(url=''): + # gs://bucket/file size https://cloud.google.com/storage/docs/gsutil/commands/du + s = subprocess.check_output(f'gsutil du {url}', shell=True).decode('utf-8') + return eval(s.split(' ')[0]) if len(s) else 0 # bytes + + +def safe_download(file, url, url2=None, min_bytes=1E0, error_msg=''): + # Attempts to download file from url or url2, checks and removes incomplete downloads < min_bytes + file = Path(file) + assert_msg = f"Downloaded file '{file}' does not exist or 
size is < min_bytes={min_bytes}" + try: # url1 + print(f'Downloading {url} to {file}...') + torch.hub.download_url_to_file(url, str(file)) + assert file.exists() and file.stat().st_size > min_bytes, assert_msg # check + except Exception as e: # url2 + file.unlink(missing_ok=True) # remove partial downloads + print(f'ERROR: {e}\nRe-attempting {url2 or url} to {file}...') + os.system(f"curl -L '{url2 or url}' -o '{file}' --retry 3 -C -") # curl download, retry and resume on fail + finally: + if not file.exists() or file.stat().st_size < min_bytes: # check + file.unlink(missing_ok=True) # remove partial downloads + print(f"ERROR: {assert_msg}\n{error_msg}") + print('') + + +def attempt_download(file, repo='ultralytics/yolov5'): # from utils.downloads import *; attempt_download() + # Attempt file download if does not exist + file = Path(str(file).strip().replace("'", '')) + + if not file.exists(): + # URL specified + name = Path(urllib.parse.unquote(str(file))).name # decode '%2F' to '/' etc. + if str(file).startswith(('http:/', 'https:/')): # download + url = str(file).replace(':/', '://') # Pathlib turns :// -> :/ + file = name.split('?')[0] # parse authentication https://url.com/file.txt?auth... + if Path(file).is_file(): + print(f'Found {url} locally at {file}') # file already exists + else: + safe_download(file=file, url=url, min_bytes=1E5) + return file + + # GitHub assets + file.parent.mkdir(parents=True, exist_ok=True) # make parent dir (if required) + try: + response = requests.get(f'https://api.github.com/repos/{repo}/releases/latest').json() # github api + assets = [x['name'] for x in response['assets']] # release assets, i.e. ['yolov5s.pt', 'yolov5m.pt', ...] + tag = response['tag_name'] # i.e. 'v1.0' + except Exception: # fallback plan + assets = ['yolov5n.pt', 'yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt', + 'yolov5n6.pt', 'yolov5s6.pt', 'yolov5m6.pt', 'yolov5l6.pt', 'yolov5x6.pt'] + try: + tag = subprocess.check_output('git tag', shell=True, stderr=subprocess.STDOUT).decode().split()[-1] + except Exception: + tag = 'v6.0' # current release + + if name in assets: + safe_download(file, + url=f'https://github.com/{repo}/releases/download/{tag}/{name}', + # url2=f'https://storage.googleapis.com/{repo}/ckpt/{name}', # backup url (optional) + min_bytes=1E5, + error_msg=f'{file} missing, try downloading from https://github.com/{repo}/releases/') + + return str(file) + + +def gdrive_download(id='16TiPfZj7htmTyhntwcZyEEAejOUxuT6m', file='tmp.zip'): + # Downloads a file from Google Drive. from yolov5.utils.downloads import *; gdrive_download() + t = time.time() + file = Path(file) + cookie = Path('cookie') # gdrive cookie + print(f'Downloading https://drive.google.com/uc?export=download&id={id} as {file}... 
', end='') + file.unlink(missing_ok=True) # remove existing file + cookie.unlink(missing_ok=True) # remove existing cookie + + # Attempt file download + out = "NUL" if platform.system() == "Windows" else "/dev/null" + os.system(f'curl -c ./cookie -s -L "drive.google.com/uc?export=download&id={id}" > {out}') + if os.path.exists('cookie'): # large file + s = f'curl -Lb ./cookie "drive.google.com/uc?export=download&confirm={get_token()}&id={id}" -o {file}' + else: # small file + s = f'curl -s -L -o {file} "drive.google.com/uc?export=download&id={id}"' + r = os.system(s) # execute, capture return + cookie.unlink(missing_ok=True) # remove existing cookie + + # Error check + if r != 0: + file.unlink(missing_ok=True) # remove partial + print('Download error ') # raise Exception('Download error') + return r + + # Unzip if archive + if file.suffix == '.zip': + print('unzipping... ', end='') + ZipFile(file).extractall(path=file.parent) # unzip + file.unlink() # remove zip + + print(f'Done ({time.time() - t:.1f}s)') + return r + + +def get_token(cookie="./cookie"): + with open(cookie) as f: + for line in f: + if "download" in line: + return line.split()[-1] + return "" + +# Google utils: https://cloud.google.com/storage/docs/reference/libraries ---------------------------------------------- +# +# +# def upload_blob(bucket_name, source_file_name, destination_blob_name): +# # Uploads a file to a bucket +# # https://cloud.google.com/storage/docs/uploading-objects#storage-upload-object-python +# +# storage_client = storage.Client() +# bucket = storage_client.get_bucket(bucket_name) +# blob = bucket.blob(destination_blob_name) +# +# blob.upload_from_filename(source_file_name) +# +# print('File {} uploaded to {}.'.format( +# source_file_name, +# destination_blob_name)) +# +# +# def download_blob(bucket_name, source_blob_name, destination_file_name): +# # Uploads a blob from a bucket +# storage_client = storage.Client() +# bucket = storage_client.get_bucket(bucket_name) +# blob = bucket.blob(source_blob_name) +# +# blob.download_to_filename(destination_file_name) +# +# print('Blob {} downloaded to {}.'.format( +# source_blob_name, +# destination_file_name)) diff --git a/utils/general.py b/utils/general.py new file mode 100755 index 0000000..3044b9c --- /dev/null +++ b/utils/general.py @@ -0,0 +1,880 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +General utils +""" + +import contextlib +import glob +import logging +import math +import os +import platform +import random +import re +import shutil +import signal +import time +import urllib +from itertools import repeat +from multiprocessing.pool import ThreadPool +from pathlib import Path +from subprocess import check_output +from zipfile import ZipFile + +import cv2 +import numpy as np +import pandas as pd +import pkg_resources as pkg +import torch +import torchvision +import yaml + +from utils.downloads import gsutil_getsize +from utils.metrics import box_iou, fitness + +# Settings +FILE = Path(__file__).resolve() +ROOT = FILE.parents[1] # YOLOv5 root directory +DATASETS_DIR = ROOT.parent / 'datasets' # YOLOv5 datasets directory +NUM_THREADS = min(8, max(1, os.cpu_count() - 1)) # number of YOLOv5 multiprocessing threads +VERBOSE = str(os.getenv('YOLOv5_VERBOSE', True)).lower() == 'true' # global verbose mode +FONT = 'Arial.ttf' # https://ultralytics.com/assets/Arial.ttf + +torch.set_printoptions(linewidth=320, precision=5, profile='long') +np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format}) # format short g, %precision=5 
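+# Worked example (illustrative only): on an 8-core machine NUM_THREADS = min(8, max(1, 8 - 1)) = 7, which is also the value written to NUMEXPR_MAX_THREADS below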
+pd.options.display.max_columns = 10 +cv2.setNumThreads(0) # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader) +os.environ['NUMEXPR_MAX_THREADS'] = str(NUM_THREADS) # NumExpr max threads + + +def is_kaggle(): + # Is environment a Kaggle Notebook? + try: + assert os.environ.get('PWD') == '/kaggle/working' + assert os.environ.get('KAGGLE_URL_BASE') == 'https://www.kaggle.com' + return True + except AssertionError: + return False + + +def is_writeable(dir, test=False): + # Return True if directory has write permissions, test opening a file with write permissions if test=True + if test: # method 1 + file = Path(dir) / 'tmp.txt' + try: + with open(file, 'w'): # open file with write permissions + pass + file.unlink() # remove file + return True + except OSError: + return False + else: # method 2 + return os.access(dir, os.R_OK) # possible issues on Windows + + +def set_logging(name=None, verbose=VERBOSE): + # Sets level and returns logger + if is_kaggle(): + for h in logging.root.handlers: + logging.root.removeHandler(h) # remove all handlers associated with the root logger object + rank = int(os.getenv('RANK', -1)) # rank in world for Multi-GPU trainings + logging.basicConfig(format="%(message)s", level=logging.INFO if (verbose and rank in (-1, 0)) else logging.WARNING) + return logging.getLogger(name) + + +LOGGER = set_logging('yolov5') # define globally (used in train.py, val.py, detect.py, etc.) + + +def user_config_dir(dir='Ultralytics', env_var='YOLOV5_CONFIG_DIR'): + # Return path of user configuration directory. Prefer environment variable if exists. Make dir if required. + env = os.getenv(env_var) + if env: + path = Path(env) # use environment variable + else: + cfg = {'Windows': 'AppData/Roaming', 'Linux': '.config', 'Darwin': 'Library/Application Support'} # 3 OS dirs + path = Path.home() / cfg.get(platform.system(), '') # OS-specific config dir + path = (path if is_writeable(path) else Path('/tmp')) / dir # GCP and AWS lambda fix, only /tmp is writeable + path.mkdir(exist_ok=True) # make if required + return path + + +CONFIG_DIR = user_config_dir() # Ultralytics settings dir + + +class Profile(contextlib.ContextDecorator): + # Usage: @Profile() decorator or 'with Profile():' context manager + def __enter__(self): + self.start = time.time() + + def __exit__(self, type, value, traceback): + print(f'Profile results: {time.time() - self.start:.5f}s') + + +class Timeout(contextlib.ContextDecorator): + # Usage: @Timeout(seconds) decorator or 'with Timeout(seconds):' context manager + def __init__(self, seconds, *, timeout_msg='', suppress_timeout_errors=True): + self.seconds = int(seconds) + self.timeout_message = timeout_msg + self.suppress = bool(suppress_timeout_errors) + + def _timeout_handler(self, signum, frame): + raise TimeoutError(self.timeout_message) + + def __enter__(self): + signal.signal(signal.SIGALRM, self._timeout_handler) # Set handler for SIGALRM + signal.alarm(self.seconds) # start countdown for SIGALRM to be raised + + def __exit__(self, exc_type, exc_val, exc_tb): + signal.alarm(0) # Cancel SIGALRM if it's scheduled + if self.suppress and exc_type is TimeoutError: # Suppress TimeoutError + return True + + +class WorkingDirectory(contextlib.ContextDecorator): + # Usage: @WorkingDirectory(dir) decorator or 'with WorkingDirectory(dir):' context manager + def __init__(self, new_dir): + self.dir = new_dir # new dir + self.cwd = Path.cwd().resolve() # current dir + + def __enter__(self): + os.chdir(self.dir) + + def __exit__(self, exc_type, exc_val, 
exc_tb): + os.chdir(self.cwd) + + +def try_except(func): + # try-except function. Usage: @try_except decorator + def handler(*args, **kwargs): + try: + func(*args, **kwargs) + except Exception as e: + print(e) + + return handler + + +def methods(instance): + # Get class/instance methods + return [f for f in dir(instance) if callable(getattr(instance, f)) and not f.startswith("__")] + + +def print_args(name, opt): + # Print argparser arguments + LOGGER.info(colorstr(f'{name}: ') + ', '.join(f'{k}={v}' for k, v in vars(opt).items())) + + +def init_seeds(seed=0): + # Initialize random number generator (RNG) seeds https://pytorch.org/docs/stable/notes/randomness.html + # cudnn seed 0 settings are slower and more reproducible, else faster and less reproducible + import torch.backends.cudnn as cudnn + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + cudnn.benchmark, cudnn.deterministic = (False, True) if seed == 0 else (True, False) + + +def intersect_dicts(da, db, exclude=()): + # Dictionary intersection of matching keys and shapes, omitting 'exclude' keys, using da values + return {k: v for k, v in da.items() if k in db and not any(x in k for x in exclude) and v.shape == db[k].shape} + + +def get_latest_run(search_dir='.'): + # Return path to most recent 'last.pt' in /runs (i.e. to --resume from) + last_list = glob.glob(f'{search_dir}/**/last*.pt', recursive=True) + return max(last_list, key=os.path.getctime) if last_list else '' + + +def is_docker(): + # Is environment a Docker container? + return Path('/workspace').exists() # or Path('/.dockerenv').exists() + + +def is_colab(): + # Is environment a Google Colab instance? + try: + import google.colab + return True + except ImportError: + return False + + +def is_pip(): + # Is file in a pip package? + return 'site-packages' in Path(__file__).resolve().parts + + +def is_ascii(s=''): + # Is string composed of all ASCII (no UTF) characters? (note str().isascii() introduced in python 3.7) + s = str(s) # convert list, tuple, None, etc. to str + return len(s.encode().decode('ascii', 'ignore')) == len(s) + + +def is_chinese(s='人工智能'): + # Is string composed of any Chinese characters? 
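+ # e.g. (illustrative) is_chinese('人工智能') -> True, is_chinese('yolov5') -> False; the pattern matches the CJK Unified Ideographs block U+4E00-U+9FFF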
+ return True if re.search('[\u4e00-\u9fff]', str(s)) else False + + +def emojis(str=''): + # Return platform-dependent emoji-safe version of string + return str.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else str + + +def file_size(path): + # Return file/dir size (MB) + path = Path(path) + if path.is_file(): + return path.stat().st_size / 1E6 + elif path.is_dir(): + return sum(f.stat().st_size for f in path.glob('**/*') if f.is_file()) / 1E6 + else: + return 0.0 + + +def check_online(): + # Check internet connectivity + import socket + try: + socket.create_connection(("1.1.1.1", 443), 5) # check host accessibility + return True + except OSError: + return False + + +@try_except +@WorkingDirectory(ROOT) +def check_git_status(): + # Recommend 'git pull' if code is out of date + msg = ', for updates see https://github.com/ultralytics/yolov5' + s = colorstr('github: ') # string + assert Path('.git').exists(), s + 'skipping check (not a git repository)' + msg + assert not is_docker(), s + 'skipping check (Docker image)' + msg + assert check_online(), s + 'skipping check (offline)' + msg + + cmd = 'git fetch && git config --get remote.origin.url' + url = check_output(cmd, shell=True, timeout=5).decode().strip().rstrip('.git') # git fetch + branch = check_output('git rev-parse --abbrev-ref HEAD', shell=True).decode().strip() # checked out + n = int(check_output(f'git rev-list {branch}..origin/master --count', shell=True)) # commits behind + if n > 0: + s += f"⚠️ YOLOv5 is out of date by {n} commit{'s' * (n > 1)}. Use `git pull` or `git clone {url}` to update." + else: + s += f'up to date with {url} ✅' + LOGGER.info(emojis(s)) # emoji-safe + + +def check_python(minimum='3.6.2'): + # Check current python version vs. required python version + check_version(platform.python_version(), minimum, name='Python ', hard=True) + + +def check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=False, hard=False, verbose=False): + # Check version vs. required version + current, minimum = (pkg.parse_version(x) for x in (current, minimum)) + result = (current == minimum) if pinned else (current >= minimum) # bool + s = f'{name}{minimum} required by YOLOv5, but {name}{current} is currently installed' # string + if hard: + assert result, s # assert min requirements met + if verbose and not result: + LOGGER.warning(s) + return result + + +@try_except +def check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), install=True): + # Check installed dependencies meet requirements (pass *.txt file or list of packages) + prefix = colorstr('red', 'bold', 'requirements:') + check_python() # check python version + if isinstance(requirements, (str, Path)): # requirements.txt file + file = Path(requirements) + assert file.exists(), f"{prefix} {file.resolve()} not found, check failed." 
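+ # the file branch below parses requirements.txt with pkg_resources and re-renders each entry as 'name' + 'specifier', e.g. 'numpy>=1.18.5' (example value for illustration)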
+ with file.open() as f: + requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(f) if x.name not in exclude] + else: # list or tuple of packages + requirements = [x for x in requirements if x not in exclude] + + n = 0 # number of packages updates + for r in requirements: + try: + pkg.require(r) + except Exception: # DistributionNotFound or VersionConflict if requirements not met + s = f"{prefix} {r} not found and is required by YOLOv5" + if install: + LOGGER.info(f"{s}, attempting auto-update...") + try: + assert check_online(), f"'pip install {r}' skipped (offline)" + LOGGER.info(check_output(f"pip install '{r}'", shell=True).decode()) + n += 1 + except Exception as e: + LOGGER.warning(f'{prefix} {e}') + else: + LOGGER.info(f'{s}. Please install and rerun your command.') + + if n: # if packages updated + source = file.resolve() if 'file' in locals() else requirements + s = f"{prefix} {n} package{'s' * (n > 1)} updated per {source}\n" \ + f"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\n" + LOGGER.info(emojis(s)) + + +def check_img_size(imgsz, s=32, floor=0): + # Verify image size is a multiple of stride s in each dimension + if isinstance(imgsz, int): # integer i.e. img_size=640 + new_size = max(make_divisible(imgsz, int(s)), floor) + else: # list i.e. img_size=[640, 480] + new_size = [max(make_divisible(x, int(s)), floor) for x in imgsz] + if new_size != imgsz: + LOGGER.warning(f'WARNING: --img-size {imgsz} must be multiple of max stride {s}, updating to {new_size}') + return new_size + + +def check_imshow(): + # Check if environment supports image displays + try: + assert not is_docker(), 'cv2.imshow() is disabled in Docker environments' + assert not is_colab(), 'cv2.imshow() is disabled in Google Colab environments' + cv2.imshow('test', np.zeros((1, 1, 3))) + cv2.waitKey(1) + cv2.destroyAllWindows() + cv2.waitKey(1) + return True + except Exception as e: + LOGGER.warning(f'WARNING: Environment does not support cv2.imshow() or PIL Image.show() image displays\n{e}') + return False + + +def check_suffix(file='yolov5s.pt', suffix=('.pt',), msg=''): + # Check file(s) for acceptable suffix + if file and suffix: + if isinstance(suffix, str): + suffix = [suffix] + for f in file if isinstance(file, (list, tuple)) else [file]: + s = Path(f).suffix.lower() # file suffix + if len(s): + assert s in suffix, f"{msg}{f} acceptable suffix is {suffix}" + + +def check_yaml(file, suffix=('.yaml', '.yml')): + # Search/download YAML file (if necessary) and return path, checking suffix + return check_file(file, suffix) + + +def check_file(file, suffix=''): + # Search/download file (if necessary) and return path + check_suffix(file, suffix) # optional + file = str(file) # convert to str() + if Path(file).is_file() or file == '': # exists + return file + elif file.startswith(('http:/', 'https:/')): # download + url = str(Path(file)).replace(':/', '://') # Pathlib turns :// -> :/ + file = Path(urllib.parse.unquote(file).split('?')[0]).name # '%2F' to '/', split https://url.com/file.txt?auth + if Path(file).is_file(): + LOGGER.info(f'Found {url} locally at {file}') # file already exists + else: + LOGGER.info(f'Downloading {url} to {file}...') + torch.hub.download_url_to_file(url, file) + assert Path(file).exists() and Path(file).stat().st_size > 0, f'File download failed: {url}' # check + return file + else: # search + files = [] + for d in 'data', 'models', 'utils': # search directories + files.extend(glob.glob(str(ROOT / d / '**' / file), 
recursive=True)) # find file + assert len(files), f'File not found: {file}' # assert file was found + assert len(files) == 1, f"Multiple files match '{file}', specify exact path: {files}" # assert unique + return files[0] # return file + + +def check_font(font=FONT): + # Download font to CONFIG_DIR if necessary + font = Path(font) + if not font.exists() and not (CONFIG_DIR / font.name).exists(): + url = "https://ultralytics.com/assets/" + font.name + LOGGER.info(f'Downloading {url} to {CONFIG_DIR / font.name}...') + torch.hub.download_url_to_file(url, str(font), progress=False) + + +def check_dataset(data, autodownload=True): + # Download and/or unzip dataset if not found locally + # Usage: https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128_with_yaml.zip + + # Download (optional) + extract_dir = '' + if isinstance(data, (str, Path)) and str(data).endswith('.zip'): # i.e. gs://bucket/dir/coco128.zip + download(data, dir=DATASETS_DIR, unzip=True, delete=False, curl=False, threads=1) + data = next((DATASETS_DIR / Path(data).stem).rglob('*.yaml')) + extract_dir, autodownload = data.parent, False + + # Read yaml (optional) + if isinstance(data, (str, Path)): + with open(data, errors='ignore') as f: + data = yaml.safe_load(f) # dictionary + + # Resolve paths + path = Path(extract_dir or data.get('path') or '') # optional 'path' default to '.' + if not path.is_absolute(): + path = (ROOT / path).resolve() + for k in 'train', 'val', 'test': + if data.get(k): # prepend path + data[k] = str(path / data[k]) if isinstance(data[k], str) else [str(path / x) for x in data[k]] + + # Parse yaml + assert 'nc' in data, "Dataset 'nc' key missing." + if 'names' not in data: + data['names'] = [f'class{i}' for i in range(data['nc'])] # assign class names if missing + train, val, test, s = (data.get(x) for x in ('train', 'val', 'test', 'download')) + if val: + val = [Path(x).resolve() for x in (val if isinstance(val, list) else [val])] # val path + if not all(x.exists() for x in val): + LOGGER.info('\nDataset not found, missing paths: %s' % [str(x) for x in val if not x.exists()]) + if s and autodownload: # download script + root = path.parent if 'path' in data else '..' # unzip directory i.e. '../' + if s.startswith('http') and s.endswith('.zip'): # URL + f = Path(s).name # filename + LOGGER.info(f'Downloading {s} to {f}...') + torch.hub.download_url_to_file(s, f) + Path(root).mkdir(parents=True, exist_ok=True) # create root + ZipFile(f).extractall(path=root) # unzip + Path(f).unlink() # remove zip + r = None # success + elif s.startswith('bash '): # bash script + LOGGER.info(f'Running {s} ...') + r = os.system(s) + else: # python script + r = exec(s, {'yaml': data}) # return None + LOGGER.info(f"Dataset autodownload {f'success, saved to {root}' if r in (0, None) else 'failure'}\n") + else: + raise Exception('Dataset not found.') + + return data # dictionary + + +def url2file(url): + # Convert URL to filename, i.e. 
https://url.com/file.txt?auth -> file.txt + url = str(Path(url)).replace(':/', '://') # Pathlib turns :// -> :/ + file = Path(urllib.parse.unquote(url)).name.split('?')[0] # '%2F' to '/', split https://url.com/file.txt?auth + return file + + +def download(url, dir='.', unzip=True, delete=True, curl=False, threads=1): + # Multi-threaded file download and unzip function, used in data.yaml for autodownload + def download_one(url, dir): + # Download 1 file + f = dir / Path(url).name # filename + if Path(url).is_file(): # exists in current path + Path(url).rename(f) # move to dir + elif not f.exists(): + LOGGER.info(f'Downloading {url} to {f}...') + if curl: + os.system(f"curl -L '{url}' -o '{f}' --retry 9 -C -") # curl download, retry and resume on fail + else: + torch.hub.download_url_to_file(url, f, progress=True) # torch download + if unzip and f.suffix in ('.zip', '.gz'): + LOGGER.info(f'Unzipping {f}...') + if f.suffix == '.zip': + ZipFile(f).extractall(path=dir) # unzip + elif f.suffix == '.gz': + os.system(f'tar xfz {f} --directory {f.parent}') # unzip + if delete: + f.unlink() # remove zip + + dir = Path(dir) + dir.mkdir(parents=True, exist_ok=True) # make directory + if threads > 1: + pool = ThreadPool(threads) + pool.imap(lambda x: download_one(*x), zip(url, repeat(dir))) # multi-threaded + pool.close() + pool.join() + else: + for u in [url] if isinstance(url, (str, Path)) else url: + download_one(u, dir) + + +def make_divisible(x, divisor): + # Returns nearest x divisible by divisor + if isinstance(divisor, torch.Tensor): + divisor = int(divisor.max()) # to int + return math.ceil(x / divisor) * divisor + + +def clean_str(s): + # Cleans a string by replacing special characters with underscore _ + return re.sub(pattern="[|@#!¡·$€%&()=?¿^*;:,¨´><+]", repl="_", string=s) + + +def one_cycle(y1=0.0, y2=1.0, steps=100): + # lambda function for sinusoidal ramp from y1 to y2 https://arxiv.org/pdf/1812.01187.pdf + return lambda x: ((1 - math.cos(x * math.pi / steps)) / 2) * (y2 - y1) + y1 + + +def colorstr(*input): + # Colors a string https://en.wikipedia.org/wiki/ANSI_escape_code, i.e. 
colorstr('blue', 'hello world') + *args, string = input if len(input) > 1 else ('blue', 'bold', input[0]) # color arguments, string + colors = {'black': '\033[30m', # basic colors + 'red': '\033[31m', + 'green': '\033[32m', + 'yellow': '\033[33m', + 'blue': '\033[34m', + 'magenta': '\033[35m', + 'cyan': '\033[36m', + 'white': '\033[37m', + 'bright_black': '\033[90m', # bright colors + 'bright_red': '\033[91m', + 'bright_green': '\033[92m', + 'bright_yellow': '\033[93m', + 'bright_blue': '\033[94m', + 'bright_magenta': '\033[95m', + 'bright_cyan': '\033[96m', + 'bright_white': '\033[97m', + 'end': '\033[0m', # misc + 'bold': '\033[1m', + 'underline': '\033[4m'} + return ''.join(colors[x] for x in args) + f'{string}' + colors['end'] + + +def labels_to_class_weights(labels, nc=80): + # Get class weights (inverse frequency) from training labels + if labels[0] is None: # no labels loaded + return torch.Tensor() + + labels = np.concatenate(labels, 0) # labels.shape = (866643, 5) for COCO + classes = labels[:, 0].astype(np.int) # labels = [class xywh] + weights = np.bincount(classes, minlength=nc) # occurrences per class + + # Prepend gridpoint count (for uCE training) + # gpi = ((320 / 32 * np.array([1, 2, 4])) ** 2 * 3).sum() # gridpoints per image + # weights = np.hstack([gpi * len(labels) - weights.sum() * 9, weights * 9]) ** 0.5 # prepend gridpoints to start + + weights[weights == 0] = 1 # replace empty bins with 1 + weights = 1 / weights # number of targets per class + weights /= weights.sum() # normalize + return torch.from_numpy(weights) + + +def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)): + # Produces image weights based on class_weights and image contents + class_counts = np.array([np.bincount(x[:, 0].astype(np.int), minlength=nc) for x in labels]) + image_weights = (class_weights.reshape(1, nc) * class_counts).sum(1) + # index = random.choices(range(n), weights=image_weights, k=1) # weight image sample + return image_weights + + +def coco80_to_coco91_class(): # converts 80-index (val2014) to 91-index (paper) + # https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/ + # a = np.loadtxt('data/coco.names', dtype='str', delimiter='\n') + # b = np.loadtxt('data/coco_paper.names', dtype='str', delimiter='\n') + # x1 = [list(a[i] == b).index(True) + 1 for i in range(80)] # darknet to coco + # x2 = [list(b[i] == a).index(True) if any(b[i] == a) else None for i in range(91)] # coco to darknet + x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34, + 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, + 64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90] + return x + + +def xyxy2xywh(x): + # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right + y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) + y[:, 0] = (x[:, 0] + x[:, 2]) / 2 # x center + y[:, 1] = (x[:, 1] + x[:, 3]) / 2 # y center + y[:, 2] = x[:, 2] - x[:, 0] # width + y[:, 3] = x[:, 3] - x[:, 1] # height + return y + + +def xywh2xyxy(x): + # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right + y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) + y[:, 0] = x[:, 0] - x[:, 2] / 2 # top left x + y[:, 1] = x[:, 1] - x[:, 3] / 2 # top left y + y[:, 2] = x[:, 0] + x[:, 2] / 2 # bottom right x + y[:, 3] = x[:, 1] + x[:, 3] / 2 # bottom 
right y + return y + + +def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0): + # Convert nx4 boxes from [x, y, w, h] normalized to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right + y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) + y[:, 0] = w * (x[:, 0] - x[:, 2] / 2) + padw # top left x + y[:, 1] = h * (x[:, 1] - x[:, 3] / 2) + padh # top left y + y[:, 2] = w * (x[:, 0] + x[:, 2] / 2) + padw # bottom right x + y[:, 3] = h * (x[:, 1] + x[:, 3] / 2) + padh # bottom right y + return y + + +def xyxy2xywhn(x, w=640, h=640, clip=False, eps=0.0): + # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] normalized where xy1=top-left, xy2=bottom-right + if clip: + clip_coords(x, (h - eps, w - eps)) # warning: inplace clip + y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) + y[:, 0] = ((x[:, 0] + x[:, 2]) / 2) / w # x center + y[:, 1] = ((x[:, 1] + x[:, 3]) / 2) / h # y center + y[:, 2] = (x[:, 2] - x[:, 0]) / w # width + y[:, 3] = (x[:, 3] - x[:, 1]) / h # height + return y + + +def xyn2xy(x, w=640, h=640, padw=0, padh=0): + # Convert normalized segments into pixel segments, shape (n,2) + y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) + y[:, 0] = w * x[:, 0] + padw # top left x + y[:, 1] = h * x[:, 1] + padh # top left y + return y + + +def segment2box(segment, width=640, height=640): + # Convert 1 segment label to 1 box label, applying inside-image constraint, i.e. (xy1, xy2, ...) to (xyxy) + x, y = segment.T # segment xy + inside = (x >= 0) & (y >= 0) & (x <= width) & (y <= height) + x, y, = x[inside], y[inside] + return np.array([x.min(), y.min(), x.max(), y.max()]) if any(x) else np.zeros((1, 4)) # xyxy + + +def segments2boxes(segments): + # Convert segment labels to box labels, i.e. (cls, xy1, xy2, ...) to (cls, xywh) + boxes = [] + for s in segments: + x, y = s.T # segment xy + boxes.append([x.min(), y.min(), x.max(), y.max()]) # cls, xyxy + return xyxy2xywh(np.array(boxes)) # cls, xywh + + +def resample_segments(segments, n=1000): + # Up-sample an (n,2) segment + for i, s in enumerate(segments): + x = np.linspace(0, len(s) - 1, n) + xp = np.arange(len(s)) + segments[i] = np.concatenate([np.interp(x, xp, s[:, i]) for i in range(2)]).reshape(2, -1).T # segment xy + return segments + + +def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None): + # Rescale coords (xyxy) from img1_shape to img0_shape + if ratio_pad is None: # calculate from img0_shape + gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new + pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding + else: + gain = ratio_pad[0][0] + pad = ratio_pad[1] + + coords[:, [0, 2]] -= pad[0] # x padding + coords[:, [1, 3]] -= pad[1] # y padding + coords[:, :4] /= gain + clip_coords(coords, img0_shape) + return coords + + +def clip_coords(boxes, shape): + # Clip bounding xyxy bounding boxes to image shape (height, width) + if isinstance(boxes, torch.Tensor): # faster individually + boxes[:, 0].clamp_(0, shape[1]) # x1 + boxes[:, 1].clamp_(0, shape[0]) # y1 + boxes[:, 2].clamp_(0, shape[1]) # x2 + boxes[:, 3].clamp_(0, shape[0]) # y2 + else: # np.array (faster grouped) + boxes[:, [0, 2]] = boxes[:, [0, 2]].clip(0, shape[1]) # x1, x2 + boxes[:, [1, 3]] = boxes[:, [1, 3]].clip(0, shape[0]) # y1, y2 + + +def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False, + labels=(), max_det=300): + """Runs Non-Maximum Suppression (NMS) on inference results 
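+ (illustrative summary: `prediction` is shaped (batch, n, nc + 5); candidates are kept where objectness > conf_thres,
+ boxes are offset per class unless `agnostic`, then torchvision.ops.nms() is run per image at iou_thres)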
+ + Returns: + list of detections, on (n,6) tensor per image [xyxy, conf, cls] + """ + + nc = prediction.shape[2] - 5 # number of classes + xc = prediction[..., 4] > conf_thres # candidates + + # Checks + assert 0 <= conf_thres <= 1, f'Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0' + assert 0 <= iou_thres <= 1, f'Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0' + + # Settings + min_wh, max_wh = 2, 7680 # (pixels) minimum and maximum box width and height + max_nms = 30000 # maximum number of boxes into torchvision.ops.nms() + time_limit = 10.0 # seconds to quit after + redundant = True # require redundant detections + multi_label &= nc > 1 # multiple labels per box (adds 0.5ms/img) + merge = False # use merge-NMS + + t = time.time() + output = [torch.zeros((0, 6), device=prediction.device)] * prediction.shape[0] + for xi, x in enumerate(prediction): # image index, image inference + # Apply constraints + x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height + x = x[xc[xi]] # confidence + + # Cat apriori labels if autolabelling + if labels and len(labels[xi]): + lb = labels[xi] + v = torch.zeros((len(lb), nc + 5), device=x.device) + v[:, :4] = lb[:, 1:5] # box + v[:, 4] = 1.0 # conf + v[range(len(lb)), lb[:, 0].long() + 5] = 1.0 # cls + x = torch.cat((x, v), 0) + + # If none remain process next image + if not x.shape[0]: + continue + + # Compute conf + x[:, 5:] *= x[:, 4:5] # conf = obj_conf * cls_conf + + # Box (center x, center y, width, height) to (x1, y1, x2, y2) + box = xywh2xyxy(x[:, :4]) + + # Detections matrix nx6 (xyxy, conf, cls) + if multi_label: + i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T + x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1) + else: # best class only + conf, j = x[:, 5:].max(1, keepdim=True) + x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres] + + # Filter by class + if classes is not None: + x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)] + + # Apply finite constraint + # if not torch.isfinite(x).all(): + # x = x[torch.isfinite(x).all(1)] + + # Check shape + n = x.shape[0] # number of boxes + if not n: # no boxes + continue + elif n > max_nms: # excess boxes + x = x[x[:, 4].argsort(descending=True)[:max_nms]] # sort by confidence + + # Batched NMS + c = x[:, 5:6] * (0 if agnostic else max_wh) # classes + boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores + i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS + if i.shape[0] > max_det: # limit detections + i = i[:max_det] + if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean) + # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4) + iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix + weights = iou * scores[None] # box weights + x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes + if redundant: + i = i[iou.sum(1) > 1] # require redundancy + + output[xi] = x[i] + if (time.time() - t) > time_limit: + LOGGER.warning(f'WARNING: NMS time limit {time_limit}s exceeded') + break # time limit exceeded + + return output + + +def strip_optimizer(f='best.pt', s=''): # from utils.general import *; strip_optimizer() + # Strip optimizer from 'f' to finalize training, optionally save as 's' + x = torch.load(f, map_location=torch.device('cpu')) + if x.get('ema'): + x['model'] = x['ema'] # replace model with ema + for k in 'optimizer', 'best_fitness', 'wandb_id', 'ema', 'updates': # keys + x[k] = 
None + x['epoch'] = -1 + x['model'].half() # to FP16 + for p in x['model'].parameters(): + p.requires_grad = False + torch.save(x, s or f) + mb = os.path.getsize(s or f) / 1E6 # filesize + LOGGER.info(f"Optimizer stripped from {f},{(' saved as %s,' % s) if s else ''} {mb:.1f}MB") + + +def print_mutation(results, hyp, save_dir, bucket, prefix=colorstr('evolve: ')): + evolve_csv = save_dir / 'evolve.csv' + evolve_yaml = save_dir / 'hyp_evolve.yaml' + keys = ('metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95', + 'val/box_loss', 'val/obj_loss', 'val/cls_loss') + tuple(hyp.keys()) # [results + hyps] + keys = tuple(x.strip() for x in keys) + vals = results + tuple(hyp.values()) + n = len(keys) + + # Download (optional) + if bucket: + url = f'gs://{bucket}/evolve.csv' + if gsutil_getsize(url) > (evolve_csv.stat().st_size if evolve_csv.exists() else 0): + os.system(f'gsutil cp {url} {save_dir}') # download evolve.csv if larger than local + + # Log to evolve.csv + s = '' if evolve_csv.exists() else (('%20s,' * n % keys).rstrip(',') + '\n') # add header + with open(evolve_csv, 'a') as f: + f.write(s + ('%20.5g,' * n % vals).rstrip(',') + '\n') + + # Save yaml + with open(evolve_yaml, 'w') as f: + data = pd.read_csv(evolve_csv) + data = data.rename(columns=lambda x: x.strip()) # strip keys + i = np.argmax(fitness(data.values[:, :4])) # + generations = len(data) + f.write('# YOLOv5 Hyperparameter Evolution Results\n' + + f'# Best generation: {i}\n' + + f'# Last generation: {generations - 1}\n' + + '# ' + ', '.join(f'{x.strip():>20s}' for x in keys[:7]) + '\n' + + '# ' + ', '.join(f'{x:>20.5g}' for x in data.values[i, :7]) + '\n\n') + yaml.safe_dump(data.loc[i][7:].to_dict(), f, sort_keys=False) + + # Print to screen + LOGGER.info(prefix + f'{generations} generations finished, current result:\n' + + prefix + ', '.join(f'{x.strip():>20s}' for x in keys) + '\n' + + prefix + ', '.join(f'{x:20.5g}' for x in vals) + '\n\n') + + if bucket: + os.system(f'gsutil cp {evolve_csv} {evolve_yaml} gs://{bucket}') # upload + + +def apply_classifier(x, model, img, im0): + # Apply a second stage classifier to YOLO outputs + # Example model = torchvision.models.__dict__['efficientnet_b0'](pretrained=True).to(device).eval() + im0 = [im0] if isinstance(im0, np.ndarray) else im0 + for i, d in enumerate(x): # per image + if d is not None and len(d): + d = d.clone() + + # Reshape and pad cutouts + b = xyxy2xywh(d[:, :4]) # boxes + b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # rectangle to square + b[:, 2:] = b[:, 2:] * 1.3 + 30 # pad + d[:, :4] = xywh2xyxy(b).long() + + # Rescale boxes from img_size to im0 size + scale_coords(img.shape[2:], d[:, :4], im0[i].shape) + + # Classes + pred_cls1 = d[:, 5].long() + ims = [] + for j, a in enumerate(d): # per item + cutout = im0[i][int(a[1]):int(a[3]), int(a[0]):int(a[2])] + im = cv2.resize(cutout, (224, 224)) # BGR + # cv2.imwrite('example%i.jpg' % j, cutout) + + im = im[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416 + im = np.ascontiguousarray(im, dtype=np.float32) # uint8 to float32 + im /= 255 # 0 - 255 to 0.0 - 1.0 + ims.append(im) + + pred_cls2 = model(torch.Tensor(ims).to(d.device)).argmax(1) # classifier prediction + x[i] = x[i][pred_cls1 == pred_cls2] # retain matching class detections + + return x + + +def increment_path(path, exist_ok=False, sep='', mkdir=False): + # Increment file or directory path, i.e. runs/exp --> runs/exp{sep}2, runs/exp{sep}3, ... etc. 
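+ # Illustrative usage, assuming runs/exp and runs/exp2 already exist:
+ #   increment_path('runs/exp')                -> Path('runs/exp3')
+ #   increment_path('runs/exp', exist_ok=True) -> Path('runs/exp')  (reuse existing path)
+ #   increment_path('runs/exp', mkdir=True)    -> also creates the returned directory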
+ path = Path(path) # os-agnostic + if path.exists() and not exist_ok: + path, suffix = (path.with_suffix(''), path.suffix) if path.is_file() else (path, '') + dirs = glob.glob(f"{path}{sep}*") # similar paths + matches = [re.search(rf"%s{sep}(\d+)" % path.stem, d) for d in dirs] + i = [int(m.groups()[0]) for m in matches if m] # indices + n = max(i) + 1 if i else 2 # increment number + path = Path(f"{path}{sep}{n}{suffix}") # increment path + if mkdir: + path.mkdir(parents=True, exist_ok=True) # make directory + return path + + +# Variables +NCOLS = 0 if is_docker() else shutil.get_terminal_size().columns # terminal window size for tqdm diff --git a/utils/loss.py b/utils/loss.py new file mode 100644 index 0000000..5aa9f01 --- /dev/null +++ b/utils/loss.py @@ -0,0 +1,222 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Loss functions +""" + +import torch +import torch.nn as nn + +from utils.metrics import bbox_iou +from utils.torch_utils import de_parallel + + +def smooth_BCE(eps=0.1): # https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441 + # return positive, negative label smoothing BCE targets + return 1.0 - 0.5 * eps, 0.5 * eps + + +class BCEBlurWithLogitsLoss(nn.Module): + # BCEwithLogitLoss() with reduced missing label effects. + def __init__(self, alpha=0.05): + super().__init__() + self.loss_fcn = nn.BCEWithLogitsLoss(reduction='none') # must be nn.BCEWithLogitsLoss() + self.alpha = alpha + + def forward(self, pred, true): + loss = self.loss_fcn(pred, true) + pred = torch.sigmoid(pred) # prob from logits + dx = pred - true # reduce only missing label effects + # dx = (pred - true).abs() # reduce missing label and false label effects + alpha_factor = 1 - torch.exp((dx - 1) / (self.alpha + 1e-4)) + loss *= alpha_factor + return loss.mean() + + +class FocalLoss(nn.Module): + # Wraps focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5) + def __init__(self, loss_fcn, gamma=1.5, alpha=0.25): + super().__init__() + self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss() + self.gamma = gamma + self.alpha = alpha + self.reduction = loss_fcn.reduction + self.loss_fcn.reduction = 'none' # required to apply FL to each element + + def forward(self, pred, true): + loss = self.loss_fcn(pred, true) + # p_t = torch.exp(-loss) + # loss *= self.alpha * (1.000001 - p_t) ** self.gamma # non-zero power for gradient stability + + # TF implementation https://github.com/tensorflow/addons/blob/v0.7.1/tensorflow_addons/losses/focal_loss.py + pred_prob = torch.sigmoid(pred) # prob from logits + p_t = true * pred_prob + (1 - true) * (1 - pred_prob) + alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha) + modulating_factor = (1.0 - p_t) ** self.gamma + loss *= alpha_factor * modulating_factor + + if self.reduction == 'mean': + return loss.mean() + elif self.reduction == 'sum': + return loss.sum() + else: # 'none' + return loss + + +class QFocalLoss(nn.Module): + # Wraps Quality focal loss around existing loss_fcn(), i.e. 
criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5) + def __init__(self, loss_fcn, gamma=1.5, alpha=0.25): + super().__init__() + self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss() + self.gamma = gamma + self.alpha = alpha + self.reduction = loss_fcn.reduction + self.loss_fcn.reduction = 'none' # required to apply FL to each element + + def forward(self, pred, true): + loss = self.loss_fcn(pred, true) + + pred_prob = torch.sigmoid(pred) # prob from logits + alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha) + modulating_factor = torch.abs(true - pred_prob) ** self.gamma + loss *= alpha_factor * modulating_factor + + if self.reduction == 'mean': + return loss.mean() + elif self.reduction == 'sum': + return loss.sum() + else: # 'none' + return loss + + +class ComputeLoss: + # Compute losses + def __init__(self, model, autobalance=False): + self.sort_obj_iou = False + device = next(model.parameters()).device # get model device + h = model.hyp # hyperparameters + + # Define criteria + BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device)) + BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device)) + + # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3 + self.cp, self.cn = smooth_BCE(eps=h.get('label_smoothing', 0.0)) # positive, negative BCE targets + + # Focal loss + g = h['fl_gamma'] # focal loss gamma + if g > 0: + BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g) + + det = de_parallel(model).model[-1] # Detect() module + self.balance = {3: [4.0, 1.0, 0.4]}.get(det.nl, [4.0, 1.0, 0.25, 0.06, 0.02]) # P3-P7 + self.ssi = list(det.stride).index(16) if autobalance else 0 # stride 16 index + self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, 1.0, h, autobalance + for k in 'na', 'nc', 'nl', 'anchors': + setattr(self, k, getattr(det, k)) + + def __call__(self, p, targets): # predictions, targets, model + device = targets.device + lcls, lbox, lobj = torch.zeros(1, device=device), torch.zeros(1, device=device), torch.zeros(1, device=device) + tcls, tbox, indices, anchors = self.build_targets(p, targets) # targets + + # Losses + for i, pi in enumerate(p): # layer index, layer predictions + b, a, gj, gi = indices[i] # image, anchor, gridy, gridx + tobj = torch.zeros_like(pi[..., 0], device=device) # target obj + + n = b.shape[0] # number of targets + if n: + ps = pi[b, a, gj, gi] # prediction subset corresponding to targets + + # Regression + pxy = ps[:, :2].sigmoid() * 2 - 0.5 + pwh = (ps[:, 2:4].sigmoid() * 2) ** 2 * anchors[i] + pbox = torch.cat((pxy, pwh), 1) # predicted box + iou = bbox_iou(pbox.T, tbox[i], x1y1x2y2=False, CIoU=True) # iou(prediction, target) + lbox += (1.0 - iou).mean() # iou loss + + # Objectness + score_iou = iou.detach().clamp(0).type(tobj.dtype) + if self.sort_obj_iou: + sort_id = torch.argsort(score_iou) + b, a, gj, gi, score_iou = b[sort_id], a[sort_id], gj[sort_id], gi[sort_id], score_iou[sort_id] + tobj[b, a, gj, gi] = (1.0 - self.gr) + self.gr * score_iou # iou ratio + + # Classification + if self.nc > 1: # cls loss (only if multiple classes) + t = torch.full_like(ps[:, 5:], self.cn, device=device) # targets + t[range(n), tcls[i]] = self.cp + lcls += self.BCEcls(ps[:, 5:], t) # BCE + + # Append targets to text file + # with open('targets.txt', 'a') as file: + # [file.write('%11.5g ' * 4 % tuple(x) + '\n') for x in torch.cat((txy[i], twh[i]), 1)] + + obji = self.BCEobj(pi[..., 4], tobj) + lobj += obji * self.balance[i] # obj loss + if 
self.autobalance: + self.balance[i] = self.balance[i] * 0.9999 + 0.0001 / obji.detach().item() + + if self.autobalance: + self.balance = [x / self.balance[self.ssi] for x in self.balance] + lbox *= self.hyp['box'] + lobj *= self.hyp['obj'] + lcls *= self.hyp['cls'] + bs = tobj.shape[0] # batch size + + return (lbox + lobj + lcls) * bs, torch.cat((lbox, lobj, lcls)).detach() + + def build_targets(self, p, targets): + # Build targets for compute_loss(), input targets(image,class,x,y,w,h) + na, nt = self.na, targets.shape[0] # number of anchors, targets + tcls, tbox, indices, anch = [], [], [], [] + gain = torch.ones(7, device=targets.device) # normalized to gridspace gain + ai = torch.arange(na, device=targets.device).float().view(na, 1).repeat(1, nt) # same as .repeat_interleave(nt) + targets = torch.cat((targets.repeat(na, 1, 1), ai[:, :, None]), 2) # append anchor indices + + g = 0.5 # bias + off = torch.tensor([[0, 0], + [1, 0], [0, 1], [-1, 0], [0, -1], # j,k,l,m + # [1, 1], [1, -1], [-1, 1], [-1, -1], # jk,jm,lk,lm + ], device=targets.device).float() * g # offsets + + for i in range(self.nl): + anchors = self.anchors[i] + gain[2:6] = torch.tensor(p[i].shape)[[3, 2, 3, 2]] # xyxy gain + + # Match targets to anchors + t = targets * gain + if nt: + # Matches + r = t[:, :, 4:6] / anchors[:, None] # wh ratio + j = torch.max(r, 1 / r).max(2)[0] < self.hyp['anchor_t'] # compare + # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t'] # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2)) + t = t[j] # filter + + # Offsets + gxy = t[:, 2:4] # grid xy + gxi = gain[[2, 3]] - gxy # inverse + j, k = ((gxy % 1 < g) & (gxy > 1)).T + l, m = ((gxi % 1 < g) & (gxi > 1)).T + j = torch.stack((torch.ones_like(j), j, k, l, m)) + t = t.repeat((5, 1, 1))[j] + offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j] + else: + t = targets[0] + offsets = 0 + + # Define + b, c = t[:, :2].long().T # image, class + gxy = t[:, 2:4] # grid xy + gwh = t[:, 4:6] # grid wh + gij = (gxy - offsets).long() + gi, gj = gij.T # grid xy indices + + # Append + a = t[:, 6].long() # anchor indices + indices.append((b, a, gj.clamp_(0, gain[3] - 1), gi.clamp_(0, gain[2] - 1))) # image, anchor, grid indices + tbox.append(torch.cat((gxy - gij, gwh), 1)) # box + anch.append(anchors[a]) # anchors + tcls.append(c) # class + + return tcls, tbox, indices, anch diff --git a/utils/metrics.py b/utils/metrics.py new file mode 100644 index 0000000..857fa5d --- /dev/null +++ b/utils/metrics.py @@ -0,0 +1,342 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Model validation metrics +""" + +import math +import warnings +from pathlib import Path + +import matplotlib.pyplot as plt +import numpy as np +import torch + + +def fitness(x): + # Model fitness as a weighted combination of metrics + w = [0.0, 0.0, 0.1, 0.9] # weights for [P, R, mAP@0.5, mAP@0.5:0.95] + return (x[:, :4] * w).sum(1) + + +def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='.', names=(), eps=1e-16): + """ Compute the average precision, given the recall and precision curves. + Source: https://github.com/rafaelpadilla/Object-Detection-Metrics. + # Arguments + tp: True positives (nparray, nx1 or nx10). + conf: Objectness value from 0-1 (nparray). + pred_cls: Predicted object classes (nparray). + target_cls: True object classes (nparray). + plot: Plot precision-recall curve at mAP@0.5 + save_dir: Plot save directory + # Returns + The average precision as computed in py-faster-rcnn. 
+ """ + + # Sort by objectness + i = np.argsort(-conf) + tp, conf, pred_cls = tp[i], conf[i], pred_cls[i] + + # Find unique classes + unique_classes, nt = np.unique(target_cls, return_counts=True) + nc = unique_classes.shape[0] # number of classes, number of detections + + # Create Precision-Recall curve and compute AP for each class + px, py = np.linspace(0, 1, 1000), [] # for plotting + ap, p, r = np.zeros((nc, tp.shape[1])), np.zeros((nc, 1000)), np.zeros((nc, 1000)) + for ci, c in enumerate(unique_classes): + i = pred_cls == c + n_l = nt[ci] # number of labels + n_p = i.sum() # number of predictions + + if n_p == 0 or n_l == 0: + continue + else: + # Accumulate FPs and TPs + fpc = (1 - tp[i]).cumsum(0) + tpc = tp[i].cumsum(0) + + # Recall + recall = tpc / (n_l + eps) # recall curve + r[ci] = np.interp(-px, -conf[i], recall[:, 0], left=0) # negative x, xp because xp decreases + + # Precision + precision = tpc / (tpc + fpc) # precision curve + p[ci] = np.interp(-px, -conf[i], precision[:, 0], left=1) # p at pr_score + + # AP from recall-precision curve + for j in range(tp.shape[1]): + ap[ci, j], mpre, mrec = compute_ap(recall[:, j], precision[:, j]) + if plot and j == 0: + py.append(np.interp(px, mrec, mpre)) # precision at mAP@0.5 + + # Compute F1 (harmonic mean of precision and recall) + f1 = 2 * p * r / (p + r + eps) + names = [v for k, v in names.items() if k in unique_classes] # list: only classes that have data + names = {i: v for i, v in enumerate(names)} # to dict + if plot: + plot_pr_curve(px, py, ap, Path(save_dir) / 'PR_curve.png', names) + plot_mc_curve(px, f1, Path(save_dir) / 'F1_curve.png', names, ylabel='F1') + plot_mc_curve(px, p, Path(save_dir) / 'P_curve.png', names, ylabel='Precision') + plot_mc_curve(px, r, Path(save_dir) / 'R_curve.png', names, ylabel='Recall') + + i = f1.mean(0).argmax() # max F1 index + p, r, f1 = p[:, i], r[:, i], f1[:, i] + tp = (r * nt).round() # true positives + fp = (tp / (p + eps) - tp).round() # false positives + return tp, fp, p, r, f1, ap, unique_classes.astype('int32') + + +def compute_ap(recall, precision): + """ Compute the average precision, given the recall and precision curves + # Arguments + recall: The recall curve (list) + precision: The precision curve (list) + # Returns + Average precision, precision curve, recall curve + """ + + # Append sentinel values to beginning and end + mrec = np.concatenate(([0.0], recall, [1.0])) + mpre = np.concatenate(([1.0], precision, [0.0])) + + # Compute the precision envelope + mpre = np.flip(np.maximum.accumulate(np.flip(mpre))) + + # Integrate area under curve + method = 'interp' # methods: 'continuous', 'interp' + if method == 'interp': + x = np.linspace(0, 1, 101) # 101-point interp (COCO) + ap = np.trapz(np.interp(x, mrec, mpre), x) # integrate + else: # 'continuous' + i = np.where(mrec[1:] != mrec[:-1])[0] # points where x axis (recall) changes + ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1]) # area under curve + + return ap, mpre, mrec + + +class ConfusionMatrix: + # Updated version of https://github.com/kaanakan/object_detection_confusion_matrix + def __init__(self, nc, conf=0.25, iou_thres=0.45): + self.matrix = np.zeros((nc + 1, nc + 1)) + self.nc = nc # number of classes + self.conf = conf + self.iou_thres = iou_thres + + def process_batch(self, detections, labels): + """ + Return intersection-over-union (Jaccard index) of boxes. + Both sets of boxes are expected to be in (x1, y1, x2, y2) format. 
+ Arguments: + detections (Array[N, 6]), x1, y1, x2, y2, conf, class + labels (Array[M, 5]), class, x1, y1, x2, y2 + Returns: + None, updates confusion matrix accordingly + """ + detections = detections[detections[:, 4] > self.conf] + gt_classes = labels[:, 0].int() + detection_classes = detections[:, 5].int() + iou = box_iou(labels[:, 1:], detections[:, :4]) + + x = torch.where(iou > self.iou_thres) + if x[0].shape[0]: + matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy() + if x[0].shape[0] > 1: + matches = matches[matches[:, 2].argsort()[::-1]] + matches = matches[np.unique(matches[:, 1], return_index=True)[1]] + matches = matches[matches[:, 2].argsort()[::-1]] + matches = matches[np.unique(matches[:, 0], return_index=True)[1]] + else: + matches = np.zeros((0, 3)) + + n = matches.shape[0] > 0 + m0, m1, _ = matches.transpose().astype(np.int16) + for i, gc in enumerate(gt_classes): + j = m0 == i + if n and sum(j) == 1: + self.matrix[detection_classes[m1[j]], gc] += 1 # correct + else: + self.matrix[self.nc, gc] += 1 # background FP + + if n: + for i, dc in enumerate(detection_classes): + if not any(m1 == i): + self.matrix[dc, self.nc] += 1 # background FN + + def matrix(self): + return self.matrix + + def tp_fp(self): + tp = self.matrix.diagonal() # true positives + fp = self.matrix.sum(1) - tp # false positives + # fn = self.matrix.sum(0) - tp # false negatives (missed detections) + return tp[:-1], fp[:-1] # remove background class + + def plot(self, normalize=True, save_dir='', names=()): + try: + import seaborn as sn + + array = self.matrix / ((self.matrix.sum(0).reshape(1, -1) + 1E-9) if normalize else 1) # normalize columns + array[array < 0.005] = np.nan # don't annotate (would appear as 0.00) + + fig = plt.figure(figsize=(12, 9), tight_layout=True) + nc, nn = self.nc, len(names) # number of classes, names + sn.set(font_scale=1.0 if nc < 50 else 0.8) # for label size + labels = (0 < nn < 99) and (nn == nc) # apply names to ticklabels + with warnings.catch_warnings(): + warnings.simplefilter('ignore') # suppress empty matrix RuntimeWarning: All-NaN slice encountered + sn.heatmap(array, annot=nc < 30, annot_kws={"size": 8}, cmap='Blues', fmt='.2f', square=True, vmin=0.0, + xticklabels=names + ['background FP'] if labels else "auto", + yticklabels=names + ['background FN'] if labels else "auto").set_facecolor((1, 1, 1)) + fig.axes[0].set_xlabel('True') + fig.axes[0].set_ylabel('Predicted') + fig.savefig(Path(save_dir) / 'confusion_matrix.png', dpi=250) + plt.close() + except Exception as e: + print(f'WARNING: ConfusionMatrix plot failure: {e}') + + def print(self): + for i in range(self.nc + 1): + print(' '.join(map(str, self.matrix[i]))) + + +def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7): + # Returns the IoU of box1 to box2. 
box1 is 4, box2 is nx4 + box2 = box2.T + + # Get the coordinates of bounding boxes + if x1y1x2y2: # x1, y1, x2, y2 = box1 + b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3] + b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3] + else: # transform from xywh to xyxy + b1_x1, b1_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2 + b1_y1, b1_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2 + b2_x1, b2_x2 = box2[0] - box2[2] / 2, box2[0] + box2[2] / 2 + b2_y1, b2_y2 = box2[1] - box2[3] / 2, box2[1] + box2[3] / 2 + + # Intersection area + inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \ + (torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0) + + # Union Area + w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps + w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps + union = w1 * h1 + w2 * h2 - inter + eps + + iou = inter / union + if CIoU or DIoU or GIoU: + cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1) # convex (smallest enclosing box) width + ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1) # convex height + if CIoU or DIoU: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1 + c2 = cw ** 2 + ch ** 2 + eps # convex diagonal squared + rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 + + (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4 # center distance squared + if CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47 + v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2) + with torch.no_grad(): + alpha = v / (v - iou + (1 + eps)) + return iou - (rho2 / c2 + v * alpha) # CIoU + return iou - rho2 / c2 # DIoU + c_area = cw * ch + eps # convex area + return iou - (c_area - union) / c_area # GIoU https://arxiv.org/pdf/1902.09630.pdf + return iou # IoU + + +def box_iou(box1, box2): + # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py + """ + Return intersection-over-union (Jaccard index) of boxes. + Both sets of boxes are expected to be in (x1, y1, x2, y2) format. + Arguments: + box1 (Tensor[N, 4]) + box2 (Tensor[M, 4]) + Returns: + iou (Tensor[N, M]): the NxM matrix containing the pairwise + IoU values for every element in boxes1 and boxes2 + """ + + def box_area(box): + # box = 4xn + return (box[2] - box[0]) * (box[3] - box[1]) + + area1 = box_area(box1.T) + area2 = box_area(box2.T) + + # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2) + inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2) + return inter / (area1[:, None] + area2 - inter) # iou = inter / (area1 + area2 - inter) + + +def bbox_ioa(box1, box2, eps=1E-7): + """ Returns the intersection over box2 area given box1, box2. Boxes are x1y1x2y2 + box1: np.array of shape(4) + box2: np.array of shape(nx4) + returns: np.array of shape(n) + """ + + box2 = box2.transpose() + + # Get the coordinates of bounding boxes + b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3] + b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3] + + # Intersection area + inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \ + (np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0) + + # box2 area + box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + eps + + # Intersection over box2 area + return inter_area / box2_area + + +def wh_iou(wh1, wh2): + # Returns the nxm IoU matrix. 
wh1 is nx2, wh2 is mx2 + wh1 = wh1[:, None] # [N,1,2] + wh2 = wh2[None] # [1,M,2] + inter = torch.min(wh1, wh2).prod(2) # [N,M] + return inter / (wh1.prod(2) + wh2.prod(2) - inter) # iou = inter / (area1 + area2 - inter) + + +# Plots ---------------------------------------------------------------------------------------------------------------- + +def plot_pr_curve(px, py, ap, save_dir='pr_curve.png', names=()): + # Precision-recall curve + fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True) + py = np.stack(py, axis=1) + + if 0 < len(names) < 21: # display per-class legend if < 21 classes + for i, y in enumerate(py.T): + ax.plot(px, y, linewidth=1, label=f'{names[i]} {ap[i, 0]:.3f}') # plot(recall, precision) + else: + ax.plot(px, py, linewidth=1, color='grey') # plot(recall, precision) + + ax.plot(px, py.mean(1), linewidth=3, color='blue', label='all classes %.3f mAP@0.5' % ap[:, 0].mean()) + ax.set_xlabel('Recall') + ax.set_ylabel('Precision') + ax.set_xlim(0, 1) + ax.set_ylim(0, 1) + plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left") + fig.savefig(Path(save_dir), dpi=250) + plt.close() + + +def plot_mc_curve(px, py, save_dir='mc_curve.png', names=(), xlabel='Confidence', ylabel='Metric'): + # Metric-confidence curve + fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True) + + if 0 < len(names) < 21: # display per-class legend if < 21 classes + for i, y in enumerate(py): + ax.plot(px, y, linewidth=1, label=f'{names[i]}') # plot(confidence, metric) + else: + ax.plot(px, py.T, linewidth=1, color='grey') # plot(confidence, metric) + + y = py.mean(0) + ax.plot(px, y, linewidth=3, color='blue', label=f'all classes {y.max():.2f} at {px[y.argmax()]:.3f}') + ax.set_xlabel(xlabel) + ax.set_ylabel(ylabel) + ax.set_xlim(0, 1) + ax.set_ylim(0, 1) + plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left") + fig.savefig(Path(save_dir), dpi=250) + plt.close() diff --git a/utils/plots.py b/utils/plots.py new file mode 100644 index 0000000..6c3f5bc --- /dev/null +++ b/utils/plots.py @@ -0,0 +1,471 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Plotting utils +""" + +import math +import os +from copy import copy +from pathlib import Path + +import cv2 +import matplotlib +import matplotlib.pyplot as plt +import numpy as np +import pandas as pd +import seaborn as sn +import torch +from PIL import Image, ImageDraw, ImageFont + +from utils.general import (CONFIG_DIR, FONT, LOGGER, Timeout, check_font, check_requirements, clip_coords, + increment_path, is_ascii, is_chinese, try_except, xywh2xyxy, xyxy2xywh) +from utils.metrics import fitness + +# Settings +RANK = int(os.getenv('RANK', -1)) +matplotlib.rc('font', **{'size': 11}) +matplotlib.use('Agg') # for writing to files only + + +class Colors: + # Ultralytics color palette https://ultralytics.com/ + def __init__(self): + # hex = matplotlib.colors.TABLEAU_COLORS.values() + hex = ('FF3838', 'FF9D97', 'FF701F', 'FFB21D', 'CFD231', '48F90A', '92CC17', '3DDB86', '1A9334', '00D4BB', + '2C99A8', '00C2FF', '344593', '6473FF', '0018EC', '8438FF', '520085', 'CB38FF', 'FF95C8', 'FF37C7') + self.palette = [self.hex2rgb('#' + c) for c in hex] + self.n = len(self.palette) + + def __call__(self, i, bgr=False): + c = self.palette[int(i) % self.n] + return (c[2], c[1], c[0]) if bgr else c + + @staticmethod + def hex2rgb(h): # rgb order (PIL) + return tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4)) + + +colors = Colors() # create instance for 'from utils.plots import colors' + + +def check_pil_font(font=FONT, size=10): + # Return a PIL 
TrueType Font, downloading to CONFIG_DIR if necessary + font = Path(font) + font = font if font.exists() else (CONFIG_DIR / font.name) + try: + return ImageFont.truetype(str(font) if font.exists() else font.name, size) + except Exception: # download if missing + check_font(font) + try: + return ImageFont.truetype(str(font), size) + except TypeError: + check_requirements('Pillow>=8.4.0') # known issue https://github.com/ultralytics/yolov5/issues/5374 + + +class Annotator: + if RANK in (-1, 0): + check_pil_font() # download TTF if necessary + + # YOLOv5 Annotator for train/val mosaics and jpgs and detect/hub inference annotations + def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=False, example='abc'): + assert im.data.contiguous, 'Image not contiguous. Apply np.ascontiguousarray(im) to Annotator() input images.' + self.pil = pil or not is_ascii(example) or is_chinese(example) + if self.pil: # use PIL + self.im = im if isinstance(im, Image.Image) else Image.fromarray(im) + self.draw = ImageDraw.Draw(self.im) + self.font = check_pil_font(font='Arial.Unicode.ttf' if is_chinese(example) else font, + size=font_size or max(round(sum(self.im.size) / 2 * 0.035), 12)) + else: # use cv2 + self.im = im + self.lw = line_width or max(round(sum(im.shape) / 2 * 0.003), 2) # line width + + def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 255)): + # Add one xyxy box to image with label + if self.pil or not is_ascii(label): + self.draw.rectangle(box, width=self.lw, outline=color) # box + if label: + w, h = self.font.getsize(label) # text width, height + outside = box[1] - h >= 0 # label fits outside box + self.draw.rectangle((box[0], + box[1] - h if outside else box[1], + box[0] + w + 1, + box[1] + 1 if outside else box[1] + h + 1), fill=color) + # self.draw.text((box[0], box[1]), label, fill=txt_color, font=self.font, anchor='ls') # for PIL>8.0 + self.draw.text((box[0], box[1] - h if outside else box[1]), label, fill=txt_color, font=self.font) + else: # cv2 + p1, p2 = (int(box[0]), int(box[1])), (int(box[2]), int(box[3])) + cv2.rectangle(self.im, p1, p2, color, thickness=self.lw, lineType=cv2.LINE_AA) + if label: + tf = max(self.lw - 1, 1) # font thickness + w, h = cv2.getTextSize(label, 0, fontScale=self.lw / 3, thickness=tf)[0] # text width, height + outside = p1[1] - h - 3 >= 0 # label fits outside box + p2 = p1[0] + w, p1[1] - h - 3 if outside else p1[1] + h + 3 + cv2.rectangle(self.im, p1, p2, color, -1, cv2.LINE_AA) # filled + cv2.putText(self.im, label, (p1[0], p1[1] - 2 if outside else p1[1] + h + 2), 0, self.lw / 3, txt_color, + thickness=tf, lineType=cv2.LINE_AA) + + def rectangle(self, xy, fill=None, outline=None, width=1): + # Add rectangle to image (PIL-only) + self.draw.rectangle(xy, fill, outline, width) + + def text(self, xy, text, txt_color=(255, 255, 255)): + # Add text to image (PIL-only) + w, h = self.font.getsize(text) # text width, height + self.draw.text((xy[0], xy[1] - h + 1), text, fill=txt_color, font=self.font) + + def result(self): + # Return annotated image as array + return np.asarray(self.im) + + +def feature_visualization(x, module_type, stage, n=32, save_dir=Path('runs/detect/exp')): + """ + x: Features to be visualized + module_type: Module type + stage: Module stage within model + n: Maximum number of feature maps to plot + save_dir: Directory to save results + """ + if 'Detect' not in module_type: + batch, channels, height, width = x.shape # batch, channels, height, width + if height > 1 and width > 1: + f = 
save_dir / f"stage{stage}_{module_type.split('.')[-1]}_features.png" # filename + + blocks = torch.chunk(x[0].cpu(), channels, dim=0) # select batch index 0, block by channels + n = min(n, channels) # number of plots + fig, ax = plt.subplots(math.ceil(n / 8), 8, tight_layout=True) # 8 rows x n/8 cols + ax = ax.ravel() + plt.subplots_adjust(wspace=0.05, hspace=0.05) + for i in range(n): + ax[i].imshow(blocks[i].squeeze()) # cmap='gray' + ax[i].axis('off') + + LOGGER.info(f'Saving {f}... ({n}/{channels})') + plt.savefig(f, dpi=300, bbox_inches='tight') + plt.close() + np.save(str(f.with_suffix('.npy')), x[0].cpu().numpy()) # npy save + + +def hist2d(x, y, n=100): + # 2d histogram used in labels.png and evolve.png + xedges, yedges = np.linspace(x.min(), x.max(), n), np.linspace(y.min(), y.max(), n) + hist, xedges, yedges = np.histogram2d(x, y, (xedges, yedges)) + xidx = np.clip(np.digitize(x, xedges) - 1, 0, hist.shape[0] - 1) + yidx = np.clip(np.digitize(y, yedges) - 1, 0, hist.shape[1] - 1) + return np.log(hist[xidx, yidx]) + + +def butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5): + from scipy.signal import butter, filtfilt + + # https://stackoverflow.com/questions/28536191/how-to-filter-smooth-with-scipy-numpy + def butter_lowpass(cutoff, fs, order): + nyq = 0.5 * fs + normal_cutoff = cutoff / nyq + return butter(order, normal_cutoff, btype='low', analog=False) + + b, a = butter_lowpass(cutoff, fs, order=order) + return filtfilt(b, a, data) # forward-backward filter + + +def output_to_target(output): + # Convert model output to target format [batch_id, class_id, x, y, w, h, conf] + targets = [] + for i, o in enumerate(output): + for *box, conf, cls in o.cpu().numpy(): + targets.append([i, cls, *list(*xyxy2xywh(np.array(box)[None])), conf]) + return np.array(targets) + + +def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max_size=1920, max_subplots=16): + # Plot image grid with labels + if isinstance(images, torch.Tensor): + images = images.cpu().float().numpy() + if isinstance(targets, torch.Tensor): + targets = targets.cpu().numpy() + if np.max(images[0]) <= 1: + images *= 255 # de-normalise (optional) + bs, _, h, w = images.shape # batch size, _, height, width + bs = min(bs, max_subplots) # limit plot images + ns = np.ceil(bs ** 0.5) # number of subplots (square) + + # Build Image + mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8) # init + for i, im in enumerate(images): + if i == max_subplots: # if last batch has fewer images than we expect + break + x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin + im = im.transpose(1, 2, 0) + mosaic[y:y + h, x:x + w, :] = im + + # Resize (optional) + scale = max_size / ns / max(h, w) + if scale < 1: + h = math.ceil(scale * h) + w = math.ceil(scale * w) + mosaic = cv2.resize(mosaic, tuple(int(x * ns) for x in (w, h))) + + # Annotate + fs = int((h + w) * ns * 0.01) # font size + annotator = Annotator(mosaic, line_width=round(fs / 10), font_size=fs, pil=True, example=names) + for i in range(i + 1): + x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin + annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2) # borders + if paths: + annotator.text((x + 5, y + 5 + h), text=Path(paths[i]).name[:40], txt_color=(220, 220, 220)) # filenames + if len(targets) > 0: + ti = targets[targets[:, 0] == i] # image targets + boxes = xywh2xyxy(ti[:, 2:6]).T + classes = ti[:, 1].astype('int') + labels = ti.shape[1] == 6 # labels if no conf column + conf = None if labels else 
ti[:, 6] # check for confidence presence (label vs pred) + + if boxes.shape[1]: + if boxes.max() <= 1.01: # if normalized with tolerance 0.01 + boxes[[0, 2]] *= w # scale to pixels + boxes[[1, 3]] *= h + elif scale < 1: # absolute coords need scale if image scales + boxes *= scale + boxes[[0, 2]] += x + boxes[[1, 3]] += y + for j, box in enumerate(boxes.T.tolist()): + cls = classes[j] + color = colors(cls) + cls = names[cls] if names else cls + if labels or conf[j] > 0.25: # 0.25 conf thresh + label = f'{cls}' if labels else f'{cls} {conf[j]:.1f}' + annotator.box_label(box, label, color=color) + annotator.im.save(fname) # save + + +def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir=''): + # Plot LR simulating training for full epochs + optimizer, scheduler = copy(optimizer), copy(scheduler) # do not modify originals + y = [] + for _ in range(epochs): + scheduler.step() + y.append(optimizer.param_groups[0]['lr']) + plt.plot(y, '.-', label='LR') + plt.xlabel('epoch') + plt.ylabel('LR') + plt.grid() + plt.xlim(0, epochs) + plt.ylim(0) + plt.savefig(Path(save_dir) / 'LR.png', dpi=200) + plt.close() + + +def plot_val_txt(): # from utils.plots import *; plot_val() + # Plot val.txt histograms + x = np.loadtxt('val.txt', dtype=np.float32) + box = xyxy2xywh(x[:, :4]) + cx, cy = box[:, 0], box[:, 1] + + fig, ax = plt.subplots(1, 1, figsize=(6, 6), tight_layout=True) + ax.hist2d(cx, cy, bins=600, cmax=10, cmin=0) + ax.set_aspect('equal') + plt.savefig('hist2d.png', dpi=300) + + fig, ax = plt.subplots(1, 2, figsize=(12, 6), tight_layout=True) + ax[0].hist(cx, bins=600) + ax[1].hist(cy, bins=600) + plt.savefig('hist1d.png', dpi=200) + + +def plot_targets_txt(): # from utils.plots import *; plot_targets_txt() + # Plot targets.txt histograms + x = np.loadtxt('targets.txt', dtype=np.float32).T + s = ['x targets', 'y targets', 'width targets', 'height targets'] + fig, ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True) + ax = ax.ravel() + for i in range(4): + ax[i].hist(x[i], bins=100, label=f'{x[i].mean():.3g} +/- {x[i].std():.3g}') + ax[i].legend() + ax[i].set_title(s[i]) + plt.savefig('targets.jpg', dpi=200) + + +def plot_val_study(file='', dir='', x=None): # from utils.plots import *; plot_val_study() + # Plot file=study.txt generated by val.py (or plot all study*.txt in dir) + save_dir = Path(file).parent if file else Path(dir) + plot2 = False # plot additional results + if plot2: + ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True)[1].ravel() + + fig2, ax2 = plt.subplots(1, 1, figsize=(8, 4), tight_layout=True) + # for f in [save_dir / f'study_coco_{x}.txt' for x in ['yolov5n6', 'yolov5s6', 'yolov5m6', 'yolov5l6', 'yolov5x6']]: + for f in sorted(save_dir.glob('study*.txt')): + y = np.loadtxt(f, dtype=np.float32, usecols=[0, 1, 2, 3, 7, 8, 9], ndmin=2).T + x = np.arange(y.shape[1]) if x is None else np.array(x) + if plot2: + s = ['P', 'R', 'mAP@.5', 'mAP@.5:.95', 't_preprocess (ms/img)', 't_inference (ms/img)', 't_NMS (ms/img)'] + for i in range(7): + ax[i].plot(x, y[i], '.-', linewidth=2, markersize=8) + ax[i].set_title(s[i]) + + j = y[3].argmax() + 1 + ax2.plot(y[5, 1:j], y[3, 1:j] * 1E2, '.-', linewidth=2, markersize=8, + label=f.stem.replace('study_coco_', '').replace('yolo', 'YOLO')) + + ax2.plot(1E3 / np.array([209, 140, 97, 58, 35, 18]), [34.6, 40.5, 43.0, 47.5, 49.7, 51.5], + 'k.-', linewidth=2, markersize=8, alpha=.25, label='EfficientDet') + + ax2.grid(alpha=0.2) + ax2.set_yticks(np.arange(20, 60, 5)) + ax2.set_xlim(0, 57) + ax2.set_ylim(25, 55) + 
ax2.set_xlabel('GPU Speed (ms/img)') + ax2.set_ylabel('COCO AP val') + ax2.legend(loc='lower right') + f = save_dir / 'study.png' + print(f'Saving {f}...') + plt.savefig(f, dpi=300) + + +@try_except # known issue https://github.com/ultralytics/yolov5/issues/5395 +@Timeout(30) # known issue https://github.com/ultralytics/yolov5/issues/5611 +def plot_labels(labels, names=(), save_dir=Path('')): + # plot dataset labels + LOGGER.info(f"Plotting labels to {save_dir / 'labels.jpg'}... ") + c, b = labels[:, 0], labels[:, 1:].transpose() # classes, boxes + nc = int(c.max() + 1) # number of classes + x = pd.DataFrame(b.transpose(), columns=['x', 'y', 'width', 'height']) + + # seaborn correlogram + sn.pairplot(x, corner=True, diag_kind='auto', kind='hist', diag_kws=dict(bins=50), plot_kws=dict(pmax=0.9)) + plt.savefig(save_dir / 'labels_correlogram.jpg', dpi=200) + plt.close() + + # matplotlib labels + matplotlib.use('svg') # faster + ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)[1].ravel() + y = ax[0].hist(c, bins=np.linspace(0, nc, nc + 1) - 0.5, rwidth=0.8) + try: # color histogram bars by class + [y[2].patches[i].set_color([x / 255 for x in colors(i)]) for i in range(nc)] # known issue #3195 + except Exception: + pass + ax[0].set_ylabel('instances') + if 0 < len(names) < 30: + ax[0].set_xticks(range(len(names))) + ax[0].set_xticklabels(names, rotation=90, fontsize=10) + else: + ax[0].set_xlabel('classes') + sn.histplot(x, x='x', y='y', ax=ax[2], bins=50, pmax=0.9) + sn.histplot(x, x='width', y='height', ax=ax[3], bins=50, pmax=0.9) + + # rectangles + labels[:, 1:3] = 0.5 # center + labels[:, 1:] = xywh2xyxy(labels[:, 1:]) * 2000 + img = Image.fromarray(np.ones((2000, 2000, 3), dtype=np.uint8) * 255) + for cls, *box in labels[:1000]: + ImageDraw.Draw(img).rectangle(box, width=1, outline=colors(cls)) # plot + ax[1].imshow(img) + ax[1].axis('off') + + for a in [0, 1, 2, 3]: + for s in ['top', 'right', 'left', 'bottom']: + ax[a].spines[s].set_visible(False) + + plt.savefig(save_dir / 'labels.jpg', dpi=200) + matplotlib.use('Agg') + plt.close() + + +def plot_evolve(evolve_csv='path/to/evolve.csv'): # from utils.plots import *; plot_evolve() + # Plot evolve.csv hyp evolution results + evolve_csv = Path(evolve_csv) + data = pd.read_csv(evolve_csv) + keys = [x.strip() for x in data.columns] + x = data.values + f = fitness(x) + j = np.argmax(f) # max fitness index + plt.figure(figsize=(10, 12), tight_layout=True) + matplotlib.rc('font', **{'size': 8}) + print(f'Best results from row {j} of {evolve_csv}:') + for i, k in enumerate(keys[7:]): + v = x[:, 7 + i] + mu = v[j] # best single result + plt.subplot(6, 5, i + 1) + plt.scatter(v, f, c=hist2d(v, f, 20), cmap='viridis', alpha=.8, edgecolors='none') + plt.plot(mu, f.max(), 'k+', markersize=15) + plt.title(f'{k} = {mu:.3g}', fontdict={'size': 9}) # limit to 40 characters + if i % 5 != 0: + plt.yticks([]) + print(f'{k:>15}: {mu:.3g}') + f = evolve_csv.with_suffix('.png') # filename + plt.savefig(f, dpi=200) + plt.close() + print(f'Saved {f}') + + +def plot_results(file='path/to/results.csv', dir=''): + # Plot training results.csv. Usage: from utils.plots import *; plot_results('path/to/results.csv') + save_dir = Path(file).parent if file else Path(dir) + fig, ax = plt.subplots(2, 5, figsize=(12, 6), tight_layout=True) + ax = ax.ravel() + files = list(save_dir.glob('results*.csv')) + assert len(files), f'No results.csv files found in {save_dir.resolve()}, nothing to plot.' 
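+    # Column indices below assume the standard YOLOv5 results.csv layout: 0 epoch,
+    # 1-3 train box/obj/cls losses, 4-5 precision/recall, 6-7 mAP@.5 / mAP@.5:.95,
+    # 8-10 val box/obj/cls losses, so the top row shows the train losses plus P/R and
+    # the bottom row the val losses plus the two mAP curves.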
+ for fi, f in enumerate(files): + try: + data = pd.read_csv(f) + s = [x.strip() for x in data.columns] + x = data.values[:, 0] + for i, j in enumerate([1, 2, 3, 4, 5, 8, 9, 10, 6, 7]): + y = data.values[:, j] + # y[y == 0] = np.nan # don't show zero values + ax[i].plot(x, y, marker='.', label=f.stem, linewidth=2, markersize=8) + ax[i].set_title(s[j], fontsize=12) + # if j in [8, 9, 10]: # share train and val loss y axes + # ax[i].get_shared_y_axes().join(ax[i], ax[i - 5]) + except Exception as e: + LOGGER.info(f'Warning: Plotting error for {f}: {e}') + ax[1].legend() + fig.savefig(save_dir / 'results.png', dpi=200) + plt.close() + + +def profile_idetection(start=0, stop=0, labels=(), save_dir=''): + # Plot iDetection '*.txt' per-image logs. from utils.plots import *; profile_idetection() + ax = plt.subplots(2, 4, figsize=(12, 6), tight_layout=True)[1].ravel() + s = ['Images', 'Free Storage (GB)', 'RAM Usage (GB)', 'Battery', 'dt_raw (ms)', 'dt_smooth (ms)', 'real-world FPS'] + files = list(Path(save_dir).glob('frames*.txt')) + for fi, f in enumerate(files): + try: + results = np.loadtxt(f, ndmin=2).T[:, 90:-30] # clip first and last rows + n = results.shape[1] # number of rows + x = np.arange(start, min(stop, n) if stop else n) + results = results[:, x] + t = (results[0] - results[0].min()) # set t0=0s + results[0] = x + for i, a in enumerate(ax): + if i < len(results): + label = labels[fi] if len(labels) else f.stem.replace('frames_', '') + a.plot(t, results[i], marker='.', label=label, linewidth=1, markersize=5) + a.set_title(s[i]) + a.set_xlabel('time (s)') + # if fi == len(files) - 1: + # a.set_ylim(bottom=0) + for side in ['top', 'right']: + a.spines[side].set_visible(False) + else: + a.remove() + except Exception as e: + print(f'Warning: Plotting error for {f}; {e}') + ax[1].legend() + plt.savefig(Path(save_dir) / 'idetection_profile.png', dpi=200) + + +def save_one_box(xyxy, im, file='image.jpg', gain=1.02, pad=10, square=False, BGR=False, save=True): + # Save image crop as {file} with crop size multiple {gain} and {pad} pixels. Save and/or return crop + xyxy = torch.tensor(xyxy).view(-1, 4) + b = xyxy2xywh(xyxy) # boxes + if square: + b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # attempt rectangle to square + b[:, 2:] = b[:, 2:] * gain + pad # box wh * gain + pad + xyxy = xywh2xyxy(b).long() + clip_coords(xyxy, im.shape) + crop = im[int(xyxy[0, 1]):int(xyxy[0, 3]), int(xyxy[0, 0]):int(xyxy[0, 2]), ::(1 if BGR else -1)] + if save: + file.parent.mkdir(parents=True, exist_ok=True) # make directory + cv2.imwrite(str(increment_path(file).with_suffix('.jpg')), crop) + return crop diff --git a/utils/torch_utils.py b/utils/torch_utils.py new file mode 100644 index 0000000..c5257c6 --- /dev/null +++ b/utils/torch_utils.py @@ -0,0 +1,329 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +PyTorch utils +""" + +import datetime +import math +import os +import platform +import subprocess +import time +import warnings +from contextlib import contextmanager +from copy import deepcopy +from pathlib import Path + +import torch +import torch.distributed as dist +import torch.nn as nn +import torch.nn.functional as F + +from utils.general import LOGGER + +try: + import thop # for FLOPs computation +except ImportError: + thop = None + +# Suppress PyTorch warnings +warnings.filterwarnings('ignore', message='User provided device_type of \'cuda\', but CUDA is not available. 
Disabling') + + +@contextmanager +def torch_distributed_zero_first(local_rank: int): + """ + Decorator to make all processes in distributed training wait for each local_master to do something. + """ + if local_rank not in [-1, 0]: + dist.barrier(device_ids=[local_rank]) + yield + if local_rank == 0: + dist.barrier(device_ids=[0]) + + +def date_modified(path=__file__): + # return human-readable file modification date, i.e. '2021-3-26' + t = datetime.datetime.fromtimestamp(Path(path).stat().st_mtime) + return f'{t.year}-{t.month}-{t.day}' + + +def git_describe(path=Path(__file__).parent): # path must be a directory + # return human-readable git description, i.e. v5.0-5-g3e25f1e https://git-scm.com/docs/git-describe + s = f'git -C {path} describe --tags --long --always' + try: + return subprocess.check_output(s, shell=True, stderr=subprocess.STDOUT).decode()[:-1] + except subprocess.CalledProcessError: + return '' # not a git repository + + +def device_count(): + # Returns number of CUDA devices available. Safe version of torch.cuda.device_count(). Only works on Linux. + assert platform.system() == 'Linux', 'device_count() function only works on Linux' + try: + cmd = 'nvidia-smi -L | wc -l' + return int(subprocess.run(cmd, shell=True, capture_output=True, check=True).stdout.decode().split()[-1]) + except Exception: + return 0 + + +def select_device(device='', batch_size=0, newline=True): + # device = 'cpu' or '0' or '0,1,2,3' + s = f'YOLOv5 🚀 {git_describe() or date_modified()} torch {torch.__version__} ' # string + device = str(device).strip().lower().replace('cuda:', '') # to string, 'cuda:0' to '0' + cpu = device == 'cpu' + if cpu: + os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False + elif device: # non-cpu device requested + os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable - must be before assert is_available() + assert torch.cuda.is_available() and torch.cuda.device_count() >= len(device.replace(',', '')), \ + f"Invalid CUDA '--device {device}' requested, use '--device cpu' or pass valid CUDA device(s)" + + cuda = not cpu and torch.cuda.is_available() + if cuda: + devices = device.split(',') if device else '0' # range(torch.cuda.device_count()) # i.e. 
0,1,6,7 + n = len(devices) # device count + if n > 1 and batch_size > 0: # check batch_size is divisible by device_count + assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}' + space = ' ' * (len(s) + 1) + for i, d in enumerate(devices): + p = torch.cuda.get_device_properties(i) + s += f"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / 1024 ** 2:.0f}MiB)\n" # bytes to MB + else: + s += 'CPU\n' + + if not newline: + s = s.rstrip() + LOGGER.info(s.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else s) # emoji-safe + return torch.device('cuda:0' if cuda else 'cpu') + + +def time_sync(): + # pytorch-accurate time + if torch.cuda.is_available(): + torch.cuda.synchronize() + return time.time() + + +def profile(input, ops, n=10, device=None): + # YOLOv5 speed/memory/FLOPs profiler + # + # Usage: + # input = torch.randn(16, 3, 640, 640) + # m1 = lambda x: x * torch.sigmoid(x) + # m2 = nn.SiLU() + # profile(input, [m1, m2], n=100) # profile over 100 iterations + + results = [] + device = device or select_device() + print(f"{'Params':>12s}{'GFLOPs':>12s}{'GPU_mem (GB)':>14s}{'forward (ms)':>14s}{'backward (ms)':>14s}" + f"{'input':>24s}{'output':>24s}") + + for x in input if isinstance(input, list) else [input]: + x = x.to(device) + x.requires_grad = True + for m in ops if isinstance(ops, list) else [ops]: + m = m.to(device) if hasattr(m, 'to') else m # device + m = m.half() if hasattr(m, 'half') and isinstance(x, torch.Tensor) and x.dtype is torch.float16 else m + tf, tb, t = 0, 0, [0, 0, 0] # dt forward, backward + try: + flops = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 # GFLOPs + except Exception: + flops = 0 + + try: + for _ in range(n): + t[0] = time_sync() + y = m(x) + t[1] = time_sync() + try: + _ = (sum(yi.sum() for yi in y) if isinstance(y, list) else y).sum().backward() + t[2] = time_sync() + except Exception: # no backward method + # print(e) # for debug + t[2] = float('nan') + tf += (t[1] - t[0]) * 1000 / n # ms per op forward + tb += (t[2] - t[1]) * 1000 / n # ms per op backward + mem = torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0 # (GB) + s_in = tuple(x.shape) if isinstance(x, torch.Tensor) else 'list' + s_out = tuple(y.shape) if isinstance(y, torch.Tensor) else 'list' + p = sum(list(x.numel() for x in m.parameters())) if isinstance(m, nn.Module) else 0 # parameters + print(f'{p:12}{flops:12.4g}{mem:>14.3f}{tf:14.4g}{tb:14.4g}{str(s_in):>24s}{str(s_out):>24s}') + results.append([p, flops, mem, tf, tb, s_in, s_out]) + except Exception as e: + print(e) + results.append(None) + torch.cuda.empty_cache() + return results + + +def is_parallel(model): + # Returns True if model is of type DP or DDP + return type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel) + + +def de_parallel(model): + # De-parallelize a model: returns single-GPU model if model is of type DP or DDP + return model.module if is_parallel(model) else model + + +def initialize_weights(model): + for m in model.modules(): + t = type(m) + if t is nn.Conv2d: + pass # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif t is nn.BatchNorm2d: + m.eps = 1e-3 + m.momentum = 0.03 + elif t in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU]: + m.inplace = True + + +def find_modules(model, mclass=nn.Conv2d): + # Finds layer indices matching module class 'mclass' + return [i for i, m in enumerate(model.module_list) if isinstance(m, mclass)] + + +def sparsity(model): + # 
Return global model sparsity + a, b = 0, 0 + for p in model.parameters(): + a += p.numel() + b += (p == 0).sum() + return b / a + + +def prune(model, amount=0.3): + # Prune model to requested global sparsity + import torch.nn.utils.prune as prune + print('Pruning model... ', end='') + for name, m in model.named_modules(): + if isinstance(m, nn.Conv2d): + prune.l1_unstructured(m, name='weight', amount=amount) # prune + prune.remove(m, 'weight') # make permanent + print(' %.3g global sparsity' % sparsity(model)) + + +def fuse_conv_and_bn(conv, bn): + # Fuse convolution and batchnorm layers https://tehnokv.com/posts/fusing-batchnorm-and-conv/ + fusedconv = nn.Conv2d(conv.in_channels, + conv.out_channels, + kernel_size=conv.kernel_size, + stride=conv.stride, + padding=conv.padding, + groups=conv.groups, + bias=True).requires_grad_(False).to(conv.weight.device) + + # prepare filters + w_conv = conv.weight.clone().view(conv.out_channels, -1) + w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var))) + fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.shape)) + + # prepare spatial bias + b_conv = torch.zeros(conv.weight.size(0), device=conv.weight.device) if conv.bias is None else conv.bias + b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps)) + fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn) + + return fusedconv + + +def model_info(model, verbose=False, img_size=640): + # Model information. img_size may be int or list, i.e. img_size=640 or img_size=[640, 320] + n_p = sum(x.numel() for x in model.parameters()) # number parameters + n_g = sum(x.numel() for x in model.parameters() if x.requires_grad) # number gradients + if verbose: + print(f"{'layer':>5} {'name':>40} {'gradient':>9} {'parameters':>12} {'shape':>20} {'mu':>10} {'sigma':>10}") + for i, (name, p) in enumerate(model.named_parameters()): + name = name.replace('module_list.', '') + print('%5g %40s %9s %12g %20s %10.3g %10.3g' % + (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std())) + + try: # FLOPs + from thop import profile + stride = max(int(model.stride.max()), 32) if hasattr(model, 'stride') else 32 + img = torch.zeros((1, model.yaml.get('ch', 3), stride, stride), device=next(model.parameters()).device) # input + flops = profile(deepcopy(model), inputs=(img,), verbose=False)[0] / 1E9 * 2 # stride GFLOPs + img_size = img_size if isinstance(img_size, list) else [img_size, img_size] # expand if int/float + fs = ', %.1f GFLOPs' % (flops * img_size[0] / stride * img_size[1] / stride) # 640x640 GFLOPs + except (ImportError, Exception): + fs = '' + + LOGGER.info(f"Model Summary: {len(list(model.modules()))} layers, {n_p} parameters, {n_g} gradients{fs}") + + +def scale_img(img, ratio=1.0, same_shape=False, gs=32): # img(16,3,256,416) + # scales img(bs,3,y,x) by ratio constrained to gs-multiple + if ratio == 1.0: + return img + else: + h, w = img.shape[2:] + s = (int(h * ratio), int(w * ratio)) # new size + img = F.interpolate(img, size=s, mode='bilinear', align_corners=False) # resize + if not same_shape: # pad/crop img + h, w = (math.ceil(x * ratio / gs) * gs for x in (h, w)) + return F.pad(img, [0, w - s[1], 0, h - s[0]], value=0.447) # value = imagenet mean + + +def copy_attr(a, b, include=(), exclude=()): + # Copy attributes from b to a, options to only include [...] and to exclude [...] 
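+    # Used by ModelEMA.update_attr() below; attributes starting with '_' are always skipped,
+    # a non-empty `include` acts as a whitelist and `exclude` as a blacklist.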
+ for k, v in b.__dict__.items(): + if (len(include) and k not in include) or k.startswith('_') or k in exclude: + continue + else: + setattr(a, k, v) + + +class EarlyStopping: + # YOLOv5 simple early stopper + def __init__(self, patience=30): + self.best_fitness = 0.0 # i.e. mAP + self.best_epoch = 0 + self.patience = patience or float('inf') # epochs to wait after fitness stops improving to stop + self.possible_stop = False # possible stop may occur next epoch + + def __call__(self, epoch, fitness): + if fitness >= self.best_fitness: # >= 0 to allow for early zero-fitness stage of training + self.best_epoch = epoch + self.best_fitness = fitness + delta = epoch - self.best_epoch # epochs without improvement + self.possible_stop = delta >= (self.patience - 1) # possible stop may occur next epoch + stop = delta >= self.patience # stop training if patience exceeded + if stop: + LOGGER.info(f'Stopping training early as no improvement observed in last {self.patience} epochs. ' + f'Best results observed at epoch {self.best_epoch}, best model saved as best.pt.\n' + f'To update EarlyStopping(patience={self.patience}) pass a new patience value, ' + f'i.e. `python train.py --patience 300` or use `--patience 0` to disable EarlyStopping.') + return stop + + +class ModelEMA: + """ Updated Exponential Moving Average (EMA) from https://github.com/rwightman/pytorch-image-models + Keeps a moving average of everything in the model state_dict (parameters and buffers) + For EMA details see https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage + """ + + def __init__(self, model, decay=0.9999, updates=0): + # Create EMA + self.ema = deepcopy(de_parallel(model)).eval() # FP32 EMA + # if next(model.parameters()).device.type != 'cpu': + # self.ema.half() # FP16 EMA + self.updates = updates # number of EMA updates + self.decay = lambda x: decay * (1 - math.exp(-x / 2000)) # decay exponential ramp (to help early epochs) + for p in self.ema.parameters(): + p.requires_grad_(False) + + def update(self, model): + # Update EMA parameters + with torch.no_grad(): + self.updates += 1 + d = self.decay(self.updates) + + msd = de_parallel(model).state_dict() # model state_dict + for k, v in self.ema.state_dict().items(): + if v.dtype.is_floating_point: + v *= d + v += (1 - d) * msd[k].detach() + + def update_attr(self, model, include=(), exclude=('process_group', 'reducer')): + # Update EMA attributes + copy_attr(self.ema, model, include, exclude) From 74d2d031a3a11a975cc454b7b882e1e5ddffd386 Mon Sep 17 00:00:00 2001 From: FEIJINTI <83849113+FEIJINTI@users.noreply.github.com> Date: Sun, 21 Aug 2022 09:51:31 +0800 Subject: [PATCH 07/12] =?UTF-8?q?=E8=B0=83=E6=95=B4mask=E5=A4=A7=E5=B0=8F?= =?UTF-8?q?=E4=B8=BA256*256?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/config.py b/config.py index 2d3d7ca..d93b3e3 100644 --- a/config.py +++ b/config.py @@ -34,7 +34,7 @@ class Config: ai_conf_threshold = 0.5 # mask parameter - target_size = (1024, 1024) # (Width, Height) of mask + target_size = (256, 256) # (Width, Height) of mask valve_merge_size = 2 # 每两个喷阀当中有任意一个出现杂质则认为都是杂质 valve_horizontal_padding = 3 # 喷阀横向膨胀的尺寸,应该是奇数,3时表示左右各膨胀1 max_open_valve_limit = 25 # 最大同时开启喷阀限制,按照电流计算,当前的喷阀可以开启的喷阀 600W的电源 / 12V电源 = 50A, 一个阀门1A From 5435723b4c44e8cb50700504371f03fc0bf90c2e Mon Sep 17 00:00:00 2001 From: "li.zhenye" Date: Mon, 22 Aug 2022 19:09:03 +0800 Subject: [PATCH 08/12] 
=?UTF-8?q?[fix]=20=E4=BF=AE=E5=A4=8D=E4=BA=86?= =?UTF-8?q?=E4=B8=80=E4=B8=AA=E5=8F=AF=E8=83=BD=E5=87=BA=E7=8E=B0bug?= =?UTF-8?q?=E7=9A=84=E5=9C=B0=E6=96=B9?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- main.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/main.py b/main.py index 741d4cf..207b8b2 100755 --- a/main.py +++ b/main.py @@ -122,7 +122,7 @@ def main(only_spec=False, only_color=False, if_merge=False, interval_time=None, if single_spec: output_fifos = [mask_fifo_path, ] elif single_color: - output_fifos = [rgb_fifo_path, ] + output_fifos = [rgb_mask_fifo_path, ] else: output_fifos = [mask_fifo_path, rgb_mask_fifo_path] for fifo, mask in zip(output_fifos, masks): From 10a0a2b23aba90228ce99a658c02e6afa71106aa Mon Sep 17 00:00:00 2001 From: "li.zhenye" Date: Tue, 23 Aug 2022 10:46:47 +0800 Subject: [PATCH 09/12] =?UTF-8?q?[fix]=20=E6=9B=B4=E6=8D=A2=E6=A8=A1?= =?UTF-8?q?=E5=9E=8B?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/config.py b/config.py index d93b3e3..718bf17 100644 --- a/config.py +++ b/config.py @@ -30,7 +30,7 @@ class Config: threshold_low, threshold_high = 10, 230 threshold_s = 190 # 饱和度的最高允许值 rgb_size_threshold = 4 # rgb的尺寸限制 - ai_path = 'weights/best.pt' + ai_path = 'weights/best0823.pt' ai_conf_threshold = 0.5 # mask parameter From 55c879d0d4549efdb906755c61401fd1c960f129 Mon Sep 17 00:00:00 2001 From: "li.zhenye" Date: Tue, 23 Aug 2022 19:54:37 +0800 Subject: [PATCH 10/12] =?UTF-8?q?[fix]=20=E4=BF=AE=E5=A4=8Dai=20path?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- main.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/main.py b/main.py index 207b8b2..69e7bed 100755 --- a/main.py +++ b/main.py @@ -15,7 +15,8 @@ def main(only_spec=False, only_color=False, if_merge=False, interval_time=None, single_spec=False, single_color=False): spec_detector = SpecDetector(blk_model_path=Config.blk_model_path, pixel_model_path=Config.pixel_model_path) rgb_detector = RgbDetector(tobacco_model_path=Config.rgb_tobacco_model_path, - background_model_path=Config.rgb_background_model_path) + background_model_path=Config.rgb_background_model_path, + ai_path=Config.ai_path) _, _ = spec_detector.predict(np.ones((Config.nRows, Config.nCols, Config.nBands), dtype=float)*0.4),\ rgb_detector.predict(np.ones((Config.nRgbRows, Config.nRgbCols, Config.nRgbBands), dtype=np.uint8)*40) total_len = Config.nRows * Config.nCols * Config.nBands * 4 # float型变量, 4个字节 From b81c000d5a0c95cce37fb8e4b4d47b3c3a4d4ee0 Mon Sep 17 00:00:00 2001 From: "li.zhenye" Date: Tue, 23 Aug 2022 21:47:57 +0800 Subject: [PATCH 11/12] version 21 --- config.py | 4 ++-- main.py | 42 +++++++++++++++++++++++++++++------------- 2 files changed, 31 insertions(+), 15 deletions(-) diff --git a/config.py b/config.py index 718bf17..1f4e295 100644 --- a/config.py +++ b/config.py @@ -26,7 +26,7 @@ class Config: # rgb模型参数 rgb_tobacco_model_path = r"weights/tobacco_dt_2022-08-05_10-38.model" - rgb_background_model_path = r"weights/background_dt_2022-08-09_16-08.model" + rgb_background_model_path = r"weights/background_dt_2022-08-22_22-15.model" threshold_low, threshold_high = 10, 230 threshold_s = 190 # 饱和度的最高允许值 rgb_size_threshold = 4 # rgb的尺寸限制 @@ -38,7 +38,7 @@ class Config: valve_merge_size = 2 # 每两个喷阀当中有任意一个出现杂质则认为都是杂质 valve_horizontal_padding = 3 # 喷阀横向膨胀的尺寸,应该是奇数,3时表示左右各膨胀1 
max_open_valve_limit = 25 # 最大同时开启喷阀限制,按照电流计算,当前的喷阀可以开启的喷阀 600W的电源 / 12V电源 = 50A, 一个阀门1A - + max_time_spent = 200 # save part offset_vertical = 0 diff --git a/main.py b/main.py index 69e7bed..9937170 100755 --- a/main.py +++ b/main.py @@ -22,51 +22,58 @@ def main(only_spec=False, only_color=False, if_merge=False, interval_time=None, total_len = Config.nRows * Config.nCols * Config.nBands * 4 # float型变量, 4个字节 total_rgb = Config.nRgbRows * Config.nRgbCols * Config.nRgbBands * 1 # int型变量 if not single_color: + logging.info("create color fifo") if not os.access(img_fifo_path, os.F_OK): os.mkfifo(img_fifo_path, 0o777) if not os.access(mask_fifo_path, os.F_OK): os.mkfifo(mask_fifo_path, 0o777) if not single_spec: + logging.info("create rgb fifo") if not os.access(rgb_fifo_path, os.F_OK): os.mkfifo(rgb_fifo_path, 0o777) if not os.access(rgb_mask_fifo_path, os.F_OK): os.mkfifo(rgb_mask_fifo_path, 0o777) logging.info(f"请注意!正在以调试模式运行程序,输出的信息可能较多。") + # specially designed for Miaow. if (interval_time is not None) and (delay_repeat_time is not None): interval_time = float(interval_time) / 1000.0 delay_repeat_time = int(delay_repeat_time) logging.warning(f'Delay {interval_time*1000:.2f}ms will be added per {delay_repeat_time} frames') delay_repeat_time_count = 0 while True: - if not single_color: + img_data, rgb_data = None, None + if single_spec: fd_img = os.open(img_fifo_path, os.O_RDONLY) # spec data read - data = os.read(fd_img, total_len) - if len(data) < 3: + data_total = os.read(fd_img, total_len) + if len(data_total) < 3: try: - threshold = int(float(data)) + threshold = int(float(data_total)) Config.spec_size_threshold = threshold logging.info(f'[INFO] Get spec threshold: {threshold}') except Exception as e: logging.error( - f'毁灭性错误:收到长度小于3却无法转化为整数spec_size_threshold的网络报文,报文内容为 {data},' + f'毁灭性错误:收到长度小于3却无法转化为整数spec_size_threshold的网络报文,报文内容为 {data_total},' f' 错误为 {e}.') + if single_spec: + continue else: - data_total = data + data_total = data_total os.close(fd_img) try: img_data = np.frombuffer(data_total, dtype=np.float32).reshape((Config.nRows, Config.nBands, -1)) \ .transpose(0, 2, 1) + print(f"get image_shape {img_data.shape}") except Exception as e: logging.error(f'毁灭性错误!收到的光谱数据长度为{len(data_total)}无法转化成指定的形状 {e}') - if not single_spec: + if single_color: fd_rgb = os.open(rgb_fifo_path, os.O_RDONLY) # rgb data read - rgb_data = os.read(fd_rgb, total_rgb) - if len(rgb_data) < 3: + rgb_data_total = os.read(fd_rgb, total_rgb) + if len(rgb_data_total) < 3: try: - rgb_threshold = int(float(rgb_data)) + rgb_threshold = int(float(rgb_data_total)) Config.rgb_size_threshold = rgb_threshold logging.info(f'Get rgb threshold: {rgb_threshold}') except Exception as e: @@ -74,23 +81,29 @@ def main(only_spec=False, only_color=False, if_merge=False, interval_time=None, f' 错误为 {e}.') continue else: - rgb_data_total = rgb_data + rgb_data_total = rgb_data_total os.close(fd_rgb) try: rgb_data = np.frombuffer(rgb_data_total, dtype=np.uint8).reshape((Config.nRgbRows, Config.nRgbCols, -1)) + print(f"get rgb_data shape {rgb_data.shape}") except Exception as e: - logging.error(f'毁灭性错误!收到的rgb数据长度为{len(rgb_data)}无法转化成指定形状 {e}') + logging.error(f'毁灭性错误!收到的rgb数据长度为{len(rgb_data_total)}无法转化成指定形状 {e}') # 识别 read since = time.time() # predict if single_spec or single_color: + print('start predict') if single_spec: + print('spec predict', img_data.shape) mask_spec = spec_detector.predict(img_data).astype(np.uint8) masks = [mask_spec, ] + print('spectral mask shape:', masks[0].shape) else: + print('rgb predict', 
rgb_data.shape) mask_rgb = rgb_detector.predict(rgb_data).astype(np.uint8) masks = [mask_rgb, ] + print("rgb mask shape: ", masks[0].shape) else: if only_spec: # 光谱识别 @@ -127,8 +140,10 @@ def main(only_spec=False, only_color=False, if_merge=False, interval_time=None, else: output_fifos = [mask_fifo_path, rgb_mask_fifo_path] for fifo, mask in zip(output_fifos, masks): + print("open fifo") fd_mask = os.open(fifo, os.O_WRONLY) os.write(fd_mask, mask.tobytes()) + print("close fifo") os.close(fd_mask) time_spent = (time.time() - since) * 1000 predict_by = 'spec' if single_spec else 'rgb' if single_color else 'spec+rgb' @@ -162,4 +177,5 @@ if __name__ == '__main__': console_handler.setLevel(logging.DEBUG if args.d else logging.WARNING) logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s', handlers=[file_handler, console_handler], level=logging.DEBUG) - main(only_spec=args.os, only_color=args.oc, if_merge=args.m, interval_time=args.dt, delay_repeat_time=args.df) + main(only_spec=args.os, only_color=args.oc, if_merge=args.m, interval_time=args.dt, delay_repeat_time=args.df, + single_spec=args.ss, single_color=args.sc) From 127e57eca541997012ca67863b4a63f48db1ff48 Mon Sep 17 00:00:00 2001 From: "li.zhenye" Date: Wed, 24 Aug 2022 09:39:44 +0800 Subject: [PATCH 12/12] perfect yolo version --- config.py | 2 +- main.py | 9 --------- transmit.py | 2 -- 3 files changed, 1 insertion(+), 12 deletions(-) diff --git a/config.py b/config.py index 1f4e295..1d88d64 100644 --- a/config.py +++ b/config.py @@ -34,7 +34,7 @@ class Config: ai_conf_threshold = 0.5 # mask parameter - target_size = (256, 256) # (Width, Height) of mask + target_size = (1024, 1024) # (Width, Height) of mask valve_merge_size = 2 # 每两个喷阀当中有任意一个出现杂质则认为都是杂质 valve_horizontal_padding = 3 # 喷阀横向膨胀的尺寸,应该是奇数,3时表示左右各膨胀1 max_open_valve_limit = 25 # 最大同时开启喷阀限制,按照电流计算,当前的喷阀可以开启的喷阀 600W的电源 / 12V电源 = 50A, 一个阀门1A diff --git a/main.py b/main.py index 9937170..4771e10 100755 --- a/main.py +++ b/main.py @@ -63,7 +63,6 @@ def main(only_spec=False, only_color=False, if_merge=False, interval_time=None, try: img_data = np.frombuffer(data_total, dtype=np.float32).reshape((Config.nRows, Config.nBands, -1)) \ .transpose(0, 2, 1) - print(f"get image_shape {img_data.shape}") except Exception as e: logging.error(f'毁灭性错误!收到的光谱数据长度为{len(data_total)}无法转化成指定的形状 {e}') @@ -85,7 +84,6 @@ def main(only_spec=False, only_color=False, if_merge=False, interval_time=None, os.close(fd_rgb) try: rgb_data = np.frombuffer(rgb_data_total, dtype=np.uint8).reshape((Config.nRgbRows, Config.nRgbCols, -1)) - print(f"get rgb_data shape {rgb_data.shape}") except Exception as e: logging.error(f'毁灭性错误!收到的rgb数据长度为{len(rgb_data_total)}无法转化成指定形状 {e}') @@ -93,17 +91,12 @@ def main(only_spec=False, only_color=False, if_merge=False, interval_time=None, since = time.time() # predict if single_spec or single_color: - print('start predict') if single_spec: - print('spec predict', img_data.shape) mask_spec = spec_detector.predict(img_data).astype(np.uint8) masks = [mask_spec, ] - print('spectral mask shape:', masks[0].shape) else: - print('rgb predict', rgb_data.shape) mask_rgb = rgb_detector.predict(rgb_data).astype(np.uint8) masks = [mask_rgb, ] - print("rgb mask shape: ", masks[0].shape) else: if only_spec: # 光谱识别 @@ -140,10 +133,8 @@ def main(only_spec=False, only_color=False, if_merge=False, interval_time=None, else: output_fifos = [mask_fifo_path, rgb_mask_fifo_path] for fifo, mask in zip(output_fifos, masks): - print("open fifo") fd_mask = os.open(fifo, os.O_WRONLY) 
os.write(fd_mask, mask.tobytes()) - print("close fifo") os.close(fd_mask) time_spent = (time.time() - since) * 1000 predict_by = 'spec' if single_spec else 'rgb' if single_color else 'spec+rgb' diff --git a/transmit.py b/transmit.py index 17e91e4..821385e 100644 --- a/transmit.py +++ b/transmit.py @@ -9,8 +9,6 @@ from config import Config from models import SpecDetector, RgbDetector import typing import logging -logging.basicConfig(format='%(asctime)s %(levelname)s %(name)s %(message)s', - level=logging.WARNING) class Transmitter(object):
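
For reference, the mask hand-off in main.py writes each detection mask into a named pipe as raw
uint8 bytes (os.write(fd_mask, mask.tobytes())). Below is a minimal sketch of the consumer side of
that hand-off; the fifo path and the assumption that one frame is width*height bytes with
(width, height) = Config.target_size = (1024, 1024) are illustrative only, not taken from the
repository.

import os
import numpy as np

def read_one_mask(fifo_path, width=1024, height=1024):
    # Blocks until the producer (main.py) opens the FIFO for writing, then reads one full
    # uint8 mask of width*height bytes, mirroring os.write(fd_mask, mask.tobytes()) above.
    expected = width * height
    fd = os.open(fifo_path, os.O_RDONLY)
    try:
        buf = b''
        while len(buf) < expected:
            chunk = os.read(fd, expected - len(buf))
            if not chunk:          # producer closed the pipe early
                break
            buf += chunk
    finally:
        os.close(fd)
    return np.frombuffer(buf, dtype=np.uint8).reshape(height, width)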