From c6fb6485ed26b29269f7f353759008f0334d70bc Mon Sep 17 00:00:00 2001
From: haven890611
Date: Sat, 30 Mar 2024 18:00:00 +0800
Subject: [PATCH 1/2] dataloader modified

---
 __pycache__/export.cpython-38.pyc | Bin 0 -> 24138 bytes
 __pycache__/val_dual.cpython-38.pyc | Bin 0 -> 12902 bytes
 data/hyps/hyp.scratch-high.yaml | 4 +-
 data/polyp_2.yaml | 18 +
 models/.ipynb_checkpoints/yolo-checkpoint.py | 767 ++++++++++
 models/__pycache__/__init__.cpython-38.pyc | Bin 0 -> 137 bytes
 models/__pycache__/common.cpython-38.pyc | Bin 0 -> 52011 bytes
 .../__pycache__/experimental.cpython-38.pyc | Bin 0 -> 10271 bytes
 models/__pycache__/yolo.cpython-38.pyc | Bin 0 -> 33143 bytes
 models/common.py | 28 +-
 models/yolo.py | 2 +-
 requirements.txt | 2 -
 run.sh | 7 +
 train_dual.py | 9 +-
 .../.ipynb_checkpoints/augment-checkpoint.py | 970 +++++++++++++
 .../augmentations-checkpoint.py | 395 +++++
 .../dataloaders-checkpoint.py | 1278 +++++++++++++++++
 .../.ipynb_checkpoints/general-checkpoint.py | 1193 +++++++++++++++
 .../.ipynb_checkpoints/instance-checkpoint.py | 390 +++++
 .../.ipynb_checkpoints/metrics-checkpoint.py | 395 +++++
 utils/__pycache__/__init__.cpython-38.pyc | Bin 0 -> 2496 bytes
 utils/__pycache__/augment.cpython-38.pyc | Bin 0 -> 32715 bytes
 .../__pycache__/augmentations.cpython-38.pyc | Bin 0 -> 13760 bytes
 utils/__pycache__/autoanchor.cpython-38.pyc | Bin 0 -> 6439 bytes
 utils/__pycache__/autobatch.cpython-38.pyc | Bin 0 -> 2481 bytes
 utils/__pycache__/callbacks.cpython-38.pyc | Bin 0 -> 2593 bytes
 utils/__pycache__/dataloaders.cpython-38.pyc | Bin 0 -> 43824 bytes
 utils/__pycache__/downloads.cpython-38.pyc | Bin 0 -> 3913 bytes
 utils/__pycache__/general.cpython-38.pyc | Bin 0 -> 41309 bytes
 utils/__pycache__/instance.cpython-38.pyc | Bin 0 -> 13151 bytes
 utils/__pycache__/lion.cpython-38.pyc | Bin 0 -> 2318 bytes
 .../__pycache__/loss_tal_dual.cpython-38.pyc | Bin 0 -> 10964 bytes
 utils/__pycache__/metrics.cpython-38.pyc | Bin 0 -> 12394 bytes
 utils/__pycache__/plots.cpython-38.pyc | Bin 0 -> 21610 bytes
 utils/__pycache__/torch_utils.cpython-38.pyc | Bin 0 -> 18274 bytes
 utils/augment.py | 970 +++++++++++++
 utils/dataloaders.py | 223 +--
 utils/general.py | 60 +-
 utils/instance.py | 390 +++++
 .../__pycache__/__init__.cpython-38.pyc | Bin 0 -> 13573 bytes
 .../__pycache__/__init__.cpython-38.pyc | Bin 0 -> 152 bytes
 .../__pycache__/clearml_utils.cpython-38.pyc | Bin 0 -> 5585 bytes
 .../comet/__pycache__/__init__.cpython-38.pyc | Bin 0 -> 14797 bytes
 .../__pycache__/comet_utils.cpython-38.pyc | Bin 0 -> 4179 bytes
 .../wandb/__pycache__/__init__.cpython-38.pyc | Bin 0 -> 150 bytes
 .../__pycache__/wandb_utils.cpython-38.pyc | Bin 0 -> 19695 bytes
 utils/metrics.py | 44 +-
 .../__pycache__/__init__.cpython-38.pyc | Bin 0 -> 144 bytes
 .../__pycache__/general.cpython-38.pyc | Bin 0 -> 4585 bytes
 utils/tal/__pycache__/__init__.cpython-38.pyc | Bin 0 -> 140 bytes
 .../anchor_generator.cpython-38.pyc | Bin 0 -> 1546 bytes
 utils/tal/__pycache__/assigner.cpython-38.pyc | Bin 0 -> 6034 bytes
 val_dual.py | 10 +-
 53 files changed, 7026 insertions(+), 129 deletions(-)
 create mode 100644 __pycache__/export.cpython-38.pyc
 create mode 100644 __pycache__/val_dual.cpython-38.pyc
 create mode 100644 data/polyp_2.yaml
 create mode 100644 models/.ipynb_checkpoints/yolo-checkpoint.py
 create mode 100644 models/__pycache__/__init__.cpython-38.pyc
 create mode 100644 models/__pycache__/common.cpython-38.pyc
 create mode 100644 models/__pycache__/experimental.cpython-38.pyc
 create mode 100644 models/__pycache__/yolo.cpython-38.pyc
 create mode 100755 run.sh
 create mode 100644 utils/.ipynb_checkpoints/augment-checkpoint.py
 create mode 100644 utils/.ipynb_checkpoints/augmentations-checkpoint.py
 create mode 100644 utils/.ipynb_checkpoints/dataloaders-checkpoint.py
 create mode 100644 utils/.ipynb_checkpoints/general-checkpoint.py
 create mode 100644 utils/.ipynb_checkpoints/instance-checkpoint.py
 create mode 100644 utils/.ipynb_checkpoints/metrics-checkpoint.py
 create mode 100644 utils/__pycache__/__init__.cpython-38.pyc
 create mode 100644 utils/__pycache__/augment.cpython-38.pyc
 create mode 100644 utils/__pycache__/augmentations.cpython-38.pyc
 create mode 100644 utils/__pycache__/autoanchor.cpython-38.pyc
 create mode 100644 utils/__pycache__/autobatch.cpython-38.pyc
 create mode 100644 utils/__pycache__/callbacks.cpython-38.pyc
 create mode 100644 utils/__pycache__/dataloaders.cpython-38.pyc
 create mode 100644 utils/__pycache__/downloads.cpython-38.pyc
 create mode 100644 utils/__pycache__/general.cpython-38.pyc
 create mode 100644 utils/__pycache__/instance.cpython-38.pyc
 create mode 100644 utils/__pycache__/lion.cpython-38.pyc
 create mode 100644 utils/__pycache__/loss_tal_dual.cpython-38.pyc
 create mode 100644 utils/__pycache__/metrics.cpython-38.pyc
 create mode 100644 utils/__pycache__/plots.cpython-38.pyc
 create mode 100644 utils/__pycache__/torch_utils.cpython-38.pyc
 create mode 100644 utils/augment.py
 create mode 100644 utils/instance.py
 create mode 100644 utils/loggers/__pycache__/__init__.cpython-38.pyc
 create mode 100644 utils/loggers/clearml/__pycache__/__init__.cpython-38.pyc
 create mode 100644 utils/loggers/clearml/__pycache__/clearml_utils.cpython-38.pyc
 create mode 100644 utils/loggers/comet/__pycache__/__init__.cpython-38.pyc
 create mode 100644 utils/loggers/comet/__pycache__/comet_utils.cpython-38.pyc
 create mode 100644 utils/loggers/wandb/__pycache__/__init__.cpython-38.pyc
 create mode 100644 utils/loggers/wandb/__pycache__/wandb_utils.cpython-38.pyc
 create mode 100644 utils/segment/__pycache__/__init__.cpython-38.pyc
 create mode 100644 utils/segment/__pycache__/general.cpython-38.pyc
 create mode 100644 utils/tal/__pycache__/__init__.cpython-38.pyc
 create mode 100644 utils/tal/__pycache__/anchor_generator.cpython-38.pyc
 create mode 100644 utils/tal/__pycache__/assigner.cpython-38.pyc

diff --git a/__pycache__/export.cpython-38.pyc b/__pycache__/export.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0e6a8dad02d45d8906baf49450a4a55d4a708d41
GIT binary patch
literal 24138
zC29Ek6uU$Lku(gpX@^s+9ohl>w+P)2;tJPedCosZ6;Pgd9$58~SgsE>**OoErf2;0 z(znCF#6e5#2r9pOZDIEA>>c~R{C?xV|Ku~fT(O(jPL9x0h$Xu|YU3aU=njNJxgxOZ zOE`XT?GhFvpY`nLd9jQmiDtc5r_kuPC`CAdf++~X229)KgGL4L+yJ<)OM48Tc@_hCEbozYUiA9gI;C-$^!kiy+$-%6$_- zJvKf*LFY&HBbhRYrevcxk>3&>wAYpFCP=xyS20}(+kp)!>say@iqudK8l$>SO$p#s zo((B8K0a=P0qD!uCK7@E+-ccWhNyL8?#3M6aeBLGiC@L2zO^jd_&K3Rih(>8O}D`ysq&;b=2%XHD^#DVqJkmDv! z(w437-C{uEaYjY!dly^}Lcyb1hgksDD@B&d1r~Tf;Iu8nAm#Cg1f0-8Y(DXq}{u_zv;10LoEMpXo6ArCwcTM62c(?J*)6jUgPQDF;gQsJao z1B)mdB=g?Zx&|g4$C-3W7zaOiv^7GiDL65XQy{OJ$+`+abc+}S>2sx*g7#}rvz&D3 zEzap|>UoBjYE@3+Ixs6$!M{&q*h2vwcLcHJjF%B2lmS5$P1SJxiIXM|V5O*4DWOgc z60SOKz_A2xr`nGxAatLT@Cr;U1*Z+l3HJ*O9FY+~8seCHDfSKp1o`pX2!cqqhyy83 zI)u}4LMY3oDW>>>P7*kA6z`+p76pW`lA9^>1&YRDI%FYh1b?K;c(oFA zd4`w4-~1(IMQJAh1U;^5()Pf_0KjrO3UfMk@z*%*6aM>@-Sy3EwF|zWPcb@821^Ao zvUVWb9Gu>y=?IT32I3@Wdl(~kndii*8n&SuBs1A;4Z4Zq2@Dd4sC1r8urQ906Svl~ zZk1Lqu$GFI#Za`CqG>Sr2TBoceLF;L-2s%yx1`ebANA%6Y$W|0H^WRcs z!DLG~MaG#T|DkH{nadX|p4(IgWCXS-wXnRt2yW)2-J3_1iexc}y8;Ij)XZc^Jg1<)v6Qn`Xw1!TC z+E!6c+Ej5mC!P*0D#ZUTMRn|ZOg^;2psM}`m3%>gvMT-=V)yZQk07SC!*QGe9>i%u zx6x{L>HTJ>I#iA#H*Q3A`r2#8&A6U4Y@8}|>TxrQ^TN13WE{l#A=0|>+TYS!e`zI* zUs^4Qp+pD5UX`} z*M(70O&Y@}#@DV}sMmou&fwjPvkx0zkdJR&`ZV%v)z6^WfqB^m&94`EgDBZ+T8Opl zo#sijaNg`zt#+YKw|)@iM^Um9|5nh(Ko2-2H9!Z|K8>3GM3Ez)P86m4(N+ikoG6^d zv;8B5L$dV-9`DaVcCrzH|G^TX6B68+u;?fqr>2%dx+ph-n-7#9Lq44+0;N)=lOjTw zeRh?^l^?bTxg9t$uwDTDQsp{DR9*SmX>mVt==#r2PA_WpvHo2-1GoZ&t3EHxW-Eu7 zjf*}z=_9BCtbJFFVRnoiXD7C6q>w(jD?Nhr=&tk_(&M|*6G%_)N>3quYFGL+(r0$1 z&w?h?pvk#iqdLznyq3O*I+u3UnL+xEUFpk6U)hzuidkJ_*SBWXh+%DXpgDR0H&3LV zPDtI-3OHFRBpNO3#-=_8OEZl$a$AwxrqZpV9p_03$pH1pmc2P!kA^OFy`7}0vIxiN zno)gkQ}w76Tx?v9GWp*Ge?Td@xeTnGz*=^T)p}yvoBmoKsmJJw$aq!M+mz*?DxUCC zxPSnU7Kf3l3I0p8!~YM0dh2emfxDCy!WuTA5B1pCm~y)75f!N?)k`50JV%9lwryfn z!9G#VuK|EaQNtvv$CUEoWV6@1Cz&}qvs(dsQ|vQlf7Tc&b*$*QIU-kn02 zB;30(a_{m~WDu!*saD#3O1*7|6HS$JR47P@Cyd%q)1x*a1MpVJ(zkJt+`Is_iym<- zhq?u&O!fLODJOJ7_o7Da{J1-APg2Fn(W%i>qo*l~<6gA}A-KScG&~xty`v`WCh`F4 zz*ktfHhYiUrkgkC6{f zQD^z#VuDId$G*O=8NocaSBQq$vG%AvjXnMDl7GqDh5Ko zR^G0xjM}WCT-)uNebfj!5B0+}&$VyQPfgpDME7nM<~~5P9kOHZ0QCz?&f(_HjmP%Q z*;xhM&CSj}*;NvAg`YmAuqg!w1g*E$z%zLQYcj=pg3MPX5mFsN9Qzr@C=9&zt?L`} zl;pS>$Qa!$R98Cq-@`NmYhiBgQRr!GQkonh&sEnCc^fj63CjO1#r{48|AvB}Q_zV3 zSKm;a*iBXA{2$Sm*U^X|5jF`&X=#l_-A;G2A0qvG@sLG;nu-(qG^jS@U#4D_H!z`VyYT zM{*i&5CGr)5zy$Ddie})5^06FNNgDVDaCNwjBrVZlus5~=nhcj8FJcKbOmjv+Y!e- z!xe-7gvFmyj*e#!o(`$wHqszsak1SCOR!{K|6PN6Tm8KJQ&qb~ouvPUD&L^;tX1sA zmrgj=`!M!4m5Mi0?O#T4yC5OD=;AKhyJ2qVOLT^5;;xj59`B=t{c47t5-2#^hnDu^ z-POE*@Ev1$YKAv5rPb)h8Eo^`|PAu{$(%S z22fY<`ewPrk|+UR8$#?GaM8~8-jU{Z{jh!6o*bW?oT81!YJf_y2_T^JY->N)KKgcB zuZbR@O2r~dtIJKxK$Ddy%kfIdZc3lfa2{3rR^{&qC;-pOLnR=e*Ab*l(op#Q>nlrt z^2hZ14_9vUe}|0Q4jedKlXZXr;FacoflU4xf>*$#wEXv?bY%v~dLp?{E0a@d*AtLq zN^jcybNUn=o|+s#vjn`w%#}oa5Lfx^v8MVNr$pGc60v|J>1t1EkpC;Z_#aacqu}pQ zptdC0j~wfw@vkWMM-==M3XW3n?-2y8@TW4*GI0M)Z{G}LV*eVzX&s;o$){F85p2)G@9 zAu(`o9OX1v^+{OauFNmk%_W{07r{3Vd zroR3K1vr@lcz^-Yp^I29<`)%)6C$THm5?$fV$=fZigY@NLAzCo&)^N+zI*=`U!`*3 zYfVYa&?O^UP`RSIS7~OSGbc2xJRk|BQl%W?Lwjl>UqsfH1I!~-Z*Q2pV+VL4R?`;d z6Y|Ibys5<{*%m@B1RRC9QY*!jaBioV0(KNWr7)LH%G*bQf>`z=7BpLK0WA4 zWe}eU!Kcfpq 1 else nn.Identity() + + def forward(self, x): + shape = x[0].shape # BCHW + for i in range(self.nl): + x[i] = torch.cat((self.cv2[i](x[i]), self.cv3[i](x[i])), 1) + if self.training: + return x + elif self.dynamic or self.shape != shape: + self.anchors, self.strides = (x.transpose(0, 1) for x in make_anchors(x, self.stride, 0.5)) + self.shape = shape + + box, cls = torch.cat([xi.view(shape[0], self.no, -1) for xi in x], 
+        box, cls = torch.cat([xi.view(shape[0], self.no, -1) for xi in x], 2).split((self.reg_max * 4, self.nc), 1)
+        dbox = dist2bbox(self.dfl(box), self.anchors.unsqueeze(0), xywh=True, dim=1) * self.strides
+        y = torch.cat((dbox, cls.sigmoid()), 1)
+        return y if self.export else (y, x)
+
+    def bias_init(self):
+        # Initialize Detect() biases, WARNING: requires stride availability
+        m = self  # self.model[-1]  # Detect() module
+        # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1
+        # ncf = math.log(0.6 / (m.nc - 0.999999)) if cf is None else torch.log(cf / cf.sum())  # nominal class frequency
+        for a, b, s in zip(m.cv2, m.cv3, m.stride):  # from
+            a[-1].bias.data[:] = 1.0  # box
+            b[-1].bias.data[:m.nc] = math.log(5 / m.nc / (640 / s) ** 2)  # cls (5 objects and 80 classes per 640 image)
+
+
+class DDetect(nn.Module):
+    # YOLO Detect head for detection models
+    dynamic = False  # force grid reconstruction
+    export = False  # export mode
+    shape = None
+    anchors = torch.empty(0)  # init
+    strides = torch.empty(0)  # init
+
+    def __init__(self, nc=80, ch=(), inplace=True):  # detection layer
+        super().__init__()
+        self.nc = nc  # number of classes
+        self.nl = len(ch)  # number of detection layers
+        self.reg_max = 16
+        self.no = nc + self.reg_max * 4  # number of outputs per anchor
+        self.inplace = inplace  # use inplace ops (e.g. slice assignment)
+        self.stride = torch.zeros(self.nl)  # strides computed during build
+
+        c2, c3 = make_divisible(max((ch[0] // 4, self.reg_max * 4, 16)), 4), max((ch[0], min((self.nc * 2, 128))))  # channels
+        self.cv2 = nn.ModuleList(
+            nn.Sequential(Conv(x, c2, 3), Conv(c2, c2, 3, g=4), nn.Conv2d(c2, 4 * self.reg_max, 1, groups=4)) for x in ch)
+        self.cv3 = nn.ModuleList(
+            nn.Sequential(Conv(x, c3, 3), Conv(c3, c3, 3), nn.Conv2d(c3, self.nc, 1)) for x in ch)
+        self.dfl = DFL(self.reg_max) if self.reg_max > 1 else nn.Identity()
+
+    def forward(self, x):
+        shape = x[0].shape  # BCHW
+        for i in range(self.nl):
+            x[i] = torch.cat((self.cv2[i](x[i]), self.cv3[i](x[i])), 1)
+        if self.training:
+            return x
+        elif self.dynamic or self.shape != shape:
+            self.anchors, self.strides = (x.transpose(0, 1) for x in make_anchors(x, self.stride, 0.5))
+            self.shape = shape
+
+        box, cls = torch.cat([xi.view(shape[0], self.no, -1) for xi in x], 2).split((self.reg_max * 4, self.nc), 1)
+        dbox = dist2bbox(self.dfl(box), self.anchors.unsqueeze(0), xywh=True, dim=1) * self.strides
+        y = torch.cat((dbox, cls.sigmoid()), 1)
+        return y if self.export else (y, x)
+
+    def bias_init(self):
+        # Initialize Detect() biases, WARNING: requires stride availability
+        m = self  # self.model[-1]  # Detect() module
+        # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1
+        # ncf = math.log(0.6 / (m.nc - 0.999999)) if cf is None else torch.log(cf / cf.sum())  # nominal class frequency
+        for a, b, s in zip(m.cv2, m.cv3, m.stride):  # from
+            a[-1].bias.data[:] = 1.0  # box
+            b[-1].bias.data[:m.nc] = math.log(5 / m.nc / (640 / s) ** 2)  # cls (5 objects and 80 classes per 640 image)
+
+
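+# Note (added for clarity): the Dual* heads below take twice as many input feature maps
+# as detection levels: ch[:nl] feeds one branch (cv2/cv3, decoded by dfl) and ch[nl:]
+# feeds a second, independently decoded branch (cv4/cv5, decoded by dfl2). Training
+# returns both raw branches so each can receive its own loss; inference decodes both.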
+class DualDetect(nn.Module):
+    # YOLO Detect head for detection models
+    dynamic = False  # force grid reconstruction
+    export = False  # export mode
+    shape = None
+    anchors = torch.empty(0)  # init
+    strides = torch.empty(0)  # init
+
+    def __init__(self, nc=80, ch=(), inplace=True):  # detection layer
+        super().__init__()
+        self.nc = nc  # number of classes
+        self.nl = len(ch) // 2  # number of detection layers
+        self.reg_max = 16
+        self.no = nc + self.reg_max * 4  # number of outputs per anchor
+        self.inplace = inplace  # use inplace ops (e.g. slice assignment)
+        self.stride = torch.zeros(self.nl)  # strides computed during build
+
+        c2, c3 = max((ch[0] // 4, self.reg_max * 4, 16)), max((ch[0], min((self.nc * 2, 128))))  # channels
+        c4, c5 = max((ch[self.nl] // 4, self.reg_max * 4, 16)), max((ch[self.nl], min((self.nc * 2, 128))))  # channels
+        self.cv2 = nn.ModuleList(
+            nn.Sequential(Conv(x, c2, 3), Conv(c2, c2, 3), nn.Conv2d(c2, 4 * self.reg_max, 1)) for x in ch[:self.nl])
+        self.cv3 = nn.ModuleList(
+            nn.Sequential(Conv(x, c3, 3), Conv(c3, c3, 3), nn.Conv2d(c3, self.nc, 1)) for x in ch[:self.nl])
+        self.cv4 = nn.ModuleList(
+            nn.Sequential(Conv(x, c4, 3), Conv(c4, c4, 3), nn.Conv2d(c4, 4 * self.reg_max, 1)) for x in ch[self.nl:])
+        self.cv5 = nn.ModuleList(
+            nn.Sequential(Conv(x, c5, 3), Conv(c5, c5, 3), nn.Conv2d(c5, self.nc, 1)) for x in ch[self.nl:])
+        self.dfl = DFL(self.reg_max)
+        self.dfl2 = DFL(self.reg_max)
+
+    def forward(self, x):
+        shape = x[0].shape  # BCHW
+        d1 = []
+        d2 = []
+        for i in range(self.nl):
+            d1.append(torch.cat((self.cv2[i](x[i]), self.cv3[i](x[i])), 1))
+            d2.append(torch.cat((self.cv4[i](x[self.nl+i]), self.cv5[i](x[self.nl+i])), 1))
+        if self.training:
+            return [d1, d2]
+        elif self.dynamic or self.shape != shape:
+            self.anchors, self.strides = (d1.transpose(0, 1) for d1 in make_anchors(d1, self.stride, 0.5))
+            self.shape = shape
+
+        box, cls = torch.cat([di.view(shape[0], self.no, -1) for di in d1], 2).split((self.reg_max * 4, self.nc), 1)
+        dbox = dist2bbox(self.dfl(box), self.anchors.unsqueeze(0), xywh=True, dim=1) * self.strides
+        box2, cls2 = torch.cat([di.view(shape[0], self.no, -1) for di in d2], 2).split((self.reg_max * 4, self.nc), 1)
+        dbox2 = dist2bbox(self.dfl2(box2), self.anchors.unsqueeze(0), xywh=True, dim=1) * self.strides
+        y = [torch.cat((dbox, cls.sigmoid()), 1), torch.cat((dbox2, cls2.sigmoid()), 1)]
+        return y if self.export else (y, [d1, d2])
+
+    def bias_init(self):
+        # Initialize Detect() biases, WARNING: requires stride availability
+        m = self  # self.model[-1]  # Detect() module
+        # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1
+        # ncf = math.log(0.6 / (m.nc - 0.999999)) if cf is None else torch.log(cf / cf.sum())  # nominal class frequency
+        for a, b, s in zip(m.cv2, m.cv3, m.stride):  # from
+            a[-1].bias.data[:] = 1.0  # box
+            b[-1].bias.data[:m.nc] = math.log(5 / m.nc / (640 / s) ** 2)  # cls (5 objects and 80 classes per 640 image)
+        for a, b, s in zip(m.cv4, m.cv5, m.stride):  # from
+            a[-1].bias.data[:] = 1.0  # box
+            b[-1].bias.data[:m.nc] = math.log(5 / m.nc / (640 / s) ** 2)  # cls (5 objects and 80 classes per 640 image)
+
+
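+# Note (added for clarity): the *DDetect variants differ from their plain counterparts
+# only in the regression branch: its channel count is rounded with make_divisible(..., 4)
+# and the second Conv plus the final 1x1 conv use grouped convolutions (g=4 / groups=4),
+# trading a little capacity for fewer parameters.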
+class DualDDetect(nn.Module):
+    # YOLO Detect head for detection models
+    dynamic = False  # force grid reconstruction
+    export = False  # export mode
+    shape = None
+    anchors = torch.empty(0)  # init
+    strides = torch.empty(0)  # init
+
+    def __init__(self, nc=80, ch=(), inplace=True):  # detection layer
+        super().__init__()
+        self.nc = nc  # number of classes
+        self.nl = len(ch) // 2  # number of detection layers
+        self.reg_max = 16
+        self.no = nc + self.reg_max * 4  # number of outputs per anchor
+        self.inplace = inplace  # use inplace ops (e.g. slice assignment)
+        self.stride = torch.zeros(self.nl)  # strides computed during build
+
+        c2, c3 = make_divisible(max((ch[0] // 4, self.reg_max * 4, 16)), 4), max((ch[0], min((self.nc * 2, 128))))  # channels
+        c4, c5 = make_divisible(max((ch[self.nl] // 4, self.reg_max * 4, 16)), 4), max((ch[self.nl], min((self.nc * 2, 128))))  # channels
+        self.cv2 = nn.ModuleList(
+            nn.Sequential(Conv(x, c2, 3), Conv(c2, c2, 3, g=4), nn.Conv2d(c2, 4 * self.reg_max, 1, groups=4)) for x in ch[:self.nl])
+        self.cv3 = nn.ModuleList(
+            nn.Sequential(Conv(x, c3, 3), Conv(c3, c3, 3), nn.Conv2d(c3, self.nc, 1)) for x in ch[:self.nl])
+        self.cv4 = nn.ModuleList(
+            nn.Sequential(Conv(x, c4, 3), Conv(c4, c4, 3, g=4), nn.Conv2d(c4, 4 * self.reg_max, 1, groups=4)) for x in ch[self.nl:])
+        self.cv5 = nn.ModuleList(
+            nn.Sequential(Conv(x, c5, 3), Conv(c5, c5, 3), nn.Conv2d(c5, self.nc, 1)) for x in ch[self.nl:])
+        self.dfl = DFL(self.reg_max)
+        self.dfl2 = DFL(self.reg_max)
+
+    def forward(self, x):
+        shape = x[0].shape  # BCHW
+        d1 = []
+        d2 = []
+        for i in range(self.nl):
+            d1.append(torch.cat((self.cv2[i](x[i]), self.cv3[i](x[i])), 1))
+            d2.append(torch.cat((self.cv4[i](x[self.nl+i]), self.cv5[i](x[self.nl+i])), 1))
+        if self.training:
+            return [d1, d2]
+        elif self.dynamic or self.shape != shape:
+            self.anchors, self.strides = (d1.transpose(0, 1) for d1 in make_anchors(d1, self.stride, 0.5))
+            self.shape = shape
+
+        box, cls = torch.cat([di.view(shape[0], self.no, -1) for di in d1], 2).split((self.reg_max * 4, self.nc), 1)
+        dbox = dist2bbox(self.dfl(box), self.anchors.unsqueeze(0), xywh=True, dim=1) * self.strides
+        box2, cls2 = torch.cat([di.view(shape[0], self.no, -1) for di in d2], 2).split((self.reg_max * 4, self.nc), 1)
+        dbox2 = dist2bbox(self.dfl2(box2), self.anchors.unsqueeze(0), xywh=True, dim=1) * self.strides
+        y = [torch.cat((dbox, cls.sigmoid()), 1), torch.cat((dbox2, cls2.sigmoid()), 1)]
+        return y if self.export else (y, [d1, d2])
+        #y = torch.cat((dbox2, cls2.sigmoid()), 1)
+        #return y if self.export else (y, d2)
+        #y1 = torch.cat((dbox, cls.sigmoid()), 1)
+        #y2 = torch.cat((dbox2, cls2.sigmoid()), 1)
+        #return [y1, y2] if self.export else [(y1, d1), (y2, d2)]
+        #return [y1, y2] if self.export else [(y1, y2), (d1, d2)]
+
+    def bias_init(self):
+        # Initialize Detect() biases, WARNING: requires stride availability
+        m = self  # self.model[-1]  # Detect() module
+        # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1
+        # ncf = math.log(0.6 / (m.nc - 0.999999)) if cf is None else torch.log(cf / cf.sum())  # nominal class frequency
+        for a, b, s in zip(m.cv2, m.cv3, m.stride):  # from
+            a[-1].bias.data[:] = 1.0  # box
+            b[-1].bias.data[:m.nc] = math.log(5 / m.nc / (640 / s) ** 2)  # cls (5 objects and 80 classes per 640 image)
+        for a, b, s in zip(m.cv4, m.cv5, m.stride):  # from
+            a[-1].bias.data[:] = 1.0  # box
+            b[-1].bias.data[:m.nc] = math.log(5 / m.nc / (640 / s) ** 2)  # cls (5 objects and 80 classes per 640 image)
+
+
+class TripleDetect(nn.Module):
+    # YOLO Detect head for detection models
+    dynamic = False  # force grid reconstruction
+    export = False  # export mode
+    shape = None
+    anchors = torch.empty(0)  # init
+    strides = torch.empty(0)  # init
+
+    def __init__(self, nc=80, ch=(), inplace=True):  # detection layer
+        super().__init__()
+        self.nc = nc  # number of classes
+        self.nl = len(ch) // 3  # number of detection layers
+        self.reg_max = 16
+        self.no = nc + self.reg_max * 4  # number of outputs per anchor
+        self.inplace = inplace  # use inplace ops (e.g. slice assignment)
+        self.stride = torch.zeros(self.nl)  # strides computed during build
+
+        c2, c3 = max((ch[0] // 4, self.reg_max * 4, 16)), max((ch[0], min((self.nc * 2, 128))))  # channels
+        c4, c5 = max((ch[self.nl] // 4, self.reg_max * 4, 16)), max((ch[self.nl], min((self.nc * 2, 128))))  # channels
+        c6, c7 = max((ch[self.nl * 2] // 4, self.reg_max * 4, 16)), max((ch[self.nl * 2], min((self.nc * 2, 128))))  # channels
+        self.cv2 = nn.ModuleList(
+            nn.Sequential(Conv(x, c2, 3), Conv(c2, c2, 3), nn.Conv2d(c2, 4 * self.reg_max, 1)) for x in ch[:self.nl])
+        self.cv3 = nn.ModuleList(
+            nn.Sequential(Conv(x, c3, 3), Conv(c3, c3, 3), nn.Conv2d(c3, self.nc, 1)) for x in ch[:self.nl])
+        self.cv4 = nn.ModuleList(
+            nn.Sequential(Conv(x, c4, 3), Conv(c4, c4, 3), nn.Conv2d(c4, 4 * self.reg_max, 1)) for x in ch[self.nl:self.nl*2])
+        self.cv5 = nn.ModuleList(
+            nn.Sequential(Conv(x, c5, 3), Conv(c5, c5, 3), nn.Conv2d(c5, self.nc, 1)) for x in ch[self.nl:self.nl*2])
+        self.cv6 = nn.ModuleList(
+            nn.Sequential(Conv(x, c6, 3), Conv(c6, c6, 3), nn.Conv2d(c6, 4 * self.reg_max, 1)) for x in ch[self.nl*2:self.nl*3])
+        self.cv7 = nn.ModuleList(
+            nn.Sequential(Conv(x, c7, 3), Conv(c7, c7, 3), nn.Conv2d(c7, self.nc, 1)) for x in ch[self.nl*2:self.nl*3])
+        self.dfl = DFL(self.reg_max)
+        self.dfl2 = DFL(self.reg_max)
+        self.dfl3 = DFL(self.reg_max)
+
+    def forward(self, x):
+        shape = x[0].shape  # BCHW
+        d1 = []
+        d2 = []
+        d3 = []
+        for i in range(self.nl):
+            d1.append(torch.cat((self.cv2[i](x[i]), self.cv3[i](x[i])), 1))
+            d2.append(torch.cat((self.cv4[i](x[self.nl+i]), self.cv5[i](x[self.nl+i])), 1))
+            d3.append(torch.cat((self.cv6[i](x[self.nl*2+i]), self.cv7[i](x[self.nl*2+i])), 1))
+        if self.training:
+            return [d1, d2, d3]
+        elif self.dynamic or self.shape != shape:
+            self.anchors, self.strides = (d1.transpose(0, 1) for d1 in make_anchors(d1, self.stride, 0.5))
+            self.shape = shape
+
+        box, cls = torch.cat([di.view(shape[0], self.no, -1) for di in d1], 2).split((self.reg_max * 4, self.nc), 1)
+        dbox = dist2bbox(self.dfl(box), self.anchors.unsqueeze(0), xywh=True, dim=1) * self.strides
+        box2, cls2 = torch.cat([di.view(shape[0], self.no, -1) for di in d2], 2).split((self.reg_max * 4, self.nc), 1)
+        dbox2 = dist2bbox(self.dfl2(box2), self.anchors.unsqueeze(0), xywh=True, dim=1) * self.strides
+        box3, cls3 = torch.cat([di.view(shape[0], self.no, -1) for di in d3], 2).split((self.reg_max * 4, self.nc), 1)
+        dbox3 = dist2bbox(self.dfl3(box3), self.anchors.unsqueeze(0), xywh=True, dim=1) * self.strides
+        y = [torch.cat((dbox, cls.sigmoid()), 1), torch.cat((dbox2, cls2.sigmoid()), 1), torch.cat((dbox3, cls3.sigmoid()), 1)]
+        return y if self.export else (y, [d1, d2, d3])
+
+    def bias_init(self):
+        # Initialize Detect() biases, WARNING: requires stride availability
+        m = self  # self.model[-1]  # Detect() module
+        # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1
+        # ncf = math.log(0.6 / (m.nc - 0.999999)) if cf is None else torch.log(cf / cf.sum())  # nominal class frequency
+        for a, b, s in zip(m.cv2, m.cv3, m.stride):  # from
+            a[-1].bias.data[:] = 1.0  # box
+            b[-1].bias.data[:m.nc] = math.log(5 / m.nc / (640 / s) ** 2)  # cls (5 objects and 80 classes per 640 image)
+        for a, b, s in zip(m.cv4, m.cv5, m.stride):  # from
+            a[-1].bias.data[:] = 1.0  # box
+            b[-1].bias.data[:m.nc] = math.log(5 / m.nc / (640 / s) ** 2)  # cls (5 objects and 80 classes per 640 image)
+        for a, b, s in zip(m.cv6, m.cv7, m.stride):  # from
+            a[-1].bias.data[:] = 1.0  # box
+            b[-1].bias.data[:m.nc] = math.log(5 / m.nc / (640 / s) ** 2)  # cls (5 objects and 80 classes per 640 image)
+
+
+class TripleDDetect(nn.Module):
+    # YOLO Detect head for detection models
+    dynamic = False  # force grid reconstruction
+    export = False  # export mode
+    shape = None
+    anchors = torch.empty(0)  # init
+    strides = torch.empty(0)  # init
+
+    def __init__(self, nc=80, ch=(), inplace=True):  # detection layer
+        super().__init__()
+        self.nc = nc  # number of classes
+        self.nl = len(ch) // 3  # number of detection layers
+        self.reg_max = 16
+        self.no = nc + self.reg_max * 4  # number of outputs per anchor
+        self.inplace = inplace  # use inplace ops (e.g. slice assignment)
+        self.stride = torch.zeros(self.nl)  # strides computed during build
+
+        c2, c3 = make_divisible(max((ch[0] // 4, self.reg_max * 4, 16)), 4), \
+                 max((ch[0], min((self.nc * 2, 128))))  # channels
+        c4, c5 = make_divisible(max((ch[self.nl] // 4, self.reg_max * 4, 16)), 4), \
+                 max((ch[self.nl], min((self.nc * 2, 128))))  # channels
+        c6, c7 = make_divisible(max((ch[self.nl * 2] // 4, self.reg_max * 4, 16)), 4), \
+                 max((ch[self.nl * 2], min((self.nc * 2, 128))))  # channels
+        self.cv2 = nn.ModuleList(
+            nn.Sequential(Conv(x, c2, 3), Conv(c2, c2, 3, g=4),
+                          nn.Conv2d(c2, 4 * self.reg_max, 1, groups=4)) for x in ch[:self.nl])
+        self.cv3 = nn.ModuleList(
+            nn.Sequential(Conv(x, c3, 3), Conv(c3, c3, 3), nn.Conv2d(c3, self.nc, 1)) for x in ch[:self.nl])
+        self.cv4 = nn.ModuleList(
+            nn.Sequential(Conv(x, c4, 3), Conv(c4, c4, 3, g=4),
+                          nn.Conv2d(c4, 4 * self.reg_max, 1, groups=4)) for x in ch[self.nl:self.nl*2])
+        self.cv5 = nn.ModuleList(
+            nn.Sequential(Conv(x, c5, 3), Conv(c5, c5, 3), nn.Conv2d(c5, self.nc, 1)) for x in ch[self.nl:self.nl*2])
+        self.cv6 = nn.ModuleList(
+            nn.Sequential(Conv(x, c6, 3), Conv(c6, c6, 3, g=4),
+                          nn.Conv2d(c6, 4 * self.reg_max, 1, groups=4)) for x in ch[self.nl*2:self.nl*3])
+        self.cv7 = nn.ModuleList(
+            nn.Sequential(Conv(x, c7, 3), Conv(c7, c7, 3), nn.Conv2d(c7, self.nc, 1)) for x in ch[self.nl*2:self.nl*3])
+        self.dfl = DFL(self.reg_max)
+        self.dfl2 = DFL(self.reg_max)
+        self.dfl3 = DFL(self.reg_max)
+
+    def forward(self, x):
+        shape = x[0].shape  # BCHW
+        d1 = []
+        d2 = []
+        d3 = []
+        for i in range(self.nl):
+            d1.append(torch.cat((self.cv2[i](x[i]), self.cv3[i](x[i])), 1))
+            d2.append(torch.cat((self.cv4[i](x[self.nl+i]), self.cv5[i](x[self.nl+i])), 1))
+            d3.append(torch.cat((self.cv6[i](x[self.nl*2+i]), self.cv7[i](x[self.nl*2+i])), 1))
+        if self.training:
+            return [d1, d2, d3]
+        elif self.dynamic or self.shape != shape:
+            self.anchors, self.strides = (d1.transpose(0, 1) for d1 in make_anchors(d1, self.stride, 0.5))
+            self.shape = shape
+
+        box, cls = torch.cat([di.view(shape[0], self.no, -1) for di in d1], 2).split((self.reg_max * 4, self.nc), 1)
+        dbox = dist2bbox(self.dfl(box), self.anchors.unsqueeze(0), xywh=True, dim=1) * self.strides
+        box2, cls2 = torch.cat([di.view(shape[0], self.no, -1) for di in d2], 2).split((self.reg_max * 4, self.nc), 1)
+        dbox2 = dist2bbox(self.dfl2(box2), self.anchors.unsqueeze(0), xywh=True, dim=1) * self.strides
+        box3, cls3 = torch.cat([di.view(shape[0], self.no, -1) for di in d3], 2).split((self.reg_max * 4, self.nc), 1)
+        dbox3 = dist2bbox(self.dfl3(box3), self.anchors.unsqueeze(0), xywh=True, dim=1) * self.strides
+        #y = [torch.cat((dbox, cls.sigmoid()), 1), torch.cat((dbox2, cls2.sigmoid()), 1), torch.cat((dbox3, cls3.sigmoid()), 1)]
+        #return y if self.export else (y, [d1, d2, d3])
+        y = torch.cat((dbox3, cls3.sigmoid()), 1)
+        return y if self.export else (y, d3)
+
+    def bias_init(self):
+        # Initialize Detect() biases, WARNING: requires stride availability
+        m = self  # self.model[-1]  # Detect() module
+        # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1
+        # ncf = math.log(0.6 / (m.nc - 0.999999)) if cf is None else torch.log(cf / cf.sum())  # nominal class frequency
+        for a, b, s in zip(m.cv2, m.cv3, m.stride):  # from
+            a[-1].bias.data[:] = 1.0  # box
+            b[-1].bias.data[:m.nc] = math.log(5 / m.nc / (640 / s) ** 2)  # cls (5 objects and 80 classes per 640 image)
+        for a, b, s in zip(m.cv4, m.cv5, m.stride):  # from
+            a[-1].bias.data[:] = 1.0  # box
+            b[-1].bias.data[:m.nc] = math.log(5 / m.nc / (640 / s) ** 2)  # cls (5 objects and 80 classes per 640 image)
+        for a, b, s in zip(m.cv6, m.cv7, m.stride):  # from
+            a[-1].bias.data[:] = 1.0  # box
+            b[-1].bias.data[:m.nc] = math.log(5 / m.nc / (640 / s) ** 2)  # cls (5 objects and 80 classes per 640 image)
+
+
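+# Note (added for clarity): Segment and Panoptic reuse Detect.forward and add a Proto
+# module that builds nm prototype masks from the highest-resolution feature map; cv4
+# predicts per-box mask coefficients that are combined with those prototypes by the
+# downstream mask post-processing.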
+class Segment(Detect):
+    # YOLO Segment head for segmentation models
+    def __init__(self, nc=80, nm=32, npr=256, ch=(), inplace=True):
+        super().__init__(nc, ch, inplace)
+        self.nm = nm  # number of masks
+        self.npr = npr  # number of protos
+        self.proto = Proto(ch[0], self.npr, self.nm)  # protos
+        self.detect = Detect.forward
+
+        c4 = max(ch[0] // 4, self.nm)
+        self.cv4 = nn.ModuleList(nn.Sequential(Conv(x, c4, 3), Conv(c4, c4, 3), nn.Conv2d(c4, self.nm, 1)) for x in ch)
+
+    def forward(self, x):
+        p = self.proto(x[0])
+        bs = p.shape[0]
+
+        mc = torch.cat([self.cv4[i](x[i]).view(bs, self.nm, -1) for i in range(self.nl)], 2)  # mask coefficients
+        x = self.detect(self, x)
+        if self.training:
+            return x, mc, p
+        return (torch.cat([x, mc], 1), p) if self.export else (torch.cat([x[0], mc], 1), (x[1], mc, p))
+
+
+class Panoptic(Detect):
+    # YOLO Panoptic head for panoptic segmentation models
+    def __init__(self, nc=80, sem_nc=93, nm=32, npr=256, ch=(), inplace=True):
+        super().__init__(nc, ch, inplace)
+        self.sem_nc = sem_nc
+        self.nm = nm  # number of masks
+        self.npr = npr  # number of protos
+        self.proto = Proto(ch[0], self.npr, self.nm)  # protos
+        self.uconv = UConv(ch[0], ch[0]//4, self.sem_nc+self.nc)
+        self.detect = Detect.forward
+
+        c4 = max(ch[0] // 4, self.nm)
+        self.cv4 = nn.ModuleList(nn.Sequential(Conv(x, c4, 3), Conv(c4, c4, 3), nn.Conv2d(c4, self.nm, 1)) for x in ch)
+
+    def forward(self, x):
+        p = self.proto(x[0])
+        s = self.uconv(x[0])
+        bs = p.shape[0]
+
+        mc = torch.cat([self.cv4[i](x[i]).view(bs, self.nm, -1) for i in range(self.nl)], 2)  # mask coefficients
+        x = self.detect(self, x)
+        if self.training:
+            return x, mc, p, s
+        return (torch.cat([x, mc], 1), p, s) if self.export else (torch.cat([x[0], mc], 1), (x[1], mc, p, s))
+
+
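+# Note (added for clarity): BaseModel executes the parsed nn.Sequential graph; m.f is
+# each layer's 'from' index (-1 means the previous layer), and only outputs whose layer
+# indices appear in self.save are kept in y for later skip connections.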
+class BaseModel(nn.Module):
+    # YOLO base model
+    def forward(self, x, profile=False, visualize=False):
+        return self._forward_once(x, profile, visualize)  # single-scale inference, train
+
+    def _forward_once(self, x, profile=False, visualize=False):
+        y, dt = [], []  # outputs
+        for m in self.model:
+            if m.f != -1:  # if not from previous layer
+                x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f]  # from earlier layers
+            if profile:
+                self._profile_one_layer(m, x, dt)
+            x = m(x)  # run
+            y.append(x if m.i in self.save else None)  # save output
+            if visualize:
+                feature_visualization(x, m.type, m.i, save_dir=visualize)
+        return x
+
+    def _profile_one_layer(self, m, x, dt):
+        c = m == self.model[-1]  # is final layer, copy input as inplace fix
+        o = thop.profile(m, inputs=(x.copy() if c else x,), verbose=False)[0] / 1E9 * 2 if thop else 0  # FLOPs
+        t = time_sync()
+        for _ in range(10):
+            m(x.copy() if c else x)
+        dt.append((time_sync() - t) * 100)
+        if m == self.model[0]:
+            LOGGER.info(f"{'time (ms)':>10s} {'GFLOPs':>10s} {'params':>10s}  module")
+        LOGGER.info(f'{dt[-1]:10.2f} {o:10.2f} {m.np:10.0f}  {m.type}')
+        if c:
+            LOGGER.info(f"{sum(dt):10.2f} {'-':>10s} {'-':>10s}  Total")
+
+    def fuse(self):  # fuse model Conv2d() + BatchNorm2d() layers
+        LOGGER.info('Fusing layers... ')
+        for m in self.model.modules():
+            if isinstance(m, (RepConvN)) and hasattr(m, 'fuse_convs'):
+                m.fuse_convs()
+                m.forward = m.forward_fuse  # update forward
+            if isinstance(m, (Conv, DWConv)) and hasattr(m, 'bn'):
+                m.conv = fuse_conv_and_bn(m.conv, m.bn)  # update conv
+                delattr(m, 'bn')  # remove batchnorm
+                m.forward = m.forward_fuse  # update forward
+        self.info()
+        return self
+
+    def info(self, verbose=False, img_size=640):  # print model information
+        model_info(self, verbose, img_size)
+
+    def _apply(self, fn):
+        # Apply to(), cpu(), cuda(), half() to model tensors that are not parameters or registered buffers
+        self = super()._apply(fn)
+        m = self.model[-1]  # Detect()
+        if isinstance(m, (Detect, DualDetect, TripleDetect, DDetect, DualDDetect, TripleDDetect, Segment, Panoptic)):
+            m.stride = fn(m.stride)
+            m.anchors = fn(m.anchors)
+            m.strides = fn(m.strides)
+            # m.grid = list(map(fn, m.grid))
+        return self
+
+
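+# Note (added for clarity): DetectionModel derives the per-level strides by pushing a
+# dummy 256x256 tensor through the network and comparing the output grid heights, then
+# calls bias_init(), which needs those strides to set the class-bias priors.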
+class DetectionModel(BaseModel):
+    # YOLO detection model
+    def __init__(self, cfg='yolo.yaml', ch=3, nc=None, anchors=None):  # model, input channels, number of classes
+        super().__init__()
+        if isinstance(cfg, dict):
+            self.yaml = cfg  # model dict
+        else:  # is *.yaml
+            import yaml  # for torch hub
+            self.yaml_file = Path(cfg).name
+            with open(cfg, encoding='ascii', errors='ignore') as f:
+                self.yaml = yaml.safe_load(f)  # model dict
+
+        # Define model
+        ch = self.yaml['ch'] = self.yaml.get('ch', ch)  # input channels
+        if nc and nc != self.yaml['nc']:
+            LOGGER.info(f"Overriding model.yaml nc={self.yaml['nc']} with nc={nc}")
+            self.yaml['nc'] = nc  # override yaml value
+        if anchors:
+            LOGGER.info(f'Overriding model.yaml anchors with anchors={anchors}')
+            self.yaml['anchors'] = round(anchors)  # override yaml value
+        self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch])  # model, savelist
+        self.names = [str(i) for i in range(self.yaml['nc'])]  # default names
+        self.inplace = self.yaml.get('inplace', True)
+
+        # Build strides, anchors
+        m = self.model[-1]  # Detect()
+        if isinstance(m, (Detect, DDetect, Segment, Panoptic)):
+            s = 256  # 2x min stride
+            m.inplace = self.inplace
+            forward = lambda x: self.forward(x)[0] if isinstance(m, (Segment, Panoptic)) else self.forward(x)
+            m.stride = torch.tensor([s / x.shape[-2] for x in forward(torch.zeros(1, ch, s, s))])  # forward
+            # check_anchor_order(m)
+            # m.anchors /= m.stride.view(-1, 1, 1)
+            self.stride = m.stride
+            m.bias_init()  # only run once
+        if isinstance(m, (DualDetect, TripleDetect, DualDDetect, TripleDDetect)):
+            s = 256  # 2x min stride
+            m.inplace = self.inplace
+            #forward = lambda x: self.forward(x)[0][0] if isinstance(m, (DualSegment, DualPanoptic)) else self.forward(x)[0]
+            forward = lambda x: self.forward(x)[0]
+            m.stride = torch.tensor([s / x.shape[-2] for x in forward(torch.zeros(1, ch, s, s))])  # forward
+            # check_anchor_order(m)
+            # m.anchors /= m.stride.view(-1, 1, 1)
+            self.stride = m.stride
+            m.bias_init()  # only run once
+
+        # Init weights, biases
+        initialize_weights(self)
+        self.info()
+        LOGGER.info('')
+
+    def forward(self, x, augment=False, profile=False, visualize=False):
+        if augment:
+            return self._forward_augment(x)  # augmented inference, None
+        return self._forward_once(x, profile, visualize)  # single-scale inference, train
+
+    def _forward_augment(self, x):
+        img_size = x.shape[-2:]  # height, width
+        s = [1, 0.83, 0.67]  # scales
+        f = [None, 3, None]  # flips (2-ud, 3-lr)
+        y = []  # outputs
+        for si, fi in zip(s, f):
+            xi = scale_img(x.flip(fi) if fi else x, si, gs=int(self.stride.max()))
+            yi = self._forward_once(xi)[0]  # forward
+            # cv2.imwrite(f'img_{si}.jpg', 255 * xi[0].cpu().numpy().transpose((1, 2, 0))[:, :, ::-1])  # save
+            yi = self._descale_pred(yi, fi, si, img_size)
+            y.append(yi)
+        y = self._clip_augmented(y)  # clip augmented tails
+        return torch.cat(y, 1), None  # augmented inference, train
+
+    def _descale_pred(self, p, flips, scale, img_size):
+        # de-scale predictions following augmented inference (inverse operation)
+        if self.inplace:
+            p[..., :4] /= scale  # de-scale
+            if flips == 2:
+                p[..., 1] = img_size[0] - p[..., 1]  # de-flip ud
+            elif flips == 3:
+                p[..., 0] = img_size[1] - p[..., 0]  # de-flip lr
+        else:
+            x, y, wh = p[..., 0:1] / scale, p[..., 1:2] / scale, p[..., 2:4] / scale  # de-scale
+            if flips == 2:
+                y = img_size[0] - y  # de-flip ud
+            elif flips == 3:
+                x = img_size[1] - x  # de-flip lr
+            p = torch.cat((x, y, wh, p[..., 4:]), -1)
+        return p
+
+    def _clip_augmented(self, y):
+        # Clip YOLO augmented inference tails
+        nl = self.model[-1].nl  # number of detection layers (P3-P5)
+        g = sum(4 ** x for x in range(nl))  # grid points
+        e = 1  # exclude layer count
+        i = (y[0].shape[1] // g) * sum(4 ** x for x in range(e))  # indices
+        y[0] = y[0][:, :-i]  # large
+        i = (y[-1].shape[1] // g) * sum(4 ** (nl - 1 - x) for x in range(e))  # indices
+        y[-1] = y[-1][:, i:]  # small
+        return y
+
+
+Model = DetectionModel  # retain YOLO 'Model' class for backwards compatibility
+
+
+class SegmentationModel(DetectionModel):
+    # YOLO segmentation model
+    def __init__(self, cfg='yolo-seg.yaml', ch=3, nc=None, anchors=None):
+        super().__init__(cfg, ch, nc, anchors)
+
+
+class ClassificationModel(BaseModel):
+    # YOLO classification model
+    def __init__(self, cfg=None, model=None, nc=1000, cutoff=10):  # yaml, model, number of classes, cutoff index
+        super().__init__()
+        self._from_detection_model(model, nc, cutoff) if model is not None else self._from_yaml(cfg)
+
+    def _from_detection_model(self, model, nc=1000, cutoff=10):
+        # Create a YOLO classification model from a YOLO detection model
+        if isinstance(model, DetectMultiBackend):
+            model = model.model  # unwrap DetectMultiBackend
+        model.model = model.model[:cutoff]  # backbone
+        m = model.model[-1]  # last layer
+        ch = m.conv.in_channels if hasattr(m, 'conv') else m.cv1.conv.in_channels  # ch into module
+        c = Classify(ch, nc)  # Classify()
+        c.i, c.f, c.type = m.i, m.f, 'models.common.Classify'  # index, from, type
+        model.model[-1] = c  # replace
+        self.model = model.model
+        self.stride = model.stride
+        self.save = []
+        self.nc = nc
+
+    def _from_yaml(self, cfg):
+        # Create a YOLO classification model from a *.yaml file
+        self.model = None
+
+
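+# Note (added for clarity): parse_model walks d['backbone'] + d['head'], eval()-ing
+# module names and string arguments from the YAML, scaling repeat counts by
+# depth_multiple and channel widths by width_multiple, and records the savelist of
+# layer outputs that later layers reference by index.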
+def parse_model(d, ch):  # model_dict, input_channels(3)
+    # Parse a YOLO model.yaml dictionary
+    LOGGER.info(f"\n{'':>3}{'from':>18}{'n':>3}{'params':>10}  {'module':<40}{'arguments':<30}")
+    anchors, nc, gd, gw, act = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple'], d.get('activation')
+    if act:
+        Conv.default_act = eval(act)  # redefine default activation, i.e. Conv.default_act = nn.SiLU()
+        RepConvN.default_act = eval(act)  # redefine default activation, i.e. Conv.default_act = nn.SiLU()
+        LOGGER.info(f"{colorstr('activation:')} {act}")  # print
+    na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors  # number of anchors
+    no = na * (nc + 5)  # number of outputs = anchors * (classes + 5)
+
+    layers, save, c2 = [], [], ch[-1]  # layers, savelist, ch out
+    for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']):  # from, number, module, args
+        m = eval(m) if isinstance(m, str) else m  # eval strings
+        for j, a in enumerate(args):
+            with contextlib.suppress(NameError):
+                args[j] = eval(a) if isinstance(a, str) else a  # eval strings
+
+        n = n_ = max(round(n * gd), 1) if n > 1 else n  # depth gain
+        if m in {
+                Conv, AConv, ConvTranspose,
+                Bottleneck, SPP, SPPF, DWConv, BottleneckCSP, nn.ConvTranspose2d, DWConvTranspose2d, SPPCSPC, ADown,
+                RepNCSPELAN4, SPPELAN}:
+            c1, c2 = ch[f], args[0]
+            if c2 != no:  # if not output
+                c2 = make_divisible(c2 * gw, 8)
+
+            args = [c1, c2, *args[1:]]
+            if m in {BottleneckCSP, SPPCSPC}:
+                args.insert(2, n)  # number of repeats
+                n = 1
+        elif m is nn.BatchNorm2d:
+            args = [ch[f]]
+        elif m is Concat:
+            c2 = sum(ch[x] for x in f)
+        elif m is Shortcut:
+            c2 = ch[f[0]]
+        elif m is ReOrg:
+            c2 = ch[f] * 4
+        elif m is CBLinear:
+            c2 = args[0]
+            c1 = ch[f]
+            args = [c1, c2, *args[1:]]
+        elif m is CBFuse:
+            c2 = ch[f[-1]]
+        # TODO: channel, gw, gd
+        elif m in {Detect, DualDetect, TripleDetect, DDetect, DualDDetect, TripleDDetect, Segment, Panoptic}:
+            args.append([ch[x] for x in f])
+            # if isinstance(args[1], int):  # number of anchors
+            #     args[1] = [list(range(args[1] * 2))] * len(f)
+            if m in {Segment, Panoptic}:
+                args[2] = make_divisible(args[2] * gw, 8)
+        elif m is Contract:
+            c2 = ch[f] * args[0] ** 2
+        elif m is Expand:
+            c2 = ch[f] // args[0] ** 2
+        else:
+            c2 = ch[f]
+
+        m_ = nn.Sequential(*(m(*args) for _ in range(n))) if n > 1 else m(*args)  # module
+        t = str(m)[8:-2].replace('__main__.', '')  # module type
+        np = sum(x.numel() for x in m_.parameters())  # number params
+        m_.i, m_.f, m_.type, m_.np = i, f, t, np  # attach index, 'from' index, type, number params
+        LOGGER.info(f'{i:>3}{str(f):>18}{n_:>3}{np:10.0f}  {t:<40}{str(args):<30}')  # print
+        save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1)  # append to savelist
+        layers.append(m_)
+        if i == 0:
+            ch = []
+        ch.append(c2)
+    return nn.Sequential(*layers), sorted(save)
+
+
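+# Note (added): with the argparse defaults below, a quick smoke test of this module is
+#     python models/yolo.py --cfg yolo.yaml --device cpu
+# --profile and --line-profile report timings; --test tries to build every yolo*.yaml under models/.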
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--cfg', type=str, default='yolo.yaml', help='model.yaml')
+    parser.add_argument('--batch-size', type=int, default=1, help='total batch size for all GPUs')
+    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
+    parser.add_argument('--profile', action='store_true', help='profile model speed')
+    parser.add_argument('--line-profile', action='store_true', help='profile model speed layer by layer')
+    parser.add_argument('--test', action='store_true', help='test all yolo*.yaml')
+    opt = parser.parse_args()
+    opt.cfg = check_yaml(opt.cfg)  # check YAML
+    print_args(vars(opt))
+    device = select_device(opt.device)
+
+    # Create model
+    im = torch.rand(opt.batch_size, 3, 640, 640).to(device)
+    model = Model(opt.cfg).to(device)
+    model.eval()
+
+    # Options
+    if opt.line_profile:  # profile layer by layer
+        model(im, profile=True)
+
+    elif opt.profile:  # profile forward-backward
+        results = profile(input=im, ops=[model], n=3)
+
+    elif opt.test:  # test all models
+        for cfg in Path(ROOT / 'models').rglob('yolo*.yaml'):
+            try:
+                _ = Model(cfg)
+            except Exception as e:
+                print(f'Error in {cfg}: {e}')
+
+    else:  # report fused model summary
+        model.fuse()
diff --git a/models/__pycache__/__init__.cpython-38.pyc b/models/__pycache__/__init__.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cc7fdc1167dd00c059dd38ce367523c86b6a765e
GIT binary patch
literal 137
[base85-encoded binary data omitted]
diff --git a/models/__pycache__/common.cpython-38.pyc b/models/__pycache__/common.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..51de3c1073df39394f969e927db44b1b0ff82cd6
GIT binary patch
literal 52011
[base85-encoded binary data omitted; the patch is truncated here in the source]
diff --git a/models/__pycache__/experimental.cpython-38.pyc b/models/__pycache__/experimental.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5cb8b1d6299a8fb74d05b6dc7ee11017518d15a9
Binary files /dev/null and b/models/__pycache__/experimental.cpython-38.pyc differ
zV5cZStkX~6U#rHNdFBmhPLf$#Q}7@nW}hWyALEvGOWe|TOb;f&2u(z+*J<^5HDV=Y z#cK7osqAvEMz)HwyRuWQH+_}7Ru3R*^=4nBat#vADPxi{DOEL~T&){oN0e;ejLm$H?j|IleM5}83qhDwKtK9BAy0eDzVzx^aEuDl@87a>UgWX>2`Wyw->rx8W|{GA)oLo_Lk?m9pEtmh1Up$>uypHtXJIAb*J3u`)Qf0j9q%TPEU}FXBeOUQX zVrYTbdQmMDJMAmw&8xj`7w;5+r$BYR(}lo2hHlcQs7g_dqBV-@6!AhQt5r_0)mrT| z>yc28MU2yF^GE1a6PhALDXJyvQ^0AwwG6AWa=L0w8 zEN>1Z%PD(hB$rDoyt>y{W@S=7fiYQ&$m`LX;GLTE{ zEu)|6r~8>uG;}iTqm%~|8aV^za3 zxhgE;LSl4mBV3cTUw0`5#r^|SKnhO2jfe&3nKOJG_S%I7NnkT&2$fqcSJuhcY2bp;(@b=Oi@pI_##%N5fyzRljzXkHTRRb{y*ouX zN`Xa+^a8B58Ii(10XKnoaDwcn`>81PmWf@2H4kh{v*=}*{$_FeFlKt`rAyUny;6rN z1*p3yk=HS7!71hUQ`W7QGE>?jH$MYIZnc!(PiZvD&C=QpO8NcNMyrK=5aw2u9fMyW zJG7p;{YbElQu)IaF&spc>QnS16cIWo10yICyA>$=KHEU@93li{>S<|9==c7i#_5__ zGtoK)5rl(f7yc|a4e`_540{gjBJ_H<7lSZa@1(o%{L7z@6EDN8{X9xoUxUNxU1JXs z!yjpJNz5q;D3r~@wc41fKbXU?5w1hW-&lR<|JZ;q8j_f}CP}osxfy`5_S%4#VIqgr{5K@I*78($PpId+lJO=ligBiAR(H)?^e<{Zn<@j zUNb0J?N)%%OPNU{H|)w5tya9L#OF)diFwS6C>@&QM`P9fpfo=zq3OgEoI<1- zKaLn8evlR+ z^o+I0GNq<=2#YrxBk!VuHi-|aq(!nBNoBNAGtd3CM4Gh(-5!ya;5(x|S|T&j640UB zPu!y=4mM%;Yl!$I?@qO!2g>aSqQ~*||^YJAqaeQD= zVQk%JxiIt3S}u~|_sf{$APoLvlus}?!}x0fgU7!S27}ZGWAGfhrx7mDFn19Bf5hO> z8rZ->a!OK2B9NXKNjoGP(r?BlY6_GxL#& znZkK^ynyeRrGFD6{^NL5X6clqvXu(q_xK@9Od>Gj{3UEtaz{3`mgt);c(nyCzIL>C zHL#3EEF(`wS{vya?lj;23RuZ%*dc63xNxJL>t zzJ~d5b2&IX)Y@P~`X36RZ9IvVOG6E-d=0MEBawp}Sh`z1+Bk-A9N__k69^w8pIp^m zx84(yl%nH4xN(PDKD6%0JDADw(pd8yPea}E#+~rvbnoM3)^+rF;TFCDIR*mxD!iPqpzrUZW>ato{70YyNq z$A)m%HnO)g(B^TrF%Ef)H=MY|j(AT5e-S?69mba=__8D}eP8BI2!Vl-1Cg)62%Knq+-R+PC_ZZ+?@ zB%x8RBCwL@OOh;|gc=a2*$ZV|{yv&XvM%NKP_$2+9!J>%IC*;Uu{{2;hCZq}=z8s* z#gI5WgTOa7xZTEm4K|QAg?;$z`saXh8#tnY*gE??in#3i6Iu-|327F2UK!~%l{{PX z+W>xfEvbPj)F#IXil3Jkxo^GQZPG2d^F}u)w=kpp zBO3TQs!VhxUC`Ts4_|mFm5r5i0^b|ea8#0{rEu@vY_h5}EUgwlJ zIqG6o*1Pc~trB5;N6u{VXVigtBeRzWzK-JHDMXr{(=8l~^5O)JPg!wXrJ;a37AE$HY@Y{sa0HjN-~l;jp9uAiTX6K52RZX}0o|!(ULHYPVy$&G*83 zGdMN=+AN5-%mTxK!u-_@U+~J8X#m=2<&=A2hrZ!b&gEVkj^j?7kF>GXNQb9bTa%vQ u_3YuSsVq2W<4rsVNzj7hh#cL)dt`9Ymp`08mY-WZy7+Yd$^0Yv{Qm$FD(xHq literal 0 HcmV?d00001 diff --git a/models/__pycache__/yolo.cpython-38.pyc b/models/__pycache__/yolo.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cf21b2541f4bf79c3a3fcc9c1118bbe58dae7d97 GIT binary patch literal 33143 zcmeHw3v^t^dER~P6N@(pf*?h$D3T&565>k|B~cV5J|sC5M2WO4Zz!*qxEH{Jy9;pk zk|MW@#4Y5AaxGbI64$BfbP=D}rJFQvbDF9?Y1-!4uASC7P2AY+ZJYWyiDRbeIX%sz zt()5YzJKn%fCWV=l&q5@;N01H&)k`R{`ns>|9x+FchbOL*I8@ol}{MP?=jH&*M-0# zJY{7YhHJQH!N}-eGh<5F%2@JiXKegh1*aIvL`>$f3(;aM6Eh7j=EWDyT4Kq_B)#Np zm+P!EGAXayOD&q7J==3OrV9M6_3F4K6J@Mg-;&`*&f4k~lsA#u@1?w0ZD7e*vKGxu z3N1tngQwV1v@j&UvBH|-aAvr;HnX-kk{Kzk%d9JoW=5qq@xuDzhRg=>CknR|H)b{# zZ_V6VOlQ)?+cLKmH)S>zZ_nIb+??4gd6I=K#jTmG#XB;0m_~)InT2hcZSONmqmJQ? 
diff --git a/models/common.py b/models/common.py
index c7fbbe322..8946ccb29 100644
--- a/models/common.py
+++ b/models/common.py
@@ -413,7 +413,7 @@ def forward(self, x):
         warnings.simplefilter('ignore')  # suppress torch 1.9.0 max_pool2d() warning
         return self.cv2(torch.cat([x] + [m(x) for m in self.m], 1))
 
-    
+
 
 class ASPP(torch.nn.Module):
     def __init__(self, in_channels, out_channels):
@@ -493,8 +493,8 @@ def forward(self, x):
 import torch.nn.functional as F
 from torch.nn.modules.utils import _pair
 
-    
-    
+
+
 class ReOrg(nn.Module):
     # yolo
     def __init__(self):
@@ -549,8 +549,8 @@ def __init__(self, dimension=0):
 
     def forward(self, x):
         return x[0]+x[1]
-    
-    
+
+
 class Silence(nn.Module):
     def __init__(self):
         super(Silence, self).__init__()
@@ -558,8 +558,8 @@ def forward(self, x):
         return x
 
-##### GELAN #####
-    
+# #### GELAN #####
+
 
 class SPPELAN(nn.Module):
     # spp-elan
     def __init__(self, c1, c2, c3):  # ch_in, ch_out, number, shortcut, groups, expansion
@@ -575,8 +575,8 @@ def forward(self, x):
         y = [self.cv1(x)]
         y.extend(m(y[-1]) for m in [self.cv2, self.cv3, self.cv4])
         return self.cv5(torch.cat(y, 1))
-    
-    
+
+
 class RepNCSPELAN4(nn.Module):
     # csp-elan
     def __init__(self, c1, c2, c3, c4, c5=1):  # ch_in, ch_out, number, shortcut, groups, expansion
@@ -597,10 +597,10 @@ def forward_split(self, x):
         y.extend(m(y[-1]) for m in [self.cv2, self.cv3])
         return self.cv4(torch.cat(y, 1))
 
-#################
+# ################
 
 
-##### YOLOR #####
+# #### YOLOR #####
 
 class ImplicitA(nn.Module):
     def __init__(self, channel):
@@ -623,10 +623,10 @@ def __init__(self, channel):
     def forward(self, x):
         return self.implicit * x
 
-#################
+# ################
 
 
-##### CBNet #####
+# #### CBNet #####
 
 class CBLinear(nn.Module):
     def __init__(self, c1, c2s, k=1, s=1, p=None, g=1):  # ch_in, ch_outs, kernel, stride, padding, groups
@@ -649,7 +649,7 @@ def forward(self, xs):
         out = torch.sum(torch.stack(res + xs[-1:]), dim=0)
         return out
 
-#################
+# ################
 
 
 class DetectMultiBackend(nn.Module):
diff --git a/models/yolo.py b/models/yolo.py
index ad3d8fee7..2681c2308 100644
--- a/models/yolo.py
+++ b/models/yolo.py
@@ -464,7 +464,7 @@ def forward(self, x):
         if self.training:
             return x, mc, p, s
         return (torch.cat([x, mc], 1), p, s) if self.export else (torch.cat([x[0], 
mc], 1), (x[1], mc, p, s)) - + class BaseModel(nn.Module): # YOLO base model diff --git a/requirements.txt b/requirements.txt index 1f96afcb0..6d8767b7d 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,8 +2,6 @@ # Usage: pip install -r requirements.txt # Base ------------------------------------------------------------------------ -gitpython -ipython matplotlib>=3.2.2 numpy>=1.18.5 opencv-python>=4.1.1 diff --git a/run.sh b/run.sh new file mode 100755 index 000000000..377a698a2 --- /dev/null +++ b/run.sh @@ -0,0 +1,7 @@ +# train yolov9 models +python train_dual.py --workers 8 --device 0 --batch 16 --data data/polyp_2.yaml --img 640 --cfg models/detect/yolov9-e.yaml --weights '' --name v9-e_1c --hyp hyp.scratch-high.yaml --min-items 0 --epochs 300 --close-mosaic 0 --exist-ok --single-cls +python val_dual.py --data data/polyp_2.yaml --img 640 --batch 32 --conf 0.25 --iou 0.65 --device 0 --weights 'runs/v9-e_1c/weights/best.pt' --name v9-c_1c --exist-ok --single-cls +python train_dual.py --workers 8 --device 0 --batch 16 --data data/polyp_2.yaml --img 640 --cfg models/detect/yolov9-e.yaml --weights '' --name v9-e_2c --hyp hyp.scratch-high.yaml --min-items 0 --epochs 300 --close-mosaic 0 --exist-ok +python val_dual.py --data data/polyp_2.yaml --img 640 --batch 32 --conf 0.25 --iou 0.65 --device 0 --weights 'runs/v9-e_2c/weights/best.pt' --name v9-e_2c --exist-ok +python train_dual.py --workers 8 --device 0 --batch 16 --data data/polyp_2.yaml --img 640 --cfg models/detect/yolov9-c.yaml --weights '' --name v9-c_1c --hyp hyp.scratch-high.yaml --min-items 0 --epochs 300 --close-mosaic 0 --exist-ok --single-cls +python val_dual.py --data data/polyp_2.yaml --img 640 --batch 32 --conf 0.25 --iou 0.65 --device 0 --weights 'runs/v9-c_1c/weights/best.pt' --name v9-c_1c --exist-ok --single-cls diff --git a/train_dual.py b/train_dual.py index 1d21ac8f5..458a30ca7 100644 --- a/train_dual.py +++ b/train_dual.py @@ -56,7 +56,6 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio Path(opt.save_dir), opt.epochs, opt.batch_size, opt.weights, opt.single_cls, opt.evolve, opt.data, opt.cfg, \ opt.resume, opt.noval, opt.nosave, opt.workers, opt.freeze callbacks.run('on_pretrain_routine_start') - # Directories w = save_dir / 'weights' # weights dir (w.parent if evolve else w).mkdir(parents=True, exist_ok=True) # make dir @@ -117,7 +116,6 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio else: model = Model(cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create amp = check_amp(model) # check AMP - # Freeze freeze = [f'model.{x}.' 
for x in (freeze if len(freeze) > 1 else range(freeze[0]))] # layers to freeze for k, v in model.named_parameters(): @@ -285,7 +283,11 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio if RANK in {-1, 0}: pbar = tqdm(pbar, total=nb, bar_format=TQDM_BAR_FORMAT) # progress bar optimizer.zero_grad() + #tt0 = time.time()*1000 for i, (imgs, targets, paths, _) in pbar: # batch ------------------------------------------------------------- + #tt1 = time.time()*1000 + #print("\n loading time: ", tt1-tt0) + #tt0 = tt1 callbacks.run('on_train_batch_start') ni = i + nb * epoch # number integrated batches (since train start) imgs = imgs.to(device, non_blocking=True).float() / 255 # uint8 to float32, 0-255 to 0.0-1.0 @@ -341,6 +343,7 @@ def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictio callbacks.run('on_train_batch_end', model, ni, imgs, targets, paths, list(mloss)) if callbacks.stop_training: return + #print("\nend: ", time.time()*1000-t_start, "ms") # end batch ------------------------------------------------------------------------------------------------ # Scheduler @@ -462,7 +465,7 @@ def parse_opt(known=False): parser.add_argument('--optimizer', type=str, choices=['SGD', 'Adam', 'AdamW', 'LION'], default='SGD', help='optimizer') parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode') parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)') - parser.add_argument('--project', default=ROOT / 'runs/train', help='save to project/name') + parser.add_argument('--project', default=ROOT / 'runs/', help='save to project/name') parser.add_argument('--name', default='exp', help='save to project/name') parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') parser.add_argument('--quad', action='store_true', help='quad dataloader') diff --git a/utils/.ipynb_checkpoints/augment-checkpoint.py b/utils/.ipynb_checkpoints/augment-checkpoint.py new file mode 100644 index 000000000..1ac08117d --- /dev/null +++ b/utils/.ipynb_checkpoints/augment-checkpoint.py @@ -0,0 +1,970 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license + +import math +import random +from copy import deepcopy + +import cv2 +import numpy as np +import torch +import torchvision.transforms as T + +from utils.instance import Instances +from utils.general import LOGGER, check_version, colorstr, resample_segments, segment2box, xywhn2xyxy +from utils.metrics import bbox_ioa + +POSE_FLIPLR_INDEX = [0, 2, 1, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11, 14, 13, 16, 15] + + +def polygons2masks(imgsz, polygons, color, downsample_ratio=1): + """ + Args: + imgsz (tuple): The image size. 
+ polygons (list[np.ndarray]): each polygon is [N, M], N is number of polygons, M is number of points (M % 2 = 0) + color (int): color + downsample_ratio (int): downsample ratio + """ + masks = [] + for si in range(len(polygons)): + mask = polygon2mask(imgsz, [polygons[si].reshape(-1)], color, downsample_ratio) + masks.append(mask) + return np.array(masks) +def polygons2masks_overlap(imgsz, segments, downsample_ratio=1): + """Return a (640, 640) overlap mask.""" + masks = np.zeros((imgsz[0] // downsample_ratio, imgsz[1] // downsample_ratio), + dtype=np.int32 if len(segments) > 255 else np.uint8) + areas = [] + ms = [] + for si in range(len(segments)): + mask = polygon2mask(imgsz, [segments[si].reshape(-1)], downsample_ratio=downsample_ratio, color=1) + ms.append(mask) + areas.append(mask.sum()) + areas = np.asarray(areas) + index = np.argsort(-areas) + ms = np.array(ms)[index] + for i in range(len(segments)): + mask = ms[i] * (i + 1) + masks = masks + mask + masks = np.clip(masks, a_min=0, a_max=i + 1) + return masks, index +def polygons2masks_multi(imgsz, segments, cls, downsample_ratio=1, nc=3): + """Return a (640, 640) overlap mask.""" + masks = np.zeros((nc, imgsz[0] // downsample_ratio, imgsz[1] // downsample_ratio), + dtype=np.int32 if len(segments) > 255 else np.uint8) + areas = [] + ms = [] + mc = [] + for si in range(len(segments)): + mask = polygon2mask(imgsz, [segments[si].reshape(-1)], downsample_ratio=downsample_ratio, color=1) + ms.append(mask) + mc.append(int(cls[si][0])) + areas.append(mask.sum()) + areas = np.asarray(areas) + index = np.argsort(-areas) + ms = np.array(ms)[index] + for i in range(len(segments)): + cls_id = mc[i] + masks[cls_id] = masks[cls_id] + ms[i] + masks[cls_id] = np.clip(masks[cls_id], a_min=0, a_max=1) + return masks, index + + +class Compose: + + def __init__(self, transforms): + """Initializes the Compose object with a list of transforms.""" + self.transforms = transforms + + def __call__(self, data): + """Applies a series of transformations to input data.""" + for t in self.transforms: + data = t(data) + return data + + def append(self, transform): + """Appends a new transform to the existing list of transforms.""" + self.transforms.append(transform) + + def tolist(self): + """Converts list of transforms to a standard Python list.""" + return self.transforms + + def __repr__(self): + """Return string representation of object.""" + format_string = f'{self.__class__.__name__}(' + for t in self.transforms: + format_string += '\n' + format_string += f' {t}' + format_string += '\n)' + return format_string + + +class BaseMixTransform: + """This implementation is from mmyolo.""" + + def __init__(self, dataset, pre_transform=None, p=0.0) -> None: + self.dataset = dataset + self.pre_transform = pre_transform + self.p = p + + def __call__(self, labels): + """Applies pre-processing transforms and mixup/mosaic transforms to labels data.""" + if random.uniform(0, 1) > self.p: + return labels + + # Get index of one or three other images + indexes = self.get_indexes() + if isinstance(indexes, int): + indexes = [indexes] + + # Get images information will be used for Mosaic or MixUp + mix_labels = [self.dataset.get_image_and_label(i) for i in indexes] + + if self.pre_transform is not None: + for i, data in enumerate(mix_labels): + mix_labels[i] = self.pre_transform(data) + labels['mix_labels'] = mix_labels + + # Mosaic or MixUp + labels = self._mix_transform(labels) + labels.pop('mix_labels', None) + return labels + + def _mix_transform(self, labels): + """Applies 
MixUp or Mosaic augmentation to the label dictionary.""" + raise NotImplementedError + + def get_indexes(self): + """Gets a list of shuffled indexes for mosaic augmentation.""" + raise NotImplementedError + + +class Mosaic(BaseMixTransform): + """ + Mosaic augmentation. + + This class performs mosaic augmentation by combining multiple (4 or 9) images into a single mosaic image. + The augmentation is applied to a dataset with a given probability. + + Attributes: + dataset: The dataset on which the mosaic augmentation is applied. + imgsz (int, optional): Image size (height and width) after mosaic pipeline of a single image. Default to 640. + p (float, optional): Probability of applying the mosaic augmentation. Must be in the range 0-1. Default to 1.0. + n (int, optional): The grid size, either 4 (for 2x2) or 9 (for 3x3). + """ + + def __init__(self, dataset, imgsz=640, p=1.0, n=4): + """Initializes the object with a dataset, image size, probability, and border.""" + assert 0 <= p <= 1.0, f'The probability should be in range [0, 1], but got {p}.' + assert n in (4, 9), 'grid must be equal to 4 or 9.' + super().__init__(dataset=dataset, p=p) + self.dataset = dataset + self.imgsz = imgsz + self.border = (-imgsz // 2, -imgsz // 2) # width, height + self.n = n + + def get_indexes(self, buffer=True): + """Return a list of random indexes from the dataset.""" + if buffer: # select images from buffer + return random.choices(list(self.dataset.buffer), k=self.n - 1) + else: # select any images + return [random.randint(0, len(self.dataset) - 1) for _ in range(self.n - 1)] + + def _mix_transform(self, labels): + """Apply mixup transformation to the input image and labels.""" + assert labels.get('rect_shape', None) is None, 'rect and mosaic are mutually exclusive.' + assert len(labels.get('mix_labels', [])), 'There are no other images for mosaic augment.' 
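+        # A 2x2 mosaic pastes four images onto a (2*s, 2*s) canvas around a random
+        # centre (xc, yc); a 3x3 mosaic uses a (3*s, 3*s) canvas. Each tile's labels
+        # are shifted by its paste offset (padw, padh) and finally clipped in _cat_labels.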
+ return self._mosaic4(labels) if self.n == 4 else self._mosaic9(labels) + + def _mosaic4(self, labels): + """Create a 2x2 image mosaic.""" + mosaic_labels = [] + s = self.imgsz + yc, xc = (int(random.uniform(-x, 2 * s + x)) for x in self.border) # mosaic center x, y + for i in range(4): + labels_patch = labels if i == 0 else labels['mix_labels'][i - 1] + # Load image + img = labels_patch['img'] + h, w = labels_patch.pop('resized_shape') + + # Place img in img4 + if i == 0: # top left + img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles + x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image) + x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image) + elif i == 1: # top right + x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc + x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h + elif i == 2: # bottom left + x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h) + x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h) + elif i == 3: # bottom right + x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h) + x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h) + + img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax] + padw = x1a - x1b + padh = y1a - y1b + + labels_patch = self._update_labels(labels_patch, padw, padh) + mosaic_labels.append(labels_patch) + final_labels = self._cat_labels(mosaic_labels) + final_labels['img'] = img4 + return final_labels + + def _mosaic9(self, labels): + """Create a 3x3 image mosaic.""" + mosaic_labels = [] + s = self.imgsz + hp, wp = -1, -1 # height, width previous + for i in range(9): + labels_patch = labels if i == 0 else labels['mix_labels'][i - 1] + # Load image + img = labels_patch['img'] + h, w = labels_patch.pop('resized_shape') + + # Place img in img9 + if i == 0: # center + img9 = np.full((s * 3, s * 3, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles + h0, w0 = h, w + c = s, s, s + w, s + h # xmin, ymin, xmax, ymax (base) coordinates + elif i == 1: # top + c = s, s - h, s + w, s + elif i == 2: # top right + c = s + wp, s - h, s + wp + w, s + elif i == 3: # right + c = s + w0, s, s + w0 + w, s + h + elif i == 4: # bottom right + c = s + w0, s + hp, s + w0 + w, s + hp + h + elif i == 5: # bottom + c = s + w0 - w, s + h0, s + w0, s + h0 + h + elif i == 6: # bottom left + c = s + w0 - wp - w, s + h0, s + w0 - wp, s + h0 + h + elif i == 7: # left + c = s - w, s + h0 - h, s, s + h0 + elif i == 8: # top left + c = s - w, s + h0 - hp - h, s, s + h0 - hp + + padw, padh = c[:2] + x1, y1, x2, y2 = (max(x, 0) for x in c) # allocate coords + + # Image + img9[y1:y2, x1:x2] = img[y1 - padh:, x1 - padw:] # img9[ymin:ymax, xmin:xmax] + hp, wp = h, w # height, width previous for next iteration + + # Labels assuming imgsz*2 mosaic size + labels_patch = self._update_labels(labels_patch, padw + self.border[0], padh + self.border[1]) + mosaic_labels.append(labels_patch) + final_labels = self._cat_labels(mosaic_labels) + + final_labels['img'] = img9[-self.border[0]:self.border[0], -self.border[1]:self.border[1]] + return final_labels + + @staticmethod + def _update_labels(labels, padw, padh): + """Update labels.""" + nh, nw = labels['img'].shape[:2] + labels['instances'].convert_bbox(format='xyxy') + labels['instances'].denormalize(nw, nh) + labels['instances'].add_padding(padw, padh) + return labels + + def _cat_labels(self, mosaic_labels): + 
"""Return labels with mosaic border instances clipped.""" + if len(mosaic_labels) == 0: + return {} + cls = [] + instances = [] + imgsz = self.imgsz * 2 # mosaic imgsz + for labels in mosaic_labels: + cls.append(labels['cls']) + instances.append(labels['instances']) + final_labels = { + 'im_file': mosaic_labels[0]['im_file'], + 'ori_shape': mosaic_labels[0]['ori_shape'], + 'resized_shape': (imgsz, imgsz), + 'cls': np.concatenate(cls, 0), + 'instances': Instances.concatenate(instances, axis=0), + 'mosaic_border': self.border} # final_labels + final_labels['instances'].clip(imgsz, imgsz) + good = final_labels['instances'].remove_zero_area_boxes() + final_labels['cls'] = final_labels['cls'][good] + return final_labels + + +class MixUp(BaseMixTransform): + + def __init__(self, dataset, pre_transform=None, p=0.0) -> None: + super().__init__(dataset=dataset, pre_transform=pre_transform, p=p) + + def get_indexes(self): + """Get a random index from the dataset.""" + return random.randint(0, len(self.dataset) - 1) + + def _mix_transform(self, labels): + """Applies MixUp augmentation https://arxiv.org/pdf/1710.09412.pdf.""" + r = np.random.beta(32.0, 32.0) # mixup ratio, alpha=beta=32.0 + labels2 = labels['mix_labels'][0] + labels['img'] = (labels['img'] * r + labels2['img'] * (1 - r)).astype(np.uint8) + labels['instances'] = Instances.concatenate([labels['instances'], labels2['instances']], axis=0) + labels['cls'] = np.concatenate([labels['cls'], labels2['cls']], 0) + return labels + + +class RandomPerspective: + + def __init__(self, + degrees=0.0, + translate=0.1, + scale=0.5, + shear=0.0, + perspective=0.0, + border=(0, 0), + pre_transform=None): + self.degrees = degrees + self.translate = translate + self.scale = scale + self.shear = shear + self.perspective = perspective + # Mosaic border + self.border = border + self.pre_transform = pre_transform + + def affine_transform(self, img, border): + """Center.""" + C = np.eye(3, dtype=np.float32) + + C[0, 2] = -img.shape[1] / 2 # x translation (pixels) + C[1, 2] = -img.shape[0] / 2 # y translation (pixels) + + # Perspective + P = np.eye(3, dtype=np.float32) + P[2, 0] = random.uniform(-self.perspective, self.perspective) # x perspective (about y) + P[2, 1] = random.uniform(-self.perspective, self.perspective) # y perspective (about x) + + # Rotation and Scale + R = np.eye(3, dtype=np.float32) + a = random.uniform(-self.degrees, self.degrees) + # a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations + s = random.uniform(1 - self.scale, 1 + self.scale) + # s = 2 ** random.uniform(-scale, scale) + R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s) + + # Shear + S = np.eye(3, dtype=np.float32) + S[0, 1] = math.tan(random.uniform(-self.shear, self.shear) * math.pi / 180) # x shear (deg) + S[1, 0] = math.tan(random.uniform(-self.shear, self.shear) * math.pi / 180) # y shear (deg) + + # Translation + T = np.eye(3, dtype=np.float32) + T[0, 2] = random.uniform(0.5 - self.translate, 0.5 + self.translate) * self.size[0] # x translation (pixels) + T[1, 2] = random.uniform(0.5 - self.translate, 0.5 + self.translate) * self.size[1] # y translation (pixels) + + # Combined rotation matrix + M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT + # Affine image + if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed + if self.perspective: + img = cv2.warpPerspective(img, M, dsize=self.size, borderValue=(114, 114, 114)) + else: # affine + img = cv2.warpAffine(img, M[:2], 
dsize=self.size, borderValue=(114, 114, 114)) + return img, M, s + + def apply_bboxes(self, bboxes, M): + """ + Apply affine to bboxes only. + + Args: + bboxes (ndarray): list of bboxes, xyxy format, with shape (num_bboxes, 4). + M (ndarray): affine matrix. + + Returns: + new_bboxes (ndarray): bboxes after affine, [num_bboxes, 4]. + """ + n = len(bboxes) + if n == 0: + return bboxes + + xy = np.ones((n * 4, 3), dtype=bboxes.dtype) + xy[:, :2] = bboxes[:, [0, 1, 2, 3, 0, 3, 2, 1]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1 + xy = xy @ M.T # transform + xy = (xy[:, :2] / xy[:, 2:3] if self.perspective else xy[:, :2]).reshape(n, 8) # perspective rescale or affine + + # Create new boxes + x = xy[:, [0, 2, 4, 6]] + y = xy[:, [1, 3, 5, 7]] + return np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1)), dtype=bboxes.dtype).reshape(4, n).T + + def apply_segments(self, segments, M): + """ + Apply affine to segments and generate new bboxes from segments. + + Args: + segments (ndarray): list of segments, [num_samples, 500, 2]. + M (ndarray): affine matrix. + + Returns: + new_segments (ndarray): list of segments after affine, [num_samples, 500, 2]. + new_bboxes (ndarray): bboxes after affine, [N, 4]. + """ + n, num = segments.shape[:2] + if n == 0: + return [], segments + + xy = np.ones((n * num, 3), dtype=segments.dtype) + segments = segments.reshape(-1, 2) + xy[:, :2] = segments + xy = xy @ M.T # transform + xy = xy[:, :2] / xy[:, 2:3] + segments = xy.reshape(n, -1, 2) + bboxes = np.stack([segment2box(xy, self.size[0], self.size[1]) for xy in segments], 0) + return bboxes, segments + + def apply_keypoints(self, keypoints, M): + """ + Apply affine to keypoints. + + Args: + keypoints (ndarray): keypoints, [N, 17, 3]. + M (ndarray): affine matrix. + + Return: + new_keypoints (ndarray): keypoints after affine, [N, 17, 3]. + """ + n, nkpt = keypoints.shape[:2] + if n == 0: + return keypoints + xy = np.ones((n * nkpt, 3), dtype=keypoints.dtype) + visible = keypoints[..., 2].reshape(n * nkpt, 1) + xy[:, :2] = keypoints[..., :2].reshape(n * nkpt, 2) + xy = xy @ M.T # transform + xy = xy[:, :2] / xy[:, 2:3] # perspective rescale or affine + out_mask = (xy[:, 0] < 0) | (xy[:, 1] < 0) | (xy[:, 0] > self.size[0]) | (xy[:, 1] > self.size[1]) + visible[out_mask] = 0 + return np.concatenate([xy, visible], axis=-1).reshape(n, nkpt, 3) + + def __call__(self, labels): + """ + Affine images and targets. + + Args: + labels (dict): a dict of `bboxes`, `segments`, `keypoints`. + """ + if self.pre_transform and 'mosaic_border' not in labels: + labels = self.pre_transform(labels) + labels.pop('ratio_pad') # do not need ratio pad + + img = labels['img'] + cls = labels['cls'] + instances = labels.pop('instances') + # Make sure the coord formats are right + instances.convert_bbox(format='xyxy') + instances.denormalize(*img.shape[:2][::-1]) + + border = labels.pop('mosaic_border', self.border) + self.size = img.shape[1] + border[1] * 2, img.shape[0] + border[0] * 2 # w, h + # M is affine matrix + # scale for func:`box_candidates` + img, M, scale = self.affine_transform(img, border) + + bboxes = self.apply_bboxes(instances.bboxes, M) + + segments = instances.segments + keypoints = instances.keypoints + # Update bboxes if there are segments. 
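+        # (Re-deriving boxes with segment2box from the warped polygon points gives
+        # tighter boxes than applying the affine to the four box corners alone.)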
+        if len(segments):
+            bboxes, segments = self.apply_segments(segments, M)
+
+        if keypoints is not None:
+            keypoints = self.apply_keypoints(keypoints, M)
+        new_instances = Instances(bboxes, segments, keypoints, bbox_format='xyxy', normalized=False)
+        # Clip
+        new_instances.clip(*self.size)
+
+        # Filter instances
+        instances.scale(scale_w=scale, scale_h=scale, bbox_only=True)
+        # Make the bboxes have the same scale as new_bboxes
+        i = self.box_candidates(box1=instances.bboxes.T,
+                                box2=new_instances.bboxes.T,
+                                area_thr=0.01 if len(segments) else 0.10)
+        labels['instances'] = new_instances[i]
+        labels['cls'] = cls[i]
+        labels['img'] = img
+        labels['resized_shape'] = img.shape[:2]
+        return labels
+
+    def box_candidates(self, box1, box2, wh_thr=2, ar_thr=100, area_thr=0.1, eps=1e-16):  # box1(4,n), box2(4,n)
+        # Compute box candidates: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio
+        w1, h1 = box1[2] - box1[0], box1[3] - box1[1]
+        w2, h2 = box2[2] - box2[0], box2[3] - box2[1]
+        ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps))  # aspect ratio
+        return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + eps) > area_thr) & (ar < ar_thr)  # candidates
+
+
+class RandomBlur:
+    def __init__(self, p) -> None:
+        self.p = p
+
+    def __call__(self, labels):
+        """Applies Gaussian blur with a random odd kernel size with probability p."""
+        img = labels['img']
+        if random.random() < self.p:
+            k = 2 * random.randint(1, 10) + 1  # random odd kernel size in [3, 21]
+            labels['img'] = cv2.GaussianBlur(img, (k, k), 0)
+        return labels
+
+
+class RandomHSV:
+
+    def __init__(self, hgain=0.5, sgain=0.5, vgain=0.5) -> None:
+        self.hgain = hgain
+        self.sgain = sgain
+        self.vgain = vgain
+
+    def __call__(self, labels):
+        """Applies random gains to the hue, saturation and value channels in place."""
+        img = labels['img']
+        if self.hgain or self.sgain or self.vgain:
+            r = np.random.uniform(-1, 1, 3) * [self.hgain, self.sgain, self.vgain] + 1  # random gains
+            hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
+            dtype = img.dtype  # uint8
+
+            x = np.arange(0, 256, dtype=r.dtype)
+            lut_hue = ((x * r[0]) % 180).astype(dtype)
+            lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)
+            lut_val = np.clip(x * r[2], 0, 255).astype(dtype)
+
+            im_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val)))
+            cv2.cvtColor(im_hsv, cv2.COLOR_HSV2BGR, dst=img)  # no return needed
+        return labels
+
+
+class RandomFlip:
+
+    def __init__(self, p=0.5, direction='horizontal', flip_idx=None) -> None:
+        assert direction in ['horizontal', 'vertical'], f'Supported directions are `horizontal` or `vertical`, got {direction}'
+        assert 0 <= p <= 1.0
+
+        self.p = p
+        self.direction = direction
+        self.flip_idx = flip_idx
+
+    def __call__(self, labels):
+        """Applies a random horizontal or vertical flip to the image and its instances."""
+        img = labels['img']
+        instances = labels.pop('instances')
+        instances.convert_bbox(format='xywh')
+        h, w = img.shape[:2]
+        h = 1 if instances.normalized else h
+        w = 1 if instances.normalized else w
+
+        # Flip up-down
+        if self.direction == 'vertical' and random.random() < self.p:
+            img = np.flipud(img)
+            instances.flipud(h)
+        if self.direction == 'horizontal' and random.random() < self.p:
+            img = np.fliplr(img)
+            instances.fliplr(w)
+            # For keypoints
+            if self.flip_idx is not None and instances.keypoints is not None:
+                instances.keypoints = np.ascontiguousarray(instances.keypoints[:, self.flip_idx, :])
+        labels['img'] = np.ascontiguousarray(img)
+        labels['instances'] = instances
+        return labels
+
+
+class RandomPoints:
+
+    def __init__(self, 
num_region=10, num_point=20) -> None: + self.num_region = num_region ## number of regions + self.num_point = num_point ## number of points + + def __call__(self, labels): + img = labels['img'] + h, w, c = img.shape + + for i in range(self.num_region): + # region size [50*50, 200*200] + xx, ww = np.random.randint(0, w-200), np.random.randint(50, 200) + yy, hh = np.random.randint(0, h-200), np.random.randint(50, 200) + # 20 points in each region + xs = np.random.randint(xx, xx+ww, 20) + ys = np.random.randint(yy, yy+hh, 20) + # draw ovals to region + for j in range(self.num_point): + x, y = xs[j], ys[j] + ax, ay = np.random.randint(5, 8), np.random.randint(3, 10) + angle = np.random.randint(0,180) + cc = np.random.randint(225, 255) + cv2.ellipse(img, (x, y), (ax, ay), + angle, 0, 360, + color = (cc, cc, cc), + thickness = -1) + return labels + + +class LetterBox: + """Resize image and padding for detection, instance segmentation, pose.""" + + def __init__(self, new_shape=(640, 640), auto=False, scaleFill=False, scaleup=True, stride=32): + """Initialize LetterBox object with specific parameters.""" + self.new_shape = new_shape + self.auto = auto + self.scaleFill = scaleFill + self.scaleup = scaleup + self.stride = stride + + def __call__(self, labels=None, image=None): + """Return updated labels and image with added border.""" + if labels is None: + labels = {} + img = labels.get('img') if image is None else image + shape = img.shape[:2] # current shape [height, width] + new_shape = labels.pop('rect_shape', self.new_shape) + if isinstance(new_shape, int): + new_shape = (new_shape, new_shape) + + # Scale ratio (new / old) + r = min(new_shape[0] / shape[0], new_shape[1] / shape[1]) + if not self.scaleup: # only scale down, do not scale up (for better val mAP) + r = min(r, 1.0) + + # Compute padding + ratio = r, r # width, height ratios + new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r)) + dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding + if self.auto: # minimum rectangle + dw, dh = np.mod(dw, self.stride), np.mod(dh, self.stride) # wh padding + elif self.scaleFill: # stretch + dw, dh = 0.0, 0.0 + new_unpad = (new_shape[1], new_shape[0]) + ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios + + dw /= 2 # divide padding into 2 sides + dh /= 2 + if labels.get('ratio_pad'): + labels['ratio_pad'] = (labels['ratio_pad'], (dw, dh)) # for evaluation + + if shape[::-1] != new_unpad: # resize + img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR) + top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1)) + left, right = int(round(dw - 0.1)), int(round(dw + 0.1)) + img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, + value=(114, 114, 114)) # add border + if len(labels): + labels = self._update_labels(labels, ratio, dw, dh) + labels['img'] = img + labels['resized_shape'] = new_shape + return labels + else: + return img + + def _update_labels(self, labels, ratio, padw, padh): + """Update labels.""" + labels['instances'].convert_bbox(format='xyxy') + labels['instances'].denormalize(*labels['img'].shape[:2][::-1]) + labels['instances'].scale(*ratio) + labels['instances'].add_padding(padw, padh) + return labels + + +class CopyPaste: + + def __init__(self, p=0.5) -> None: + self.p = p + + def __call__(self, labels): + """Implement Copy-Paste augmentation https://arxiv.org/abs/2012.07177, labels as nx5 np.array(cls, xyxy).""" + im = labels['img'] + cls = labels['cls'] + h, w = im.shape[:2] + instances = 
+
+
+class CopyPaste:
+
+    def __init__(self, p=0.5) -> None:
+        self.p = p
+
+    def __call__(self, labels):
+        """Implement Copy-Paste augmentation https://arxiv.org/abs/2012.07177, labels as nx5 np.array(cls, xyxy)."""
+        im = labels['img']
+        cls = labels['cls']
+        h, w = im.shape[:2]
+        instances = labels.pop('instances')
+        instances.convert_bbox(format='xyxy')
+        instances.denormalize(w, h)
+        if self.p and len(instances.segments):
+            im_new = np.zeros(im.shape, np.uint8)
+
+            # Calculate ioa first then select indexes randomly
+            ins_flip = deepcopy(instances)
+            ins_flip.fliplr(w)
+
+            ioa = bbox_ioa(ins_flip.bboxes, instances.bboxes)  # intersection over area, (N, M)
+            indexes = np.nonzero((ioa < 0.30).all(1))[0]  # (N, )
+            n = len(indexes)
+            for j in random.sample(list(indexes), k=round(self.p * n)):
+                cls = np.concatenate((cls, cls[[j]]), axis=0)
+                instances = Instances.concatenate((instances, ins_flip[[j]]), axis=0)
+                cv2.drawContours(im_new, instances.segments[[j]].astype(np.int32), -1, (1, 1, 1), cv2.FILLED)
+
+            result = cv2.flip(im, 1)  # augment segments (flip left-right)
+            i = cv2.flip(im_new, 1).astype(bool)
+            im[i] = result[i]  # cv2.imwrite('debug.jpg', im)  # debug
+
+        labels['img'] = im
+        labels['cls'] = cls
+        labels['instances'] = instances
+        return labels
+
+
+class Albumentations:
+    # YOLOv8 Albumentations class (optional, only used if package is installed)
+    def __init__(self, p=1.0):
+        """Initialize the transform object for YOLO bbox formatted params."""
+        self.p = p
+        self.transform = None
+        prefix = colorstr('albumentations: ')
+        try:
+            import albumentations as A
+
+            check_version(A.__version__, '1.0.3', hard=True)  # version requirement
+
+            T = [
+                A.Blur(p=0.01),
+                A.MedianBlur(p=0.01),
+                A.ToGray(p=0.01),
+                A.CLAHE(p=0.01),
+                A.RandomBrightnessContrast(p=0.0),
+                A.RandomGamma(p=0.0),
+                A.ImageCompression(quality_lower=75, p=0.0)]  # transforms
+            self.transform = A.Compose(T, bbox_params=A.BboxParams(format='yolo', label_fields=['class_labels']))
+
+            LOGGER.info(prefix + ', '.join(f'{x}'.replace('always_apply=False, ', '') for x in T if x.p))
+        except ImportError:  # package not installed, skip
+            pass
+        except Exception as e:
+            LOGGER.info(f'{prefix}{e}')
+
+    def __call__(self, labels):
+        """Apply the albumentations transforms to the image and boxes with probability p."""
+        im = labels['img']
+        cls = labels['cls']
+        if len(cls):
+            labels['instances'].convert_bbox('xywh')
+            labels['instances'].normalize(*im.shape[:2][::-1])
+            bboxes = labels['instances'].bboxes
+            # TODO: add support for segments and keypoints
+            if self.transform and random.random() < self.p:
+                new = self.transform(image=im, bboxes=bboxes, class_labels=cls)  # transformed
+                if len(new['class_labels']) > 0:  # skip update if no bbox in new im
+                    labels['img'] = new['image']
+                    labels['cls'] = np.array(new['class_labels'])
+                    bboxes = np.array(new['bboxes'])
+                    labels['instances'].update(bboxes=bboxes)
+        return labels
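+
+# Example (sketch): the wrapper degrades to a no-op when albumentations is not
+# installed or the random draw misses p; keys follow the labels-dict convention
+# used throughout this module.
+#   aug = Albumentations(p=1.0)
+#   labels = aug(labels)   # labels: {'img': ..., 'cls': ..., 'instances': ...}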
+
+
+# TODO: technically this is not an augmentation, maybe we should move this to another file
+class Format:
+
+    def __init__(self,
+                 bbox_format='xywh',
+                 normalize=True,
+                 return_mask=False,
+                 return_keypoint=False,
+                 mask_ratio=4,
+                 mask_overlap=True,
+                 batch_idx=True,
+                 nc=3):
+        self.bbox_format = bbox_format
+        self.normalize = normalize
+        self.return_mask = return_mask  # set False when training detection only
+        self.return_keypoint = return_keypoint
+        self.mask_ratio = mask_ratio
+        self.mask_overlap = mask_overlap
+        self.batch_idx = batch_idx  # keep the batch indexes
+        self.nc = nc
+
+    def __call__(self, labels):
+        """Return formatted image, classes, bounding boxes & keypoints to be used by 'collate_fn'."""
+        img = labels.pop('img')
+        h, w = img.shape[:2]
+        cls = labels.pop('cls')
+        instances = labels.pop('instances')
+        instances.convert_bbox(format=self.bbox_format)
+        instances.denormalize(w, h)
+        nl = len(instances)
+        if self.return_mask:
+            if nl:
+                masks, instances, cls = self._format_segments(instances, cls, w, h)
+                masks = torch.from_numpy(masks)
+            else:
+                masks = torch.zeros(1 if self.mask_overlap else nl, self.nc, img.shape[0] // self.mask_ratio,
+                                    img.shape[1] // self.mask_ratio)
+            labels['masks'] = masks
+        if self.normalize:
+            instances.normalize(w, h)
+        labels['img'] = self._format_img(img)
+        labels['cls'] = torch.from_numpy(cls) if nl else torch.zeros(nl)
+        labels['bboxes'] = torch.from_numpy(instances.bboxes) if nl else torch.zeros((nl, 4))
+        if self.return_keypoint:
+            labels['keypoints'] = torch.from_numpy(instances.keypoints)
+        # Then we can use collate_fn
+        if self.batch_idx:
+            labels['batch_idx'] = torch.zeros(nl)
+        return labels
+
+    def _format_img(self, img):
+        """Format the image from a HWC BGR numpy array to a CHW RGB PyTorch tensor."""
+        if len(img.shape) < 3:
+            img = np.expand_dims(img, -1)
+        img = np.ascontiguousarray(img.transpose(2, 0, 1)[::-1])
+        img = torch.from_numpy(img)
+        return img
+
+    def _format_segments(self, instances, cls, w, h):
+        """Convert polygon points to bitmap masks."""
+        segments = instances.segments
+        if self.mask_overlap:
+            masks, sorted_idx = polygons2masks_multi((h, w), segments, cls, downsample_ratio=self.mask_ratio, nc=self.nc)
+            masks = masks[None]  # (2, 640, 640) -> (1, 2, 640, 640)
+            instances = instances[sorted_idx]
+            cls = cls[sorted_idx]
+        else:
+            masks = polygons2masks((h, w), segments, color=1, downsample_ratio=self.mask_ratio)
+
+        return masks, instances, cls
+
+
+def v9_transforms(dataset, imgsz, hyp):
+    """Build the YOLOv9 training transform pipeline (mosaic, copy-paste, perspective, HSV, flips)."""
+    pre_transform = Compose([
+        Mosaic(dataset, imgsz=imgsz, p=hyp['mosaic']),
+        CopyPaste(p=hyp['copy_paste']),
+        RandomPerspective(
+            degrees=hyp['degrees'],
+            translate=hyp['translate'],
+            scale=hyp['scale'],
+            shear=hyp['shear'],
+            perspective=hyp['perspective'],
+            pre_transform=LetterBox(new_shape=(imgsz, imgsz)),
+        )])
+    # flip_idx = dataset.data.get('flip_idx', None)  # for keypoints augmentation
+    # if dataset.use_keypoints:
+    #     kpt_shape = dataset.data.get('kpt_shape', None)
+    #     if flip_idx is None and hyp['fliplr'] > 0.0:
+    #         hyp['fliplr'] = 0.0
+    #         LOGGER.warning("WARNING ⚠️ No 'flip_idx' array defined in data.yaml, setting augmentation 'fliplr=0.0'")
+    #     elif flip_idx:
+    #         if len(flip_idx) != kpt_shape[0]:
+    #             raise ValueError(f'data.yaml flip_idx={flip_idx} length must be equal to kpt_shape[0]={kpt_shape[0]}')
+    #         elif flip_idx[0] != 0:
+    #             raise ValueError(f'data.yaml flip_idx={flip_idx} must be zero-index (start from 0)')
+
+    return Compose([
+        pre_transform,
+        MixUp(dataset, pre_transform=pre_transform, p=hyp['mixup']),
+        Albumentations(p=1.0),
+        RandomHSV(hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v']),
+        RandomFlip(direction='vertical', p=hyp['flipud']),
+        RandomFlip(direction='horizontal', p=hyp['fliplr']),
+    ])  # transforms
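+
+# Example (sketch): wiring the pipeline to a dataset; imgsz and the hyp keys are
+# illustrative and mirror data/hyps/hyp.scratch-high.yaml.
+#   transforms = v9_transforms(dataset, imgsz=640, hyp=hyp)
+#   sample = transforms(dataset.get_image_and_label(0))  # labels dict in, dict out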
+
+
+# Classification augmentations -----------------------------------------------------------------------------------------
+def classify_transforms(size=224, mean=(0.0, 0.0, 0.0), std=(1.0, 1.0, 1.0)):  # IMAGENET_MEAN, IMAGENET_STD
+    # Transforms to apply if albumentations not installed
+    if not isinstance(size, int):
+        raise TypeError(f'classify_transforms() size {size} must be integer, not (list, tuple)')
+    if any(mean) or any(x != 1 for x in std):  # skip the no-op Normalize for zero mean and unit std
+        return T.Compose([CenterCrop(size), ToTensor(), T.Normalize(mean, std, inplace=True)])
+    else:
+        return T.Compose([CenterCrop(size), ToTensor()])
+
+
+def hsv2colorjitter(h, s, v):
+    """Map HSV (hue, saturation, value) jitter into ColorJitter values (brightness, contrast, saturation, hue)"""
+    return v, v, s, h
+
+
+def classify_albumentations(
+        augment=True,
+        size=224,
+        scale=(0.08, 1.0),
+        hflip=0.5,
+        vflip=0.0,
+        hsv_h=0.015,  # image HSV-Hue augmentation (fraction)
+        hsv_s=0.7,  # image HSV-Saturation augmentation (fraction)
+        hsv_v=0.4,  # image HSV-Value augmentation (fraction)
+        mean=(0.0, 0.0, 0.0),  # IMAGENET_MEAN
+        std=(1.0, 1.0, 1.0),  # IMAGENET_STD
+        auto_aug=False,
+):
+    # YOLOv8 classification Albumentations (optional, only used if package is installed)
+    prefix = colorstr('albumentations: ')
+    try:
+        import albumentations as A
+        from albumentations.pytorch import ToTensorV2
+
+        check_version(A.__version__, '1.0.3', hard=True)  # version requirement
+        if augment:  # Resize and crop
+            T = [A.RandomResizedCrop(height=size, width=size, scale=scale)]
+            if auto_aug:
+                # TODO: implement AugMix, AutoAug & RandAug in albumentations
+                LOGGER.info(f'{prefix}auto augmentations are currently not supported')
+            else:
+                if hflip > 0:
+                    T += [A.HorizontalFlip(p=hflip)]
+                if vflip > 0:
+                    T += [A.VerticalFlip(p=vflip)]
+                if any((hsv_h, hsv_s, hsv_v)):
+                    T += [A.ColorJitter(*hsv2colorjitter(hsv_h, hsv_s, hsv_v))]  # brightness, contrast, saturation, hue
+        else:  # Use fixed crop for eval set (reproducibility)
+            T = [A.SmallestMaxSize(max_size=size), A.CenterCrop(height=size, width=size)]
+        T += [A.Normalize(mean=mean, std=std), ToTensorV2()]  # Normalize and convert to Tensor
+        LOGGER.info(prefix + ', '.join(f'{x}'.replace('always_apply=False, ', '') for x in T if x.p))
+        return A.Compose(T)
+
+    except ImportError:  # package not installed, skip
+        pass
+    except Exception as e:
+        LOGGER.info(f'{prefix}{e}')
+
+
+class ClassifyLetterBox:
+    # YOLOv8 LetterBox class for image preprocessing, i.e. T.Compose([LetterBox(size), ToTensor()])
+    def __init__(self, size=(640, 640), auto=False, stride=32):
+        """Resizes image and crops it to center with max dimensions 'h' and 'w'."""
+        super().__init__()
+        self.h, self.w = (size, size) if isinstance(size, int) else size
+        self.auto = auto  # pass max size integer, automatically solve for short side using stride
+        self.stride = stride  # used with auto
+
+    def __call__(self, im):  # im = np.array HWC
+        imh, imw = im.shape[:2]
+        r = min(self.h / imh, self.w / imw)  # ratio of new/old
+        h, w = round(imh * r), round(imw * r)  # resized image
+        hs, ws = (math.ceil(x / self.stride) * self.stride for x in (h, w)) if self.auto else (self.h, self.w)
+        top, left = round((hs - h) / 2 - 0.1), round((ws - w) / 2 - 0.1)
+        im_out = np.full((hs, ws, 3), 114, dtype=im.dtype)
+        im_out[top:top + h, left:left + w] = cv2.resize(im, (w, h), interpolation=cv2.INTER_LINEAR)
+        return im_out
+
+
+class CenterCrop:
+    # YOLOv8 CenterCrop class for image preprocessing, i.e. T.Compose([CenterCrop(size), ToTensor()])
+    def __init__(self, size=640):
+        """Crops the center square of the image before resizing to (w, h)."""
+        super().__init__()
+        self.h, self.w = (size, size) if isinstance(size, int) else size
+
+    def __call__(self, im):  # im = np.array HWC
+        imh, imw = im.shape[:2]
+        m = min(imh, imw)  # min dimension
+        top, left = (imh - m) // 2, (imw - m) // 2
+        return cv2.resize(im[top:top + m, left:left + m], (self.w, self.h), interpolation=cv2.INTER_LINEAR)
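+
+# Example (sketch): eval-time preprocessing without albumentations installed; the
+# file name is illustrative.
+#   tfm = classify_transforms(size=224)  # CenterCrop -> ToTensor, no Normalize
+#   x = tfm(cv2.imread('example.jpg'))   # float32 CHW RGB tensor in [0, 1]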
+
+
+class ToTensor:
+    # YOLOv8 ToTensor class for image preprocessing, i.e. T.Compose([LetterBox(size), ToTensor()])
+    def __init__(self, half=False):
+        """Initialize YOLOv8 ToTensor object with optional half-precision support."""
+        super().__init__()
+        self.half = half
+
+    def __call__(self, im):  # im = np.array HWC in BGR order
+        im = np.ascontiguousarray(im.transpose((2, 0, 1))[::-1])  # HWC to CHW -> BGR to RGB -> contiguous
+        im = torch.from_numpy(im)  # to torch
+        im = im.half() if self.half else im.float()  # uint8 to fp16/32
+        im /= 255.0  # 0-255 to 0.0-1.0
+        return im
diff --git a/utils/.ipynb_checkpoints/augmentations-checkpoint.py b/utils/.ipynb_checkpoints/augmentations-checkpoint.py
new file mode 100644
index 000000000..ad4c07fb6
--- /dev/null
+++ b/utils/.ipynb_checkpoints/augmentations-checkpoint.py
@@ -0,0 +1,395 @@
+import math
+import random
+
+import cv2
+import numpy as np
+import torch
+import torchvision.transforms as T
+import torchvision.transforms.functional as TF
+
+from utils.general import LOGGER, check_version, colorstr, resample_segments, segment2box, xywhn2xyxy
+from utils.metrics import bbox_ioa
+
+IMAGENET_MEAN = 0.485, 0.456, 0.406  # RGB mean
+IMAGENET_STD = 0.229, 0.224, 0.225  # RGB standard deviation
+
+
+class Albumentations:
+    # YOLOv5 Albumentations class (optional, only used if package is installed)
+    def __init__(self, size=640):
+        self.transform = None
+        prefix = colorstr('albumentations: ')
+        try:
+            import albumentations as A
+            check_version(A.__version__, '1.0.3', hard=True)  # version requirement
+
+            T = [
+                A.RandomResizedCrop(height=size, width=size, scale=(0.8, 1.0), ratio=(0.9, 1.11), p=0.0),
+                A.Blur(p=0.01),
+                A.MedianBlur(p=0.01),
+                A.ToGray(p=0.01),
+                A.CLAHE(p=0.01),
+                A.RandomBrightnessContrast(p=0.0),
+                A.RandomGamma(p=0.0),
+                A.ImageCompression(quality_lower=75, p=0.0)]  # transforms
+            self.transform = A.Compose(T, bbox_params=A.BboxParams(format='yolo', label_fields=['class_labels']))
+
+            LOGGER.info(prefix + ', '.join(f'{x}'.replace('always_apply=False, ', '') for x in T if x.p))
+        except ImportError:  # package not installed, skip
+            pass
+        except Exception as e:
+            LOGGER.info(f'{prefix}{e}')
+
+    def __call__(self, im, labels, p=1.0):
+        if self.transform and random.random() < p:
+            new = self.transform(image=im, bboxes=labels[:, 1:], class_labels=labels[:, 0])  # transformed
+            im, labels = new['image'], np.array([[c, *b] for c, b in zip(new['class_labels'], new['bboxes'])])
+        return im, labels
+
+
+def normalize(x, mean=IMAGENET_MEAN, std=IMAGENET_STD, inplace=False):
+    # Normalize RGB images x per ImageNet stats in BCHW format, i.e. = (x - mean) / std
+    return TF.normalize(x, mean, std, inplace=inplace)
+
+
+def denormalize(x, mean=IMAGENET_MEAN, std=IMAGENET_STD):
+    # Denormalize RGB images x per ImageNet stats in BCHW format, i.e.
= x * std + mean + for i in range(3): + x[:, i] = x[:, i] * std[i] + mean[i] + return x + + +def augment_hsv(im, hgain=0.5, sgain=0.5, vgain=0.5): + # HSV color-space augmentation + if hgain or sgain or vgain: + r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains + hue, sat, val = cv2.split(cv2.cvtColor(im, cv2.COLOR_BGR2HSV)) + dtype = im.dtype # uint8 + + x = np.arange(0, 256, dtype=r.dtype) + lut_hue = ((x * r[0]) % 180).astype(dtype) + lut_sat = np.clip(x * r[1], 0, 255).astype(dtype) + lut_val = np.clip(x * r[2], 0, 255).astype(dtype) + + im_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))) + cv2.cvtColor(im_hsv, cv2.COLOR_HSV2BGR, dst=im) # no return needed + + +def hist_equalize(im, clahe=True, bgr=False): + # Equalize histogram on BGR image 'im' with im.shape(n,m,3) and range 0-255 + yuv = cv2.cvtColor(im, cv2.COLOR_BGR2YUV if bgr else cv2.COLOR_RGB2YUV) + if clahe: + c = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8)) + yuv[:, :, 0] = c.apply(yuv[:, :, 0]) + else: + yuv[:, :, 0] = cv2.equalizeHist(yuv[:, :, 0]) # equalize Y channel histogram + return cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR if bgr else cv2.COLOR_YUV2RGB) # convert YUV image to RGB + + +def replicate(im, labels): + # Replicate labels + h, w = im.shape[:2] + boxes = labels[:, 1:].astype(int) + x1, y1, x2, y2 = boxes.T + s = ((x2 - x1) + (y2 - y1)) / 2 # side length (pixels) + for i in s.argsort()[:round(s.size * 0.5)]: # smallest indices + x1b, y1b, x2b, y2b = boxes[i] + bh, bw = y2b - y1b, x2b - x1b + yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw)) # offset x, y + x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh] + im[y1a:y2a, x1a:x2a] = im[y1b:y2b, x1b:x2b] # im4[ymin:ymax, xmin:xmax] + labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0) + + return im, labels + + +def letterbox(im, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32): + # Resize and pad image while meeting stride-multiple constraints + shape = im.shape[:2] # current shape [height, width] + if isinstance(new_shape, int): + new_shape = (new_shape, new_shape) + + # Scale ratio (new / old) + r = min(new_shape[0] / shape[0], new_shape[1] / shape[1]) + if not scaleup: # only scale down, do not scale up (for better val mAP) + r = min(r, 1.0) + + # Compute padding + ratio = r, r # width, height ratios + new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r)) + dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding + if auto: # minimum rectangle + dw, dh = np.mod(dw, stride), np.mod(dh, stride) # wh padding + elif scaleFill: # stretch + dw, dh = 0.0, 0.0 + new_unpad = (new_shape[1], new_shape[0]) + ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios + + dw /= 2 # divide padding into 2 sides + dh /= 2 + + if shape[::-1] != new_unpad: # resize + im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR) + top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1)) + left, right = int(round(dw - 0.1)), int(round(dw + 0.1)) + im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border + return im, ratio, (dw, dh) + + +def random_perspective(im, + targets=(), + segments=(), + degrees=10, + translate=.1, + scale=.1, + shear=10, + perspective=0.0, + border=(0, 0)): + # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(0.1, 0.1), scale=(0.9, 1.1), shear=(-10, 10)) + # targets = [cls, xyxy] + + height = 
im.shape[0] + border[0] * 2 # shape(h,w,c) + width = im.shape[1] + border[1] * 2 + + # Center + C = np.eye(3) + C[0, 2] = -im.shape[1] / 2 # x translation (pixels) + C[1, 2] = -im.shape[0] / 2 # y translation (pixels) + + # Perspective + P = np.eye(3) + P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y) + P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x) + + # Rotation and Scale + R = np.eye(3) + a = random.uniform(-degrees, degrees) + # a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations + s = random.uniform(1 - scale, 1 + scale) + # s = 2 ** random.uniform(-scale, scale) + R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s) + + # Shear + S = np.eye(3) + S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg) + S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg) + + # Translation + T = np.eye(3) + T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width # x translation (pixels) + T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height # y translation (pixels) + + # Combined rotation matrix + M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT + if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed + if perspective: + im = cv2.warpPerspective(im, M, dsize=(width, height), borderValue=(114, 114, 114)) + else: # affine + im = cv2.warpAffine(im, M[:2], dsize=(width, height), borderValue=(114, 114, 114)) + + # Visualize + # import matplotlib.pyplot as plt + # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel() + # ax[0].imshow(im[:, :, ::-1]) # base + # ax[1].imshow(im2[:, :, ::-1]) # warped + + # Transform label coordinates + n = len(targets) + if n: + use_segments = any(x.any() for x in segments) + new = np.zeros((n, 4)) + if use_segments: # warp segments + segments = resample_segments(segments) # upsample + for i, segment in enumerate(segments): + xy = np.ones((len(segment), 3)) + xy[:, :2] = segment + xy = xy @ M.T # transform + xy = xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2] # perspective rescale or affine + + # clip + new[i] = segment2box(xy, width, height) + + else: # warp boxes + xy = np.ones((n * 4, 3)) + xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1 + xy = xy @ M.T # transform + xy = (xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]).reshape(n, 8) # perspective rescale or affine + + # create new boxes + x = xy[:, [0, 2, 4, 6]] + y = xy[:, [1, 3, 5, 7]] + new = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T + + # clip + new[:, [0, 2]] = new[:, [0, 2]].clip(0, width) + new[:, [1, 3]] = new[:, [1, 3]].clip(0, height) + + # filter candidates + i = box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01 if use_segments else 0.10) + targets = targets[i] + targets[:, 1:5] = new[i] + + return im, targets + + +def copy_paste(im, labels, segments, p=0.5): + # Implement Copy-Paste augmentation https://arxiv.org/abs/2012.07177, labels as nx5 np.array(cls, xyxy) + n = len(segments) + if p and n: + h, w, c = im.shape # height, width, channels + im_new = np.zeros(im.shape, np.uint8) + + # calculate ioa first then select indexes randomly + boxes = np.stack([w - labels[:, 3], labels[:, 2], w - labels[:, 1], labels[:, 4]], axis=-1) # (n, 4) + ioa = bbox_ioa(boxes, labels[:, 1:5]) # intersection over area + indexes = np.nonzero((ioa < 0.30).all(1))[0] # (N, ) + n = len(indexes) + for j 
in random.sample(list(indexes), k=round(p * n)):
+            l, box, s = labels[j], boxes[j], segments[j]
+            labels = np.concatenate((labels, [[l[0], *box]]), 0)
+            segments.append(np.concatenate((w - s[:, 0:1], s[:, 1:2]), 1))
+            cv2.drawContours(im_new, [segments[j].astype(np.int32)], -1, (1, 1, 1), cv2.FILLED)
+
+        result = cv2.flip(im, 1)  # augment segments (flip left-right)
+        i = cv2.flip(im_new, 1).astype(bool)
+        im[i] = result[i]  # cv2.imwrite('debug.jpg', im)  # debug
+
+    return im, labels, segments
+
+
+def cutout(im, labels, p=0.5):
+    # Applies image cutout augmentation https://arxiv.org/abs/1708.04552
+    if random.random() < p:
+        h, w = im.shape[:2]
+        scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16  # image size fraction
+        for s in scales:
+            mask_h = random.randint(1, int(h * s))  # create random masks
+            mask_w = random.randint(1, int(w * s))
+
+            # box
+            xmin = max(0, random.randint(0, w) - mask_w // 2)
+            ymin = max(0, random.randint(0, h) - mask_h // 2)
+            xmax = min(w, xmin + mask_w)
+            ymax = min(h, ymin + mask_h)
+
+            # apply random color mask
+            im[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)]
+
+            # return unobscured labels
+            if len(labels) and s > 0.03:
+                box = np.array([[xmin, ymin, xmax, ymax]], dtype=np.float32)
+                ioa = bbox_ioa(box, xywhn2xyxy(labels[:, 1:5], w, h))[0]  # intersection over area
+                labels = labels[ioa < 0.60]  # remove >60% obscured labels
+
+    return labels
+
+
+def mixup(im, labels, im2, labels2):
+    # Applies MixUp augmentation https://arxiv.org/pdf/1710.09412.pdf
+    r = np.random.beta(32.0, 32.0)  # mixup ratio, alpha=beta=32.0
+    im = (im * r + im2 * (1 - r)).astype(np.uint8)
+    labels = np.concatenate((labels, labels2), 0)
+    return im, labels
+
+
+def box_candidates(box1, box2, wh_thr=2, ar_thr=100, area_thr=0.1, eps=1e-16):  # box1(4,n), box2(4,n)
+    # Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio
+    w1, h1 = box1[2] - box1[0], box1[3] - box1[1]
+    w2, h2 = box2[2] - box2[0], box2[3] - box2[1]
+    ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps))  # aspect ratio
+    return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + eps) > area_thr) & (ar < ar_thr)  # candidates
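+
+# Worked example (sketch): a 100x80 box that shrinks to 60x50 after augmentation
+# survives the filter (area ratio 0.375 > 0.1, aspect ratio 1.2 < 100), while a
+# box squashed to 1 px wide would fail the wh_thr=2 test.
+#   b1 = np.array([[0.], [0.], [100.], [80.]])  # (4, n) before augment
+#   b2 = np.array([[0.], [0.], [60.], [50.]])   # (4, n) after augment
+#   box_candidates(b1, b2)                      # -> array([ True])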
+
+
+def classify_albumentations(
+        augment=True,
+        size=224,
+        scale=(0.08, 1.0),
+        ratio=(0.75, 1.0 / 0.75),  # 0.75, 1.33
+        hflip=0.5,
+        vflip=0.0,
+        jitter=0.4,
+        mean=IMAGENET_MEAN,
+        std=IMAGENET_STD,
+        auto_aug=False):
+    # YOLOv5 classification Albumentations (optional, only used if package is installed)
+    prefix = colorstr('albumentations: ')
+    try:
+        import albumentations as A
+        from albumentations.pytorch import ToTensorV2
+        check_version(A.__version__, '1.0.3', hard=True)  # version requirement
+        if augment:  # Resize and crop
+            T = [A.RandomResizedCrop(height=size, width=size, scale=scale, ratio=ratio)]
+            if auto_aug:
+                # TODO: implement AugMix, AutoAug & RandAug in albumentations
+                LOGGER.info(f'{prefix}auto augmentations are currently not supported')
+            else:
+                if hflip > 0:
+                    T += [A.HorizontalFlip(p=hflip)]
+                if vflip > 0:
+                    T += [A.VerticalFlip(p=vflip)]
+                if jitter > 0:
+                    color_jitter = (float(jitter),) * 3  # repeat value for brightness, contrast, saturation, 0 hue
+                    T += [A.ColorJitter(*color_jitter, 0)]
+        else:  # Use fixed crop for eval set (reproducibility)
+            T = [A.SmallestMaxSize(max_size=size), A.CenterCrop(height=size, width=size)]
+        T += [A.Normalize(mean=mean, std=std), ToTensorV2()]  # Normalize and convert to Tensor
+        LOGGER.info(prefix + ', '.join(f'{x}'.replace('always_apply=False, ', '') for x in T if x.p))
+        return A.Compose(T)
+
+    except ImportError:  # package not installed, skip
+        LOGGER.warning(f'{prefix}⚠️ not found, install with `pip install albumentations` (recommended)')
+    except Exception as e:
+        LOGGER.info(f'{prefix}{e}')
+
+
+def classify_transforms(size=224):
+    # Transforms to apply if albumentations not installed
+    assert isinstance(size, int), f'ERROR: classify_transforms size {size} must be integer, not (list, tuple)'
+    # T.Compose([T.ToTensor(), T.Resize(size), T.CenterCrop(size), T.Normalize(IMAGENET_MEAN, IMAGENET_STD)])
+    return T.Compose([CenterCrop(size), ToTensor(), T.Normalize(IMAGENET_MEAN, IMAGENET_STD)])
+
+
+class LetterBox:
+    # YOLOv5 LetterBox class for image preprocessing, i.e. T.Compose([LetterBox(size), ToTensor()])
+    def __init__(self, size=(640, 640), auto=False, stride=32):
+        super().__init__()
+        self.h, self.w = (size, size) if isinstance(size, int) else size
+        self.auto = auto  # pass max size integer, automatically solve for short side using stride
+        self.stride = stride  # used with auto
+
+    def __call__(self, im):  # im = np.array HWC
+        imh, imw = im.shape[:2]
+        r = min(self.h / imh, self.w / imw)  # ratio of new/old
+        h, w = round(imh * r), round(imw * r)  # resized image
+        hs, ws = (math.ceil(x / self.stride) * self.stride for x in (h, w)) if self.auto else (self.h, self.w)
+        top, left = round((hs - h) / 2 - 0.1), round((ws - w) / 2 - 0.1)
+        im_out = np.full((hs, ws, 3), 114, dtype=im.dtype)
+        im_out[top:top + h, left:left + w] = cv2.resize(im, (w, h), interpolation=cv2.INTER_LINEAR)
+        return im_out
+
+
+class CenterCrop:
+    # YOLOv5 CenterCrop class for image preprocessing, i.e. T.Compose([CenterCrop(size), ToTensor()])
+    def __init__(self, size=640):
+        super().__init__()
+        self.h, self.w = (size, size) if isinstance(size, int) else size
+
+    def __call__(self, im):  # im = np.array HWC
+        imh, imw = im.shape[:2]
+        m = min(imh, imw)  # min dimension
+        top, left = (imh - m) // 2, (imw - m) // 2
+        return cv2.resize(im[top:top + m, left:left + m], (self.w, self.h), interpolation=cv2.INTER_LINEAR)
+
+
+class ToTensor:
+    # YOLOv5 ToTensor class for image preprocessing, i.e.
T.Compose([LetterBox(size), ToTensor()]) + def __init__(self, half=False): + super().__init__() + self.half = half + + def __call__(self, im): # im = np.array HWC in BGR order + im = np.ascontiguousarray(im.transpose((2, 0, 1))[::-1]) # HWC to CHW -> BGR to RGB -> contiguous + im = torch.from_numpy(im) # to torch + im = im.half() if self.half else im.float() # uint8 to fp16/32 + im /= 255.0 # 0-255 to 0.0-1.0 + return im diff --git a/utils/.ipynb_checkpoints/dataloaders-checkpoint.py b/utils/.ipynb_checkpoints/dataloaders-checkpoint.py new file mode 100644 index 000000000..6b5323d3e --- /dev/null +++ b/utils/.ipynb_checkpoints/dataloaders-checkpoint.py @@ -0,0 +1,1278 @@ +import contextlib +import glob +import hashlib +import json +import math +import os +import random +import shutil +import time +from itertools import repeat +from multiprocessing.pool import Pool, ThreadPool +from pathlib import Path +from threading import Thread +from urllib.parse import urlparse + +import numpy as np +import psutil +import torch +import torch.nn.functional as F +import torchvision +import yaml +from PIL import ExifTags, Image, ImageOps +from torch.utils.data import DataLoader, Dataset, dataloader, distributed +from tqdm import tqdm + +from utils.augmentations import (Albumentations, augment_hsv, classify_albumentations, classify_transforms, copy_paste, + letterbox, mixup, random_perspective) +from utils.augment import Compose, Format, Instances, LetterBox, v9_transforms +from utils.general import (DATASETS_DIR, LOGGER, NUM_THREADS, TQDM_BAR_FORMAT, check_dataset, check_requirements, + check_yaml, clean_str, cv2, is_colab, is_kaggle, segments2boxes, unzip_file, xyn2xy, + xywh2xyxy, xywhn2xyxy, xyxy2xywhn) +from utils.torch_utils import torch_distributed_zero_first + +# Parameters +HELP_URL = 'See https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data' +IMG_FORMATS = 'bmp', 'dng', 'jpeg', 'jpg', 'mpo', 'png', 'tif', 'tiff', 'webp', 'pfm' # include image suffixes +VID_FORMATS = 'asf', 'avi', 'gif', 'm4v', 'mkv', 'mov', 'mp4', 'mpeg', 'mpg', 'ts', 'wmv' # include video suffixes +LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html +RANK = int(os.getenv('RANK', -1)) +PIN_MEMORY = str(os.getenv('PIN_MEMORY', True)).lower() == 'true' # global pin_memory for dataloaders + +# Get orientation exif tag +for orientation in ExifTags.TAGS.keys(): + if ExifTags.TAGS[orientation] == 'Orientation': + break + + +def get_hash(paths): + # Returns a single hash value of a list of paths (files or dirs) + size = sum(os.path.getsize(p) for p in paths if os.path.exists(p)) # sizes + h = hashlib.md5(str(size).encode()) # hash sizes + h.update(''.join(paths).encode()) # hash paths + return h.hexdigest() # return hash + + +def exif_size(img): + # Returns exif-corrected PIL size + s = img.size # (width, height) + with contextlib.suppress(Exception): + rotation = dict(img._getexif().items())[orientation] + if rotation in [6, 8]: # rotation 270 or 90 + s = (s[1], s[0]) + return s + + +def exif_transpose(image): + """ + Transpose a PIL image accordingly if it has an EXIF Orientation tag. + Inplace version of https://github.com/python-pillow/Pillow/blob/master/src/PIL/ImageOps.py exif_transpose() + + :param image: The image to transpose. + :return: An image. 
+ """ + exif = image.getexif() + orientation = exif.get(0x0112, 1) # default 1 + if orientation > 1: + method = { + 2: Image.FLIP_LEFT_RIGHT, + 3: Image.ROTATE_180, + 4: Image.FLIP_TOP_BOTTOM, + 5: Image.TRANSPOSE, + 6: Image.ROTATE_270, + 7: Image.TRANSVERSE, + 8: Image.ROTATE_90}.get(orientation) + if method is not None: + image = image.transpose(method) + del exif[0x0112] + image.info["exif"] = exif.tobytes() + return image + + +def seed_worker(worker_id): + # Set dataloader worker seed https://pytorch.org/docs/stable/notes/randomness.html#dataloader + worker_seed = torch.initial_seed() % 2 ** 32 + np.random.seed(worker_seed) + random.seed(worker_seed) + + +def create_dataloader(path, + imgsz, + batch_size, + stride, + single_cls=False, + hyp=None, + augment=False, + cache=False, + pad=0.0, + rect=False, + rank=-1, + workers=8, + image_weights=False, + close_mosaic=False, + quad=False, + min_items=0, + prefix='', + shuffle=False): + if rect and shuffle: + LOGGER.warning('WARNING ⚠️ --rect is incompatible with DataLoader shuffle, setting shuffle=False') + shuffle = False + with torch_distributed_zero_first(rank): # init dataset *.cache only once if DDP + dataset = LoadImagesAndLabels( + path, + imgsz, + batch_size, + augment=augment, # augmentation + hyp=hyp, # hyperparameters + rect=rect, # rectangular batches + cache_images=cache, + single_cls=single_cls, + stride=int(stride), + pad=pad, + image_weights=image_weights, + min_items=min_items, + prefix=prefix) + + batch_size = min(batch_size, len(dataset)) + nd = torch.cuda.device_count() # number of CUDA devices + nw = min([os.cpu_count() // max(nd, 1), batch_size if batch_size > 1 else 0, workers]) # number of workers + sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle) + #loader = DataLoader if image_weights else InfiniteDataLoader # only DataLoader allows for attribute updates + loader = DataLoader if image_weights or close_mosaic else InfiniteDataLoader + generator = torch.Generator() + generator.manual_seed(6148914691236517205 + RANK) + return loader(dataset, + batch_size=batch_size, + shuffle=shuffle and sampler is None, + num_workers=nw, + sampler=sampler, + pin_memory=PIN_MEMORY, + collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn, + worker_init_fn=seed_worker, + generator=generator), dataset + + +class InfiniteDataLoader(dataloader.DataLoader): + """ Dataloader that reuses workers + + Uses same syntax as vanilla DataLoader + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler)) + self.iterator = super().__iter__() + + def __len__(self): + return len(self.batch_sampler.sampler) + + def __iter__(self): + for _ in range(len(self)): + yield next(self.iterator) + def reset(self): + """Reset iterator. + This is useful when we want to modify settings of dataset while training. + """ + self.iterator = self._get_iterator() + + +class _RepeatSampler: + """ Sampler that repeats forever + + Args: + sampler (Sampler) + """ + + def __init__(self, sampler): + self.sampler = sampler + + def __iter__(self): + while True: + yield from iter(self.sampler) + + +class LoadScreenshots: + # YOLOv5 screenshot dataloader, i.e. 
`python detect.py --source "screen 0 100 100 512 256"` + def __init__(self, source, img_size=640, stride=32, auto=True, transforms=None): + # source = [screen_number left top width height] (pixels) + check_requirements('mss') + import mss + + source, *params = source.split() + self.screen, left, top, width, height = 0, None, None, None, None # default to full screen 0 + if len(params) == 1: + self.screen = int(params[0]) + elif len(params) == 4: + left, top, width, height = (int(x) for x in params) + elif len(params) == 5: + self.screen, left, top, width, height = (int(x) for x in params) + self.img_size = img_size + self.stride = stride + self.transforms = transforms + self.auto = auto + self.mode = 'stream' + self.frame = 0 + self.sct = mss.mss() + + # Parse monitor shape + monitor = self.sct.monitors[self.screen] + self.top = monitor["top"] if top is None else (monitor["top"] + top) + self.left = monitor["left"] if left is None else (monitor["left"] + left) + self.width = width or monitor["width"] + self.height = height or monitor["height"] + self.monitor = {"left": self.left, "top": self.top, "width": self.width, "height": self.height} + + def __iter__(self): + return self + + def __next__(self): + # mss screen capture: get raw pixels from the screen as np array + im0 = np.array(self.sct.grab(self.monitor))[:, :, :3] # [:, :, :3] BGRA to BGR + s = f"screen {self.screen} (LTWH): {self.left},{self.top},{self.width},{self.height}: " + + if self.transforms: + im = self.transforms(im0) # transforms + else: + im = letterbox(im0, self.img_size, stride=self.stride, auto=self.auto)[0] # padded resize + im = im.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB + im = np.ascontiguousarray(im) # contiguous + self.frame += 1 + return str(self.screen), im, im0, None, s # screen, img, original img, im0s, s + + +class LoadImages: + # YOLOv5 image/video dataloader, i.e. `python detect.py --source image.jpg/vid.mp4` + def __init__(self, path, img_size=640, stride=32, auto=True, transforms=None, vid_stride=1): + files = [] + for p in sorted(path) if isinstance(path, (list, tuple)) else [path]: + p = str(Path(p).resolve()) + if '*' in p: + files.extend(sorted(glob.glob(p, recursive=True))) # glob + elif os.path.isdir(p): + files.extend(sorted(glob.glob(os.path.join(p, '*.*')))) # dir + elif os.path.isfile(p): + files.append(p) # files + else: + raise FileNotFoundError(f'{p} does not exist') + + images = [x for x in files if x.split('.')[-1].lower() in IMG_FORMATS] + videos = [x for x in files if x.split('.')[-1].lower() in VID_FORMATS] + ni, nv = len(images), len(videos) + + self.img_size = img_size + self.stride = stride + self.files = images + videos + self.nf = ni + nv # number of files + self.video_flag = [False] * ni + [True] * nv + self.mode = 'image' + self.auto = auto + self.transforms = transforms # optional + self.vid_stride = vid_stride # video frame-rate stride + if any(videos): + self._new_video(videos[0]) # new video + else: + self.cap = None + assert self.nf > 0, f'No images or videos found in {p}. 
' \ + f'Supported formats are:\nimages: {IMG_FORMATS}\nvideos: {VID_FORMATS}' + + def __iter__(self): + self.count = 0 + return self + + def __next__(self): + if self.count == self.nf: + raise StopIteration + path = self.files[self.count] + + if self.video_flag[self.count]: + # Read video + self.mode = 'video' + for _ in range(self.vid_stride): + self.cap.grab() + ret_val, im0 = self.cap.retrieve() + while not ret_val: + self.count += 1 + self.cap.release() + if self.count == self.nf: # last video + raise StopIteration + path = self.files[self.count] + self._new_video(path) + ret_val, im0 = self.cap.read() + + self.frame += 1 + # im0 = self._cv2_rotate(im0) # for use if cv2 autorotation is False + s = f'video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: ' + + else: + # Read image + self.count += 1 + im0 = cv2.imread(path) # BGR + assert im0 is not None, f'Image Not Found {path}' + s = f'image {self.count}/{self.nf} {path}: ' + + if self.transforms: + im = self.transforms(im0) # transforms + else: + im = letterbox(im0, self.img_size, stride=self.stride, auto=self.auto)[0] # padded resize + im = im.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB + im = np.ascontiguousarray(im) # contiguous + + return path, im, im0, self.cap, s + + def _new_video(self, path): + # Create a new video capture object + self.frame = 0 + self.cap = cv2.VideoCapture(path) + self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT) / self.vid_stride) + self.orientation = int(self.cap.get(cv2.CAP_PROP_ORIENTATION_META)) # rotation degrees + # self.cap.set(cv2.CAP_PROP_ORIENTATION_AUTO, 0) # disable https://github.com/ultralytics/yolov5/issues/8493 + + def _cv2_rotate(self, im): + # Rotate a cv2 video manually + if self.orientation == 0: + return cv2.rotate(im, cv2.ROTATE_90_CLOCKWISE) + elif self.orientation == 180: + return cv2.rotate(im, cv2.ROTATE_90_COUNTERCLOCKWISE) + elif self.orientation == 90: + return cv2.rotate(im, cv2.ROTATE_180) + return im + + def __len__(self): + return self.nf # number of files + + +class LoadStreams: + # YOLOv5 streamloader, i.e. `python detect.py --source 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP streams` + def __init__(self, sources='streams.txt', img_size=640, stride=32, auto=True, transforms=None, vid_stride=1): + torch.backends.cudnn.benchmark = True # faster for fixed-size inference + self.mode = 'stream' + self.img_size = img_size + self.stride = stride + self.vid_stride = vid_stride # video frame-rate stride + sources = Path(sources).read_text().rsplit() if os.path.isfile(sources) else [sources] + n = len(sources) + self.sources = [clean_str(x) for x in sources] # clean source names for later + self.imgs, self.fps, self.frames, self.threads = [None] * n, [0] * n, [0] * n, [None] * n + for i, s in enumerate(sources): # index, source + # Start thread to read frames from video stream + st = f'{i + 1}/{n}: {s}... ' + if urlparse(s).hostname in ('www.youtube.com', 'youtube.com', 'youtu.be'): # if source is YouTube video + # YouTube format i.e. 'https://www.youtube.com/watch?v=Zgi9g1ksQHc' or 'https://youtu.be/Zgi9g1ksQHc' + check_requirements(('pafy', 'youtube_dl==2020.12.2')) + import pafy + s = pafy.new(s).getbest(preftype="mp4").url # YouTube URL + s = eval(s) if s.isnumeric() else s # i.e. s = '0' local webcam + if s == 0: + assert not is_colab(), '--source 0 webcam unsupported on Colab. Rerun command in a local environment.' + assert not is_kaggle(), '--source 0 webcam unsupported on Kaggle. Rerun command in a local environment.' 
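+            # Open the capture and read its metadata; CAP_PROP_FPS can return 0 or
+            # NaN for some sources, hence the fallbacks below (infinite frame count
+            # for live streams, 30 FPS default).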
+ cap = cv2.VideoCapture(s) + assert cap.isOpened(), f'{st}Failed to open {s}' + w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) + h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) + fps = cap.get(cv2.CAP_PROP_FPS) # warning: may return 0 or nan + self.frames[i] = max(int(cap.get(cv2.CAP_PROP_FRAME_COUNT)), 0) or float('inf') # infinite stream fallback + self.fps[i] = max((fps if math.isfinite(fps) else 0) % 100, 0) or 30 # 30 FPS fallback + + _, self.imgs[i] = cap.read() # guarantee first frame + self.threads[i] = Thread(target=self.update, args=([i, cap, s]), daemon=True) + LOGGER.info(f"{st} Success ({self.frames[i]} frames {w}x{h} at {self.fps[i]:.2f} FPS)") + self.threads[i].start() + LOGGER.info('') # newline + + # check for common shapes + s = np.stack([letterbox(x, img_size, stride=stride, auto=auto)[0].shape for x in self.imgs]) + self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal + self.auto = auto and self.rect + self.transforms = transforms # optional + if not self.rect: + LOGGER.warning('WARNING ⚠️ Stream shapes differ. For optimal performance supply similarly-shaped streams.') + + def update(self, i, cap, stream): + # Read stream `i` frames in daemon thread + n, f = 0, self.frames[i] # frame number, frame array + while cap.isOpened() and n < f: + n += 1 + cap.grab() # .read() = .grab() followed by .retrieve() + if n % self.vid_stride == 0: + success, im = cap.retrieve() + if success: + self.imgs[i] = im + else: + LOGGER.warning('WARNING ⚠️ Video stream unresponsive, please check your IP camera connection.') + self.imgs[i] = np.zeros_like(self.imgs[i]) + cap.open(stream) # re-open stream if signal was lost + time.sleep(0.0) # wait time + + def __iter__(self): + self.count = -1 + return self + + def __next__(self): + self.count += 1 + if not all(x.is_alive() for x in self.threads) or cv2.waitKey(1) == ord('q'): # q to quit + cv2.destroyAllWindows() + raise StopIteration + + im0 = self.imgs.copy() + if self.transforms: + im = np.stack([self.transforms(x) for x in im0]) # transforms + else: + im = np.stack([letterbox(x, self.img_size, stride=self.stride, auto=self.auto)[0] for x in im0]) # resize + im = im[..., ::-1].transpose((0, 3, 1, 2)) # BGR to RGB, BHWC to BCHW + im = np.ascontiguousarray(im) # contiguous + + return self.sources, im, im0, None, '' + + def __len__(self): + return len(self.sources) # 1E12 frames = 32 streams at 30 FPS for 30 years + + +def img2label_paths(img_paths): + # Define label paths as a function of image paths + sa, sb = f'{os.sep}images{os.sep}', f'{os.sep}labels{os.sep}' # /images/, /labels/ substrings + return [sb.join(x.rsplit(sa, 1)).rsplit('.', 1)[0] + '.txt' for x in img_paths] + + +class LoadImagesAndLabels(Dataset): + # YOLOv5 train_loader/val_loader, loads images and labels for training and validation + cache_version = 0.6 # dataset labels *.cache version + rand_interp_methods = [cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_CUBIC, cv2.INTER_AREA, cv2.INTER_LANCZOS4] + + def __init__(self, + path, + img_size=640, + batch_size=16, + augment=False, + hyp=None, + rect=False, + image_weights=False, + cache_images=False, + single_cls=False, + stride=32, + pad=0.0, + min_items=0, + prefix=''): + self.img_size = img_size + self.augment = augment + self.hyp = hyp + self.image_weights = image_weights + self.rect = False if image_weights else rect + self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training) + self.mosaic_border = [-img_size // 2, -img_size // 2] + self.stride = 
stride + self.path = path + self.albumentations = Albumentations(size=img_size) if augment else None + try: + f = [] # image files + for p in path if isinstance(path, list) else [path]: + p = Path(p) # os-agnostic + if p.is_dir(): # dir + f += glob.glob(str(p / '**' / '*.*'), recursive=True) + # f = list(p.rglob('*.*')) # pathlib + elif p.is_file(): # file + with open(p) as t: + t = t.read().strip().splitlines() + parent = str(p.parent) + os.sep + f += [x.replace('./', parent, 1) if x.startswith('./') else x for x in t] # to global path + # f += [p.parent / x.lstrip(os.sep) for x in t] # to global path (pathlib) + else: + raise FileNotFoundError(f'{prefix}{p} does not exist') + self.im_files = sorted(x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in IMG_FORMATS) + # self.img_files = sorted([x for x in f if x.suffix[1:].lower() in IMG_FORMATS]) # pathlib + assert self.im_files, f'{prefix}No images found' + except Exception as e: + raise Exception(f'{prefix}Error loading data from {path}: {e}\n{HELP_URL}') from e + + # Check cache + self.label_files = img2label_paths(self.im_files) # labels + cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache') + try: + cache, exists = np.load(cache_path, allow_pickle=True).item(), True # load dict + assert cache['version'] == self.cache_version # matches current version + assert cache['hash'] == get_hash(self.label_files + self.im_files) # identical hash + except Exception: + cache, exists = self.cache_labels(cache_path, prefix), False # run cache ops + + # Display cache + nf, nm, ne, nc, n = cache.pop('results') # found, missing, empty, corrupt, total + if exists and LOCAL_RANK in {-1, 0}: + d = f"Scanning {cache_path}... {nf} images, {nm + ne} backgrounds, {nc} corrupt" + tqdm(None, desc=prefix + d, total=n, initial=n, bar_format=TQDM_BAR_FORMAT) # display cache results + if cache['msgs']: + LOGGER.info('\n'.join(cache['msgs'])) # display warnings + assert nf > 0 or not augment, f'{prefix}No labels found in {cache_path}, can not start training. {HELP_URL}' + + # Read cache + [cache.pop(k) for k in ('hash', 'version', 'msgs')] # remove items + labels, shapes, self.segments = zip(*cache.values()) + nl = len(np.concatenate(labels, 0)) # number of labels + assert nl > 0 or not augment, f'{prefix}All labels empty in {cache_path}, can not start training. 
{HELP_URL}' + self.labels = list(labels) + self.shapes = np.array(shapes) + self.im_files = list(cache.keys()) # update + self.label_files = img2label_paths(cache.keys()) # update + + # Filter images + if min_items: + include = np.array([len(x) >= min_items for x in self.labels]).nonzero()[0].astype(int) + LOGGER.info(f'{prefix}{n - len(include)}/{n} images filtered from dataset') + self.im_files = [self.im_files[i] for i in include] + self.label_files = [self.label_files[i] for i in include] + self.labels = [self.labels[i] for i in include] + self.segments = [self.segments[i] for i in include] + self.shapes = self.shapes[include] # wh + + # Create indices + n = len(self.shapes) # number of images + bi = np.floor(np.arange(n) / batch_size).astype(int) # batch index + nb = bi[-1] + 1 # number of batches + self.batch = bi # batch index of image + self.n = n + self.indices = range(n) + + # Update labels + include_class = [] # filter labels to include only these classes (optional) + include_class_array = np.array(include_class).reshape(1, -1) + for i, (label, segment) in enumerate(zip(self.labels, self.segments)): + if include_class: + j = (label[:, 0:1] == include_class_array).any(1) + self.labels[i] = label[j] + if segment: + self.segments[i] = segment[j] + if single_cls: # single-class training, merge all classes into 0 + self.labels[i][:, 0] = 0 + + # Rectangular Training + if self.rect: + # Sort by aspect ratio + s = self.shapes # wh + ar = s[:, 1] / s[:, 0] # aspect ratio + irect = ar.argsort() + self.im_files = [self.im_files[i] for i in irect] + self.label_files = [self.label_files[i] for i in irect] + self.labels = [self.labels[i] for i in irect] + self.segments = [self.segments[i] for i in irect] + self.shapes = s[irect] # wh + ar = ar[irect] + + # Set training image shapes + shapes = [[1, 1]] * nb + for i in range(nb): + ari = ar[bi == i] + mini, maxi = ari.min(), ari.max() + if maxi < 1: + shapes[i] = [maxi, 1] + elif mini > 1: + shapes[i] = [1, 1 / mini] + + self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(int) * stride + + # Cache images into RAM/disk for faster training + if cache_images == 'ram' and not self.check_cache_ram(prefix=prefix): + cache_images = False + self.ims = [None] * n + self.im_hw0, self.im_hw = [None] * n, [None] * n + self.npy_files = [Path(f).with_suffix('.npy') for f in self.im_files] + + if cache_images: + b, gb = 0, 1 << 30 # bytes of cached images, bytes per gigabytes + fcn = self.cache_images_to_disk if cache_images == 'disk' else self.load_image + results = ThreadPool(NUM_THREADS).imap(fcn, range(n)) + pbar = tqdm(enumerate(results), total=n, bar_format=TQDM_BAR_FORMAT, disable=LOCAL_RANK > 0) + for i, x in pbar: + if cache_images == 'disk': + b += self.npy_files[i].stat().st_size + else: # 'ram' + self.ims[i], self.im_hw0[i], self.im_hw[i] = x # im, hw_orig, hw_resized = load_image(self, i) + b += self.ims[i].nbytes + pbar.desc = f'{prefix}Caching images ({b / gb:.1f}GB {cache_images})' + pbar.close() + + self.transforms = self.build_transforms(hyp=hyp) + self.ni = len(self.labels) # number of images + self.buffer = [] + self.max_buffer_length = min((n, batch_size * 8, 1000)) if self.augment else 0 + + def check_cache_ram(self, safety_margin=0.1, prefix=''): + # Check image caching requirements vs available memory + b, gb = 0, 1 << 30 # bytes of cached images, bytes per gigabytes + n = min(self.n, 30) # extrapolate from 30 random images + for _ in range(n): + im = cv2.imread(random.choice(self.im_files)) # sample image + 
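+            # Each sampled image is scaled to the training size before measuring its
+            # footprint: e.g. a 1280x960 image at img_size 640 contributes
+            # nbytes * (640 / 1280) ** 2. The 30-image average is extrapolated to the
+            # full dataset below.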
+            ratio = self.img_size / max(im.shape[0], im.shape[1])  # ratio to max(h, w)
+            b += im.nbytes * ratio ** 2
+        mem_required = b * self.n / n  # GB required to cache dataset into RAM
+        mem = psutil.virtual_memory()
+        cache = mem_required * (1 + safety_margin) < mem.available  # to cache or not to cache, that is the question
+        if not cache:
+            LOGGER.info(f"{prefix}{mem_required / gb:.1f}GB RAM required, "
+                        f"{mem.available / gb:.1f}/{mem.total / gb:.1f}GB available, "
+                        f"{'caching images ✅' if cache else 'not caching images ⚠️'}")
+        return cache
+
+    def cache_labels(self, path=Path('./labels.cache'), prefix=''):
+        # Cache dataset labels, check images and read shapes
+        x = {}  # dict
+        nm, nf, ne, nc, msgs = 0, 0, 0, 0, []  # number missing, found, empty, corrupt, messages
+        desc = f"{prefix}Scanning {path.parent / path.stem}..."
+        with Pool(NUM_THREADS) as pool:
+            pbar = tqdm(pool.imap(verify_image_label, zip(self.im_files, self.label_files, repeat(prefix))),
+                        desc=desc,
+                        total=len(self.im_files),
+                        bar_format=TQDM_BAR_FORMAT)
+            for im_file, lb, shape, segments, nm_f, nf_f, ne_f, nc_f, msg in pbar:
+                nm += nm_f
+                nf += nf_f
+                ne += ne_f
+                nc += nc_f
+                if im_file:
+                    x[im_file] = [lb, shape, segments]
+                if msg:
+                    msgs.append(msg)
+                pbar.desc = f"{desc} {nf} images, {nm + ne} backgrounds, {nc} corrupt"
+
+        pbar.close()
+        if msgs:
+            LOGGER.info('\n'.join(msgs))
+        if nf == 0:
+            LOGGER.warning(f'{prefix}WARNING ⚠️ No labels found in {path}. {HELP_URL}')
+        x['hash'] = get_hash(self.label_files + self.im_files)
+        x['results'] = nf, nm, ne, nc, len(self.im_files)
+        x['msgs'] = msgs  # warnings
+        x['version'] = self.cache_version  # cache version
+        try:
+            np.save(path, x)  # save cache for next time
+            path.with_suffix('.cache.npy').rename(path)  # remove .npy suffix
+            LOGGER.info(f'{prefix}New cache created: {path}')
+        except Exception as e:
+            LOGGER.warning(f'{prefix}WARNING ⚠️ Cache directory {path.parent} is not writeable: {e}')  # not writeable
+        return x
+
+    def __len__(self):
+        return len(self.im_files)
+
+    def get_image_and_label(self, index):
+        img, (h0, w0), (h, w) = self.load_image(index)
+        bboxes = self.labels[index].copy()
+        segments = self.segments[index].copy()
+        return {'img': img,
+                'im_file': self.im_files[index],
+                'cls': bboxes[:, 0],
+                'instances': Instances(bboxes[:, 1:], segments, bbox_format='xywh', normalized=True),
+                'ori_shape': (h0, w0),
+                'resized_shape': (h, w)}
+
+    def build_transforms(self, hyp=None):
+        """Builds and appends transforms to the list."""
+        if self.augment:
+            hyp['mosaic'] = hyp['mosaic'] if self.augment and not self.rect else 0.0
+            hyp['mixup'] = hyp['mixup'] if self.augment and not self.rect else 0.0
+            transforms = v9_transforms(self, self.img_size, hyp)
+        else:
+            transforms = Compose([LetterBox(new_shape=(self.img_size, self.img_size), scaleup=False)])
+
+        # shapes = (h0, w0), ((h / h0, w / w0), pad)  # for COCO mAP rescaling
+        transforms.append(
+            Format(bbox_format='xywh',
+                   normalize=True,
+                   batch_idx=True))
+        return transforms
+
+    def __getitem__(self, index):
+        index = self.indices[index]  # linear, shuffled, or image_weights
+        data = self.get_image_and_label(index)
+        data['ratio_pad'] = (data['resized_shape'][0] / data['ori_shape'][0],
+                             data['resized_shape'][1] / data['ori_shape'][1])  # for evaluation
+        if self.rect:
+            data['rect_shape'] = self.batch_shapes[self.batch[index]]
+
+        data = self.transforms(data)
+        labels_out = torch.cat([data['batch_idx'].view(-1, 1),
+                                data['cls'].view(-1, 1),
+                                data['bboxes']], dim=1)
+        if data.get('ratio_pad') is None:
+            return data['img'], labels_out, data['im_file'], (data['ori_shape'], None)
+        return data['img'], labels_out, data['im_file'], (data['ori_shape'], data['ratio_pad'])
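+
+    # Example (sketch): consuming the modified loader; the path, hyp and sizes are
+    # illustrative. create_dataloader() is defined earlier in this file.
+    #   loader, dataset = create_dataloader('../datasets/polyp/images/train', imgsz=640,
+    #                                       batch_size=16, stride=32, hyp=hyp, augment=True)
+    #   for imgs, targets, paths, shapes in loader:
+    #       # imgs: (B, 3, 640, 640) uint8; targets: (N, 6) = (batch_idx, cls, xywh normalized)
+    #       break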
+
+    def load_image(self, i):
+        # Loads 1 image from dataset index 'i', returns (im, original hw, resized hw)
+        im, f, fn = self.ims[i], self.im_files[i], self.npy_files[i]
+        if im is None:  # not cached in RAM
+            if fn.exists():  # load npy
+                im = np.load(fn)
+            else:  # read image
+                im = cv2.imread(f)  # BGR
+                assert im is not None, f'Image Not Found {f}'
+            h0, w0 = im.shape[:2]  # orig hw
+            r = self.img_size / max(h0, w0)  # ratio
+            if r != 1:  # if sizes are not equal
+                interp = cv2.INTER_LINEAR if (self.augment or r > 1) else cv2.INTER_AREA
+                im = cv2.resize(im, (int(w0 * r), int(h0 * r)), interpolation=interp)
+            if self.augment:
+                self.ims[i], self.im_hw0[i], self.im_hw[i] = im, (h0, w0), im.shape[:2]  # im, hw_original, hw_resized
+                self.buffer.append(i)
+                if len(self.buffer) >= self.max_buffer_length:
+                    j = self.buffer.pop(0)
+                    self.ims[j], self.im_hw0[j], self.im_hw[j] = None, None, None
+            return im, (h0, w0),
im.shape[:2] # im, hw_original, hw_resized + return self.ims[i], self.im_hw0[i], self.im_hw[i] # im, hw_original, hw_resized + + def cache_images_to_disk(self, i): + # Saves an image as an *.npy file for faster loading + f = self.npy_files[i] + if not f.exists(): + np.save(f.as_posix(), cv2.imread(self.im_files[i])) + + def load_mosaic(self, index): + # YOLOv5 4-mosaic loader. Loads 1 image + 3 random images into a 4-image mosaic + labels4, segments4 = [], [] + s = self.img_size + yc, xc = (int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border) # mosaic center x, y + indices = [index] + random.choices(self.indices, k=3) # 3 additional image indices + random.shuffle(indices) + for i, index in enumerate(indices): + # Load image + img, _, (h, w) = self.load_image(index) + + # place img in img4 + if i == 0: # top left + img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles + x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image) + x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image) + elif i == 1: # top right + x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc + x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h + elif i == 2: # bottom left + x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h) + x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h) + elif i == 3: # bottom right + x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h) + x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h) + + img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax] + padw = x1a - x1b + padh = y1a - y1b + + # Labels + labels, segments = self.labels[index].copy(), self.segments[index].copy() + #print(labels) + if labels.size: + labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh) # normalized xywh to pixel xyxy format + segments = [xyn2xy(x, w, h, padw, padh) for x in segments] + labels4.append(labels) + segments4.extend(segments) + + # Concat/clip labels + labels4 = np.concatenate(labels4, 0) + for x in (labels4[:, 1:], *segments4): + np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective() + # img4, labels4 = replicate(img4, labels4) # replicate + + # Augment + img4, labels4, segments4 = copy_paste(img4, labels4, segments4, p=self.hyp['copy_paste']) + img4, labels4 = random_perspective(img4, + labels4, + segments4, + degrees=self.hyp['degrees'], + translate=self.hyp['translate'], + scale=self.hyp['scale'], + shear=self.hyp['shear'], + perspective=self.hyp['perspective'], + border=self.mosaic_border) # border to remove + + return img4, labels4 + + def load_mosaic9(self, index): + # YOLOv5 9-mosaic loader. 
Loads 1 image + 8 random images into a 9-image mosaic + labels9, segments9 = [], [] + s = self.img_size + indices = [index] + random.choices(self.indices, k=8) # 8 additional image indices + random.shuffle(indices) + hp, wp = -1, -1 # height, width previous + for i, index in enumerate(indices): + # Load image + img, _, (h, w) = self.load_image(index) + + # place img in img9 + if i == 0: # center + img9 = np.full((s * 3, s * 3, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles + h0, w0 = h, w + c = s, s, s + w, s + h # xmin, ymin, xmax, ymax (base) coordinates + elif i == 1: # top + c = s, s - h, s + w, s + elif i == 2: # top right + c = s + wp, s - h, s + wp + w, s + elif i == 3: # right + c = s + w0, s, s + w0 + w, s + h + elif i == 4: # bottom right + c = s + w0, s + hp, s + w0 + w, s + hp + h + elif i == 5: # bottom + c = s + w0 - w, s + h0, s + w0, s + h0 + h + elif i == 6: # bottom left + c = s + w0 - wp - w, s + h0, s + w0 - wp, s + h0 + h + elif i == 7: # left + c = s - w, s + h0 - h, s, s + h0 + elif i == 8: # top left + c = s - w, s + h0 - hp - h, s, s + h0 - hp + + padx, pady = c[:2] + x1, y1, x2, y2 = (max(x, 0) for x in c) # allocate coords + + # Labels + labels, segments = self.labels[index].copy(), self.segments[index].copy() + if labels.size: + labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padx, pady) # normalized xywh to pixel xyxy format + segments = [xyn2xy(x, w, h, padx, pady) for x in segments] + labels9.append(labels) + segments9.extend(segments) + + # Image + img9[y1:y2, x1:x2] = img[y1 - pady:, x1 - padx:] # img9[ymin:ymax, xmin:xmax] + hp, wp = h, w # height, width previous + + # Offset + yc, xc = (int(random.uniform(0, s)) for _ in self.mosaic_border) # mosaic center x, y + img9 = img9[yc:yc + 2 * s, xc:xc + 2 * s] + + # Concat/clip labels + labels9 = np.concatenate(labels9, 0) + labels9[:, [1, 3]] -= xc + labels9[:, [2, 4]] -= yc + c = np.array([xc, yc]) # centers + segments9 = [x - c for x in segments9] + + for x in (labels9[:, 1:], *segments9): + np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective() + # img9, labels9 = replicate(img9, labels9) # replicate + + # Augment + img9, labels9, segments9 = copy_paste(img9, labels9, segments9, p=self.hyp['copy_paste']) + img9, labels9 = random_perspective(img9, + labels9, + segments9, + degrees=self.hyp['degrees'], + translate=self.hyp['translate'], + scale=self.hyp['scale'], + shear=self.hyp['shear'], + perspective=self.hyp['perspective'], + border=self.mosaic_border) # border to remove + + return img9, labels9 + + @staticmethod + def collate_fn(batch): + im, label, path, shapes = zip(*batch) # transposed + for i, lb in enumerate(label): + lb[:, 0] = i # add target image index for build_targets() + return torch.stack(im, 0), torch.cat(label, 0), path, shapes + + @staticmethod + def collate_fn4(batch): + im, label, path, shapes = zip(*batch) # transposed + n = len(shapes) // 4 + im4, label4, path4, shapes4 = [], [], path[:n], shapes[:n] + + ho = torch.tensor([[0.0, 0, 0, 1, 0, 0]]) + wo = torch.tensor([[0.0, 0, 1, 0, 0, 0]]) + s = torch.tensor([[1, 1, 0.5, 0.5, 0.5, 0.5]]) # scale + for i in range(n): # zidane torch.zeros(16,3,720,1280) # BCHW + i *= 4 + if random.random() < 0.5: + im1 = F.interpolate(im[i].unsqueeze(0).float(), scale_factor=2.0, mode='bilinear', + align_corners=False)[0].type(im[i].type()) + lb = label[i] + else: + im1 = torch.cat((torch.cat((im[i], im[i + 1]), 1), torch.cat((im[i + 2], im[i + 3]), 1)), 2) + lb = torch.cat((label[i], label[i + 1] + ho, label[i + 2] + wo, 
label[i + 3] + ho + wo), 0) * s + im4.append(im1) + label4.append(lb) + + for i, lb in enumerate(label4): + lb[:, 0] = i # add target image index for build_targets() + + return torch.stack(im4, 0), torch.cat(label4, 0), path4, shapes4 + + +# Ancillary functions -------------------------------------------------------------------------------------------------- +def flatten_recursive(path=DATASETS_DIR / 'coco128'): + # Flatten a recursive directory by bringing all files to top level + new_path = Path(f'{str(path)}_flat') + if os.path.exists(new_path): + shutil.rmtree(new_path) # delete output folder + os.makedirs(new_path) # make new output folder + for file in tqdm(glob.glob(f'{str(Path(path))}/**/*.*', recursive=True)): + shutil.copyfile(file, new_path / Path(file).name) + + +def extract_boxes(path=DATASETS_DIR / 'coco128'): # from utils.dataloaders import *; extract_boxes() + # Convert detection dataset into classification dataset, with one directory per class + path = Path(path) # images dir + shutil.rmtree(path / 'classification') if (path / 'classification').is_dir() else None # remove existing + files = list(path.rglob('*.*')) + n = len(files) # number of files + for im_file in tqdm(files, total=n): + if im_file.suffix[1:] in IMG_FORMATS: + # image + im = cv2.imread(str(im_file))[..., ::-1] # BGR to RGB + h, w = im.shape[:2] + + # labels + lb_file = Path(img2label_paths([str(im_file)])[0]) + if Path(lb_file).exists(): + with open(lb_file) as f: + lb = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32) # labels + + for j, x in enumerate(lb): + c = int(x[0]) # class + f = (path / 'classifier') / f'{c}' / f'{path.stem}_{im_file.stem}_{j}.jpg' # new filename + if not f.parent.is_dir(): + f.parent.mkdir(parents=True) + + b = x[1:] * [w, h, w, h] # box + # b[2:] = b[2:].max() # rectangle to square + b[2:] = b[2:] * 1.2 + 3 # pad + b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(int) + + b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image + b[[1, 3]] = np.clip(b[[1, 3]], 0, h) + assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}' + + +def autosplit(path=DATASETS_DIR / 'coco128/images', weights=(0.9, 0.1, 0.0), annotated_only=False): + """ Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files + Usage: from utils.dataloaders import *; autosplit() + Arguments + path: Path to images directory + weights: Train, val, test weights (list, tuple) + annotated_only: Only use images with an annotated txt file + """ + path = Path(path) # images dir + files = sorted(x for x in path.rglob('*.*') if x.suffix[1:].lower() in IMG_FORMATS) # image files only + n = len(files) # number of files + random.seed(0) # for reproducibility + indices = random.choices([0, 1, 2], weights=weights, k=n) # assign each image to a split + + txt = ['autosplit_train.txt', 'autosplit_val.txt', 'autosplit_test.txt'] # 3 txt files + for x in txt: + if (path.parent / x).exists(): + (path.parent / x).unlink() # remove existing + + print(f'Autosplitting images from {path}' + ', using *.txt labeled images only' * annotated_only) + for i, img in tqdm(zip(indices, files), total=n): + if not annotated_only or Path(img2label_paths([str(img)])[0]).exists(): # check label + with open(path.parent / txt[i], 'a') as f: + f.write(f'./{img.relative_to(path.parent).as_posix()}' + '\n') # add image to txt file + + +def verify_image_label(args): + # Verify one image-label pair + im_file, lb_file, prefix = args + nm, nf, ne, nc, msg, segments = 0, 0, 0, 0, 
'', [] # number (missing, found, empty, corrupt), message, segments + try: + # verify images + im = Image.open(im_file) + im.verify() # PIL verify + shape = exif_size(im) # image size + assert (shape[0] > 9) & (shape[1] > 9), f'image size {shape} <10 pixels' + assert im.format.lower() in IMG_FORMATS, f'invalid image format {im.format}' + if im.format.lower() in ('jpg', 'jpeg'): + with open(im_file, 'rb') as f: + f.seek(-2, 2) + if f.read() != b'\xff\xd9': # corrupt JPEG + ImageOps.exif_transpose(Image.open(im_file)).save(im_file, 'JPEG', subsampling=0, quality=100) + msg = f'{prefix}WARNING ⚠️ {im_file}: corrupt JPEG restored and saved' + + # verify labels + if os.path.isfile(lb_file): + nf = 1 # label found + with open(lb_file) as f: + lb = [x.split() for x in f.read().strip().splitlines() if len(x)] + if any(len(x) > 6 for x in lb): # is segment + classes = np.array([x[0] for x in lb], dtype=np.float32) + segments = [np.array(x[1:], dtype=np.float32).reshape(-1, 2) for x in lb] # (cls, xy1...) + lb = np.concatenate((classes.reshape(-1, 1), segments2boxes(segments)), 1) # (cls, xywh) + lb = np.array(lb, dtype=np.float32) + nl = len(lb) + if nl: + assert lb.shape[1] == 5, f'labels require 5 columns, {lb.shape[1]} columns detected' + assert (lb >= 0).all(), f'negative label values {lb[lb < 0]}' + assert (lb[:, 1:] <= 1).all(), f'non-normalized or out of bounds coordinates {lb[:, 1:][lb[:, 1:] > 1]}' + _, i = np.unique(lb, axis=0, return_index=True) + if len(i) < nl: # duplicate row check + lb = lb[i] # remove duplicates + if segments: + segments = [segments[x] for x in i] + msg = f'{prefix}WARNING ⚠️ {im_file}: {nl - len(i)} duplicate labels removed' + else: + ne = 1 # label empty + lb = np.zeros((0, 5), dtype=np.float32) + else: + nm = 1 # label missing + lb = np.zeros((0, 5), dtype=np.float32) + return im_file, lb, shape, segments, nm, nf, ne, nc, msg + except Exception as e: + nc = 1 + msg = f'{prefix}WARNING ⚠️ {im_file}: ignoring corrupt image/label: {e}' + return [None, None, None, None, nm, nf, ne, nc, msg] + + +class HUBDatasetStats(): + """ Class for generating HUB dataset JSON and `-hub` dataset directory + + Arguments + path: Path to data.yaml or data.zip (with data.yaml inside data.zip) + autodownload: Attempt to download dataset if not found locally + + Usage + from utils.dataloaders import HUBDatasetStats + stats = HUBDatasetStats('coco128.yaml', autodownload=True) # usage 1 + stats = HUBDatasetStats('path/to/coco128.zip') # usage 2 + stats.get_json(save=False) + stats.process_images() + """ + + def __init__(self, path='coco128.yaml', autodownload=False): + # Initialize class + zipped, data_dir, yaml_path = self._unzip(Path(path)) + try: + with open(check_yaml(yaml_path), errors='ignore') as f: + data = yaml.safe_load(f) # data dict + if zipped: + data['path'] = data_dir + except Exception as e: + raise Exception("error/HUB/dataset_stats/yaml_load") from e + + check_dataset(data, autodownload) # download dataset if missing + self.hub_dir = Path(data['path'] + '-hub') + self.im_dir = self.hub_dir / 'images' + self.im_dir.mkdir(parents=True, exist_ok=True) # makes /images + self.stats = {'nc': data['nc'], 'names': list(data['names'].values())} # statistics dictionary + self.data = data + + @staticmethod + def _find_yaml(dir): + # Return data.yaml file + files = list(dir.glob('*.yaml')) or list(dir.rglob('*.yaml')) # try root level first and then recursive + assert files, f'No *.yaml file found in {dir}' + if len(files) > 1: + files = [f for f in files if f.stem == dir.stem] # 
prefer *.yaml files that match dir name + assert files, f'Multiple *.yaml files found in {dir}, only 1 *.yaml file allowed' + assert len(files) == 1, f'Multiple *.yaml files found: {files}, only 1 *.yaml file allowed in {dir}' + return files[0] + + def _unzip(self, path): + # Unzip data.zip + if not str(path).endswith('.zip'): # path is data.yaml + return False, None, path + assert Path(path).is_file(), f'Error unzipping {path}, file not found' + unzip_file(path, path=path.parent) + dir = path.with_suffix('') # dataset directory == zip name + assert dir.is_dir(), f'Error unzipping {path}, {dir} not found. path/to/abc.zip MUST unzip to path/to/abc/' + return True, str(dir), self._find_yaml(dir) # zipped, data_dir, yaml_path + + def _hub_ops(self, f, max_dim=1920): + # HUB ops for 1 image 'f': resize and save at reduced quality in /dataset-hub for web/app viewing + f_new = self.im_dir / Path(f).name # dataset-hub image filename + try: # use PIL + im = Image.open(f) + r = max_dim / max(im.height, im.width) # ratio + if r < 1.0: # image too large + im = im.resize((int(im.width * r), int(im.height * r))) + im.save(f_new, 'JPEG', quality=50, optimize=True) # save + except Exception as e: # use OpenCV + LOGGER.info(f'WARNING ⚠️ HUB ops PIL failure {f}: {e}') + im = cv2.imread(f) + im_height, im_width = im.shape[:2] + r = max_dim / max(im_height, im_width) # ratio + if r < 1.0: # image too large + im = cv2.resize(im, (int(im_width * r), int(im_height * r)), interpolation=cv2.INTER_AREA) + cv2.imwrite(str(f_new), im) + + def get_json(self, save=False, verbose=False): + # Return dataset JSON for Ultralytics HUB + def _round(labels): + # Update labels to integer class and 6 decimal place floats + return [[int(c), *(round(x, 4) for x in points)] for c, *points in labels] + + for split in 'train', 'val', 'test': + if self.data.get(split) is None: + self.stats[split] = None # i.e. no test set + continue + dataset = LoadImagesAndLabels(self.data[split]) # load dataset + x = np.array([ + np.bincount(label[:, 0].astype(int), minlength=self.data['nc']) + for label in tqdm(dataset.labels, total=dataset.n, desc='Statistics')]) # shape(128x80) + self.stats[split] = { + 'instance_stats': { + 'total': int(x.sum()), + 'per_class': x.sum(0).tolist()}, + 'image_stats': { + 'total': dataset.n, + 'unlabelled': int(np.all(x == 0, 1).sum()), + 'per_class': (x > 0).sum(0).tolist()}, + 'labels': [{ + str(Path(k).name): _round(v.tolist())} for k, v in zip(dataset.im_files, dataset.labels)]} + + # Save, print and return + if save: + stats_path = self.hub_dir / 'stats.json' + print(f'Saving {stats_path.resolve()}...') + with open(stats_path, 'w') as f: + json.dump(self.stats, f) # save stats.json + if verbose: + print(json.dumps(self.stats, indent=2, sort_keys=False)) + return self.stats + + def process_images(self): + # Compress images for Ultralytics HUB + for split in 'train', 'val', 'test': + if self.data.get(split) is None: + continue + dataset = LoadImagesAndLabels(self.data[split]) # load dataset + desc = f'{split} images' + for _ in tqdm(ThreadPool(NUM_THREADS).imap(self._hub_ops, dataset.im_files), total=dataset.n, desc=desc): + pass + print(f'Done. All images saved to {self.im_dir}') + return self.im_dir + + +# Classification dataloaders ------------------------------------------------------------------------------------------- +class ClassificationDataset(torchvision.datasets.ImageFolder): + """ + YOLOv5 Classification Dataset. 
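+
+    Usage (illustrative sketch only; 'path/to/train' is a placeholder, not a real dataset)
+        from utils.dataloaders import ClassificationDataset
+        dataset = ClassificationDataset(root='path/to/train', augment=False, imgsz=224)
+        im, cls = dataset[0]  # transformed image tensor, class index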
+ Arguments + root: Dataset path + transform: torchvision transforms, used by default + album_transform: Albumentations transforms, used if installed + """ + + def __init__(self, root, augment, imgsz, cache=False): + super().__init__(root=root) + self.torch_transforms = classify_transforms(imgsz) + self.album_transforms = classify_albumentations(augment, imgsz) if augment else None + self.cache_ram = cache is True or cache == 'ram' + self.cache_disk = cache == 'disk' + self.samples = [list(x) + [Path(x[0]).with_suffix('.npy'), None] for x in self.samples] # file, index, npy, im + + def __getitem__(self, i): + f, j, fn, im = self.samples[i] # filename, index, filename.with_suffix('.npy'), image + if self.cache_ram and im is None: + im = self.samples[i][3] = cv2.imread(f) + elif self.cache_disk: + if not fn.exists(): # load npy + np.save(fn.as_posix(), cv2.imread(f)) + im = np.load(fn) + else: # read image + im = cv2.imread(f) # BGR + if self.album_transforms: + sample = self.album_transforms(image=cv2.cvtColor(im, cv2.COLOR_BGR2RGB))["image"] + else: + sample = self.torch_transforms(im) + return sample, j + + +def create_classification_dataloader(path, + imgsz=224, + batch_size=16, + augment=True, + cache=False, + rank=-1, + workers=8, + shuffle=True): + # Returns Dataloader object to be used with YOLOv5 Classifier + with torch_distributed_zero_first(rank): # init dataset *.cache only once if DDP + dataset = ClassificationDataset(root=path, imgsz=imgsz, augment=augment, cache=cache) + batch_size = min(batch_size, len(dataset)) + nd = torch.cuda.device_count() + nw = min([os.cpu_count() // max(nd, 1), batch_size if batch_size > 1 else 0, workers]) + sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle) + generator = torch.Generator() + generator.manual_seed(6148914691236517205 + RANK) + return InfiniteDataLoader(dataset, + batch_size=batch_size, + shuffle=shuffle and sampler is None, + num_workers=nw, + sampler=sampler, + pin_memory=PIN_MEMORY, + worker_init_fn=seed_worker, + generator=generator) # or DataLoader(persistent_workers=True) diff --git a/utils/.ipynb_checkpoints/general-checkpoint.py b/utils/.ipynb_checkpoints/general-checkpoint.py new file mode 100644 index 000000000..08d8b14ff --- /dev/null +++ b/utils/.ipynb_checkpoints/general-checkpoint.py @@ -0,0 +1,1193 @@ +import contextlib +import glob +import inspect +import logging +import logging.config +import math +import os +import platform +import random +import re +import signal +import sys +import time +import urllib +from copy import deepcopy +from datetime import datetime +from itertools import repeat +from multiprocessing.pool import ThreadPool +from pathlib import Path +from subprocess import check_output +from tarfile import is_tarfile +from typing import Optional +from zipfile import ZipFile, is_zipfile + +import cv2 +import IPython +import numpy as np +import pandas as pd +import pkg_resources as pkg +import torch +import torchvision +import yaml + +from utils import TryExcept, emojis +from utils.downloads import gsutil_getsize +from utils.metrics import box_iou, fitness + +FILE = Path(__file__).resolve() +ROOT = FILE.parents[1] # YOLO root directory +RANK = int(os.getenv('RANK', -1)) + +# Settings +NUM_THREADS = min(8, max(1, os.cpu_count() - 1)) # number of YOLOv5 multiprocessing threads +DATASETS_DIR = Path(os.getenv('YOLOv5_DATASETS_DIR', ROOT.parent / 'datasets')) # global datasets directory +AUTOINSTALL = str(os.getenv('YOLOv5_AUTOINSTALL', True)).lower() == 'true' # global 
auto-install mode +VERBOSE = str(os.getenv('YOLOv5_VERBOSE', True)).lower() == 'true' # global verbose mode +TQDM_BAR_FORMAT = '{l_bar}{bar:10}| {n_fmt}/{total_fmt} {elapsed}' # tqdm bar format +FONT = 'Arial.ttf' # https://ultralytics.com/assets/Arial.ttf + +torch.set_printoptions(linewidth=320, precision=5, profile='long') +np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format}) # format short g, %precision=5 +pd.options.display.max_columns = 10 +cv2.setNumThreads(0) # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader) +os.environ['NUMEXPR_MAX_THREADS'] = str(NUM_THREADS) # NumExpr max threads +os.environ['OMP_NUM_THREADS'] = '1' if platform.system() == 'darwin' else str(NUM_THREADS) # OpenMP (PyTorch and SciPy) + + +def is_ascii(s=''): + # Is string composed of all ASCII (no UTF) characters? (note str().isascii() introduced in python 3.7) + s = str(s) # convert list, tuple, None, etc. to str + return len(s.encode().decode('ascii', 'ignore')) == len(s) + + +def is_chinese(s='人工智能'): + # Is string composed of any Chinese characters? + return bool(re.search('[\u4e00-\u9fff]', str(s))) + + +def is_colab(): + # Is environment a Google Colab instance? + return 'google.colab' in sys.modules + + +def is_notebook(): + # Is environment a Jupyter notebook? Verified on Colab, Jupyterlab, Kaggle, Paperspace + ipython_type = str(type(IPython.get_ipython())) + return 'colab' in ipython_type or 'zmqshell' in ipython_type + + +def is_kaggle(): + # Is environment a Kaggle Notebook? + return os.environ.get('PWD') == '/kaggle/working' and os.environ.get('KAGGLE_URL_BASE') == 'https://www.kaggle.com' + + +def is_docker() -> bool: + """Check if the process runs inside a docker container.""" + if Path("/.dockerenv").exists(): + return True + try: # check if docker is in control groups + with open("/proc/self/cgroup") as file: + return any("docker" in line for line in file) + except OSError: + return False + + +def is_writeable(dir, test=False): + # Return True if directory has write permissions, test opening a file with write permissions if test=True + if not test: + return os.access(dir, os.W_OK) # possible issues on Windows + file = Path(dir) / 'tmp.txt' + try: + with open(file, 'w'): # open file with write permissions + pass + file.unlink() # remove file + return True + except OSError: + return False + + +LOGGING_NAME = "yolov5" + + +def set_logging(name=LOGGING_NAME, verbose=True): + # sets up logging for the given name + rank = int(os.getenv('RANK', -1)) # rank in world for Multi-GPU trainings + level = logging.INFO if verbose and rank in {-1, 0} else logging.ERROR + logging.config.dictConfig({ + "version": 1, + "disable_existing_loggers": False, + "formatters": { + name: { + "format": "%(message)s"}}, + "handlers": { + name: { + "class": "logging.StreamHandler", + "formatter": name, + "level": level,}}, + "loggers": { + name: { + "level": level, + "handlers": [name], + "propagate": False,}}}) + + +set_logging(LOGGING_NAME) # run before defining LOGGER +LOGGER = logging.getLogger(LOGGING_NAME) # define globally (used in train.py, val.py, detect.py, etc.) +if platform.system() == 'Windows': + for fn in LOGGER.info, LOGGER.warning: + setattr(LOGGER, fn.__name__, lambda x: fn(emojis(x))) # emoji safe logging + + +def user_config_dir(dir='Ultralytics', env_var='YOLOV5_CONFIG_DIR'): + # Return path of user configuration directory. Prefer environment variable if exists. Make dir if required. 
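+    # Illustrative resolution (assuming no YOLOV5_CONFIG_DIR override): Linux -> ~/.config/Ultralytics,
+    # Windows -> ~/AppData/Roaming/Ultralytics, macOS -> ~/Library/Application Support/Ultralytics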
+ env = os.getenv(env_var) + if env: + path = Path(env) # use environment variable + else: + cfg = {'Windows': 'AppData/Roaming', 'Linux': '.config', 'Darwin': 'Library/Application Support'} # 3 OS dirs + path = Path.home() / cfg.get(platform.system(), '') # OS-specific config dir + path = (path if is_writeable(path) else Path('/tmp')) / dir # GCP and AWS lambda fix, only /tmp is writeable + path.mkdir(exist_ok=True) # make if required + return path + + +CONFIG_DIR = user_config_dir() # Ultralytics settings dir + + +class Profile(contextlib.ContextDecorator): + # YOLO Profile class. Usage: @Profile() decorator or 'with Profile():' context manager + def __init__(self, t=0.0): + self.t = t + self.cuda = torch.cuda.is_available() + + def __enter__(self): + self.start = self.time() + return self + + def __exit__(self, type, value, traceback): + self.dt = self.time() - self.start # delta-time + self.t += self.dt # accumulate dt + + def time(self): + if self.cuda: + torch.cuda.synchronize() + return time.time() + + +class Timeout(contextlib.ContextDecorator): + # YOLO Timeout class. Usage: @Timeout(seconds) decorator or 'with Timeout(seconds):' context manager + def __init__(self, seconds, *, timeout_msg='', suppress_timeout_errors=True): + self.seconds = int(seconds) + self.timeout_message = timeout_msg + self.suppress = bool(suppress_timeout_errors) + + def _timeout_handler(self, signum, frame): + raise TimeoutError(self.timeout_message) + + def __enter__(self): + if platform.system() != 'Windows': # not supported on Windows + signal.signal(signal.SIGALRM, self._timeout_handler) # Set handler for SIGALRM + signal.alarm(self.seconds) # start countdown for SIGALRM to be raised + + def __exit__(self, exc_type, exc_val, exc_tb): + if platform.system() != 'Windows': + signal.alarm(0) # Cancel SIGALRM if it's scheduled + if self.suppress and exc_type is TimeoutError: # Suppress TimeoutError + return True + + +class WorkingDirectory(contextlib.ContextDecorator): + # Usage: @WorkingDirectory(dir) decorator or 'with WorkingDirectory(dir):' context manager + def __init__(self, new_dir): + self.dir = new_dir # new dir + self.cwd = Path.cwd().resolve() # current dir + + def __enter__(self): + os.chdir(self.dir) + + def __exit__(self, exc_type, exc_val, exc_tb): + os.chdir(self.cwd) + + +def methods(instance): + # Get class/instance methods + return [f for f in dir(instance) if callable(getattr(instance, f)) and not f.startswith("__")] + + +def print_args(args: Optional[dict] = None, show_file=True, show_func=False): + # Print function arguments (optional args dict) + x = inspect.currentframe().f_back # previous frame + file, _, func, _, _ = inspect.getframeinfo(x) + if args is None: # get args automatically + args, _, _, frm = inspect.getargvalues(x) + args = {k: v for k, v in frm.items() if k in args} + try: + file = Path(file).resolve().relative_to(ROOT).with_suffix('') + except ValueError: + file = Path(file).stem + s = (f'{file}: ' if show_file else '') + (f'{func}: ' if show_func else '') + LOGGER.info(colorstr(s) + ', '.join(f'{k}={v}' for k, v in args.items())) + + +def init_seeds(seed=0, deterministic=False): + # Initialize random number generator (RNG) seeds https://pytorch.org/docs/stable/notes/randomness.html + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + torch.cuda.manual_seed(seed) + torch.cuda.manual_seed_all(seed) # for Multi-GPU, exception safe + # torch.backends.cudnn.benchmark = True # AutoBatch problem https://github.com/ultralytics/yolov5/issues/9287 + if 
deterministic and check_version(torch.__version__, '1.12.0'): # https://github.com/ultralytics/yolov5/pull/8213 + torch.use_deterministic_algorithms(True) + torch.backends.cudnn.deterministic = True + os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8' + os.environ['PYTHONHASHSEED'] = str(seed) + + +def intersect_dicts(da, db, exclude=()): + # Dictionary intersection of matching keys and shapes, omitting 'exclude' keys, using da values + return {k: v for k, v in da.items() if k in db and all(x not in k for x in exclude) and v.shape == db[k].shape} + + +def get_default_args(func): + # Get func() default arguments + signature = inspect.signature(func) + return {k: v.default for k, v in signature.parameters.items() if v.default is not inspect.Parameter.empty} + + +def get_latest_run(search_dir='.'): + # Return path to most recent 'last.pt' in /runs (i.e. to --resume from) + last_list = glob.glob(f'{search_dir}/**/last*.pt', recursive=True) + return max(last_list, key=os.path.getctime) if last_list else '' + + +def file_age(path=__file__): + # Return days since last file update + dt = (datetime.now() - datetime.fromtimestamp(Path(path).stat().st_mtime)) # delta + return dt.days # + dt.seconds / 86400 # fractional days + + +def file_date(path=__file__): + # Return human-readable file modification date, i.e. '2021-3-26' + t = datetime.fromtimestamp(Path(path).stat().st_mtime) + return f'{t.year}-{t.month}-{t.day}' + + +def file_size(path): + # Return file/dir size (MB) + mb = 1 << 20 # bytes to MiB (1024 ** 2) + path = Path(path) + if path.is_file(): + return path.stat().st_size / mb + elif path.is_dir(): + return sum(f.stat().st_size for f in path.glob('**/*') if f.is_file()) / mb + else: + return 0.0 + + +def check_online(): + # Check internet connectivity + import socket + + def run_once(): + # Check once + try: + socket.create_connection(("1.1.1.1", 443), 5) # check host accessibility + return True + except OSError: + return False + + return run_once() or run_once() # check twice to increase robustness to intermittent connectivity issues + + +def git_describe(path=ROOT): # path must be a directory + # Return human-readable git description, i.e. 
v5.0-5-g3e25f1e https://git-scm.com/docs/git-describe + try: + assert (Path(path) / '.git').is_dir() + return check_output(f'git -C {path} describe --tags --long --always', shell=True).decode()[:-1] + except Exception: + return '' + + +@TryExcept() +@WorkingDirectory(ROOT) +def check_git_status(repo='WongKinYiu/yolov9', branch='main'): + # YOLO status check, recommend 'git pull' if code is out of date + url = f'https://github.com/{repo}' + msg = f', for updates see {url}' + s = colorstr('github: ') # string + assert Path('.git').exists(), s + 'skipping check (not a git repository)' + msg + assert check_online(), s + 'skipping check (offline)' + msg + + splits = re.split(pattern=r'\s', string=check_output('git remote -v', shell=True).decode()) + matches = [repo in s for s in splits] + if any(matches): + remote = splits[matches.index(True) - 1] + else: + remote = 'ultralytics' + check_output(f'git remote add {remote} {url}', shell=True) + check_output(f'git fetch {remote}', shell=True, timeout=5) # git fetch + local_branch = check_output('git rev-parse --abbrev-ref HEAD', shell=True).decode().strip() # checked out + n = int(check_output(f'git rev-list {local_branch}..{remote}/{branch} --count', shell=True)) # commits behind + if n > 0: + pull = 'git pull' if remote == 'origin' else f'git pull {remote} {branch}' + s += f"⚠️ YOLO is out of date by {n} commit{'s' * (n > 1)}. Use `{pull}` or `git clone {url}` to update." + else: + s += f'up to date with {url} ✅' + LOGGER.info(s) + + +@WorkingDirectory(ROOT) +def check_git_info(path='.'): + # YOLO git info check, return {remote, branch, commit} + check_requirements('gitpython') + import git + try: + repo = git.Repo(path) + remote = repo.remotes.origin.url.replace('.git', '') # i.e. 'https://github.com/WongKinYiu/yolov9' + commit = repo.head.commit.hexsha # i.e. '3134699c73af83aac2a481435550b968d5792c0d' + try: + branch = repo.active_branch.name # i.e. 'main' + except TypeError: # not on any branch + branch = None # i.e. 'detached HEAD' state + return {'remote': remote, 'branch': branch, 'commit': commit} + except git.exc.InvalidGitRepositoryError: # path is not a git dir + return {'remote': None, 'branch': None, 'commit': None} + + +def check_python(minimum='3.7.0'): + # Check current python version vs. required python version + check_version(platform.python_version(), minimum, name='Python ', hard=True) + + +def check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=False, hard=False, verbose=False): + # Check version vs. required version + current, minimum = (pkg.parse_version(x) for x in (current, minimum)) + result = (current == minimum) if pinned else (current >= minimum) # bool + s = f'WARNING ⚠️ {name}{minimum} is required by YOLO, but {name}{current} is currently installed' # string + if hard: + assert result, emojis(s) # assert min requirements met + if verbose and not result: + LOGGER.warning(s) + return result + + +@TryExcept() +def check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), install=True, cmds=''): + # Check installed dependencies meet YOLO requirements (pass *.txt file or list of packages or single package str) + prefix = colorstr('red', 'bold', 'requirements:') + check_python() # check python version + if isinstance(requirements, Path): # requirements.txt file + file = requirements.resolve() + assert file.exists(), f"{prefix} {file} not found, check failed." 
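+        # parse the file into 'name+specifier' strings (e.g. a 'numpy>=1.18.5' line), dropping any names in `exclude`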
+ with file.open() as f: + requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(f) if x.name not in exclude] + elif isinstance(requirements, str): + requirements = [requirements] + + s = '' + n = 0 + for r in requirements: + try: + pkg.require(r) + except (pkg.VersionConflict, pkg.DistributionNotFound): # exception if requirements not met + s += f'"{r}" ' + n += 1 + + if s and install and AUTOINSTALL: # check environment variable + LOGGER.info(f"{prefix} YOLO requirement{'s' * (n > 1)} {s}not found, attempting AutoUpdate...") + try: + # assert check_online(), "AutoUpdate skipped (offline)" + LOGGER.info(check_output(f'pip install {s} {cmds}', shell=True).decode()) + source = file if 'file' in locals() else requirements + s = f"{prefix} {n} package{'s' * (n > 1)} updated per {source}\n" \ + f"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\n" + LOGGER.info(s) + except Exception as e: + LOGGER.warning(f'{prefix} ❌ {e}') + + +def check_img_size(imgsz, s=32, floor=0): + # Verify image size is a multiple of stride s in each dimension + if isinstance(imgsz, int): # integer i.e. img_size=640 + new_size = max(make_divisible(imgsz, int(s)), floor) + else: # list i.e. img_size=[640, 480] + imgsz = list(imgsz) # convert to list if tuple + new_size = [max(make_divisible(x, int(s)), floor) for x in imgsz] + if new_size != imgsz: + LOGGER.warning(f'WARNING ⚠️ --img-size {imgsz} must be multiple of max stride {s}, updating to {new_size}') + return new_size + + +def check_imshow(warn=False): + # Check if environment supports image displays + try: + assert not is_notebook() + assert not is_docker() + cv2.imshow('test', np.zeros((1, 1, 3))) + cv2.waitKey(1) + cv2.destroyAllWindows() + cv2.waitKey(1) + return True + except Exception as e: + if warn: + LOGGER.warning(f'WARNING ⚠️ Environment does not support cv2.imshow() or PIL Image.show()\n{e}') + return False + + +def check_suffix(file='yolo.pt', suffix=('.pt',), msg=''): + # Check file(s) for acceptable suffix + if file and suffix: + if isinstance(suffix, str): + suffix = [suffix] + for f in file if isinstance(file, (list, tuple)) else [file]: + s = Path(f).suffix.lower() # file suffix + if len(s): + assert s in suffix, f"{msg}{f} acceptable suffix is {suffix}" + + +def check_yaml(file, suffix=('.yaml', '.yml')): + # Search/download YAML file (if necessary) and return path, checking suffix + return check_file(file, suffix) + + +def check_file(file, suffix=''): + # Search/download file (if necessary) and return path + check_suffix(file, suffix) # optional + file = str(file) # convert to str() + if os.path.isfile(file) or not file: # exists + return file + elif file.startswith(('http:/', 'https:/')): # download + url = file # warning: Pathlib turns :// -> :/ + file = Path(urllib.parse.unquote(file).split('?')[0]).name # '%2F' to '/', split https://url.com/file.txt?auth + if os.path.isfile(file): + LOGGER.info(f'Found {url} locally at {file}') # file already exists + else: + LOGGER.info(f'Downloading {url} to {file}...') + torch.hub.download_url_to_file(url, file) + assert Path(file).exists() and Path(file).stat().st_size > 0, f'File download failed: {url}' # check + return file + elif file.startswith('clearml://'): # ClearML Dataset ID + assert 'clearml' in sys.modules, "ClearML is not installed, so cannot use ClearML dataset. Try running 'pip install clearml'." 
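+        # the clearml:// URI is returned as-is here; resolving it to local files is left to the ClearML integration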
+ return file + else: # search + files = [] + for d in 'data', 'models', 'utils': # search directories + files.extend(glob.glob(str(ROOT / d / '**' / file), recursive=True)) # find file + assert len(files), f'File not found: {file}' # assert file was found + assert len(files) == 1, f"Multiple files match '{file}', specify exact path: {files}" # assert unique + return files[0] # return file + + +def check_font(font=FONT, progress=False): + # Download font to CONFIG_DIR if necessary + font = Path(font) + file = CONFIG_DIR / font.name + if not font.exists() and not file.exists(): + url = f'https://ultralytics.com/assets/{font.name}' + LOGGER.info(f'Downloading {url} to {file}...') + torch.hub.download_url_to_file(url, str(file), progress=progress) + + +def check_dataset(data, autodownload=True): + # Download, check and/or unzip dataset if not found locally + + # Download (optional) + extract_dir = '' + if isinstance(data, (str, Path)) and (is_zipfile(data) or is_tarfile(data)): + download(data, dir=f'{DATASETS_DIR}/{Path(data).stem}', unzip=True, delete=False, curl=False, threads=1) + data = next((DATASETS_DIR / Path(data).stem).rglob('*.yaml')) + extract_dir, autodownload = data.parent, False + + # Read yaml (optional) + if isinstance(data, (str, Path)): + data = yaml_load(data) # dictionary + + # Checks + for k in 'train', 'val', 'names': + assert k in data, emojis(f"data.yaml '{k}:' field missing ❌") + if isinstance(data['names'], (list, tuple)): # old array format + data['names'] = dict(enumerate(data['names'])) # convert to dict + assert all(isinstance(k, int) for k in data['names'].keys()), 'data.yaml names keys must be integers, i.e. 2: car' + data['nc'] = len(data['names']) + + # Resolve paths + path = Path(extract_dir or data.get('path') or '') # optional 'path' default to '.' 
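+    # e.g. for a hypothetical data.yaml:
+    #   path: ../datasets/coco128   # relative, so resolved against ROOT below
+    #   train: images/train2017     # becomes <resolved path>/images/train2017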
+ if not path.is_absolute(): + path = (ROOT / path).resolve() + data['path'] = path # download scripts + for k in 'train', 'val', 'test': + if data.get(k): # prepend path + if isinstance(data[k], str): + x = (path / data[k]).resolve() + if not x.exists() and data[k].startswith('../'): + x = (path / data[k][3:]).resolve() + data[k] = str(x) + else: + data[k] = [str((path / x).resolve()) for x in data[k]] + + # Parse yaml + train, val, test, s = (data.get(x) for x in ('train', 'val', 'test', 'download')) + if val: + val = [Path(x).resolve() for x in (val if isinstance(val, list) else [val])] # val path + if not all(x.exists() for x in val): + LOGGER.info('\nDataset not found ⚠️, missing paths %s' % [str(x) for x in val if not x.exists()]) + if not s or not autodownload: + raise Exception('Dataset not found ❌') + t = time.time() + if s.startswith('http') and s.endswith('.zip'): # URL + f = Path(s).name # filename + LOGGER.info(f'Downloading {s} to {f}...') + torch.hub.download_url_to_file(s, f) + Path(DATASETS_DIR).mkdir(parents=True, exist_ok=True) # create root + unzip_file(f, path=DATASETS_DIR) # unzip + Path(f).unlink() # remove zip + r = None # success + elif s.startswith('bash '): # bash script + LOGGER.info(f'Running {s} ...') + r = os.system(s) + else: # python script + r = exec(s, {'yaml': data}) # return None + dt = f'({round(time.time() - t, 1)}s)' + s = f"success ✅ {dt}, saved to {colorstr('bold', DATASETS_DIR)}" if r in (0, None) else f"failure {dt} ❌" + LOGGER.info(f"Dataset download {s}") + check_font('Arial.ttf' if is_ascii(data['names']) else 'Arial.Unicode.ttf', progress=True) # download fonts + return data # dictionary + + +def check_amp(model): + # Check PyTorch Automatic Mixed Precision (AMP) functionality. Return True on correct operation + from models.common import AutoShape, DetectMultiBackend + + def amp_allclose(model, im): + # All close FP32 vs AMP results + m = AutoShape(model, verbose=False) # model + a = m(im).xywhn[0] # FP32 inference + m.amp = True + b = m(im).xywhn[0] # AMP inference + return a.shape == b.shape and torch.allclose(a, b, atol=0.1) # close to 10% absolute tolerance + + prefix = colorstr('AMP: ') + device = next(model.parameters()).device # get model device + if device.type in ('cpu', 'mps'): + return False # AMP only used on CUDA devices + f = ROOT / 'data' / 'images' / 'bus.jpg' # image to check + im = f if f.exists() else 'https://ultralytics.com/images/bus.jpg' if check_online() else np.ones((640, 640, 3)) + try: + #assert amp_allclose(deepcopy(model), im) or amp_allclose(DetectMultiBackend('yolo.pt', device), im) + LOGGER.info(f'{prefix}checks passed ✅') + return True + except Exception: + help_url = 'https://github.com/ultralytics/yolov5/issues/7908' + LOGGER.warning(f'{prefix}checks failed ❌, disabling Automatic Mixed Precision. 
See {help_url}') + return False + + +def yaml_load(file='data.yaml'): + # Single-line safe yaml loading + with open(file, errors='ignore') as f: + return yaml.safe_load(f) + + +def yaml_save(file='data.yaml', data={}): + # Single-line safe yaml saving + with open(file, 'w') as f: + yaml.safe_dump({k: str(v) if isinstance(v, Path) else v for k, v in data.items()}, f, sort_keys=False) + + +def unzip_file(file, path=None, exclude=('.DS_Store', '__MACOSX')): + # Unzip a *.zip file to path/, excluding files containing strings in exclude list + if path is None: + path = Path(file).parent # default path + with ZipFile(file) as zipObj: + for f in zipObj.namelist(): # list all archived filenames in the zip + if all(x not in f for x in exclude): + zipObj.extract(f, path=path) + + +def url2file(url): + # Convert URL to filename, i.e. https://url.com/file.txt?auth -> file.txt + url = str(Path(url)).replace(':/', '://') # Pathlib turns :// -> :/ + return Path(urllib.parse.unquote(url)).name.split('?')[0] # '%2F' to '/', split https://url.com/file.txt?auth + + +def download(url, dir='.', unzip=True, delete=True, curl=False, threads=1, retry=3): + # Multithreaded file download and unzip function, used in data.yaml for autodownload + def download_one(url, dir): + # Download 1 file + success = True + if os.path.isfile(url): + f = Path(url) # filename + else: # does not exist + f = dir / Path(url).name + LOGGER.info(f'Downloading {url} to {f}...') + for i in range(retry + 1): + if curl: + s = 'sS' if threads > 1 else '' # silent + r = os.system( + f'curl -# -{s}L "{url}" -o "{f}" --retry 9 -C -') # curl download with retry, continue + success = r == 0 + else: + torch.hub.download_url_to_file(url, f, progress=threads == 1) # torch download + success = f.is_file() + if success: + break + elif i < retry: + LOGGER.warning(f'⚠️ Download failure, retrying {i + 1}/{retry} {url}...') + else: + LOGGER.warning(f'❌ Failed to download {url}...') + + if unzip and success and (f.suffix == '.gz' or is_zipfile(f) or is_tarfile(f)): + LOGGER.info(f'Unzipping {f}...') + if is_zipfile(f): + unzip_file(f, dir) # unzip + elif is_tarfile(f): + os.system(f'tar xf {f} --directory {f.parent}') # unzip + elif f.suffix == '.gz': + os.system(f'tar xfz {f} --directory {f.parent}') # unzip + if delete: + f.unlink() # remove zip + + dir = Path(dir) + dir.mkdir(parents=True, exist_ok=True) # make directory + if threads > 1: + pool = ThreadPool(threads) + pool.imap(lambda x: download_one(*x), zip(url, repeat(dir))) # multithreaded + pool.close() + pool.join() + else: + for u in [url] if isinstance(url, (str, Path)) else url: + download_one(u, dir) + + +def make_divisible(x, divisor): + # Returns nearest x divisible by divisor + if isinstance(divisor, torch.Tensor): + divisor = int(divisor.max()) # to int + return math.ceil(x / divisor) * divisor + + +def clean_str(s): + # Cleans a string by replacing special characters with underscore _ + return re.sub(pattern="[|@#!¡·$€%&()=?¿^*;:,¨´><+]", repl="_", string=s) + + +def one_cycle(y1=0.0, y2=1.0, steps=100): + # lambda function for sinusoidal ramp from y1 to y2 https://arxiv.org/pdf/1812.01187.pdf + return lambda x: ((1 - math.cos(x * math.pi / steps)) / 2) * (y2 - y1) + y1 + + +def one_flat_cycle(y1=0.0, y2=1.0, steps=100): + # lambda function for sinusoidal ramp from y1 to y2 https://arxiv.org/pdf/1812.01187.pdf + #return lambda x: ((1 - math.cos(x * math.pi / steps)) / 2) * (y2 - y1) + y1 + return lambda x: ((1 - math.cos((x - (steps // 2)) * math.pi / (steps // 2))) / 2) * (y2 - y1) + y1 
if (x > (steps // 2)) else y1 + + +def colorstr(*input): + # Colors a string https://en.wikipedia.org/wiki/ANSI_escape_code, i.e. colorstr('blue', 'hello world') + *args, string = input if len(input) > 1 else ('blue', 'bold', input[0]) # color arguments, string + colors = { + 'black': '\033[30m', # basic colors + 'red': '\033[31m', + 'green': '\033[32m', + 'yellow': '\033[33m', + 'blue': '\033[34m', + 'magenta': '\033[35m', + 'cyan': '\033[36m', + 'white': '\033[37m', + 'bright_black': '\033[90m', # bright colors + 'bright_red': '\033[91m', + 'bright_green': '\033[92m', + 'bright_yellow': '\033[93m', + 'bright_blue': '\033[94m', + 'bright_magenta': '\033[95m', + 'bright_cyan': '\033[96m', + 'bright_white': '\033[97m', + 'end': '\033[0m', # misc + 'bold': '\033[1m', + 'underline': '\033[4m'} + return ''.join(colors[x] for x in args) + f'{string}' + colors['end'] + + +def labels_to_class_weights(labels, nc=80): + # Get class weights (inverse frequency) from training labels + if labels[0] is None: # no labels loaded + return torch.Tensor() + + labels = np.concatenate(labels, 0) # labels.shape = (866643, 5) for COCO + classes = labels[:, 0].astype(int) # labels = [class xywh] + weights = np.bincount(classes, minlength=nc) # occurrences per class + + # Prepend gridpoint count (for uCE training) + # gpi = ((320 / 32 * np.array([1, 2, 4])) ** 2 * 3).sum() # gridpoints per image + # weights = np.hstack([gpi * len(labels) - weights.sum() * 9, weights * 9]) ** 0.5 # prepend gridpoints to start + + weights[weights == 0] = 1 # replace empty bins with 1 + weights = 1 / weights # number of targets per class + weights /= weights.sum() # normalize + return torch.from_numpy(weights).float() + + +def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)): + # Produces image weights based on class_weights and image contents + # Usage: index = random.choices(range(n), weights=image_weights, k=1) # weighted image sample + class_counts = np.array([np.bincount(x[:, 0].astype(int), minlength=nc) for x in labels]) + return (class_weights.reshape(1, nc) * class_counts).sum(1) + + +def coco80_to_coco91_class(): # converts 80-index (val2014) to 91-index (paper) + # https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/ + # a = np.loadtxt('data/coco.names', dtype='str', delimiter='\n') + # b = np.loadtxt('data/coco_paper.names', dtype='str', delimiter='\n') + # x1 = [list(a[i] == b).index(True) + 1 for i in range(80)] # darknet to coco + # x2 = [list(b[i] == a).index(True) if any(b[i] == a) else None for i in range(91)] # coco to darknet + return [ + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34, + 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, + 64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90] + + +def xywh2ltwh(x): + """ + Convert the bounding box format from [x, y, w, h] to [x1, y1, w, h], where x1, y1 are the top-left coordinates. 
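+
+    Example (illustrative): xywh [50, 40, 20, 10] -> ltwh [40, 35, 20, 10]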
+ + Args: + x (np.ndarray) or (torch.Tensor): The input tensor with the bounding box coordinates in the xywh format + Returns: + y (np.ndarray) or (torch.Tensor): The bounding box coordinates in the xyltwh format + """ + y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) + y[:, 0] = x[:, 0] - x[:, 2] / 2 # top left x + y[:, 1] = x[:, 1] - x[:, 3] / 2 # top left y + return y + + +def xyxy2ltwh(x): + """ + Convert nx4 bounding boxes from [x1, y1, x2, y2] to [x1, y1, w, h], where xy1=top-left, xy2=bottom-right + + Args: + x (np.ndarray) or (torch.Tensor): The input tensor with the bounding boxes coordinates in the xyxy format + Returns: + y (np.ndarray) or (torch.Tensor): The bounding box coordinates in the xyltwh format. + """ + y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) + y[:, 2] = x[:, 2] - x[:, 0] # width + y[:, 3] = x[:, 3] - x[:, 1] # height + return y + + +def ltwh2xywh(x): + """ + Convert nx4 boxes from [x1, y1, w, h] to [x, y, w, h] where xy1=top-left, xy=center + + Args: + x (torch.Tensor): the input tensor + """ + y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) + y[:, 0] = x[:, 0] + x[:, 2] / 2 # center x + y[:, 1] = x[:, 1] + x[:, 3] / 2 # center y + return y + + +def ltwh2xyxy(x): + """ + It converts the bounding box from [x1, y1, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right + + Args: + x (np.ndarray) or (torch.Tensor): the input image + + Returns: + y (np.ndarray) or (torch.Tensor): the xyxy coordinates of the bounding boxes. + """ + y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) + y[:, 2] = x[:, 2] + x[:, 0] # width + y[:, 3] = x[:, 3] + x[:, 1] # height + return y + + +def xyxy2xywh(x): + # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right + y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) + y[..., 0] = (x[..., 0] + x[..., 2]) / 2 # x center + y[..., 1] = (x[..., 1] + x[..., 3]) / 2 # y center + y[..., 2] = x[..., 2] - x[..., 0] # width + y[..., 3] = x[..., 3] - x[..., 1] # height + return y + + +def xywh2xyxy(x): + # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right + y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) + y[..., 0] = x[..., 0] - x[..., 2] / 2 # top left x + y[..., 1] = x[..., 1] - x[..., 3] / 2 # top left y + y[..., 2] = x[..., 0] + x[..., 2] / 2 # bottom right x + y[..., 3] = x[..., 1] + x[..., 3] / 2 # bottom right y + return y + + +def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0): + # Convert nx4 boxes from [x, y, w, h] normalized to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right + y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) + y[..., 0] = w * (x[..., 0] - x[..., 2] / 2) + padw # top left x + y[..., 1] = h * (x[..., 1] - x[..., 3] / 2) + padh # top left y + y[..., 2] = w * (x[..., 0] + x[..., 2] / 2) + padw # bottom right x + y[..., 3] = h * (x[..., 1] + x[..., 3] / 2) + padh # bottom right y + return y + + +def xyxy2xywhn(x, w=640, h=640, clip=False, eps=0.0): + # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] normalized where xy1=top-left, xy2=bottom-right + if clip: + clip_boxes(x, (h - eps, w - eps)) # warning: inplace clip + y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) + y[..., 0] = ((x[..., 0] + x[..., 2]) / 2) / w # x center + y[..., 1] = ((x[..., 1] + x[..., 3]) / 2) / h # y center + y[..., 2] = (x[..., 2] - x[..., 0]) / w # width + y[..., 3] = (x[..., 3] - x[..., 1]) / h # height + return y + + +def xyn2xy(x, w=640, h=640, 
padw=0, padh=0): + # Convert normalized segments into pixel segments, shape (n,2) + y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) + y[..., 0] = w * x[..., 0] + padw # top left x + y[..., 1] = h * x[..., 1] + padh # top left y + return y + + +def segment2box(segment, width=640, height=640): + # Convert 1 segment label to 1 box label, applying inside-image constraint, i.e. (xy1, xy2, ...) to (xyxy) + x, y = segment.T # segment xy + inside = (x >= 0) & (y >= 0) & (x <= width) & (y <= height) + x, y, = x[inside], y[inside] + return np.array([x.min(), y.min(), x.max(), y.max()]) if any(x) else np.zeros((1, 4)) # xyxy + + +def segments2boxes(segments): + # Convert segment labels to box labels, i.e. (cls, xy1, xy2, ...) to (cls, xywh) + boxes = [] + for s in segments: + x, y = s.T # segment xy + boxes.append([x.min(), y.min(), x.max(), y.max()]) # cls, xyxy + return xyxy2xywh(np.array(boxes)) # cls, xywh + + +def resample_segments(segments, n=1000): + # Up-sample an (n,2) segment + for i, s in enumerate(segments): + s = np.concatenate((s, s[0:1, :]), axis=0) + x = np.linspace(0, len(s) - 1, n) + xp = np.arange(len(s)) + segments[i] = np.concatenate([np.interp(x, xp, s[:, i]) for i in range(2)]).reshape(2, -1).T # segment xy + return segments + + +def scale_boxes(img1_shape, boxes, img0_shape, ratio_pad=None): + # Rescale boxes (xyxy) from img1_shape to img0_shape + if ratio_pad is None: # calculate from img0_shape + gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new + pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding + else: + gain = ratio_pad[0][0] + pad = ratio_pad[1] + + boxes[:, [0, 2]] -= pad[0] # x padding + boxes[:, [1, 3]] -= pad[1] # y padding + boxes[:, :4] /= gain + clip_boxes(boxes, img0_shape) + return boxes + + +def scale_segments(img1_shape, segments, img0_shape, ratio_pad=None, normalize=False): + # Rescale coords (xyxy) from img1_shape to img0_shape + if ratio_pad is None: # calculate from img0_shape + gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new + pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding + else: + gain = ratio_pad[0][0] + pad = ratio_pad[1] + + segments[:, 0] -= pad[0] # x padding + segments[:, 1] -= pad[1] # y padding + segments /= gain + clip_segments(segments, img0_shape) + if normalize: + segments[:, 0] /= img0_shape[1] # width + segments[:, 1] /= img0_shape[0] # height + return segments + + +def clip_boxes(boxes, shape): + # Clip boxes (xyxy) to image shape (height, width) + if isinstance(boxes, torch.Tensor): # faster individually + boxes[:, 0].clamp_(0, shape[1]) # x1 + boxes[:, 1].clamp_(0, shape[0]) # y1 + boxes[:, 2].clamp_(0, shape[1]) # x2 + boxes[:, 3].clamp_(0, shape[0]) # y2 + else: # np.array (faster grouped) + boxes[:, [0, 2]] = boxes[:, [0, 2]].clip(0, shape[1]) # x1, x2 + boxes[:, [1, 3]] = boxes[:, [1, 3]].clip(0, shape[0]) # y1, y2 + + +def clip_segments(segments, shape): + # Clip segments (xy1,xy2,...) 
to image shape (height, width) + if isinstance(segments, torch.Tensor): # faster individually + segments[:, 0].clamp_(0, shape[1]) # x + segments[:, 1].clamp_(0, shape[0]) # y + else: # np.array (faster grouped) + segments[:, 0] = segments[:, 0].clip(0, shape[1]) # x + segments[:, 1] = segments[:, 1].clip(0, shape[0]) # y + + +def non_max_suppression( + prediction, + conf_thres=0.25, + iou_thres=0.45, + classes=None, + agnostic=False, + multi_label=False, + labels=(), + max_det=300, + nm=0, # number of masks +): + """Non-Maximum Suppression (NMS) on inference results to reject overlapping detections + + Returns: + list of detections, on (n,6) tensor per image [xyxy, conf, cls] + """ + if isinstance(prediction, (list, tuple)): # YOLO model in validation model, output = (inference_out, loss_out) + prediction = prediction[0] # select only inference output + + device = prediction.device + mps = 'mps' in device.type # Apple MPS + if mps: # MPS not fully supported yet, convert tensors to CPU before NMS + prediction = prediction.cpu() + bs = prediction.shape[0] # batch size + nc = prediction.shape[1] - nm - 4 # number of classes + mi = 4 + nc # mask start index + xc = prediction[:, 4:mi].amax(1) > conf_thres # candidates + + # Checks + assert 0 <= conf_thres <= 1, f'Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0' + assert 0 <= iou_thres <= 1, f'Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0' + + # Settings + # min_wh = 2 # (pixels) minimum box width and height + max_wh = 7680 # (pixels) maximum box width and height + max_nms = 30000 # maximum number of boxes into torchvision.ops.nms() + time_limit = 2.5 + 0.05 * bs # seconds to quit after + redundant = True # require redundant detections + multi_label &= nc > 1 # multiple labels per box (adds 0.5ms/img) + merge = False # use merge-NMS + + t = time.time() + output = [torch.zeros((0, 6 + nm), device=prediction.device)] * bs + for xi, x in enumerate(prediction): # image index, image inference + # Apply constraints + # x[((x[:, 2:4] < min_wh) | (x[:, 2:4] > max_wh)).any(1), 4] = 0 # width-height + x = x.T[xc[xi]] # confidence + + # Cat apriori labels if autolabelling + if labels and len(labels[xi]): + lb = labels[xi] + v = torch.zeros((len(lb), nc + nm + 5), device=x.device) + v[:, :4] = lb[:, 1:5] # box + v[range(len(lb)), lb[:, 0].long() + 4] = 1.0 # cls + x = torch.cat((x, v), 0) + + # If none remain process next image + if not x.shape[0]: + continue + + # Detections matrix nx6 (xyxy, conf, cls) + box, cls, mask = x.split((4, nc, nm), 1) + box = xywh2xyxy(box) # center_x, center_y, width, height) to (x1, y1, x2, y2) + if multi_label: + i, j = (cls > conf_thres).nonzero(as_tuple=False).T + x = torch.cat((box[i], x[i, 4 + j, None], j[:, None].float(), mask[i]), 1) + else: # best class only + conf, j = cls.max(1, keepdim=True) + x = torch.cat((box, conf, j.float(), mask), 1)[conf.view(-1) > conf_thres] + + # Filter by class + if classes is not None: + x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)] + + # Apply finite constraint + # if not torch.isfinite(x).all(): + # x = x[torch.isfinite(x).all(1)] + + # Check shape + n = x.shape[0] # number of boxes + if not n: # no boxes + continue + elif n > max_nms: # excess boxes + x = x[x[:, 4].argsort(descending=True)[:max_nms]] # sort by confidence + else: + x = x[x[:, 4].argsort(descending=True)] # sort by confidence + + # Batched NMS + c = x[:, 5:6] * (0 if agnostic else max_wh) # classes + boxes, scores = x[:, :4] + c, x[:, 4] # boxes 
(offset by class), scores + i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS + if i.shape[0] > max_det: # limit detections + i = i[:max_det] + if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean) + # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4) + iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix + weights = iou * scores[None] # box weights + x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes + if redundant: + i = i[iou.sum(1) > 1] # require redundancy + + output[xi] = x[i] + if mps: + output[xi] = output[xi].to(device) + if (time.time() - t) > time_limit: + LOGGER.warning(f'WARNING ⚠️ NMS time limit {time_limit:.3f}s exceeded') + break # time limit exceeded + + return output + + +def strip_optimizer(f='best.pt', s=''): # from utils.general import *; strip_optimizer() + # Strip optimizer from 'f' to finalize training, optionally save as 's' + x = torch.load(f, map_location=torch.device('cpu')) + if x.get('ema'): + x['model'] = x['ema'] # replace model with ema + for k in 'optimizer', 'best_fitness', 'ema', 'updates': # keys + x[k] = None + x['epoch'] = -1 + x['model'].half() # to FP16 + for p in x['model'].parameters(): + p.requires_grad = False + torch.save(x, s or f) + mb = os.path.getsize(s or f) / 1E6 # filesize + LOGGER.info(f"Optimizer stripped from {f},{f' saved as {s},' if s else ''} {mb:.1f}MB") + + +def print_mutation(keys, results, hyp, save_dir, bucket, prefix=colorstr('evolve: ')): + evolve_csv = save_dir / 'evolve.csv' + evolve_yaml = save_dir / 'hyp_evolve.yaml' + keys = tuple(keys) + tuple(hyp.keys()) # [results + hyps] + keys = tuple(x.strip() for x in keys) + vals = results + tuple(hyp.values()) + n = len(keys) + + # Download (optional) + if bucket: + url = f'gs://{bucket}/evolve.csv' + if gsutil_getsize(url) > (evolve_csv.stat().st_size if evolve_csv.exists() else 0): + os.system(f'gsutil cp {url} {save_dir}') # download evolve.csv if larger than local + + # Log to evolve.csv + s = '' if evolve_csv.exists() else (('%20s,' * n % keys).rstrip(',') + '\n') # add header + with open(evolve_csv, 'a') as f: + f.write(s + ('%20.5g,' * n % vals).rstrip(',') + '\n') + + # Save yaml + with open(evolve_yaml, 'w') as f: + data = pd.read_csv(evolve_csv) + data = data.rename(columns=lambda x: x.strip()) # strip keys + i = np.argmax(fitness(data.values[:, :4])) # + generations = len(data) + f.write('# YOLO Hyperparameter Evolution Results\n' + f'# Best generation: {i}\n' + + f'# Last generation: {generations - 1}\n' + '# ' + ', '.join(f'{x.strip():>20s}' for x in keys[:7]) + + '\n' + '# ' + ', '.join(f'{x:>20.5g}' for x in data.values[i, :7]) + '\n\n') + yaml.safe_dump(data.loc[i][7:].to_dict(), f, sort_keys=False) + + # Print to screen + LOGGER.info(prefix + f'{generations} generations finished, current result:\n' + prefix + + ', '.join(f'{x.strip():>20s}' for x in keys) + '\n' + prefix + ', '.join(f'{x:20.5g}' + for x in vals) + '\n\n') + + if bucket: + os.system(f'gsutil cp {evolve_csv} {evolve_yaml} gs://{bucket}') # upload + + +def apply_classifier(x, model, img, im0): + # Apply a second stage classifier to YOLO outputs + # Example model = torchvision.models.__dict__['efficientnet_b0'](pretrained=True).to(device).eval() + im0 = [im0] if isinstance(im0, np.ndarray) else im0 + for i, d in enumerate(x): # per image + if d is not None and len(d): + d = d.clone() + + # Reshape and pad cutouts + b = xyxy2xywh(d[:, :4]) # boxes + b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # rectangle to square + b[:, 2:] 
= b[:, 2:] * 1.3 + 30  # pad
+            d[:, :4] = xywh2xyxy(b).long()
+
+            # Rescale boxes from img_size to im0 size
+            scale_boxes(img.shape[2:], d[:, :4], im0[i].shape)
+
+            # Classes
+            pred_cls1 = d[:, 5].long()
+            ims = []
+            for a in d:
+                cutout = im0[i][int(a[1]):int(a[3]), int(a[0]):int(a[2])]
+                im = cv2.resize(cutout, (224, 224))  # BGR
+
+                im = im[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x224x224
+                im = np.ascontiguousarray(im, dtype=np.float32)  # uint8 to float32
+                im /= 255  # 0 - 255 to 0.0 - 1.0
+                ims.append(im)
+
+            pred_cls2 = model(torch.Tensor(ims).to(d.device)).argmax(1)  # classifier prediction
+            x[i] = x[i][pred_cls1 == pred_cls2]  # retain matching class detections
+
+    return x
+
+
+def increment_path(path, exist_ok=False, sep='', mkdir=False):
+    # Increment file or directory path, i.e. runs/exp --> runs/exp{sep}2, runs/exp{sep}3, ... etc.
+    path = Path(path)  # os-agnostic
+    if path.exists() and not exist_ok:
+        path, suffix = (path.with_suffix(''), path.suffix) if path.is_file() else (path, '')
+
+        # Method 1
+        for n in range(2, 9999):
+            p = f'{path}{sep}{n}{suffix}'  # increment path
+            if not os.path.exists(p):
+                break
+        path = Path(p)
+
+        # Method 2 (deprecated)
+        # dirs = glob.glob(f"{path}{sep}*")  # similar paths
+        # matches = [re.search(rf"{path.stem}{sep}(\d+)", d) for d in dirs]
+        # i = [int(m.groups()[0]) for m in matches if m]  # indices
+        # n = max(i) + 1 if i else 2  # increment number
+        # path = Path(f"{path}{sep}{n}{suffix}")  # increment path
+
+    if mkdir:
+        path.mkdir(parents=True, exist_ok=True)  # make directory
+
+    return path
+
+
+# OpenCV Chinese-friendly functions ------------------------------------------------------------------------------------
+imshow_ = cv2.imshow  # copy to avoid recursion errors
+
+
+def imread(path, flags=cv2.IMREAD_COLOR):
+    return cv2.imdecode(np.fromfile(path, np.uint8), flags)
+
+
+def imwrite(path, im):
+    try:
+        cv2.imencode(Path(path).suffix, im)[1].tofile(path)
+        return True
+    except Exception:
+        return False
+
+
+def imshow(path, im):
+    imshow_(path.encode('unicode_escape').decode(), im)
+
+
+cv2.imread, cv2.imwrite, cv2.imshow = imread, imwrite, imshow  # redefine
+
+# Variables ------------------------------------------------------------------------------------------------------------
diff --git a/utils/.ipynb_checkpoints/instance-checkpoint.py b/utils/.ipynb_checkpoints/instance-checkpoint.py
new file mode 100644
index 000000000..89d41ee67
--- /dev/null
+++ b/utils/.ipynb_checkpoints/instance-checkpoint.py
@@ -0,0 +1,390 @@
+# Ultralytics YOLO 🚀, AGPL-3.0 license
+
+from collections import abc
+from itertools import repeat
+from numbers import Number
+from typing import List
+
+import numpy as np
+
+from .general import ltwh2xywh, ltwh2xyxy, resample_segments, xywh2ltwh, xywh2xyxy, xyxy2ltwh, xyxy2xywh
+
+
+def _ntuple(n):
+    """From PyTorch internals."""
+
+    def parse(x):
+        """Return x unchanged if it is already iterable, otherwise repeat it n times as a tuple."""
+        return x if isinstance(x, abc.Iterable) else tuple(repeat(x, n))
+
+    return parse
+
+
+to_4tuple = _ntuple(4)
+
+# `xyxy` means left top and right bottom
+# `xywh` means center x, center y and width, height (YOLO format)
+# `ltwh` means left top and width, height (COCO format)
+_formats = ['xyxy', 'xywh', 'ltwh']
+
+__all__ = 'Bboxes',  # tuple or list
+
+
+class Bboxes:
+    """Now only numpy is supported."""
+
+    def __init__(self, bboxes, format='xyxy') -> None:
+        assert format in _formats, f'Invalid bounding box format: {format}, format must be one of {_formats}'
+        bboxes = bboxes[None, :] if bboxes.ndim
== 1 else bboxes + assert bboxes.ndim == 2 + assert bboxes.shape[1] == 4 + self.bboxes = bboxes + self.format = format + # self.normalized = normalized + + # def convert(self, format): + # assert format in _formats + # if self.format == format: + # bboxes = self.bboxes + # elif self.format == "xyxy": + # if format == "xywh": + # bboxes = xyxy2xywh(self.bboxes) + # else: + # bboxes = xyxy2ltwh(self.bboxes) + # elif self.format == "xywh": + # if format == "xyxy": + # bboxes = xywh2xyxy(self.bboxes) + # else: + # bboxes = xywh2ltwh(self.bboxes) + # else: + # if format == "xyxy": + # bboxes = ltwh2xyxy(self.bboxes) + # else: + # bboxes = ltwh2xywh(self.bboxes) + # + # return Bboxes(bboxes, format) + + def convert(self, format): + """Converts bounding box format from one type to another.""" + assert format in _formats, f'Invalid bounding box format: {format}, format must be one of {_formats}' + if self.format == format: + return + elif self.format == 'xyxy': + bboxes = xyxy2xywh(self.bboxes) if format == 'xywh' else xyxy2ltwh(self.bboxes) + elif self.format == 'xywh': + bboxes = xywh2xyxy(self.bboxes) if format == 'xyxy' else xywh2ltwh(self.bboxes) + else: + bboxes = ltwh2xyxy(self.bboxes) if format == 'xyxy' else ltwh2xywh(self.bboxes) + self.bboxes = bboxes + self.format = format + + def areas(self): + """Return box areas.""" + self.convert('xyxy') + return (self.bboxes[:, 2] - self.bboxes[:, 0]) * (self.bboxes[:, 3] - self.bboxes[:, 1]) + + # def denormalize(self, w, h): + # if not self.normalized: + # return + # assert (self.bboxes <= 1.0).all() + # self.bboxes[:, 0::2] *= w + # self.bboxes[:, 1::2] *= h + # self.normalized = False + # + # def normalize(self, w, h): + # if self.normalized: + # return + # assert (self.bboxes > 1.0).any() + # self.bboxes[:, 0::2] /= w + # self.bboxes[:, 1::2] /= h + # self.normalized = True + + def mul(self, scale): + """ + Args: + scale (tuple) or (list) or (int): the scale for four coords. + """ + if isinstance(scale, Number): + scale = to_4tuple(scale) + assert isinstance(scale, (tuple, list)) + assert len(scale) == 4 + self.bboxes[:, 0] *= scale[0] + self.bboxes[:, 1] *= scale[1] + self.bboxes[:, 2] *= scale[2] + self.bboxes[:, 3] *= scale[3] + + def add(self, offset): + """ + Args: + offset (tuple) or (list) or (int): the offset for four coords. + """ + if isinstance(offset, Number): + offset = to_4tuple(offset) + assert isinstance(offset, (tuple, list)) + assert len(offset) == 4 + self.bboxes[:, 0] += offset[0] + self.bboxes[:, 1] += offset[1] + self.bboxes[:, 2] += offset[2] + self.bboxes[:, 3] += offset[3] + + def __len__(self): + """Return the number of boxes.""" + return len(self.bboxes) + + @classmethod + def concatenate(cls, boxes_list: List['Bboxes'], axis=0) -> 'Bboxes': + """ + Concatenate a list of Bboxes objects into a single Bboxes object. + + Args: + boxes_list (List[Bboxes]): A list of Bboxes objects to concatenate. + axis (int, optional): The axis along which to concatenate the bounding boxes. + Defaults to 0. + + Returns: + Bboxes: A new Bboxes object containing the concatenated bounding boxes. + + Note: + The input should be a list or tuple of Bboxes objects. 
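+
+        Example (illustrative; assumes `b1` and `b2` are existing Bboxes objects):
+            merged = Bboxes.concatenate([b1, b2], axis=0)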
+ """ + assert isinstance(boxes_list, (list, tuple)) + if not boxes_list: + return cls(np.empty(0)) + assert all(isinstance(box, Bboxes) for box in boxes_list) + + if len(boxes_list) == 1: + return boxes_list[0] + return cls(np.concatenate([b.bboxes for b in boxes_list], axis=axis)) + + def __getitem__(self, index) -> 'Bboxes': + """ + Retrieve a specific bounding box or a set of bounding boxes using indexing. + + Args: + index (int, slice, or np.ndarray): The index, slice, or boolean array to select + the desired bounding boxes. + + Returns: + Bboxes: A new Bboxes object containing the selected bounding boxes. + + Raises: + AssertionError: If the indexed bounding boxes do not form a 2-dimensional matrix. + + Note: + When using boolean indexing, make sure to provide a boolean array with the same + length as the number of bounding boxes. + """ + if isinstance(index, int): + return Bboxes(self.bboxes[index].view(1, -1)) + b = self.bboxes[index] + assert b.ndim == 2, f'Indexing on Bboxes with {index} failed to return a matrix!' + return Bboxes(b) + + +class Instances: + + def __init__(self, bboxes, segments=None, keypoints=None, bbox_format='xywh', normalized=True) -> None: + """ + Args: + bboxes (ndarray): bboxes with shape [N, 4]. + segments (list | ndarray): segments. + keypoints (ndarray): keypoints(x, y, visible) with shape [N, 17, 3]. + """ + if segments is None: + segments = [] + self._bboxes = Bboxes(bboxes=bboxes, format=bbox_format) + self.keypoints = keypoints + self.normalized = normalized + + if len(segments) > 0: + # list[np.array(1000, 2)] * num_samples + segments = resample_segments(segments) + # (N, 1000, 2) + segments = np.stack(segments, axis=0) + else: + segments = np.zeros((0, 1000, 2), dtype=np.float32) + self.segments = segments + + def convert_bbox(self, format): + """Convert bounding box format.""" + self._bboxes.convert(format=format) + + def bbox_areas(self): + """Calculate the area of bounding boxes.""" + self._bboxes.areas() + + def scale(self, scale_w, scale_h, bbox_only=False): + """this might be similar with denormalize func but without normalized sign.""" + self._bboxes.mul(scale=(scale_w, scale_h, scale_w, scale_h)) + if bbox_only: + return + self.segments[..., 0] *= scale_w + self.segments[..., 1] *= scale_h + if self.keypoints is not None: + self.keypoints[..., 0] *= scale_w + self.keypoints[..., 1] *= scale_h + + def denormalize(self, w, h): + """Denormalizes boxes, segments, and keypoints from normalized coordinates.""" + if not self.normalized: + return + self._bboxes.mul(scale=(w, h, w, h)) + self.segments[..., 0] *= w + self.segments[..., 1] *= h + if self.keypoints is not None: + self.keypoints[..., 0] *= w + self.keypoints[..., 1] *= h + self.normalized = False + + def normalize(self, w, h): + """Normalize bounding boxes, segments, and keypoints to image dimensions.""" + if self.normalized: + return + self._bboxes.mul(scale=(1 / w, 1 / h, 1 / w, 1 / h)) + self.segments[..., 0] /= w + self.segments[..., 1] /= h + if self.keypoints is not None: + self.keypoints[..., 0] /= w + self.keypoints[..., 1] /= h + self.normalized = True + + def add_padding(self, padw, padh): + """Handle rect and mosaic situation.""" + assert not self.normalized, 'you should add padding with absolute coordinates.' 
+        self._bboxes.add(offset=(padw, padh, padw, padh))
+        self.segments[..., 0] += padw
+        self.segments[..., 1] += padh
+        if self.keypoints is not None:
+            self.keypoints[..., 0] += padw
+            self.keypoints[..., 1] += padh
+
+    def __getitem__(self, index) -> 'Instances':
+        """
+        Retrieve a specific instance or a set of instances using indexing.
+
+        Args:
+            index (int, slice, or np.ndarray): The index, slice, or boolean array to select
+                the desired instances.
+
+        Returns:
+            Instances: A new Instances object containing the selected bounding boxes,
+                segments, and keypoints if present.
+
+        Note:
+            When using boolean indexing, make sure to provide a boolean array with the same
+            length as the number of instances.
+        """
+        segments = self.segments[index] if len(self.segments) else self.segments
+        keypoints = self.keypoints[index] if self.keypoints is not None else None
+        bboxes = self.bboxes[index]
+        bbox_format = self._bboxes.format
+        return Instances(
+            bboxes=bboxes,
+            segments=segments,
+            keypoints=keypoints,
+            bbox_format=bbox_format,
+            normalized=self.normalized,
+        )
+
+    def flipud(self, h):
+        """Flips the coordinates of bounding boxes, segments, and keypoints vertically."""
+        if self._bboxes.format == 'xyxy':
+            y1 = self.bboxes[:, 1].copy()
+            y2 = self.bboxes[:, 3].copy()
+            self.bboxes[:, 1] = h - y2
+            self.bboxes[:, 3] = h - y1
+        else:
+            self.bboxes[:, 1] = h - self.bboxes[:, 1]
+        self.segments[..., 1] = h - self.segments[..., 1]
+        if self.keypoints is not None:
+            self.keypoints[..., 1] = h - self.keypoints[..., 1]
+
+    def fliplr(self, w):
+        """Flips the coordinates of bounding boxes, segments, and keypoints horizontally."""
+        if self._bboxes.format == 'xyxy':
+            x1 = self.bboxes[:, 0].copy()
+            x2 = self.bboxes[:, 2].copy()
+            self.bboxes[:, 0] = w - x2
+            self.bboxes[:, 2] = w - x1
+        else:
+            self.bboxes[:, 0] = w - self.bboxes[:, 0]
+        self.segments[..., 0] = w - self.segments[..., 0]
+        if self.keypoints is not None:
+            self.keypoints[..., 0] = w - self.keypoints[..., 0]
+
+    def clip(self, w, h):
+        """Clips bounding boxes, segments, and keypoints values to stay within image boundaries."""
+        ori_format = self._bboxes.format
+        self.convert_bbox(format='xyxy')
+        self.bboxes[:, [0, 2]] = self.bboxes[:, [0, 2]].clip(0, w)
+        self.bboxes[:, [1, 3]] = self.bboxes[:, [1, 3]].clip(0, h)
+        if ori_format != 'xyxy':
+            self.convert_bbox(format=ori_format)
+        self.segments[..., 0] = self.segments[..., 0].clip(0, w)
+        self.segments[..., 1] = self.segments[..., 1].clip(0, h)
+        if self.keypoints is not None:
+            self.keypoints[..., 0] = self.keypoints[..., 0].clip(0, w)
+            self.keypoints[..., 1] = self.keypoints[..., 1].clip(0, h)
+
+    def remove_zero_area_boxes(self):
+        """Remove zero-area boxes, i.e. after clipping some boxes may have zero width or height.
This removes them."""
+        good = self._bboxes.areas() > 0
+        if not all(good):
+            self._bboxes = Bboxes(self._bboxes.bboxes[good], format=self._bboxes.format)
+            if len(self.segments):
+                self.segments = self.segments[good]
+            if self.keypoints is not None:
+                self.keypoints = self.keypoints[good]
+        return good
+
+    def update(self, bboxes, segments=None, keypoints=None):
+        """Updates instance variables."""
+        self._bboxes = Bboxes(bboxes, format=self._bboxes.format)
+        if segments is not None:
+            self.segments = segments
+        if keypoints is not None:
+            self.keypoints = keypoints
+
+    def __len__(self):
+        """Return the number of instances."""
+        return len(self.bboxes)
+
+    @classmethod
+    def concatenate(cls, instances_list: List['Instances'], axis=0) -> 'Instances':
+        """
+        Concatenates a list of Instances objects into a single Instances object.
+
+        Args:
+            instances_list (List[Instances]): A list of Instances objects to concatenate.
+            axis (int, optional): The axis along which the arrays will be concatenated. Defaults to 0.
+
+        Returns:
+            Instances: A new Instances object containing the concatenated bounding boxes,
+                segments, and keypoints if present.
+
+        Note:
+            The `Instances` objects in the list should have the same properties, such as
+            the format of the bounding boxes, whether keypoints are present, and if the
+            coordinates are normalized.
+        """
+        assert isinstance(instances_list, (list, tuple))
+        if not instances_list:
+            return cls(np.empty((0, 4)))  # empty (0, 4) array so the shape assertions in Bboxes.__init__ hold
+        assert all(isinstance(instance, Instances) for instance in instances_list)
+
+        if len(instances_list) == 1:
+            return instances_list[0]
+
+        use_keypoint = instances_list[0].keypoints is not None
+        bbox_format = instances_list[0]._bboxes.format
+        normalized = instances_list[0].normalized
+
+        cat_boxes = np.concatenate([ins.bboxes for ins in instances_list], axis=axis)
+        cat_segments = np.concatenate([ins.segments for ins in instances_list], axis=axis)
+        cat_keypoints = np.concatenate([ins.keypoints for ins in instances_list], axis=axis) if use_keypoint else None
+        return cls(cat_boxes, cat_segments, cat_keypoints, bbox_format, normalized)
+
+    @property
+    def bboxes(self):
+        """Return bounding boxes."""
+        return self._bboxes.bboxes
diff --git a/utils/.ipynb_checkpoints/metrics-checkpoint.py b/utils/.ipynb_checkpoints/metrics-checkpoint.py
new file mode 100644
index 000000000..cc6d81141
--- /dev/null
+++ b/utils/.ipynb_checkpoints/metrics-checkpoint.py
@@ -0,0 +1,395 @@
+import math
+import warnings
+from pathlib import Path
+
+import matplotlib.pyplot as plt
+import numpy as np
+import torch
+
+from utils import TryExcept, threaded
+
+
+def fitness(x):
+    # Model fitness as a weighted combination of metrics
+    w = [0.5, 0.5, 0.0, 0.0]  # weights for [P, R, mAP@0.5, mAP@0.5:0.95]
+    return (x[:, :4] * w).sum(1)
+
+
+def smooth(y, f=0.05):
+    # Box filter of fraction f
+    nf = round(len(y) * f * 2) // 2 + 1  # number of filter elements (must be odd)
+    p = np.ones(nf // 2)  # ones padding
+    yp = np.concatenate((p * y[0], y, p * y[-1]), 0)  # y padded
+    return np.convolve(yp, np.ones(nf) / nf, mode='valid')  # y-smoothed
+
+
+def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='.', names=(), eps=1e-16, prefix=""):
+    """ Compute the average precision, given the recall and precision curves.
+    Source: https://github.com/rafaelpadilla/Object-Detection-Metrics.
+    # Arguments
+        tp: True positives (nparray, nx1 or nx10).
+        conf: Objectness value from 0-1 (nparray).
+        pred_cls: Predicted object classes (nparray).
+        target_cls: True object classes (nparray).
+ plot: Plot precision-recall curve at mAP@0.5 + save_dir: Plot save directory + # Returns + The average precision as computed in py-faster-rcnn. + """ + + # Sort by objectness + i = np.argsort(-conf) + tp, conf, pred_cls = tp[i], conf[i], pred_cls[i] + + # Find unique classes + unique_classes, nt = np.unique(target_cls, return_counts=True) + nc = unique_classes.shape[0] # number of classes, number of detections + + # Create Precision-Recall curve and compute AP for each class + px, py = np.linspace(0, 1, 1000), [] # for plotting + ap, p, r = np.zeros((nc, tp.shape[1])), np.zeros((nc, 1000)), np.zeros((nc, 1000)) + for ci, c in enumerate(unique_classes): + i = pred_cls == c + n_l = nt[ci] # number of labels + n_p = i.sum() # number of predictions + if n_p == 0 or n_l == 0: + continue + + # Accumulate FPs and TPs + fpc = (1 - tp[i]).cumsum(0) + tpc = tp[i].cumsum(0) + + # Recall + recall = tpc / (n_l + eps) # recall curve + r[ci] = np.interp(-px, -conf[i], recall[:, 0], left=0) # negative x, xp because xp decreases + + # Precision + precision = tpc / (tpc + fpc) # precision curve + p[ci] = np.interp(-px, -conf[i], precision[:, 0], left=1) # p at pr_score + + # AP from recall-precision curve + for j in range(tp.shape[1]): + ap[ci, j], mpre, mrec = compute_ap(recall[:, j], precision[:, j]) + if plot and j == 0: + py.append(np.interp(px, mrec, mpre)) # precision at mAP@0.5 + + # Compute F1 (harmonic mean of precision and recall) + f1 = 2 * p * r / (p + r + eps) + names = [v for k, v in names.items() if k in unique_classes] # list: only classes that have data + names = dict(enumerate(names)) # to dict + if plot: + plot_pr_curve(px, py, ap, Path(save_dir) / f'{prefix}PR_curve.png', names) + plot_mc_curve(px, f1, Path(save_dir) / f'{prefix}F1_curve.png', names, ylabel='F1') + plot_mc_curve(px, p, Path(save_dir) / f'{prefix}P_curve.png', names, ylabel='Precision') + plot_mc_curve(px, r, Path(save_dir) / f'{prefix}R_curve.png', names, ylabel='Recall') + + i = smooth(f1.mean(0), 0.1).argmax() # max F1 index + p, r, f1 = p[:, i], r[:, i], f1[:, i] + tp = (r * nt).round() # true positives + fp = (tp / (p + eps) - tp).round() # false positives + return tp, fp, p, r, f1, ap, unique_classes.astype(int) + + +def compute_ap(recall, precision): + """ Compute the average precision, given the recall and precision curves + # Arguments + recall: The recall curve (list) + precision: The precision curve (list) + # Returns + Average precision, precision curve, recall curve + """ + + # Append sentinel values to beginning and end + mrec = np.concatenate(([0.0], recall, [1.0])) + mpre = np.concatenate(([1.0], precision, [0.0])) + + # Compute the precision envelope + mpre = np.flip(np.maximum.accumulate(np.flip(mpre))) + + # Integrate area under curve + method = 'interp' # methods: 'continuous', 'interp' + if method == 'interp': + x = np.linspace(0, 1, 101) # 101-point interp (COCO) + ap = np.trapz(np.interp(x, mrec, mpre), x) # integrate + else: # 'continuous' + i = np.where(mrec[1:] != mrec[:-1])[0] # points where x axis (recall) changes + ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1]) # area under curve + + return ap, mpre, mrec + + +class ConfusionMatrix: + # Updated version of https://github.com/kaanakan/object_detection_confusion_matrix + def __init__(self, nc, conf=0.25, iou_thres=0.45): + self.matrix = np.zeros((nc + 1, nc + 1)) + self.nc = nc # number of classes + self.conf = conf + self.iou_thres = iou_thres + + def process_batch(self, detections, labels): + if detections is None: + if labels is None: + 
self.matrix[self.nc, self.nc] += 1  # no detections and no labels: count as background/background
+                return
+            gt_classes = labels[:, 0].int()
+            for gc in gt_classes:
+                self.matrix[self.nc, gc] += 1  # class FN
+            return
+
+        detections = detections[detections[:, 4] > self.conf]
+        gt_classes = labels[:, 0].int()
+
+        if labels.shape[0] == 0:  # no ground-truth labels: every detection is a FP
+            for dc in detections:
+                self.matrix[dc[5].int(), self.nc] += 1  # class FP
+            return
+        detection_classes = detections[:, 5].int()
+        iou = box_iou(labels[:, 1:], detections[:, :4])
+
+        x = torch.where(iou > self.iou_thres)
+        if x[0].shape[0]:
+            matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy()
+            if x[0].shape[0] > 1:
+                matches = matches[matches[:, 2].argsort()[::-1]]
+                matches = matches[np.unique(matches[:, 1], return_index=True)[1]]
+                matches = matches[matches[:, 2].argsort()[::-1]]
+                matches = matches[np.unique(matches[:, 0], return_index=True)[1]]
+        else:
+            matches = np.zeros((0, 3))
+
+        n = matches.shape[0] > 0
+        m0, m1, _ = matches.transpose().astype(np.int16)
+        for i, gc in enumerate(gt_classes):
+            j = m0 == i
+            if n and sum(j) == 1:
+                self.matrix[detection_classes[m1[j]], gc] += 1  # correct
+            else:
+                self.matrix[self.nc, gc] += 1  # class FN
+
+        for i, dc in enumerate(detection_classes):
+            if not any(m1 == i):
+                self.matrix[dc, self.nc] += 1  # class FP
+
+    def matrix(self):
+        return self.matrix
+
+    def tp_fp(self):
+        tp = self.matrix.diagonal()  # true positives
+        fp = self.matrix.sum(1) - tp  # false positives
+        # fn = self.matrix.sum(0) - tp  # false negatives (missed detections)
+        return tp[:-1], fp[:-1]  # remove background class
+
+    @TryExcept('WARNING ⚠️ ConfusionMatrix plot failure')
+    def plot(self, normalize=True, save_dir='', names=()):
+        import seaborn as sn
+
+        array = self.matrix.astype(int)  # / ((self.matrix.sum(0).reshape(1, -1) + 1E-9) if normalize else 1)  # normalize columns
+        # array[array < 0.005] = np.nan  # don't annotate (would appear as 0.00)
+
+        fig, ax = plt.subplots(1, 1, figsize=(12, 9), tight_layout=True)
+        nc, nn = self.nc, len(names)  # number of classes, names
+        sn.set(font_scale=1.0 if nc < 50 else 0.8)  # for label size
+        labels = (0 < nn < 99) and (nn == nc)  # apply names to ticklabels
+        ticklabels = (names + ['background']) if labels else "auto"
+        with warnings.catch_warnings():
+            warnings.simplefilter('ignore')  # suppress empty matrix RuntimeWarning: All-NaN slice encountered
+            sn.heatmap(array,
+                       ax=ax,
+                       annot=nc < 30,
+                       annot_kws={
+                           "size": 8},
+                       cmap='Blues',
+                       fmt='.2f',
+                       square=True,
+                       vmin=0.0,
+                       xticklabels=ticklabels,
+                       yticklabels=ticklabels).set_facecolor((1, 1, 1))
+        ax.set_xlabel('True')
+        ax.set_ylabel('Predicted')
+        ax.set_title('Confusion Matrix')
+        fig.savefig(Path(save_dir) / 'confusion_matrix.png', dpi=250)
+        plt.close(fig)
+
+    def print(self):
+        for i in range(self.nc + 1):
+            print(' '.join(map(str, self.matrix[i])))
+
+
+class WIoU_Scale:
+    ''' monotonous: {
+            None: original WIoU v1
+            True: monotonic focusing mechanism (v2)
+            False: non-monotonic focusing mechanism (v3)
+        }
+        momentum: the momentum of the running mean'''
+
+    iou_mean = 1.
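+    # class-level running mean of the batch IoU, updated in _update() below; with
+    # this momentum the mean has a half-life of roughly 7000 training iterations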
+ monotonous = False + _momentum = 1 - 0.5 ** (1 / 7000) + _is_train = True + + def __init__(self, iou): + self.iou = iou + self._update(self) + + @classmethod + def _update(cls, self): + if cls._is_train: cls.iou_mean = (1 - cls._momentum) * cls.iou_mean + \ + cls._momentum * self.iou.detach().mean().item() + + @classmethod + def _scaled_loss(cls, self, gamma=1.9, delta=3): + if isinstance(self.monotonous, bool): + if self.monotonous: + return (self.iou.detach() / self.iou_mean).sqrt() + else: + beta = self.iou.detach() / self.iou_mean + alpha = delta * torch.pow(gamma, beta - delta) + return beta / alpha + return 1 + + +def bbox_iou(box1, box2, xywh=True, GIoU=False, DIoU=False, CIoU=False, MDPIoU=False, feat_h=640, feat_w=640, eps=1e-7): + # Returns Intersection over Union (IoU) of box1(1,4) to box2(n,4) + + # Get the coordinates of bounding boxes + if xywh: # transform from xywh to xyxy + (x1, y1, w1, h1), (x2, y2, w2, h2) = box1.chunk(4, -1), box2.chunk(4, -1) + w1_, h1_, w2_, h2_ = w1 / 2, h1 / 2, w2 / 2, h2 / 2 + b1_x1, b1_x2, b1_y1, b1_y2 = x1 - w1_, x1 + w1_, y1 - h1_, y1 + h1_ + b2_x1, b2_x2, b2_y1, b2_y2 = x2 - w2_, x2 + w2_, y2 - h2_, y2 + h2_ + else: # x1, y1, x2, y2 = box1 + b1_x1, b1_y1, b1_x2, b1_y2 = box1.chunk(4, -1) + b2_x1, b2_y1, b2_x2, b2_y2 = box2.chunk(4, -1) + w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps + w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps + + # Intersection area + inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \ + (torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0) + + # Union Area + union = w1 * h1 + w2 * h2 - inter + eps + + # IoU + iou = inter / union + if CIoU or DIoU or GIoU: + cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1) # convex (smallest enclosing box) width + ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1) # convex height + if CIoU or DIoU: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1 + c2 = cw ** 2 + ch ** 2 + eps # convex diagonal squared + rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 + (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4 # center dist ** 2 + if CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47 + v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2) + with torch.no_grad(): + alpha = v / (v - iou + (1 + eps)) + return iou - (rho2 / c2 + v * alpha) # CIoU + return iou - rho2 / c2 # DIoU + c_area = cw * ch + eps # convex area + return iou - (c_area - union) / c_area # GIoU https://arxiv.org/pdf/1902.09630.pdf + elif MDPIoU: + d1 = (b2_x1 - b1_x1) ** 2 + (b2_y1 - b1_y1) ** 2 + d2 = (b2_x2 - b1_x2) ** 2 + (b2_y2 - b1_y2) ** 2 + mpdiou_hw_pow = feat_h ** 2 + feat_w ** 2 + return iou - d1 / mpdiou_hw_pow - d2 / mpdiou_hw_pow # MPDIoU + return iou # IoU + + +def box_iou(box1, box2, eps=1e-7): + # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py + """ + Return intersection-over-union (Jaccard index) of boxes. + Both sets of boxes are expected to be in (x1, y1, x2, y2) format. 
+ Arguments: + box1 (Tensor[N, 4]) + box2 (Tensor[M, 4]) + Returns: + iou (Tensor[N, M]): the NxM matrix containing the pairwise + IoU values for every element in boxes1 and boxes2 + """ + + # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2) + (a1, a2), (b1, b2) = box1.unsqueeze(1).chunk(2, 2), box2.unsqueeze(0).chunk(2, 2) + inter = (torch.min(a2, b2) - torch.max(a1, b1)).clamp(0).prod(2) + + # IoU = inter / (area1 + area2 - inter) + return inter / ((a2 - a1).prod(2) + (b2 - b1).prod(2) - inter + eps) + + +def bbox_ioa(box1, box2, eps=1e-7): + """Returns the intersection over box2 area given box1, box2. Boxes are x1y1x2y2 + box1: np.array of shape(nx4) + box2: np.array of shape(mx4) + returns: np.array of shape(nxm) + """ + + # Get the coordinates of bounding boxes + b1_x1, b1_y1, b1_x2, b1_y2 = box1.T + b2_x1, b2_y1, b2_x2, b2_y2 = box2.T + + # Intersection area + inter_area = (np.minimum(b1_x2[:, None], b2_x2) - np.maximum(b1_x1[:, None], b2_x1)).clip(0) * \ + (np.minimum(b1_y2[:, None], b2_y2) - np.maximum(b1_y1[:, None], b2_y1)).clip(0) + + # box2 area + box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + eps + + # Intersection over box2 area + return inter_area / box2_area + + +def wh_iou(wh1, wh2, eps=1e-7): + # Returns the nxm IoU matrix. wh1 is nx2, wh2 is mx2 + wh1 = wh1[:, None] # [N,1,2] + wh2 = wh2[None] # [1,M,2] + inter = torch.min(wh1, wh2).prod(2) # [N,M] + return inter / (wh1.prod(2) + wh2.prod(2) - inter + eps) # iou = inter / (area1 + area2 - inter) + + +# Plots ---------------------------------------------------------------------------------------------------------------- + + +@threaded +def plot_pr_curve(px, py, ap, save_dir=Path('pr_curve.png'), names=()): + # Precision-recall curve + fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True) + py = np.stack(py, axis=1) + + if 0 < len(names) < 21: # display per-class legend if < 21 classes + for i, y in enumerate(py.T): + ax.plot(px, y, linewidth=1, label=f'{names[i]} {ap[i, 0]:.3f}') # plot(recall, precision) + else: + ax.plot(px, py, linewidth=1, color='grey') # plot(recall, precision) + + ax.plot(px, py.mean(1), linewidth=3, color='blue', label='all classes %.3f mAP@0.5' % ap[:, 0].mean()) + ax.set_xlabel('Recall') + ax.set_ylabel('Precision') + ax.set_xlim(0, 1) + ax.set_ylim(0, 1) + ax.legend(bbox_to_anchor=(1.04, 1), loc="upper left") + ax.set_title('Precision-Recall Curve') + fig.savefig(save_dir, dpi=250) + plt.close(fig) + + +@threaded +def plot_mc_curve(px, py, save_dir=Path('mc_curve.png'), names=(), xlabel='Confidence', ylabel='Metric'): + # Metric-confidence curve + fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True) + + if 0 < len(names) < 21: # display per-class legend if < 21 classes + for i, y in enumerate(py): + ax.plot(px, y, linewidth=1, label=f'{names[i]}') # plot(confidence, metric) + else: + ax.plot(px, py.T, linewidth=1, color='grey') # plot(confidence, metric) + + y = smooth(py.mean(0), 0.05) + ax.plot(px, y, linewidth=3, color='blue', label=f'all classes {y.max():.2f} at {px[y.argmax()]:.3f}') + ax.set_xlabel(xlabel) + ax.set_ylabel(ylabel) + ax.set_xlim(0, 1) + ax.set_ylim(0, 1) + ax.legend(bbox_to_anchor=(1.04, 1), loc="upper left") + ax.set_title(f'{ylabel}-Confidence Curve') + fig.savefig(save_dir, dpi=250) + plt.close(fig) diff --git a/utils/__pycache__/__init__.cpython-38.pyc b/utils/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..038792d1cef74c367605126bea0f82cb94fb5986 GIT binary patch literal 2496 
[base85 literal omitted: compiled CPython 3.8 bytecode is not meaningfully reviewable in a text diff]
[the remaining GIT binary patch literals for the other utils/__pycache__/*.cpython-38.pyc files added by this commit are omitted for the same reason]
z2FMqtk&&8p6F^7d>uAO1UCG*4q3lgl662sIN`<7BilA&ni?qBnci2_F z-$snZMo3X6-Ay8O=+>%)cZMp*tdP5T0me`2hmFuO2&~$6$ZRN?fUF5=kJpE^m#W#< zpkDF%UNHH58+G5?_e|(rp^EuT$i0K@%abzA_x#t8pQPPxT*=sdG!>g4s^1C!VPmz0R>*Dnuy12;_d z^X@2ol%vc+7SoS6-rx-)9r4zALn0H~=&i^3rq>e78{Az`Vt0ERpaLKEh9N=Pd_96^ zhU_gHAYVaSkRjQ<^)X$t0z3~we9-QLy4!TVtW{+v>t}^P|q?( z7=3PQ9iAm4GE4h$|NS0hNoef>H-#s>Z3R!`O2J&V&NA1G8y4hrs13+%U}vLen1lW7 z8=&rkm^sXC{UP@N=5@13&$u3k-F@EXtHvl@!;1i{6 zXGd8^%0@fN_Dk8Wjb>G8m;l^0twbg_%>Iy&DEjz(n4i3ugVOy&Ge5m$%KkOIG%NZ!6@7+vSe! zLD^m@OQ7t7b|0VPZACAG)@IFvp&QnP52D<<5mmczd|-Kpw*LwR(DL2h z-SuODDLZA^TQ#^>qft*EiZ+q!@rEsuJm_2Vlcq!TM-UUx0uV7}n9L7WuV)U1yyV7^*G11JJ;z>hkgMSYq}h7`C8BBq z<8{*Ow)b5(ZbY34+kin1<>JFq@9{>yaWr}N6ZoC7@57tpY<^=fZclpg8!>gp-j7+| zy?ig8aKAj^Rn(cR5bK;?9&^vQ@3?Bz&w68eO=0znySv;Y-Z-EqpamceAc6hBb!&N# zw+HLU5sps;$5XO$cn7XP*!C1_ z3;ceYx7)jSW5|9G66J@)z~;bfQEyM~=^N1tzw7O(zmsen?t8|I*l)kio%k-Um&k2z}4sP&0S!QeQ)6 zO8p7aTL?Nzu!JSriHs0*nU-CS%8;xB0tPX;5(YvKn50qj4RdTh^{%H?IFQTVzd0xG>;Q8iSZ$jqI;g56*?2C4_-lN05 z2yan`+Q$W%wqgeSXc(lCrGzr27F^`nw4tNbky!31F(i481fvKONtoZ3Vf_+cFN#~q zu)^kO{Cj-C2J2euX_@(maZhbntXUwy&eI22d@mp+u9mfy-#3z)ppK5s&`%HJo&xh$ zy_8!l&Md&3Cl>es`xgDr#g+#V3YMGVJe8MQn~GPkVN@11>zJ7RSa)c;|;mTxRzFVy*}N0~JFi8nY+7K*qE!H`l7 zqVh~iNNR)NZ5#b9lowq%UPGUNE6qO+AMcm_L-CXBA9@3U9m2*9EkxS1Ft8r~DCz_| zL%Y;SgNekav zJaBWEV(q>9-+5w}=GMg0OL5t)C0dF96Fp5)D}LLjB>st>f+z+-oRQl;D%t+1mJRYh zdLY%}TfNxb?N9s1Kf`&eXYBl^dB)OPJ!ABr<{1~?>KVJ*pW%n$jsQ6E0c_D*#*KjI z9DdFS64~VajZIGU*pMBL8gE!*mBlLnJiNp2B1+6TaJVjBZtSur*dy8YoqPY$_RgR( zs)cBLatPqH=qvy{6?x|Z5zosWZ8!&YlvHjBfZ|$Id7^-;BIXMMWxy%akBFPD=sDESvkhQRL_mQdEX{328+#ug`=(r@ zk1e#^in=FJ>Os(2D3<(G;Or|pR=-E|H#z;hDxu(7p(s9h*!T=DYpqm^k!z)wyJc~wM+CeTYrp+EPHY2Tci&W2}aDvBTomJy(##0o$l1}M}zJ1_4kTjsMo6+}(R z^1sDB&X1~@)pGA)Qx2zM%Rkbtzjrhp)l5?{^UozawL8s+(m zuwEf%*_~PH$GzM(ZRq98ufBh21B&V8vHFq(AsOx0!09L-PJpJ38v1#98K-`9=6n_K zO~ZBB;bNR1#svQYwZJbr47}y1VWZdx(Di2 zy9ui^jaAx@HG3pn+qTw9KK*el(-ju|9{;IK}I-KEHuy7O{q4x+ec~q1gxo zrLZzshP?n*8&(QT7Y$PO^TtMs_QJ1OSYrsg266V!z)aBclCUt2y0U7q4ij$DgRtx> zY;3&*1nF4I5N^YI0KyNfK&<)8&nA{r%iYWA<(}o<<-X;iIAVk&C)Y{PW==c68O>rGCzp2 z0)fl~9vU)LsFN7#$P93CJ5jY0Ri) z)&xmH3TY_BWzhU%oX1d;!kp0Yem4ku*4L-FYXvuGa`PN>!d}SG zlE)lP$f^XnUaBbx!R#@1-1W9q_aj3Q*2y7wW2NeHP)Nk@p*SP>C%PZ zx`1NO#PlUgL$9mJUYTMV^T&uaRj;SJt9>q_l z>g&jGBDi0~gO5XseZ^fTktdTev;tqDO!eME3#DpM|gRTGvnpb9;S#G(D+)mvfCD=7u>Mqhu383BoAE&L)Y;^vo-t z72d|G|CzZzL;|6D*oM1WbGO4A>qmG|Ci1^BCzJm%=03_qCU*&q&@~lSw-<|rOJd9m zj~rP7zt^}OG9JW5%O*8c&cSMa)uLdSE^I~M)p9QPX)ccKXjhxR(!z1*P4ZWR;OqLa zp`+h1P4hcZ@K>9R+8t{xl(yD7)(F9VbB$24(pV!n3gpERYh(j#K%h3%KAeE63#8WG z<5~4Ej4uwkRvq*f=G&}fXvr#bj}1!?O6Pf(9{6Ft6 z_$s)+*x`k+`ZYW`<@}hOS?VX;=zKSVBSz=b@bHh)DiP)ypzsDk^C1VA&m7h!R@rrc z65fqIP5?^?tZ#oV-v!sP7i1NKwv!_$H#rW};hZDXA1TWLaZnjrh)8QVudn9{g-?I< zS%N9>Z$T^i2>*SYukqAb(0ux-SkrVk!KmVhr((^L9t0lh;sSXdb0UlNbprtw5kFTg z!oz{4xU!f>!w7DJ0D!Nut{{+q&YXxjNcgC0JiUuK;lo6lb~noeLJ9vx(B-F2()BHO z>g43{!)N`ZK0Az<-K3W!3gbuE$ZNdyCzyPSiNH0{YrM`=O4Sf4PnI_YA=4fD}x>jQTkit^$I@j)^eV`3^iYWNrm%Lmy18^?ndI^swHJbPvd#e*888 zq1CpmwbDyXIabF)N7F65wGStasSwO!tRNPQ#M%=h?S>G5Qv*f>BPcGJ2XPzV{;J{r z0^cs4cX9}gi-4D{f#=*bfWHh4M`iVU*HM%PN`kT z8#dTUzNvE_=^Xy`rscc792Q#ym>G+|5=0&d5CxD61`nY#7%~>MERspr;XVyv2;z6s z6&XwrJmkbz&3vwcJ7Pp1*yNgAgXh|_Caen)o{Iiu>g)8Qy9P8s{XSDK4&h0ZOWdZ9 zT^DgTnAH^Q$E;!T?LywV7SefaR*U{}42nU<^qK`$vuhYvuZ6&JrX+{jQYT?xJ zFJVnL5D?n{>$>f07z?-{8CN5#uN@3&zqyw%$6_0a=(k*lXwL+DKfu)n5@1jZ5-yx^ z?j6!Er#2^en`R=l#4_;`6S%HJUw1x{Q1M82oQY$7)kqLwma) z#T6~9I*!~f!Ve1V#a@2jZK0yp^t9Q{HtY{n9ydc>T_J$NPvfAQN=p5RE&2?SPaXUt zxxP4l1Nb>4fkja^7ml^~0aX|i-Zt~4YZq&BlLO=9lPCK`(C 
z{l0l+#y2m|=$;8?5*nA{(kz|e_GiPF_vW+VD@-o$V~)w?z0g%$VRCsNbNi-sH-(cG zAqtlzXx(?PdwXkv+8#VhSqCw2WR&gloInrfzzpd=r;$$ahU(_6z#+A zwseoN(<#R^SMtoQ6^$~h=fC|0dLDCab$QPEu!)02Tk|b1n=eC-Z1=!GF(ibP)sUiNWx!+| zXfq{fv%8VU%t5~H_06Y&HtQ}B@oIE-Eh3(~VW7=b*MK(RZ2nUCr3la_-3H=bzo1P7 zl7|kdFF>1p9?ZLeK;vHeS{URM(6QHj&D1%Of+azM^s?0J(MUg7Q;@V>I0U3@z!{-O z$U|u#E0?G~=>atZv098fhP#2DTW4$+dGhxih&nO{1R0@RVDLe3kmQSkw~pG$ z=CuKR4Q-%hZ(t4%B`-^EpU4dg0T90MNq4A; zzeHSquHY z+~~2O|NY+u{kQkHiT`iuGC3`w&8SJhkPbCyGty<96q&L>1HJx+17E4O*^<#Qd;>NLY+Rl|O6X&aCvYg# zH`xWYrv8Sx+tDZL?{IZ=gCNqQ1kcA}n~5x$l7td4csvi>NQM zJ@hT;mx#%3M~^(zqDLN*-ay*D2x<4Cwo2@zM;>YuBbb->f_}m8@;)5*wNW7OHG%06 zlNK!!RWxSQ1PP5T6&zh|AxKhf~Z1lyrVOWzT6omx<7^1R+p$o$z^MI6o#Y7H{>&$^y0)GaF$tY$OE`lIWopn%t=B!98o4UY%op%9|tMCuWE=`cLNS0T29)Wic1 zT8BDn#o%{mM4y4^sR-5q0XyRl$@QdsLk&*YJX`LmT*EW@Tn5}_h-{=bq#6`<6X`at z*a#dNTm?bYxQaSSAm+5xk`~#nzF$I(@=V`}yX2bOE!U6((LoaegdK&yT@-=%(n4m$ zkVC8<1R}TrxkF6W*$CEIH)?^v2-X?2IPjk_E_@JAPh<6=CFw^@+C-Eit~464Ok-Nx zoDs=w&f$l@FFhJ$U>g~qF4BT9uO<*#B_$kNZZ=Paw8R1NyP>3;tKg$73g3QlG}g_d zjnN*xj6aIJ4k;3aJs+&=wAG4mY{rD&)e^TB`~>36Ao4{4R(%g+nIV7y&HA|VH5VpZ z;xJC(Kz-Yda1__Gu*ln=)gHF{v+{Ab{m?au`QHAldA^!8@huTylgN$HfN!r_YxDdB zdjD^@dG;W~%>xrI@r|a@6wOqI4f9RtjPOgo3Eye+m~jk%2|+2LDp;EpYHhOpu&nt! zhbN0ZOiWJTZ!Hf|Ai{r#3fglVAK)8V?ei=e_`A@M->pT;6fYk?H>JMJck9w+N1r-! z@~HX>OVWWD`tan@?CEp+vjeapge9WLyC;HCO!(2bnc%Zr1VD-d*#?vCYR? zP*NfS8~&p@cm_j;&UfQH)>3U@V$-@db=Wo{jNv&9U=3(VxhmT0W&V}jP-^wf_v@f| zFr^x3`6ZM2!L=b#kS1D&Q;=&Swwv^H3!pAP5i#sRx-QQ^NOn5~1#ar&RO5!`H(-Hu z<8Abjk=)X+Ma76QK0m@yg17JE5Ic;_Z?qAB?>gDYuEv`~%?7%`i_C!m6wC(m3cjP= z2Ys)JCUErGnzkoqxT68=EowSKL z!G?(+87cWyOI@Og{3#bVn-ZqxP6RYjKw+@Jhiyk2&(-8$gk{Vo8=Z%hK1FqirhpH} zPC-o_(>`}j;N}0#M>Wei%}PCJH8TqU27B{XHJ*|21?MH#}mw5;)O3Vq2a%JKa&q2@naH)>b`xN-S`M= ze4ZC+)Qs;EFVKhJ9+v*76ew73mDGn>gZu<-uEfSnJOf748YW5T=A*3h>rB4P+sRT2 z8|lYN)7lR~lx~7de@^PMhA;(UEGm#+5&=Zr9dq&DI8S4^fi|GDZKmyKKP;2`!$Sg; z_6Ts=iG11|M)?S6`z?kVL!WEu0Q8S*mECA~?zeuEe_wx8<4+IGc9?_G6toG;Qs`>~ zmLW6{wRYIYbwH;T?0JHGXfFVtaK!fx0n02>VsLScV%I_o0@F zMm{$SxWmYmruqUPT{q%m0wM{dqmi^Bty9#+_M?oixDjyYs`<(em_#S*y@+m*fO(}D zTi5$n^b>paR}x!r?n2$}1BKY1pTdx24&!?w;zyFnH+Z^32sK} zXp1140cMDM#`!T4xscn%KqMJ)OiO0)_ml&>sRP>8PQC*?M*9phUZ5i3Y{OuKnO$$s zG+V$YPX{Kocar3|jV+%{zjRC$?^y;;Zb&*W*gNGx9P) zjm%C$1e7siyM8_&0uN4p6&IFf)k^6KnmLUFzw6a_qVC9+VJHNFeyM661N|;MN0$)vXBsT5*VNEVr@ROYeB)|s2T+XY~A$vsu8mpA{CgCaHw;78olamoOIJ?V+`;--U47Kw=jQ@ZeEj!v$C; z38^-YN5O5YV^Qcx-U2Hr`N3y^fO14iatx>1!s!X-R)NDrbD;i#pl}nq&Ck_p%hMq- zR)E8aIRxX3El}O42k`8U<+B1K4y*BI?GeohG>q^K^3QDHtUEFQNT{ULoEt0@5R@ zk3ls6Ma?vELVR43;(aj8kGCKgzY<%A!4l%yn(CKrSX|&~ppRa;w%nzo|LCXGdqFSt zt%yQlIfbzE5)sHtX?PFogBRQZe7IwX@q%#FRSMiyEPG?44DYxp7~>RF zD3SU`p&5e_!!|1Z&w0kRWBSHal_gUBueP;T^cJYQ~~udRq4B=<_h zlx>K)9Ip&wo}zflW_x>UtfbUhT?$N_LNp^9Dw0l)dGT4$>E{4P-7Rnu2&+SI66k}I zzyoL##&OMcau=ifycS_Z4E^6~VJ;DcqrtmEpMZsHXS5%oWp@N7%M;jFH)%;Nz_->V zAPM1AYlTd$D|2w~p3GwLGQMkDTUV?A`V|A91$TUYDkKA-y_YvroZy>k`sR1ueDh6# ztzy#s=KteckDodInA%GCVrILXrD^#lAz+~&90r(y{ z08@aOG|?qOwmZNCz($L-CX{KnBtRrY=v|3*+DYudHvKK>?0Xv+hIDp3RNHZ@>p>f; zX_DKwbWNZadL`b#P#83Qfnnfib)dj=s5Pmo1f{n_dt>Yret2QTUcT!syVSsbxAchj z^I2lt-Qq)R>^eP2U_sD{0HCkcm2cqcc%v(#QJ`MxmIqN+A`hbRfjnqCwI#F-#gY|z z0B11uMj$B>pstuf2O^pt(CRKxhCPoap!R7o3H13Eihs?)Vh{Ip^iW!vL2+$FTQxL% z+A{ZJxs{g66-272^)>PtyWqmYw_w+XDup>gmO8uxNh^5xW@@|PvwLsTy4&U_c3>&O zkDX3S+zYe>>y9G*ew_@Q0@NtR$WQ5y*+WDHQ~g^;j8ij!U&qt{I6x=dwth9Hm5I5? 
z)p8Vfv{XF@J6@aL^{8#g*dY7JxWbZMLiOS0%T??obW5gM1}1YL1NkSs5J0U20XsCE zqTSHopmT>3cpOd*liL&T z<<-+H8X|IcY9}iTk0~tVX_g(~vm+8RRvf?KSNJomCY;^9TqYmj>Azy#zh~|<%>6Wz z|HS0WER*Jhj3K+{$An*ox5Q^zLwo^++W9G-%6QAjALFh60cGns=uhK}nx9PuLr>C@ zWT$X`^Z*n)6WCbO6^9NuBj5(O5_D+61O+z;2bdIgX`SD5 z;;AFY^jEvjfjw~Yj7yO@%8xY@{AUPTXbX>pf|^Zc>hW`@Ck0ABfA9IF>F1l}O~0?^ zhibnIAj#m4)?z}r- z^yMY*-Yq<0qiYXOsinebCbJC;1bUF!``ud;PSvfBH(JGL-QCv0zRniJUuWK_R(2Ar zK0J{xIc=<=ba`x1RT*hsd)zqz=lVvf@c_=vM294T0HP_0_(PmP@&MySN=a7?(C=`e zUXIirpzA3$UX)H;H(%KVdWntzuo)(mqYw=b1~%b?pkwY2y(4rs^bB1mcY`X5E+iH9 z-f|o?QQC#X0#sYP8QC!Y-gv`J5)#=MC}ltWSGd)`4gY; z3p|5R8+GAi0;d8PC}~q1^s?(!m~G{tsY03-Dk?PzlXOB$22hGZti}(Ll8LI_DJSEY z>T#}7e5)E?A;a{KbFgw;i#3b;Lxll2h!}QYbNO-k5?-#lAdtzAPy+7YqQI%gP-C>m zkLH#tm{QFf5(ukpc%W~wVd%u+(=5#k6jXQeQVh>wkPQjxKtUP9nCKWB*Kt3?YoKT9 zi^vCFWbzK#6q&ciLiD3}=wiVJz`*;kxokq-h09HcPhbOUdMf@DdXXh?6#ma-U;(=D zTj2P5jZFZPl+YuGZ&j|^8_l3zw_r)K#)N&&H6$+VE)Z4)i|eYfI3j0ML{rFxMF+!t zsAsP=*$D{mafM8udS1wS!pR`swbXwe^`oqhZ+t;OhEJ6=?l7JA;hKES%T7p(SbF%j znZacOAye06ExJqgacvmxv^pzdB7n@`n|ngm-10sco?Go;DK$Yr=MZfC(1SMY59;jm z9PRHzLi1G4>QS`Nq5u@fo5UAMvs9=ycFV{jl)s(h6s|?W*6w%`@xF?X6ScoY`^7zanm`lz=^3_|dGe`qQ@S~##s(6>)$u8~=TLkE@Ptrq#Jd{!yvWIr z85zY@9g>G0VCpK9GXz(G=c05&=1Uf7Yo(q*rAy5@LZ}WBPE5BLwV_+)NM82l`(U+l zdu%g^X9JoC*k+!AHo~=M8JI(4L9kGvY3)1$3*d8uyc&@Z0SFi5S^Q)x%RLyBE96q8TmL32Zl-U3ueFFHXOI-QWz(}xoslC9k9gU9!dQoubTDDFZ4uF!h z(+lP&9_aZbI0K!Ip`mK;(Kp;QoF6Up1E256tSRj!VYFegmFGt6yaa)HTR zO!`?xsPV5bx0~&81vb#r6DZBb7!3&il?ai1LzD1 zoEH(u*oGzQbHB12z6Q7s;UrWs;-|VBc7wDD2dQ%*<;qnLq)J0IBcXc*M?uld<%tmC zr0B1sH4;=12PQn|>Q`{aBfp!?FJjsW5G<(cCO}p}^#Vbq81gh~;@oPh-Sui%uwjbW z5C%CGBgt@+@F{lptL#_MM~@%a8pe5#oyHc5x>UtPDSD)6ick*(S3it(`XCS;gu)sg zk#cveu|PFB=g}o{7A_W3=J+z+4JW)M7*@0O( z1Eds00ztk}BoLYRfG&W#USK(_gwY5DqVk>}ZAc%?j&}G+ERpYSNQ?b6je0Pog))>F ze@)56aa8$HE;+U-Vw{Q9z-|r{Bgyg>a>h3U(O&Dk^~K_`gAF;4`j2Q3Q1>tnqNGH4 zoGP>d&Z?C!TI6OAqQG&rga11{(9JJlc2+$IfkjHFPk2Ulpn)JBM>uLfh3ePHfAk> z@LEA^v55AvfyG!we=7IGd_RF)LMxkkn8k4Mb9lIZ^<&IY^z;*r367fPo3-8F{iRzI znoXVqNe*3IafZP*#k}A=AC4aliCN$$U`wW>iU~!DS!Z!J2{jPKJNTxU%45Vk28{cm zIbgeRA_K-S*s>oqi#89c1M()w;|NiT9dxIq4STq!6u~?`PFAJdv$5tX?3r;9q{q8( zb=8Qx&JiJSMDz&Iq{%l&!`XBr_J!t;vRVSIPsBF2#$ds&GsewOi=E3~B(bA@70=P; z-p`{wMqz+&u#LFEeu5G2a*U`4Hzr|8i1*YfcIU&)wZw-MJZt;kqijxQ_A!=Cv+`+{ zi305cbC0uZACtdk6>?s_#ZJK$f2q6(CpV-{H2BQNVfYD;eUMFI;;ZMZW$VIO+z-VYT3{qHtTs@^$OC-i zVjZ=J+U zAH~Bbuu0Cc6XwJObg#e2=lv3sf6wHXnUHZ-8`*hM(~8uxewTUX7MQfUDY;|3;|MN1 z!4A^W8Q;K3C;LIuB;(0se{!2#oxz2UKh%~#hd`l2j2IK50G7=!CbA$x>?lNpk(YqA zvJ8C_#|Pyo5iHIICQ*T8tM>aMW_s7@Q>QQ9uZ4!qDw&|pm{hsd0zy@*E+ln9En!8Z zCIoM-s4S=eh0x6Ti})ZtKH&&75yXw8svUZw=_`=l&*s6zHt&^4q^$^(2e6=~MFt5_ zi!Ykp+6Kgd0-Xp;rIm>pZjr_x^Wos4g&L_b}EmQWX|QXG&e>vR+2gv4Jt~ zvVn!%X@Pg*L$ap?@6_;Mdy04jnsc-Ht(~wS7j*?~(gs5qZvped()}7*Zd+@z%!uTE zkIRgUM=a6yt+^JOLY%c|7m)OyX4|sViO2n3nb+1lIg0zt?_D*0aJ|(L4H&UB2-hir zNMoX5gxoBMnNsF9S>`kTIg>h*pJme8FnsuL@lC{FRQB>9SO9zlZp&N0QHQV~hE|&3 z^HVr!fJpdLh&KgzO{@=f0Hinqpr`|4R|?kNA{QEfQ^q^0@T;ztng>T3fq>vW5)uX6 z=?V>av6@ySq6A98p_2Tsh^K*kaEykND2o@U?itFaEnatVyQ4?7kEl@6OBfNgZmoS;x#5!T!jViMHgRG zMbO&rquBmub4MOKyYK8{M^pt@8=qo7#_CTn5u>{Q!Q7Y8EG!1|Wk{V?uQl3c{{d%c z7?nbpn>6$$I^ri|d?4fz(%PN3n=PO_9SLXDm_Qe93v0y${Qu_b$>NYi*2*!KmGAw|4X?0TFKeiABI z*+a5@WFyt_GD37}m?3J_mss{P5{&N$nZL&52bug3lj}@gVR8cr^mAj(b4YarP*LbD zvFt~f{3w%$nfw?N2|ANz?qf_|W%3#ml11v{OdewLeMs=7$w!!%{z_L`onGOEPcV`0 z{Ume$g2^YDNJrVdw&~~0`$1qsO>Q?4S&pJ`ZR7VRTozkVHI$%kkJP|U(d<#%M}Y#u zaOwYVt4nz2(KH<-#9d`VyL(RIUV2Pm6=~lHF4T)09xkk@wpvr8W%V>`iCpvqbF-}X z2oB3Hp3uo3G|?-0PTB4gM?pVFF23>fB^EbN@Z+zsUlwj1rulD z+q=ouxf(!^{H``su1>Nf3|W)nX3}ybZleG(jJ5P`s*4o}vlaK6lJ!CQFFT+s<(KC8 
z(PoX`6{#WB$Akz)5!q-5$aTyOF|`>^WEYcrnCxb9FA~2`_eG;b$?JXd5=Z&ZSnbc5 zkah8+fO7a$wGL4(`{Hq4`#KYD4&RzO{Mb2lgSWn$g=d-Mn7B;1chskte4NQ=nEWb} zuQJ)p+r*Uc!#t%Yh1e;J&7SC{g|hTTKLmZ3lkGam2t*Ur62rpFM(4_J+gb#S0jqPc zT(y@<1uo%`^AkuKF&J7$s)$634vL%q6wxs{G+dX=o#rOvR5%s3u-iz3Ga5A`StrBs zcsv;&iFbuoyWh0V|2+9%|8Tk|y*FJ*KOK+uet$^+-`Sh&-8wiD-=DrRxUJ*PU3EyAP&<=be!6aT__R=fDL8-c{=q`44X6M&? zZ{GL4x1UT+l`LHI|M++Km*bZ8XKEb195k-skpcy`IE$=${$_P%+IHPGZ>R3yZAWge zP%p52j92$epI`U+3GVX3=d51jv)t!J)F${zUgDFemG~T=;uX{;`6+&!&!AT3Y~8BP ze}{do)hg5OM@_Z0(Xy#MzDT$UJRjhZT@>kxvNG!t8(NBW?U9pNdu(VcH?zA1JYL&= z#N=zr|BPi!ypJ)((FBbdmG+?8wjJBtzGJOhuBCkH?M`mm+a;>O#W&B_ZZ>zqxVD=_$_KN}60Q^>?+`kqxXiNye+7 zuxy(ZS((kUGi;8DllY>o7Nef=MYebG7<<2kin6+lv&{O+dIHOF=dnGqxeJ>!5VUdC(6S`U9ms9vb^N5v=8gJ53ly_M?uf+F}}$HIv3Lc`-|0D*2ubm{xYu za1P%jDP-BFnF#RSe`QOlzPwVa!LXtk?W#^oF13LX(h1=W3>p4+G^{& z|KO??baCt(QoCDWlWV6F!%wIh3aK3^bD%Ls%6<~Vv&bWkC_agUKG6Dkbd9yHF!35% zf5n6A@9(xqjf1O*N*Rw)YX!$a3Po0r4FPW`WpKuUv9B!dW)6J79eU}v>D{a_0>Cqd z??4yI(i5CN<`laV-BQNXB+bnV-LjhMR<;?ReC#mbcI50?LgDT1S-`8a?aE9Y%e+VI zGj_*{`#|M1(D*lI4Si;%YvUOsyuCf()9hQweC$}x?EW9WCxLGyI6j);Q|SBF^i_;> z$8zcJSQmlpq5oU|E|4DEcP;vo@9_l_7<(v}h4!Vxp5U4NJ?!sgF4=?jG*X$}$ILOk zYJB?}roCIYZ`@d3OOIb4<1*yIW+w`RbLp$+0-l62h!Yir4?9u?i3rGNgN_WY2E9() zxVNi9`F83rudUr)yAo(t66DZn`Czq9+zwJVP)U$FOG``exFV3$hL6bfSZOU>Y$_G@ z`pS$ex8Ned;!DBeEo|?d3l4RoJfXkF@ZibBm>CR$BNrh#X?IN3IE}I8Oo<*&Q`z*Tv`Y53W)+Dg( z&m19-oPaOxyE{K$I^S|=mGMP^mw4CU>-R_T1vKy=50^t%+2B&(g}e_;>T#b)WJsO~ zum6I{uQ9@>NS2S=1MhZA!@_vhBronU@rQA%z$bWV&mMa4$}*n>E3*X)(bAxKGzI+q zE^|f|;D@|;$}qd};R@ zUNAjpvI+F~{3nlHaF{s@Q22{EipA?VLG49NU{U=bIq}fBjz_Mb$YAVkN6c`<11w09 z!5Xnc0;O*-^~m0{hIVH2!UAZ7`=48=7Z)rt@}`ZACsqZmmKIdAEor4B-i(Qd_ z(M%IksE5kpwj(`OaA62EkFu zr=Op$q`kba>WG)Io_H07cDpi(kq(uNnjLV;uJ(5j4#5hdO2eKBfK$0?#{0aD5t8DY z1w5D7X_lV*KPB8}=M2kM?bIX7U#X@3_ykfL4F{>iK6^LyR+2%?gR~qNMWfxIse(4` zXm$U24Z6IGnn_c{HPpU$B195C9&;kEpkhws%n;{s-THKPXd~rD(`y zGjgh~kxjEpsk_*UBuW{!wP$j`iwFXJl^>uZKE$#hYQxWRu({cJ_AtdSF=7pmu@jx5 zImws*x1F9IS-lX;Ui2{aVe|k_w}JfR6m959g`7o*m(YM0`ACqF?nqrUk)x4pr#BDW znTyU&xYSOPb`% zJXnp=69+3D0Y!S^DI5;i9miEmFCd~yi9qvuK(X$ik0RryNP5AKtR-$h34~X~WgI6( z9dnZ6J<^RCj8;#H^Yr~NM2=${eiu4@O8NUyvlWUrXbfdug7^Y|0P+1nqC#<#S|tQi zq2vP~Tbwc7NZU=IyGS!Ac!;Z1EKqS8h4$7ruHXJ-Lp*01xs7$9w~aKjS~8BOUBV4X zV7MU{@FQA)_RN*JMYA28o(ZOJJ`1SnXC2`(#!G@dMahys&r0?js~|>J@J}&sSD5Qm zn9XMJ#l}M=MbJ5W8VGdJvhn9(v-y8{p4h}HRfZfn#~Kqa(g$Z%{1opo{={{vFH^^q zxI%S8b#4YSCV7|I!XKGQPIB(JL?h_OMg&w)?webXHaw!CArJG%P`uhp_#g`3AZ* literal 0 HcmV?d00001 diff --git a/utils/__pycache__/general.cpython-38.pyc b/utils/__pycache__/general.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..af086b353c006329c513962130430abfbd0ccc37 GIT binary patch literal 41309 zcmcJ&31A$@c_upDb7C+!2!bGZYQ#$fO@fC`(zHa1he(SAX%f0dl1GE70WgC(!0HA` zj0cV^&@p|;R-DteXakNfCeF2+-JEO7-o$a7>~50HCV9PiiQ^=;k!+HEiF0vZto^>f zdM*%BoXvY6r@Fejx{kl9{`#-_`?0RBcmRK2`%rlNFFqFt{4sA@|6;f~f}iu1a3G)p z$|?p@=4z!ZxwlicT!X2gTtogfoC?c*Bo&csG!>O=EESV$N2)`v@l;%{ovBW_CQ=Ex zcBQ&-wTs=Qo>Y&;I)laDQg5nP;-TWIQeUc1;^E@zQh%yn;*sK-(m-lJ;?d&T(qL*( z;<4ho()!eTiFXt?ls2X|N<3cNRN9={T)HE5humAmWGX53-QbTgxmD+`CXYmcC-KpKBJ*hoXUZOZ$+MC)d@vh>&(*D%`(t*?gE07z= z-92wD+_M-+-J83&aG&a47f9WodmtBCIH-D5@1i|#r4HrZs8;0;t8S#eX)&O>2Lto= zyq$UwZ!=$C?*764(DrIIh&X#btL?S<2YH>V%XB^S=9z@mkGM{^I(Te(Q# ztlDxokc%#ipvJ9J_DiMe*B)CF}B`KQ#MsK?af zNGay-9=FsJ>Pf^)5`Rji5HG8z)icOhQO~Nk;yRtXANkViIXt_R+dUu1<#PAWTk20$ zR;g>&JpQG$pZ0Z?Q{zlaIqDl~QswcEtG=mvTvybeDOb%Pc2)hkx~gUon^pfqy-n2+tE#_HbL#Dgy)AcA-HWmLF7-U_YU(f5 zJJbt^&8c~H4efrr`YZL_>Rm|rF7?iQ06DL#cjNAP^*_~n)%y^8hk8-HA30x8|Bre} 
zeGgLR)d$u0BITO;KK1>$zEl0R`jGlCo_x3Z8}$+OGGgyiH`GV*?7I4vT2PBfdAIsl zZtKE()W;Sr)berl!$^Ct`h@xL7OX|nf zPvH7J>L=At;rc=Kx9Z=jpGFDatNu>?JN54o`#$ye>Sxr?BKH02AJosOpGWKm)Gw%C zME)OC|ENBvK97_iQn%DA>I;Z{Nc}JMMfD#M`>^^Y^~=ch5%neYE4aR_{$KTL>eun) zhWaPNn;sjPY-(-@^UJ)PGgKjl2u$+v>lm-$BY^E?M|^>W2}20=a%y z{T|Xjss1nJA=jtWe^&J7S%zY~N@nFCXV zx#?_WdRF2YH|OR{Im9D6H=WD4h{Z=IbuOdMS1Lu^h0bT(Nn8`z$y|0SU72yGXCznN zNxK<6o-gw4*tuypUnysbL$()9<)=>~*^47fH9sv`Y_DTf&z`uN%}u*rBv+~w@($kX zo^WQ|d@((dbDexO$G68SSJU~*j29ixyXBnYELmu|7rJnC{^O&Mjh;I_a&h$NnKPqa$kj8sRJZ@=@e>yw zK6mj%b@*+?^jJpEy$!*k{rl!>$+wl$<0W@)@7vspn<+{>`L~7YL9e5jFXyi0l{@KmOzT`W?*J-ZbXr#k1us5ctYqBuRKBdL zv9}%Czkm4NiMb(LuV)z@;}u=XxNc4_ag$E19(nBSi6_rrNS{6WWP0?}g%d}QU-Tj> zqp#%4p0z*KbMEZIg`l#O&8>qe z^VXD&i4q*~!Wk!<&wG*lM7g4KLm@BdxY`RAb7c%%Ia^UVv?9mrP0O>Kn*sd>lzwaX z-pNWSw|C~g2kt$1@7??Mp2}R#mG{n8ij~U;_i~V(y%V`|P6Kw+vtA5yL+Uxi=7kUh z?3ktZ;QF@-1b_1LKmVsc^U1G&=;yyV{~vDaRsGPj|7QG0KVM2ShM(dqiMrSAWcI41Q4A=n^>_$u`ZgvtQ>Ew>!-mwt` zY<&P>meJNfK9ja^%+QKJ!J3Ll3#y5UN@b#$8_rgWnK4Z5pfl@u(Naat6mw2Xol?70 zOsGii6DX`+?Y)S%Rm+hj#H89JA_ReJF9a6>0FJQ2cuqy8H!GW08P!x+%1yujN&m4ZW~Xyr^z`{zcM?;z6Du*DpO%{;E0yr0Y36Eavb44ntu0sF8~{7@W;DYe z#$HPw#9dpHaUTfc*V5#IjM^fm;FMk2Ot@hbgSZdLL=2Clg6E$&UhUaCm6-q(_g<-J ztec5c&!a~ldF0HA^kWy!q#wrGuMSMQ?zD4g@7^m{t_+*Z0C@?k(XIgAx$@<_M$dxi zSxXO^K<_{|%xmY-!aCI7gg6is(4sq|%Wa(@{5jgvndcC30r7cj!CDOHUWHXuy~jmY z=B*EG!DIE#GXW~JP-=Fp{ck|SpoigDxffO zPX<&J;}@&mbByRWnIBKOler}DNfxLvsb|VgGGBJ`DwoV8RV6!>)5&b5>}D`|^>DRw z@32V(5Js!3_VT^GPOdnPCRvlrem`SjlFJ&V$2^tS3i? z!kSAQ=;mtPaUCyInFjI+X3Dc(6ewHkipJ1{h*EE>u~G74tg#zGz)n~p%eLayF00$p zr;u_oD@&RW@yAXRTgZ666_J|d^tpirtRhTHV91LYMIskvZ3xWS)opI5W-AcpABb2O zIWwA125R;~coATF0<6nL^wW)CG@opYW=KX8BR&)!sYcz>^ssx?^{gwSCx@b%SkQ}P zvK&<}^hEmHqXGeaT+(OC0Pd7Ni5tx7pvr43+Z?d+BS={efdn9MMd#gIW~`Wd4jCL` z$$%X~=Mw-!0)U8F5rX0z(o+#Z(f77Ngag%ypFm_Bkvft!`^B7Hv-B3gAUGA#TijqF zR1P5w&jk@i7NVT-HOyZX&0BduQN?P3T2OVo9OPXMu$-_|SjBm-I$sWxxln{mVnp|$ zHoY3bgB&8wwI;WE6=Em7h}k5oo!fVo0ISSIZpf*wDOM&XfE$J{x}aQ2r!r-QnE_CR zv&D?#=*@W53m0>jbH$;czQDWKq{-xPpRP=2CO`=dg}vzIoR%$nP~{y0FfCIMWv5vO z9%2u#Z_|#x$fBbDlcAUw%$Hq%EP-vX=*&aB?uAZ|oIK}+Ph7Zg?t&M=8XeD1cyX1_ zy2m8W9jcru0TrVOV-+Xog>(jk&ekp;;!e;w(jF9i3D2Ej1OdC(8pOETmTlt~uwTW$ z5MuoIPoY=s2xC|XjQ_39;tenQ1SpZp6{igk5ae7EGWgt!IryCZ+*xm0BV6#~W!;4* zBmk~1Z(ZzgF;f~-nFrsC9DjpfK<5#7osSh=?EbTEKI?d^NQFIqZ~EA|k&~w%A@$Za z0ZF(th!=$$S&r7Cg{f!jK^Gx5x?uS*Nf-qb`*LtDWVXOsXd&c=7b4(I!;3Z+Y|xF? 
zfQDkf7Y zU6~r{kU64Lcx(_|h@_J~%pzjb#f-}-1~O&VadRbErCzu+g=G)Kc$S&CsiiMxK$UTh zg{DEGd%^7Z#PXr&nQ?L&6a_nw#{2Kb%T6zXfE~wFhyybY;$OnjSCPgk(e}q#a0EY@ zU%3D!K#=})65`-L{EaOY;z9@i5mqtPfonv?xs<)=c|-Mnqh4m)2y=DwC$mg;8;nXC zGp!(m4B|FXg3cii)`sPJVYdRR%?o8`RK_Eg%UsUnL0N$TvAqy?HT^bZUJip;I-M`) z-E?{Z4-JWeRZ{KsTQ^*PeA^mmw2qqyNanyI!Zcc0Zxw1K^W=pcFhcH4OMll2)pw-R zxiV<_bovv``r}r0RkMEod6rnW{>M1MFW}jRNWB3CYmm(VWfuw*fJqDX{eWXS*a9s5 z4&>J_An-nh7;6GhVD~)I zei}c2%;K{0{0=r(9>G@sW4zIJt~|1>1+@MJV4wQrvAu*}h!BJNdyo$}m{S~R6O8Lj z$qSEb%z$ML@_HL^^9g{T!#jrNx2l7Umeu?2XS!{o6T-*%CVR=f3A>Fo71TSi2idMI z3p2C`IV$c!3Kb#^62~5qtpYS3cW$i_m?me6Ui9MWM~@N zlSTl#_pp}tGI$?@=NS-YEz7wHEQ!f%d=Xjx4SxPg?hVI+v9M-p7QePXPQfGiiR7ep zC1a%qNpRo_Dpy5t4I3WK>wUtosK@h={9v=1Z7fO_%@P|ETkxa5;Tc(sY%Z-^p6v97 zJP2m5C@+dtUnyS3W+(GJ6@{RQd-`%PK9737f?sp1TerMj&6P)5ul~n4SD(yl5($O{ z0t4ie=VmVs8Ui<*g=|@yM!weU{Ld()zP8q{(1?F_v57_JyEk+HL59VC7HP>URYo!o{9& zE#?NUgRG`p0IMrF()Jps3X3HxZ7bQ;W041BeaM_v@n2n37TiwmXVT*$)gkE?}ik@jnqQ()xO?f zISYxo&{Ki34PM{UKU52SB%ne!>@vtafTp z0ZTRjR+lUdt@5In zzSBUtP*Tij4Xh$tKV$_E>}RLE1g5{F38#& zb7{8%t;4x5G&1i5G%)|LYmVSkC=4K zd)JLZ$`gac(?&eF&|zo>1){PLFLb68D(H3e|K0J9$}P42=_2$P10j8r50hxZ>C z-Zxr}9=d1W!TSz9kXn1}v4_tby_kOD+=WLkoVSq(G)MnF|0>e!!uh4J5DAY&+j z8AIVj*+^#zkA2BpTxgwym4E~%F8?%lEqS>OA#w=oKZhj5y8`c`zEd_6!ytYKo5^v! zCXIDhugdv3B&F!U|@Y@Cyj0Gm8z2+tZmeN#24lV<=3lyf)^*S8`TwDJV%GZCu zVAH3mLP!Yk1ln1r^f)(tidbWYFoB7o5t?%FYSNa%iGh8S{dr z%vFOgF0(l?G&xINbI4A`jaHH*eX4^+rb$sP?_@VSi46+U|1`8eK|-262-y9W{wA)s z^{*jD-#72TdEKU0Y zOT=9W10a;(6ECDPvkn#+d!^sWVAwWuJ35Jc%!BN5?Non#lv{Y7DL?2wnBPT z2!TFhOT!!^w%ET$L1b`D>=Dn}GZd5N>px>dNJQ&DXYfA|c%fM+e<4nVk_<9aG`rsF zm1Kt<(rQ>0Gql1in7$hDDyr z`-npK4B|C;IL-WR6C(89+w0^gIs7FNkn$pMD5=z-6IpA)UWR&so}!SQitdb1fxyR2 z)AmAt3U|wfO^7jA&KVAeSU3in62V!+s@AX(-N;QvRt*2T@!M$Ye?&@_$@M?vYwnRr+yGAo zih*~V&5e@l*c>1=ADG_E5K{(X4ZhS?3hvQWp91GJtoDI)XIW4Owy5|lun0R>RPhbj z)hGxZ{>%RY%Gcblq3g4~Raq#h+#JR7Wo%|pBZy{CKo1NOaj0DN7ryKmF)Z|S1>D(o z0wIoa0?_azAWMUMWV3IVLlt4mfYe{n{|mXXGZ7b>!DWKkiRvaj^6H%|n`;Gj7OWjcdJnZ$vwxC)nroToUG1|<&t~$xS0tD7uwq3k||yR z(jn5t9uJEKqb4Ml12ZqH{_ha?x{o?gf)LC`Rv1O1-pMB&#vBPxKz<13T86X|Xa#mH zQ~$nJ;P+cws6Y)P?bfO%(4I&0<)`v9zPXDRDnS9?MuB&tLXMm)*lE0`KvMx{*>l)u zghdjrmR18$WWfkx9*jL;`T+1W6EP1I~z~Mp{zC1xS@$D6>?DiUZ~` zz`PT?S6FDegzCB$tA&YL<|1xK4Jt7G1GR810*)7aFAYuC1?HmVjjCskrS#T-Pt_`E zSICqIQD#*2F&#T&hgz-rX*!CuHO(3qIv2_JCXhN%i=l+IQqEw_LVVqgsL6>obksW3 z`l--MmcCGnstqvRwDf@5SngJv{F?E`jnG^iecxQ`z}t7IB;M(s3iWet-C2tx+`{&$ ztv5RE56p$iy~w*wQnud+GKKti4G7gTb8E*)b#>iR1mik6Ge#?sz16SWa(9=>Jmt;UcuTbY4k9i?IW09OWGzYI}wdZt*ccCcK=k~5@`zxLsU zzxvq^B&lCc=A9&JPFBW~9NgsCY_b}KeN3s8cZJ7>b~pJL+VWhr_qik{!E-Ds3;cpP zka#ZXR{Uu(T8yq|uluERd#T89+Pd z&`62XDwPmhnza9xRfc95U@QLiE0%kkbToGzjFEj}oT20z16`9Ok1s9s?%3!yL zV9AV+eh!txDi(%yMw%3aS|qjZbQzMVym}<>vI>J*wj- zQDs4j2y9ZwN3tQ9HW^F8BvhBCi=oMkR?yD<8eSBgk=Jd;pe}#`rxc~!r38X-`3`J4 zVe(I~3q*+!8iVLUQ}+$y->sN0$Vp~}eqjduaYAy=SJnZge+%y|1CfyEWkQAs zNdzTkm#V+C0+Bw`gRD;+y)bfm{%}&0eb*gXg#b}dNF;$&+}=uw{>aKpe{=&)c+EQuxUa3oG+pz;w8@4d0Gou z^VovPTDn1qGRuY^n+w;%6wy0JDDsyHKvpw+0|Xg|n#zI?Y@Uk0Yug`WZV$XRJ(kVM= zlU`uZq@<4X_6^9H>ND7|rSFb0V#ls4J9Mh&abv%E3?^?d(sNV&$0zS_ zIW4@-mUAIZva3m=c@blktb?dl<7gl4*%EE_a-c==L`(>yv=Y<<5m@FxM`X-$Kh0(f zT3g3nTAG~wj0S07`D)e!EL^6^Cd97dCw4=DAX$4uhsA~Nihv5E)WgO$JplaIYxg1D zwzmKy-e=3!%@UT9sIpCAKxD|_69lLbA;%8HXX~UZ5e$gm&}K`UZLxx&uJjaU|15)oNwkW|Sg3gKR}39U;k zsK1W9UUvx-2m+(am?bdvGn6v-L5T6gI8Ia1Eqg~}%`Fm`?7gr#Tg-~fmnKA(w9;yu zA!)R1OWd;uZ|tG;iE@b2444v#AxhY;^3z4k{P85Dxk*khnBP~scbk>NRfl;GE0ufE z2-9BnL?6X1&E)ZdQ2H{hLeM~#CY-A2#4%=x(Xc}#azYF}%u?7f&!_0>-S`L|_#&}5 
z2zC(bm?Ds=K7#Z%@{OcN96zCi-h)W>lnc?9PPmjO*oX~C4B9^&VBzS(BTAVFATf>;58&8|_-K^|JKSXU@`2%eiMrOELqtvI zPoGJiE&;C%o3!}Jp@f`HF?7o*6c5AM%LgEpl(+PknG%LHlt)LdWb*E#xmj-&WI(R2 z%pNTkeS>L(7@ETfQ&18<`XLV1$~3I0;L{v5l7AH8k=r5>PI@MSbxqnvpF@sYQ5xk# zrEt@#261Uy)Wo91lQNtnYytEFloSOlZXD&p;)cKl^oGcvxy;9LR6*~m1ZofYi^WeX;}FBh`T zChA-nl%}AcK~{hM8vs)OjWcBAbZ*+!@bGM=RIG-EXG_I4Q5=^8rw-v;_B3_n z2aZ4*ghA;uw)#idQfb|?=J?rI&llMkmJHqmXFl51?;J5=K&f#IPwO>eK|u{qgiGa; zQMfx`N1PqBG6qE&R^ScvS3r5$Zo~zb3|bSk8N_y2qAIu!?}A%yc^fME#6p*0TX+bA zW5-2Uz98ul5s*a%dB<)*@q-q07f79fx|1ajCTQMTSOpEj&w>JG9MoW8HI90OoG-ck zs)Hrmfb9{e;d&e6Xyc!$PIpaV0JG=o$d#x+pMagc>cX7cIfogyu+|+c4C1ibx>{%h z&bMzsA2*;5o`8Q*?949xwY#1(^Cio<2j#4i9!PB_UG-5|=sb>RtECtHhH{nqV3@ZK zZIW5K2Imy25eobc?X5p5clhZfbB#yOT~Onaz}$ z2!n%UJs*ThJheIuifop275S&+U9F^=U-VtW(B44R=4QMIjGA*rSR}~F%BsC<*Qov) zrdYLCn#wAK0)_~ah_yO!w!Up~QajkWD5$%uy7n%#N{HCmWbP^i6d(lM$wSGZ4YH2R z8OcK^L4*%f-tc|>Nv049c@fB_iuo}wEVP*yohe_MflNfhZ-Zg_TIee=_6|bw>h;%Q zna1m|v@!Z9u8*ds$e`%EnRbuVl)H)}HA-|w`WKi?6w-!0L|?K>7h+7K z6-&r5AR_0%q;Y6Vb%7~@3kl|Bz-^#Rne>NY1=LoL&V+ zhR)QcNhA-m%AQqlgEWwIwzwf)vBSa0&!x&2Hr7ljP#q!WxHPU;kryMtXE9((|0xtx z9SVFW&Hb(i3UL*(F*4j4TWWdU?POW+27eAbmE^e~TG%<)pvdR~zT+sxZvZFB9(axK zs?K%5n{JP^^4DGP(I$;~7=!6kpQ6&m&%3bN7#*T7f9WPvH%oH+>+j&&gEt@cSfM0|5q{?&q=6cY{P<7+4Gd>)q$By-sD@;9`KOc&`^`RjbhI_syZr;GsdRfv-Nq zHr9X(Ne$rsN0^uQ=3QXo(84+?@nw^~uwM1an=jXMLctTD!mj=X_9@T+%zZdeUJt7y z3;h2Y7z#f*e*5fFmes|u7#bo7$o7H zbR#&IsCBAcki)?ATD_r`z1=rv&qAI1ZSUv8I@);HhV&mmV2G&>+YO2V86JEzYF(u|7-LU$+g!@1$)fkW8Z zH7pXdvQvVgA*G=m2IO!+#}Dt-r&j7M$wOg|HzFd;alb)PGyJwB3!BK%&b@Zf`$` zi-Ke&2nbr|hG~}r5jH{54hEs2WP+^JK+cV|Y#?bjf*}@|S%c^bExhqQ=&(ROgM$JF zSWa7r;+m$`j;`Q+%{f>Nk7b-mXwNVBG;B4L1oaG)D=um* z4!l!~nyhw$5QWPSMXFoxXK^vo*{}kv>soKTuez$<(E6N7>a{4Sy2?0Zd90kLUlKZJ z(RmaxG~B|Fvdg-Y0jUms8PB~$tN$4NIi%whHt0+*ti@h00;(3B^g7rD9Jz){4@fg0 zho6PR)fZSyhz6-%M-J!C;NS$*?{7;<#21FoJZdQi6 zWUYFM3`E3raC)8SJdTCZ#8w+bI?1=%m#h(1y^Rk(&EQQe^PP-wx?_k135d3m$hU5K z?c<-nKyHV^$UrCrmGU+oKn{X)hhB0s2ydF!^aj_1a7=(ek0FG>6N(=Y=k{i#N310B z@X#`!4k91P_CDNGY?@#UBGL6Aj@7$|`NEP*t%jG-7* z(=Zf+@C4g14w_RgO0^KkfC$N_LR3XyGkt_7zucILZ3w6i{Nf<|sX*!g`}_=4CVg%k z(zS8bS&q4#3kj9LwjZeuh-C`N$}Y1-LzdPpva}w^(tg@kV$%|*3yeAQi9L=XAK(|H zX)m?Pn6AN$q=SO~i!^oi`i^6#&bq?zJ!~x3+vx5v`pn;I{G2x<0Ock?fVmK8P@2 z)@4#9@IDRu<)yoc(HU=o;?pv7pm6m^bG7Prk#^! zNkA9~k)czKj=>_PFg;P-@midh$+*|gHxyiY9u3hiv!&eXXUKa?GaT^M;L;Rv-Dq`x zy*$_w)D4kZm^x!dcJE%0Ni#WT@BIh&JupKI;+JH^52T)UCykp55>uQ2;Og_)|M^|EZBWa7Ca@J3M~SS zd^7|D8D}}R+JpCDc>6&XBc=WjV}}`hl!0K1??o&XgSFl?8GkdHKgg#tq#cbB+spB! 
zH4nU6z#5U}o@CQ|vG#44pm2Oczyf*!@80SlVi7{KZK0!xrr3KJH{{W|*k80iK^y+LG!x!swgLaZKA#F}8v0X=K!yl@SFqqH7-;&Q z@kIpsG~RCm0!!z{Cp5%PMC1fOvVK4db)gQU52DBcO5WB(W$3X8bY<(@j<@xXGg#Ki zpF-LwJIQQ3HUZ1BO|oHfLvJp=!;Rx)Cn1~Ya3HWxbBhE?xE!u(2FSsjrZB*j9c2Xw z`#Uo$dP@_|aA-k6JEXuJ!^bbCFTy@Amx51#vqz7eyZB_AG@S!O@DotZGYBAc;W0uu z0HQ>vKsh>OIrG*BAT@^sQwHJLo$rRTCgF22pI};P8%|Yk19pUr0%hRn06ZpbTQ)?Y z+l@k4@!hPvmF}eec^TblpiZI=xUdNcE*!A|dA8ntu>slCqzPIEpl+othP%Z5(9XDR}U@Z4CzH0dVamu7r7Vo$Ic31l+DAMWA< zmgbYTd5L?WZDrQeOiq-605eE?n~M~NVH5;mgW&;sig3iU5Cj}5L?JJ|DA^G01|fUZ zPq7Is5(r49sZ3b7c!iY-gE0oI-w;uhO=6qsXF~g2_F_Ow2PiN)*qh^Ngz^Pkqr8@PLPG=9n+u0`jB0+3lbKAiYFM3AXAYro+NZK%@igMEdB`w=SKjg|6FE0sU39 ziDoVU=USH9bDc&fy;4JnWh3_)$s?)+v{>|+m(EdVp+749UhVf--mmAN{bEu?A zT!4AWlp$v!JF{Fo{Vk@E?l9W9PTrEV7j&ZqubsgyRPnOZsZ7Rk7sGT43iYhK;bq9d zK;%Wkd|g3+TG@Sli3zl>GkAhn&P&w$j58UzJkbRCEuKq(Yc*`~gCJ8#s)Ve8S3`Cj zc55-jBM7(I{q}A!DCWTDdgQ0WuIlD`flW+ZPhl0Knx-K#+_#}nE^ArF>^AB6_X9P` z5iZl<2_ys4{Zv~I#2wd5cvTnhxVpYxS+n}O2QOc3pdIAX>#21^%+ZkY}GD#QRYn z5c5`Z4v35+Gz~V4ADE*J*%3l)scba65FtJ$Sw%fLkekKhDa6lQgL&E?_xR*tgb)Fh2nkgI)b5&MrHV&gf>$)|uyIm+hZ z*Z5*C}wxajb4r&|AI1r#e#?}&m zP-_1Nyeh}KzQOxI;D0`}q;P4YrMM5)e`2&L`=LD^Gb;dIOzM;lRO@~h z`!S(87!ZX+p>`c+LW2RM!Eb^TBnx7Rt-Bf2Kg&J_voPAQr}LV#{w8G7IG#U^l-d3G z9cZp#ng5@{n`ZuRz#2jT(Wk3a_|pfV|goG3Oy#^*lRFqhlGcHKA{(XX)S8c=K1 z;LEF_;#;?13E2+!_aNIrugrr5q)EeCf^OqFTrs;gjRvl`y;^xgYP za@lV#2h8PebGgS{?lqVD%;kQ$95nA8H18cW?;SMn9W?J9H18cW?;XT@p$$*tHH3Hz z;XS3H)n0fE=L@FvKETckL%B=0%(FQDR=J{ojcGU)0x};a0-@||rtF2UOv1%QDlvv* zGm~ywUQWgRI~dY7o=8!tF8_%sG1b|4jh?gp8^40aL)OzMMyjJX&!?T zI;9QSVJZl|xSPY>WPiv%tqZ&k`@<6&bh6R1q#s5RU*gmvt``nyL){Ta%c^ehKfeV? z0TJ#%X4$)_!OEB6*<8GJ2%a>B|28rj=H?5?`xC5z%OhZKqm#{l{@-6j`ZeENI**`j zV=@i(F+Hgp3McRp0WzQcT-x-NCua(fPHqU#@$a4Bj>e&wrISk@r-9)RjC#OuLaYIS z9j!EhZl~FT10KF7AZI3Cz{bxFPt(TA z$b33+Tr~^PHV)>)gEo$*(;@Uz$OHEnIO`|}wEmvAaUK^>LlO=z-!#yKPx%n+X)|Lm zSJ5*l4+jHqyxqaMrRbF$CuOTIY{92%O>WwSNY=EU?^g-_K2*Zp6Q@+cS$0P#2gXkN zHI&?jZ3$oQq{J!Wz>+X+VcA8N0tn))2OUpehxt=2*iWzMfRDiDwz=BC#cZYMqqb2| zR{*`xHZ%|$FH$>?Au-As?0bgn{ynr3?DUuMzW#l}_P;YA80!Y)giy=8{t-O-!}`1y z$;um=z!B+V9XKu1yrI@JV`=JxKRK;4@SqHP5Xy-)M+q7t>Rx_RL_NUoh^RO6Yar@D^$@=TqVVMp ze(Qr@_uv;j)SLMY4}QG^U&uH=aUN6QEoM6~+a{#|OPpMC%5jNvVu|~e=Ek)|k$}E~ zu}%iGaarnSz#}(HBUwV+iLkx~EILd&7yL6JlDF&w*yqUqH49+khE4DPk z;2Z+|yX@9mczc1tD1*lsJkH<=22V2JLRm^7Fe`-JYKh6j9LQF(l?V0-?m+C|euF~( znDg@E$RCKpHS+)IKe|=^Kl%4BdGDX}KD6~V+`}0stL6nnNEZ1#Ouw-+gM!HyltI$O zRb^qLfECp`*;eEZcEH$Pf*S&GAWN3Q&vCe!Bh;2Zyh48DKz^dfNQNTWcoKg&Y=IMa zu6U*&qo$Ek1ANAVM?t_ICAlFapT4>~IlDV~Wp{G&SqcuGzPcX?_(=*blR0R5O=1#C zS1HY{Oz$b?#$neDJAV|5y~i1jOQlEQsqB!%*o3P%lRZ6LhTk2aII;ZR31LHaa@a6z zLx+;1aGNhXd=h&YxU;03;-xeFwkEe!gzVA?s;m62NPQRNivTT^&wfXh|MS%p-S&$7 zJ)=pi6ZPDT7(TnPJqrsC>WX^`Xg++|<)+VwTC>X+M+f_Ifc5+p*Fy}i1Ufcb`|wZS z_qwyL`mdUG|B@NklDBoXJ+^$dm9O5@Hgqs*^(UG?v92D#1UvA0lWlhYo9Z)cck=4& zfj5m+Tqx1@2&sJgH2eQ*lA)z5PP41CuQSd5f0$&$GMz`vwg&JT3Af=3_hUBbdl)p4 ztl(EnAwY^#=-aJRXgBoj`u^9QLNENQrVtSq{9XKu5LZk7mZ?9Dqx_<33Aove(Eq4X$%Q)&U;|E@M z4E}Y?xCNfrT3&zSXvy0G`^;c{8J{Lnmwm%oVdF46M(YmIwrA9r_R6F2uV2T_K@#HD zjO#MI|IaT-mcrH2w%T!OZ26|cJ&l{#zjP(~v*7CN`o=7X!$kvE;=z?>0dg4<;yarK zLOOtCSSv&WSDuVGWG^5v!Y%0`N%u*Ob%wk!vje%17jFnshYv`S4mTqsH|=5JhlX&K zNzZ_Z!h7P%o$ht!zu+Lw|6S~P0{sfun;}d$t}67KUbpw(86K+lzDY2(_r8&CdJoPW zJ@pAJnfnF;4QMs+mk$Mf2#tK*F3J+WI(yIZF0z}NUq=x;lPr1M`X8B*2BG+I+`KNK z2n8ca#nzTn!b(p=HK;GqUrL?ZdLYk3@|^v??ftdQ_u2p3-k0a*eZm3m&6;okWiyE; ziw%QkL8v~RQCB3K)c3Nx%XXqTCqNl!@apV?tuRW$NQD3NF}!pecpAd>PoOS1e^^ft zp`js8QHIp0nsZq^lLaloFevQMVgMAjXb2!O;KywmV5=_=oMkdVYXO=64B~4%TVa_< zx|t}gOU2}G1<+6w2P}&D=`_e{nBLsW<k2`x 
zd{z+7WGFK;L(I{{nJoDL*z!@uM}p+V1DG<;02n@q;#e7Q^>U6NVvFj3To(kQfanw= zIJt-A4Tb(b@!3-c7c8&QQCm97P%L5q0NBN_(@?jA6P1+$!8#qA*BwfpmbPZ5@hLB* z-;5j6P2%ci|6%+W^V^NHZDz7lg3;{m@@{t5v$LN$N3o}Iyuk^~(?@WJd=XVFeG?Db z!BE4@fM;AX*zk=jA}TP`B6}rEEDm>J-oqS_7Q!PNj}IxE2xKw% zZW@+_P1>Rb^AA{6G}K@ab6vynBz}U(XdV_|M~VYVHV%CO2^|LTc&HM7@Qk=nwnJJ@ z;yY+|G016)Aq`!k#gg5V)*=MgB}0f_wO8}D#b2j)4=lp1`} zK}QMxr1h<4>Nvdcm0=6QviK96gOY$-8e(G=oB%}~h8I3=BI^PVKx_?MSbq+Szn15C zciD5$WACiLAKE1E@@xPs`mmK5aZSsNYRWMU9>>Skx0NPS$P7^9S$*JSvelmf3HZtxvte)*VWkpp!knIzKz-4{K;aprnngk*K>b!` z>cGcaOYkHHR|aV&KF46$#@}rQs$SB&I9BTc^wzN=0YE&;GqSL+P$;GijpDvJVqe1z z7mqKSgk4&jUIa@3y#~y9s;==2{f9csJj2v)dnPhV|Jm~Qz*a7Qul!u2RM^^a$xu<& ztabTQEU|hE{Uz4OFD+UA^e}Q`WWp;hrkmkqbQ#o?_!8c9TAu)rFnC-|#^Sww95h$n zhY5UY3aHy6s5e@hZD1`vdu@P!xEL%2fx4x-C&Okwsx!CO=BU+3TNKeV(P zC1yeMc~mYt`C^Q9r8@p`y31q z;ZPN4AZjt7c5nrA37myU;IkfGC?`yGi~BAjUCfv$RfZrOLv!7nYat-uDNA36&jic) zaGBghOaZgNFfqz=#Q2gGP_v)w$0gS*elGF5(a6;?*Q4Tdy)L&h@)6oP=B-pBz~WB8B~=SA_|S@6N4p6NbEbDfp}cfip0- zwzjs=RqL(|tfTYSwK$t==`Xq6wFpzh@bOC~J~zmc@njIj=LwdsR!I$OO$~GFYJ)yZ zf@+n4No}2<0=|bSm=Jxnb#9L-1?AM=FcYL)ZT2Rj0w&^ z{|-(U57gE!^l>a|G1wIa7wu<)bL;0e%x!d6*Ve<1Yke(@^S&ds4IGaGe3YrxFTy`0 zdc0|Fvy{EQwh809wzdhSY(`(g;~eMB_4dk$t^sAck@?0$tYdC1PW5gm3_w%a1;@8G zO1q;`+8wpc3zFaDt=+M(R;@AfLEuV5*bcPmbEtEzU&|`hW8ehH4uZw-Avt9)tm9mg z_xyg(C2L8XZvJidy?myYto6ciZV=ANnxzRQEUZV{23~~EfLbq4ZnKQ?#@EVaYFOTy z8cW7EHDOEKgE@0&qrLtp0#0{=`LFjv>IGA@Wp1mxp|EjbQ*BFaYt3E^&TXn~GNV?D zN^5@z&<@w4Y!{xbekr7P!BinBYaoC+|Gio}6<}&P1S8pLcQZ=jC~cXC$uHK`w%WGZ zRtlZas_huXJ8Ih-t!T7u(Ux)8UP#u*2_zZExiJT8eXOIlskUtadFVl93gQj42)R(( zn%V$%ggEkyFpiadC%yBTbhFB8{UGyKiRw@I{u~Idb+QG*x9tgU6xzf_rb6+|m~!Ao$P+jdsuWlmLga!_h6L`pj zt2Bf~=UEX#8ph+pB(@uz>PU5y@0OIhzHzv_VrzK_9A1@hp1ZnZcT&8TGQ^1`(cg{b z+$%6nO2R{Dl78X#5AUlc>cyO{JeJG{aKIQQWZMm& zL*~U;7zsl|bv{fKGp=!sLIIu^Emz9)HKqT6Z+%?~y`0Zo@ghb^XmYd?(JqPRqb~9r?

j0F+bsG=_q5I73+mj>*ENnaIx9Pt4>&?)^#1ZF#N;jIRw(`;S+`;`F4 zPm{KKG2wfS!*x2X|OIa6pw--SPwfg5#;d?jYT)C9Q4NP8?IkGuXNC zbJ_#eVR+@+5sX{IQF`-YX*=wV!7cdeI1jKUtSxw+unyr#A4oie_}34sW&I+i!ma2S ze2mf67j7DV4)qBSljEjr@y%zNaDavDNcNJA=V9Ewg;%O*PO!;iV!DOxqz@%6EGyl z`8g~Ina#OUM$QhxSgfN0-<2hDE?$CA!O3P92M!%gn{{{?GM?IH&Ercn)0OPxNm=dB zUmHGJ9XwZmSv>PW+ys}DR43iCc30#65%UZ@-EILmjl8Dx_QZ3aEWlUr286Mn#-%A%kwevCt0Lre%B#sWJ@BgihANxK;+pG5PrCqt z9@a}E;#xOa-*p4(5q#L7M-2}0z>&FMhqUzztJMYrJCwAMw2l$-nHH*g9?+eDFKVrI zn|!1=8#zIdvM+9bVGZ)2P6&-Q8ecpMYeAJJX!X{9_|qL+SO=b<2d%mDhE3&+-uwF38%)ht?~>WkJwi{^Xml#XycnSX0#$QTJW)i4=Z4tjptxl#@k1#aWmP9 zy8uX}#R9T!=vxiLN@(xiYUkc&YlXtW=L#IUPD^H|lY#{VA5_EJ5A1Vx>+i>E(f6<+ z-;bagMe6Xq6T5FeUW=#D3i!us$$Wy74A8XJoWc1h%OP%B-j_VmPM{y=1VZvP`vLjE zn94jzQU~VDGo`X4BD?C&t;P-TDO%yeM}_$=SMmh#fOxu1UNEF&yt;a8@?qf6Bq1Wg zyE6RNA@K}Mc-s<%+^gDKlltGFaj!js6cAwq`|9pdRNx~a-&UK-UAr235HK9k()ZUI z$J4FjsQ(OLeC-Nbr%ZcgL-6p2$}lwk_atAdx;M90SGD8ulll`J*t=RF0P`V!`)@Kw zuhHf+l0HFxDE``wdwpo=1x|q1g~YkmroDDOuM33mcLIZEMV9?)HIkHtF|^*0xATC8 z7ctbkp;;-4GOkJfoUy-UaEO6M0JrV3Q^~n)Xr}O?#2ilEC@+Rj8>uu<0v)^aG_kL0 z00m20nOl-ME5SYb7e9`UJy72${}pH^7*L9 z*o=H8i4F!l9~Vi*oo1z{iHE!py5tzuK|gC0?S^c~v|bd7GS%5!8n;=Zcek(1y&SAD zv@U>Mm=1UQunWh6qz^Y+1EBR1)-KS5^m&)W^FFKlRh$vPZ~d#bZGYSRl5&j%cY~7b z^Pe*hKF08BMDj&K5uDk>)XAd0#$VlEp6useNAs~S2YpFvUoAL6u0047e>t#w9RA{- zeFbs~_+A4U7QiR|He-i|GZVeJ7Umue0xrTQBFuwqtiJQfVO(Ft$7f9mmynGylA73) z+j4-YT$X}e3(|JQPZTYC0tt_NfM_9(-E3L?c-Dz8fbf`)c%NJT9KHLx@68#8v#AK}wA9Nna4pfJP8hid9%uW7sDjUE*5_XahRo32t?*llwU& z0vka|^}|OWe7`LW)WU_e)Q>I<5}u%KphQORqBR_Y#uaxU+2KwZGK8)X!TCmOBRo0A zJ}_Yfq2GFFiEc6shfJ%PA$#eoO!&9NJfe2P_puaDYb?Er^r3ZzvlR_s)Hj$l{3_+A z#*{#Gvciqx^p10BCYJ*(PKH&Ji50Dlkz>;yTc1X)@*M|I>|O^v20*8WGojw9445)F ziJzFM%sBqp133?J_W^yLg$chbCrqyJRtWA-BL+Hrk_CT|1qY#yVzAHX{w9&8e*i(M zgM2NFyPf@f?8r_Hi95WY;1rMe8ENG3&;ALqEk=yJ5UCVrjb*sF?|+;No5&#$9Pk(Q zpmh%{sW!?oC0qCa{0i;ha)%!xejrD`f~IC$7ibi(HpTpVd-2l4s0&(U)D_g6@V_^b5|f>P8CBZOP0YP`foAz ziwrIj$|AK8lz*9NB;TNGl6VDi93ah2o0GwkUG%drb0C*7HQhKKWImn)y`eifOHgw= zqn0UQZG-Eleq3=rx+4V8>Ms(k_%;3*XCnpg@kQ#m&A}*G=->-NP`lf8JA<&9j(5`>_0Jbfb{ABf4R_lKLBM6`@!=`_@vzO2E+Xix3M2&L!}{>{*{#)MS4#f#X}I) zEv8uANNx7aJ`m~7uTsU=szh>h_`KQ^UrxAGF#mWRet2P1WP!RA=$bo7@ z?_FY^FEQB1;0GDp&EOpjGVI;mtPH<^`D$zjs6p-GaKB@G1oWFvCg@6*qHol}ga5{z z0Ka}W$>wm9dvP2|hsR=OlW%c=EX-(`@my+{{<9Q>Yf8ktC|nT8BhGK*){h&J=9wjU zAm#BxpZ1)F^f}|`PoRPLp3>Ar8Wg2`hyx3sZ^$;}unR(zrkO_L5pnY8^@xM*;ku6m zN0Tu2x=bp*hOF~hBY_p-)`OEFwkn<03(OZfdHT!=Jo7=v$7TO+S3} zLi*&n3uljxra~vrjf{G|a0#6j>Xp)@3K+-pD8P&Qcl1(#&&c6CU?-)bK6B}H;jNLG z(x~z8s(IMUc;48<*Z~IjGC0V92f#GXlIc?nc#^%nJ0h9GO@HiM(_*0nWvlkhBl);ZO zcn1OU%Z&Xd1CPO18GMt$Uo!X`2LH(5+YG{dV*`V&40ba3H9pwH*dq+iF?fu@lMJ3< zP-5_21`7RLXrJc(69ba$m}If zpXJO-AH&zZF37$nTS5!ICZ;u(Rk6eH7&lYQJ%|PeoQ(*{^}Nju-{6lRH6IYeQb^d5PU$8;LYEB2Bk^!79E-%l-JS97P_TP|GS6-hUIW2+S0vci z-#5_L6$|(G#Cwo45|1SCFP4b)BtSo4i(3yTkgMY1cmiK8+!9a3d*hwB+t?kCKOEZ_ zk90@kalExZ7LA3G9*M1wukMfXef;Z+^~U0{IQ|m2#9|$Y^~P7@X#yK|>%YH`SK__B Rcg42GH$;Nnp;#;y`ro;rWt0E_ literal 0 HcmV?d00001 diff --git a/utils/__pycache__/instance.cpython-38.pyc b/utils/__pycache__/instance.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4e36cd58e038836f45d2f40b0d68ea8d29c57f2f GIT binary patch literal 13151 zcmds8%ah#3dB<~J``~g(u9hMtfigvLaD$*rYyycKXl1ioWC**ZV;p$6xN>x(jgM3JS-`4;Jm|0S? 
zoTOZtEi@Y4jYjwPe)PV(uu#+R`_F$iJ6B)Uw4c#K{%7LhHC*AJq7a(UeXXZAb)D*l zZ}iNj$#v7WdUn(1y5*O8hP| zJKQ{su@zChqcxYkWl`JLn@7Z)m=_D1X7i|6Y##efO&k(ScQmoIZ!}l%bXY9oX&Fxs z3Hu$be&in3r`7e?bT`_#m83WD+=y!H!`_A`QM0af!)X1UP6Nwv)sJ?z&hPH+Y-QEm zz4);7Lbo^Yy;kUL_Pl-+##I_~o<=9t-8~FM>+W9GiYkq2)7Za?y5-}S>rVIrE;!gYiudK5*ZjkQSMHuep< zG}hkHA~Uka+K;q-W1ptgN9W&iW#~B@!LTp7{Y_MMz0m0dvgbz5h8OL4Uf=oN_usqX zxP9SVyZ+vlM%|8U-LTsaBe&o7BrQI!T*Z3a4d09HXb6}{+6n-v@7_hxzpH)m^tr8| z=banA_^lUSe&M-i&RubDdHr*Hfgjv@`P?w-`r)~B%En+Xwgf<;Il(2UVatItz6 zF8DzkJAbvoL*!BVk|Al!a)k@Xej$_LPYwbI&I-1u5cU0*od^7 zrSN0~Y8x9r0i#CRPqa_1ef?w5-L@gjn}&RGjBV224~>p-T_usKxqpQUcI#b#Uoeyw_Lv~3e=X++AGc%t7|FU^@br4A2x}rAmd(ahgh`-)t92Tk;XDdWKdC>eFY3#BNgo|jWYI`RwV5<&6n+KZ{y7@P zdJeey6mYd|fDyobiZDCI69H4c4NwDw@^WON4QS|BB3spN8~sZ>j`~L!N8|fZDH~VL z#+^)he;ie^-fG&*D6z&m5CilQVn#1s4EnbK#PA@*I31EZgo$WxfFgh(2%;@dHY5oc z`520IraJjBs`Z*YMa_>>!5}8SQxGS3$~h`XlgJ7cWfXN&K1p>R&bXOEOS!EU$aG76 znsHLE=@za>>B1ARvzTBf89XBxGs=G(6O6UbprSs9dIDe|X@&7afP?5e_iWMgYQDvu z8c|XsjuM5JM~B~Lo+BaZxYBdOgdL@WFG>>5_lQcEo*}a8RHyyk)fE7ywr%qqzk>lv zZW%RwG@opey9)G2w3u!2FYo}_uSYs0|8fLc+XvHf3p79n`XD?V*&pkmAM^-NV)#5g zWAo(+Fl@*r&=MH?Wsvs;0z}%2Ig(!MfL?$p>9r5&1<;b-QrasF2H=gFll)$no8c?j z6E%hq`kr%^bwJ$-q;uAXl2$dS!}==@s4^J^eupd>N~awJQiP3cA{HR)j0(nr!pu{I zgNj_lO|nj?MnUU&rN~GH#TI>vX*xE2ug^#W0*PuNYiME9lEmx{{SLZWoojYYuRw$> zfn4ctR7_eJN@%Xhwn%i?IwM`9-GHHkb#5tB+13oKGYOtqd!Y6c=ls#$7``gYilR zev@ibd|qy~Kvb>P$LLF~om7}5T}-icBd+n3EhfiDF#N&lT}gllY8(8%9GRrl!_%+` zt!`)CPYp>a`}I<)+EV1gV!`@|@^U*Y8d)Ah zU320<`^X@(@UFfNZe_tswk;^!Xgm{}!0-HMVTEKQ=zH-cfuC-OMW5kZ^jnSl^glSnUFlcRT0} zUj3BjqLr0sWoI6^vg{?;>K4&iy{h1F4)06SI!fOd{XuMdy+O1Ght>C4l*Dtn-NLFh z+kPltqE}w0q9|VEn`rw(Tp@v3TUv(Qw5YEb^H52n<=HhB6cRxwGb*m(Au}o*7!}&K zIfiKX%pza)a~1??+ihcPY}@2NgqI*L%-Yh!1mJV4;p>pO6l$; zywAhdoXLodAn-l656cdNh;`vhK#pb0KY-B>Y9dAzUf7l119};O5B!4L4ZSH* zQ}m@?ajte)tS}^I=5j>fz!PAf4QM(4G#oFlAF}v#5IvCH-5eTbh0c3hh%6|yrI1b0 zw}$s`!j>CKb~pwyxYZRz7U@8Rf}L&zi41Te4&xQj0Cl>FJ{J-e-FbzaCz_lDD}l7{ z{TF_05RO)_CWHjXCl$H;wb2Qu<92-j1NfK9+IN%Po~oCbgJTn_gk5!(9I+Q&I z;0w&riMJEmyj%GywHG-yo@=!>y{H>`y;keb&>xb$p)J?+6?i)jPq;QWtgf>Uh8wvV z^+mYItvPQMm%{(L%8OcAUkj^u3k;TV&OWw!`J0WY^F`*mHwzsI^rU(P@sSnrQfW` z`AzLM!Y}a5En$2@ffIdx zGn9yb%!s7od-#`HmFLK*A zV|(PuAdJf$KX9Yx&c~H>i}jLn-+3+Hr`N92YjgCGBv2*arDkRkE|^b<>;488^NCgl zr$8~6^pY`eRE#BK35=eR)4?$(va_&DMN4GFK>flgA&dPJ9g%YKs?E{Kiz<9lRHiB> z)_{F;1}_pU$rNHKfchjLQ(SaB57UE!MGV4!z#FN@vNBm~en`RSBD~O=nfA#Xq4j>I ze-3+)sni%fanbeLL!bNx65SN$nAP@;Y_%+|4)UpL41A7~{7Y_;M<v;$*@U^_PDqXb{{f&*1NBa4zCn#V0h)?y0dW>pR_8iCTO(cs_w zXvUg7x)N={E$MYPw>TCWc6(jlm5L{Xmr<+J8TMf&!FDHZ32@K41YE1lK4JJ-Y-9bd z(O|})I|-q>r7+IYJ|kPSa#*9xVU5;KoK%p~V2DxRd76-E`hUfxf*r;R2-!loV;S0N z8Bx;-%z4iH`>bQkLC_9aW}${4Gr>H8yTNe(3J}Dbz!Kmsxi$@NYYN^ZUlsk8MprIP zKuhsZWJ6#BCJCN3PJ(1ILxo0j;O1ykmz>_Pu`YiPyOAv_ZlJh)*H%J+xvU8DyD9)0 z>pQW&H3i&wE{E)&VIp=`G@}F%uVg?5n2$}soO@#i*o1i|?H{8;z?L~+3xN6r#Kzd* zGOL5=%?$X8SoKHJaFQu(MZQ^!0{+Jg{OQqFakM}HI{|)qba_3M!39zEAQZq1>-OAD znAEBDNuZ}qPQm}WBwZ^#DyBqQHACUQqc;cNGkN$PnZP$2KLg!NaEve$LwAvKE1XY( zpdf!~hMWOgB@nV8TSlN+i;c8ERo=pk@TsOo+bKCGVK_>1mPV(pAYq5FiiC^8WZMfu z7YD?inMkD7r$Pu#iD1y_}DUdF}5{XJCD0G&Jv zut6sxb%osLHtZ&p2Fr&A)wl=Gw<+hs4()9kXKmZJVM~oEiDi-vMzQZt2)L&E29#K4 zP95a7b`vv@JWChygI(6NF-+W^)ZNLsNwcOI5Bl}+ROSu3S_We>HgmuG3FL0a86Yi>WPEOkzaFk<4tVKC*6X&iUo&ozP0%efRK23C&`B3t zm&{>jXb`1^)X2p8{4O<24d>~K(vICWoW9UPN|tL&59_N?(-qyw{;Xf**)8W|7TAr< zB)XX0{vH)D2$+jfNwR}FGHR2Y+GpnHaBgy~oLS>ml87ef*m7?m)dV?NXvG^BwEk(d zPt9Ezg$y3QO9lW^PuzzZr><-^!Ty$KfILllQM;fF<R_2eaf*;g zM!$ww-U|P%8mIN6K<8*Fy^(LN^5ydx4^d_H3sEyve$c$J-+yl7I^@oIb zgk&RxX32i(_7l61=Wr2MPLlEReE>Ge06VBETag(wvuY5?n+VzuXi&ncSk)}65=}6_ 
zHftcJ)Zw3!x8|=)bY`T&nEHTfNh|KKa;(wT+g>j~ zIG*AjPqQbU_(R=>*Kphpj(j<^&;c2ZAx?KGe+l8xJ!cEYyZA+5Lm(6jJ7~)zU$%k7 z8vI(xQ!}&nlv}D0gDePxo*ny3*ykjWs=_>+O3l<<5~+x-%>c2zeVeB z@%pb48Ysb{!N#m0o>~JE=o&c_1RVV8OrRh`2|08eykn;oAp$TL0(j7AU;_sxBtm$^ zPIzi?SPwxy4$ho-cOb}NMD9WzsB>$GoWJKVL|DoJB=fEktdb0|7 zte&ddHFyfDJ_ldH+McKLt7$GB$gdy@TjKWWOnVs-q`%|7+H_yv5cX}H-5Qt1<#7cb zh`mpK$DI4udA|cuy)>U4!pR_5lc|!y@IHC=>HeIhe318{h*E()Nw;S+6f^EwbM|2f zWP}3KMHVB$zf97d1A$1og%BpbPuc0*w4&ksQu*|3nP1g8$$x9gmiXUtP_ARgH?oD^ z$N&nhM*PL7PEz$OXsMBsVDNr|0hyp2G8y8)f*TfmC>p2&zeFnZG_7NIwvgw6V4p;} zAURX7N(I2+WnoBR-n_X?>E;!^_k zFI!f2P=J#gUu#(i+px;`4SfBe75yN;qObdf>oocOzVHY6FVq(fI_bc<7D@73vfgJ! z&y!AUF%C7oi6Jl(F4M?8Dqcs?oF9f>D<#^AAKk1%{3yNOoTqAP`8OBnDScrEQS^}; zwCIOX(Ux`E^$w1ci%XMRY9W+3TsISOtH0f_su}%f&e%Z+VkRG3< zVvC9{6+RW5>HUam5fx<1${i|psh}LOe2WU33W8i*Nd(cJGEFAT$+crNj@bCGaD}H) zAm6o8u_{(=t}>6m+TzK|0{#{&bF~WRj2}02extrhhhKa;c!3yZsQ&w)id4Cba6BXA z|2v4wD&rrj!<;3g{$Qc<>SCME%&WX!nNAc**XKjX>Nqkd{m9Z$=alEE#s`?`{{>`& zic=_%L2SWeMCP0=Guldyud+SHl?y8K{wfI%J_t1r;=vyhe5cJytzy><%Ut|{w))@k CU)W~= literal 0 HcmV?d00001 diff --git a/utils/__pycache__/lion.cpython-38.pyc b/utils/__pycache__/lion.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..259be76579c7c3051106111c7643e2d558cf96e3 GIT binary patch literal 2318 zcmai0&2Jnv6u0MVXE&QtAZ?Q<2w$i&BHg6WR!vk9N{%J7t==b%<`g|ZQdoqL3 zfsShvx_SVF)7;pk>Uc`Hu?O>g(Bez5S#EN34s`)i_MUAOdS^`*(Nx zLiPh0^h7G8YSh@cU6dWQ_H4zN#wfNJ0xY-{32%)}rs&H95iCtuGIkzIB;#Q#Rg8;v z7>9AEBylVg&t)B%WPMd)Mv^VIqa;X|w6G)&A~-P;fkY`x2B~0+AT-FLbd8-Br_Ok# z-M2&9vLQpD}DZyDA6mBKGewO}ISDD57~_v(i;0>D8cQd10CAa^}#C>AX@3baV$t#7;|xlt>C67xat3~rxEL! zg%QcZ)@Cp2<=bs|c2WB%h%)hxl!+`Huy8L(3k$=duvG@(SGc|p!IApD=M)_8gfZvyj*z4RAvnWH>qShE7_^on6YEO+!9_Gu_V6y!)GCdoc3>nauf|1Kh}C{Y(=G&~6aoS|gG^VtRP_=rqEeDGopw^BnT$sWkY1`_*GHUk5yKbfFla+Q4~~=- zBs$nI&UC^Q6>+EU2iqMTfAA#PLDcUC-c(_y_@EV2BE~EDSOto@4p^9D9V;yT&Ei2C z6jm>be28>-_e5bvA}(t1(k04|!c9?F0p~sheb|Y;DTyIjRK`{)EL63ywakL{o6MTg5l7bQ8$hl zF0Gq0blzAn234H|xYIKwKDQFzHC<*ry5UtZVPkkPe7~sqKKLaAGx$Z__o4npV@}2Q zdD8NIop5pv1QeAx(bkf$;%a)Fz%3C)06S3pMLZ0(m-v)oaZ~6Tms-Y=8Hehxo-LOV ofpVM2v_l&D)frLznUI*f8eEcnU>WzqNbxtY&?!6xZBsY@2K;$y`v3p{ literal 0 HcmV?d00001 diff --git a/utils/__pycache__/loss_tal_dual.cpython-38.pyc b/utils/__pycache__/loss_tal_dual.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..437ad0827167774f0ae3bbc152db6b720708deb5 GIT binary patch literal 10964 zcmeHNd5k32S+BdQ&zb4zooi>uyK7^+O)~3D;v|+f-u1!Sf%O>28xk6uQcqV+Pxn?I zepS84q#7jF#v73V6A%Ols0WZ>0yrE29PT6B0U`c^_ZJ8Wv*I8AM3KP!zOSl#x@R{I z35P(WTJ_a?-#h9Z-|_oi^#==utb*&zr?l<<1x5LL%1mAcGUsuJe+A+xo*F3C!+NEj<>9Xf|nU~b+{F928^>QyMUe4EB80sKX&v|DxWh7tr+Qkfl$9kVd2VYMr@r|Hig5k&xis;&=MIzuRqB2( z6nPXd`NGDToo?Gd(|`ASAA0zqcRqOLsoE94bLM(C=w5mFOh0M{;hCTthHg{~T(4gX z)_d1wHf(ph(T@Ar<6l=qN1;aV(ZnohosyFmjg+qNUJzd)8K`QnDBCKH`X#5Td$wnK z7Iw^dMKP4BiLpDf@J%(*-0s$E!9~3Mw&~LgspF7dA1ODKp}G$-@|5Q=!_A5&&9L9| zg>+oE*=a_ui-k2pKiJ0nGp<_?YM7CmtilYs5VVZPs6(ELy1Ov-{`FBe=}P|NXi1gx zbqlRVmxDwGH}A|A60s4hLuDVsi_LwtjGc=0CG1qJ#d=v;R0dY0we*(JGCj@H_tfvX zS5+dbWuuj|k6J`M!!!5v*eWZ-%)T{^cZqt)`uX|ORQCHM=q*3pCHL2IyJ?Yf?5f(g*qNguZku*rQ4y94hW=MOxE3Vdrw}ebcLZzsu)si}^ zYHu32OWN+S>Fr7DS2RIQ#4^Z%MVsHLwSCu>S=ViMy?#LHyz9OQF-}T&-^DuW%R(v^ zbw3D3qQP3SKwYws%po<8J0u(DXn9=_=QfF_Qo5!j-m_Web6rgoaeKO|go0 z6-&@;cnze(O1@q1h_5By14P!SlEHf`-b0MTB3=|;p0WCzMvahIikej|{yFMyX*{j5 z64gg0f$EaV(2Q7RUPU5Sh6?7P?P~))Qiocs?yCa>b23`p?McPl z6e}2)Ah#!twqNUr`;mmu^O~HZvb%`zn(-3R!io11No$gNNQ2Zv8jQK(=9E)r%YIYha>xiDP+|o-!+6_g`n^|>9T~&)}UdyTucEQnh 
z=MLBZP)i;{b@6T@XNZiQfjE@)ccI`U?o`%`jv&q#mvz#ik#+YRmUUP(mUY-Kl6BAW zY&=c2@)pD}Bb~?K56Qq^n34&SkFkW)+TU5iNtQ(k#gNfMLAHh8aND(O((Z@8>um=W zJ(1w|p{96*$Q^|D9QYxdau(j*xzWfDrIsXhbkXamHcRbk*x%S5LI(HMA)FeS@`etn zbs}x3LqeT>ZD_#V>5%CR^NcB8mU-q7TDq^sHfnQUgIr|AnK(-hz_a#@LEf|D{E{*# z#DzEqImUBn!cADmDXp~ZS5*NSpBxFej;Adu9(07qROEN5ad9-JQH_0lFcW2$QqU!KWu7IX8N7*#lG+F`qC19&#y&-v`rS;Ku+Xo z{J^inhg@m;S6R~pNlvoIyxX)za0A{poK_f0EAl&GS4b`DO3U-FH0yq4o`pjusz}I8 z=2$BUsrjKSB+$+6b(6rD>L3UMT0LvMxL$Tp0;tc)7~ZFGyCPotx6dlxTwMV&OVl^?RsibrnHv;&{#Kq3>m-}B~n`& zNdMae$17hhk z#C??i8W35Sf=J>?Dj?wl1l_3tgOVGlWD*l}k@!}!FV);=fBbEj{xl;96;*S|#=8L* zrJ1BC!+x3zvYcf0(H(sXFZx#8$&RXdcmwRFdXH+nsS%3ORza1`$HZ>~#daK)jR4ot(#66p*E5i|VLF(^(P3yUl%&zMiLw^rEr*$dd&{K!E-B zrd)?t;E+zOTAqWQE6UvC-FB}Z`Q$?`O-UhH>oFAn1T1s}R6$X~4M-y7P;Kc0gQSPy zHmviq(f~O3v^~5Va;6Q;$bpn*NKW=O&)!p?CtQ`qor?|6S%$R6mY2C~i0vpJo3V9+ z+7x0HZzy_Ml-pj80#2{k(3|Qo6WgsB&d*SY*)Sp5iaU#ziaSSE7nsd%mGCU#IgjT& zwZL-*&oZ86JdfddjIo^-Ekuja(r}qV)P3y+A-wr$g*j#K7;)0vg11OHZ)uO-a{Mc7 zw;G+G_j}90RGPQ4rw!L)(qF~B>x-70jI7})Y6YLLf-q>*;xyql_im2Yj&Dk<9)xc1 z>VYc00o#^&`r6C?^z7@eoojrg{MwU$xBLC)s^&L8bMcv{ZxPf>(vOPVIQ>}K z+0yxn0T|cqU6#2{znw&re#rPw8e2Y8nLtZLWlvPO2jQ51Ey8pV)wlh~7a>nm>O0qa zQtNc&9OCE?A+);4krl}@n;P+c^uj^4)ZuibR!4OJbOK_62PII>!J#J^*dMCm?8X2&md%Gl-gjWW!sk=T_kQa z%f5+>&xW)xr%uBsY};+|d=HXfP;_XN47X@crMe?tq zKsc!06Uqzkg7YzB=Q1E*oS_h7kd3p#L#UGhGDgqTFU)}BDRGu^XO$N{GC?>Wf%{9! zVML&EQ1uD!S+v5jhqK9|f6K&##0wyoa3W9;e$*Eox8CgozOY!XA7sUcsSbNHbJs_N zpU9)t?=%6oq)zDB-db#t<$5#)?3& z38H z!b+OdOeCXDk}9y)Hn3_Zk{a)Uh9 zq;QjGjuM-6beYDjAtcN0=|~l6H2^CM(acRMDSEjZI@%ToGsw+CVsdeTWQOhr>U#M- zeE{nMfLfsSK;7BFTs+sBL;pC5hEpwh1_e$xwc&hRi~&6c^JyAkY!E=V04(z?>8D3UgP$JwE2H;TaF4khgM}FUW`w~OZ)coI(jv@FZ--AxQlW} z-eFJWSGhM`ybZmbh@H5M-d5t{@mjn}y$wzdP7O|vdWlb_V=z7LB|bIICvTaXo{5K` zJe^tc*3u}?D}jJHHfi1Ye13kDjR%|RkmrpS`#UA8!>g57$3&*w-J`eY`xownvH(9 z9||4>?*rpHLEW;m{sBtePwjLZnu~qPUZX6|D56?@CplgG5n^8_a+3&wsU#}hrPN12 zDt9N&S?!6BAq$+>Zb;MbbM{WbXp*qX(m0n$dAtvL~}zXxuj?#8TTHw&oqE2 z6JKwl?{HU;$C8BvX^tF!29sc*j_+Mak{^Pmb(?vyl)6q(5X)0~utHlFq zNnb_1vYyAetbyaj68trya8>(4-ae9d5G(`a6pb~sxL2ohbgr4!FWCfzj&ifaWso$h z9fiNifGaAF+7c9SaOa?&WsoKB^E6*J28V<}dKCpN+>ALKEqk9c<2Zu2b=*@Ip^Edk zztXV2FkxseKJ^xX{RP;}uLkzxz<%{efPNwk(AN-#-!&12kN77!3_ryO>ED9=s+<-m|j6Mb1xR`i^pc9z=ZIn79qh;{$9asrBIPv8Kk{movh7}i|`hOHYitnJ+e3{61 z5jnE1WFF`<$6E(wOH*pQ{y!g}yV`8dZyhcV9b z9b%l7QFJm9M@`2#qloFRxwj<7p@=E%jiNe4aA~9zA0NjwNsO~P&i`+YagM|U;-{dO ziP2)LLXp5Dr0L_><$-A}A3?c0MK6Nz-G5i~B7Wxfkv={-Qt@+CAxGpjBA*7Ch*8AP z6O-eU<458X@e5S+i$s2j$Y+SWj|h9+U#8Tr5c$%Pi1<~iNA@!D#II9||Kz}7$aL_5 zK#4)}XQ|T1i2NFnUnlZ*BELc8H;Mcfk>4i5W7wnA?-2Q2B7FY!dzAWpBA+Aj2SlDG z@`ptJh{!WU#sLDue1`PmkE!5Ki2NxLc4B`F^)D);k@Ban<=zc740w z;lGB}0+~Hbryn)>kALe8AOA_L1wx~a3JDKiFhsK+@~3vWlJM}~!1eS)dC~+g;giiI zz@>xOYH6Zs`lTWH95I&`(Erg~PV>i&DFpnBFU6-sNRw-vu#Iri_1ZK|&T literal 0 HcmV?d00001 diff --git a/utils/__pycache__/metrics.cpython-38.pyc b/utils/__pycache__/metrics.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c54b4869d7d9c6ed5a860cd224724985356c22d5 GIT binary patch literal 12394 zcmb_id5j#{S+7@DcUSlH^xQM{*jwwp++KU^nRo*+&U&vnn}COn?PWu=PE*sb`k3ji zZojJb9I1vS_KM9PRyI;ZluaUfAd0XA1i}?i_yYtXTw;YlLB#?gAemAAAR-}x1+x6U zSAEUwM&u8=^}c%d`F-E}j`!@?n5E$N_&V($*+1 zsr{4exKS+R+1b-3S4x)h-Lw!wNUJd0WF)Bv$um8iK|a zwEDd_@QaKaPQAwIJQ_{z=FP|u&9=uQt?qh}>a`-hiPc5vQqwCrf$JeeNhEGG>o?rI z5wY!vl~J)Au~wvRx8xSafOaG@{YJAHR3{Pl3EdTSk!9I2CXOP0F=9)y?%gkb_-{V` z`M>XgmnhI0h#IDkr4yU=cq52;Sr|2ubeEnlwk7qxx zbdxwb)|rXZz0_ut?Wjn00tKJhY4TG3?UyHLN=sjstab2;*UHPU)?2@u!tC<0)?4N`_{4 zY*%>-JKHgXY+#1wO)!>@i7{016Tvw891F8SE;K+g$xkXg{b`^%Y8gxkCQ!OKQkIF! 
z_!!UL?w5~`l#lnyb0g)sk@AUN`DCwrGFU?PUZHc7DrPO zbAUIUMVyv3t#oJ5p2&DS-i1&BHc;LXIL1c-RrIRYZEpKLZ- z?ZCBzs%wKe38&)PE#a1Gey!=9v@10*ELn&=r(U-mj}Oc3Qd`_`{UyuB?@F^RO71zk z8U!u>-09PmT2O7TEtQ&$)50k`ZoTF3TD|U^{^7NCw-lUu$_?JSTYE8cuU{1b$+-o^PINK*}Z)@3Z3i_PM$5Wv;U@T>{iU%XE zYJ)wYhq7ok>@%m92OW&G!NPg5RQJ*D8ANNPz~y#RPS-Bg9p4`rccfY1h>9EZ8p>(@ z-)&m;WGu+Qpy-k+eYCD15c>I~i&MZAL*a#m-ROB_$Z%JtzX>4RuZ1?u>vKzF8 zH{89eZ*{b`<712D*>k(**{$tUWycR(aY~dtFW$zh;N@dN_Es!`l?T4$_LA?{{soLw zUcPm=9Efpn)qPhjv01vprik4x0#tyW*QUYTz*0l@lCoh6JybucUe~Y0m!;(1c`Z_x z&O~e@VmIzU-e`NZ>uncXD>r}SGO*6qYkoj0cyyJ9Fab>blgf_mw2CcP#Eq8D*PB3G z|4~Hx4R$imTrSEzUTS%joy^7MVJcEDE@Ro|cHLQX>pSUZMv78EikL8V(jy&2=4DA6 zmH3kW?F;#NF@xzu3E+j_6hUOh%ZLoW>a=j_)oY&La!PJw?6{)oM=93>#tU4Cf-bhm zEVUbuCL*)uA*+Rp)bT2AWc3cL=(Hl!X|-Hju0}0z8-An{vPNTsRmB#TPIM{glJ4a- zO1(U~25~{Vj)yKprQvJ|IwBkGOFl7E{qz1UIiG&KtK2b#05YHo>Mv9Vse2fgw zY2a?x+?F4iXi=^~465NW4-iU`+S)?cj#Q^52pJ+KBDGYDSgD|S#X5mjK`XbA3|ghg zjOidsp{IDmBesrNBMFGSNG&fT^hAV*(4&wdLT2>n4LB^%+V|N9 z+1}%CzReD_Ke=ygIjA6+`Yr-~-?$*o;xpuVdoGfzA_thaB#DbT#3Q|2ueG8CXsOl! z)mlyo^xLizIiI0&BM?q&M|Kt&n^jlDWS&oo_n}x^Ch#nQt5n;BMo?{XLBguIMBr%x zq)!a_pLjcx*AV<^0L7YS)7reI1F^GgUOmPhRAr446^{OhB|nDXQ=hIvs$oHe$_7~# zikL>|2Bi)X9n?LgU`RPiDVT18r=d*DaHxG!1xxY?K8YGBJ_VGSjmDn@uV|CZaLIu_ zv(?+eS1;WC^FO2CYZppVp~0y1qd4vKlL-D20I*b22*fzh$($6ML#2qD0uY!9DD{=1 z!Idk@9d%V+LU}Vjg=O@BeUl@L_aY%4r<_!+*)EbI=;!rF_uYDVSSUzeI0RZDc!^?!sK1CH<-_%BwYY$Qb$lu6$uh{w1G*RiT~ zR#pvABxm0TW#c?l&nrZ~_(B|)>StGl0H=i-1nEWag?>wYf4l1!WlP$PtXA^Qi%SpY}cm2*5W&S&Fsdc9#5aZQNU;7Pq)%%QJ3 z_i_fx^i37K$OH#VV5H;*lt3r-c6Q;HNR>{Yx%8Hc3+ zb-I~dg@k+v&m(0S1`)=5hhawgQI0`&pN zOIe(KI?O^D91kt5>O+AZj+15&1qms6ZYGqKahh*<06V;SSxy*tpyXxs`lG=NsYE#c zGjbdwm_NwnsEAWkJa#Nq)@qRXQWmjbnsGPVo+~Tq+zC#ThkQ~l&X+}7z`ebGYHEOE5`i$3QcK06NhXjnkA?v z#Wia8Q>YQkD^fl=#B@jtw{CAo($!B%M9@EQ5qEj|m^zrUMI*MZbi6pF~ZC?aUq=GUmzq z#-hU~{x8R(bD*)h3^Yt!5!?rK4Q@6Mo0a5gYEQBOzoJ-K#f78wFi&2^nw&=4s5g9Nb!SecSwBlL;CqZ=Or=@ghznor-hx?_BX>OQADqP$PJth^r*W zG-_TUy%p3-8$BkH-X11^lq4MtL8Ozppg@XcY;D-d4e-j2C8?e1QXkWbF*T9Wy7xNz zxdWp!Z`JZsg@o@qYfa(BER`6YAeI_wtvYFG?KQ$qKjyV0Fv3{vqF~bzUd^lcQ5GDv zR4oqD8Nb$OL9wsZA;T6DRo4ly*1{NC70XauOR!*z$f9JdC`T!Z2eqK?MhVgiaaOVZ zX_V?eVm2F%3Jd!M3NzKbTK`d(y6V-9u`?LJB+B*sB^^u!HB3f6EF4-m2- zV){)4{~Um#Ce@5atZ*L68&p30O{OwaRTcFORaFu1{l9Hm>bJ9Ls$qe7?o1DMKHm5x z+OvI}iL*m?C|>S@R-I;*}qWnDweLCwnU`;0`+YYlPTafc3tafys_nar3UxU_=Xf zZG=DNEs}akdoLCvt5}5TzFntyrdR|{*W(gVrN%!=KwjD(qL@a2_CnA!Mvg^tS|VNH z0Bj@pS%9Q!naPt$Eoqu8b5xemboTv-TRw*1KLIe%Pe?OSp_ORRNc4iqjY0y}&;<04 zq?;nncMAtlun-^T89pX8lr+zRpX4Izx%V`mEnXo`u=9&{qX{BLXv4<$!GW4-kc(7*nhUis$+n`!!5Np6E153Bh)A;^f5OT zFQ61BPbUy1itQGMN{Y0+$tiQ>%;a&=Dc?otp%c`esp0MoH69wg|D*WeHb095-QnU4 zt|7>curt7ULjEPlpN#AskaZ0Na~GZ_=z7rbh$;j}!+au26>EMG0vl)`1x$>0DN2$W zCwD|H#DoOqKqx$qHjz%|FL>QFSZ(8V?Z4*5ucFr15qcgYQ-PrL#?IvEvf_QYTb94? 
zTo4>oh{bkEj7>o-+#nPTF>{_5~G4$JjHo9nGO&ch|I1qrg&gow>6 zyVw_KU3!T6S8#3jcOey7u)TYbZao+Xts%lj`dYJDmxn2S1CtQH4gj_Gx(GyrqFSrD z361!-sEm#{GAd4^;Y0>^>w$yj*f;RmdaLRjICv1dyefGSyxRW;+Dd{|Aak0KJabYG z-I*KRw2}J6T)seq(#C<&lRzkbnZV}&fOtjTEES7#zT$T%|JMk7o&aGSJc1--_skx2 zVB+@)3{O(VB!-f_79mKSPcl8BLn6jsP3kJM5Cqk@y7vO`^6t~0`{FDA_LZGSOVV|Y zeEL0%KoW&K%)dglP!*p7-HHw=7!-kMzps!^L44(Df*J%s|fY5K8hr8u#0|Cp;S71OK8O8ZR^d1LVU4 z)S#Z;kYn^mQ~RTJ?v?rdk<_p^QsFL*Ze>X|;3n zFfO2cF+74CTKPhpGl#RpZ0<*2s#xVLFxE|~c^K~TZUP}mD>w{3DdSiZ#i^|%$_J2} zf<$}>((42G+bA(;bmZ?YydOgTL*YG` z<3oqzdM4)fDprG)^j3it#4GI_$I10qcVxJ#P98>ES~2{*v?HAp==FGbf_e<|@t06#ewztva8jx5?;@CoN-D$w=4=J+>Ykf`pE}BHT15Okr zyk@Z?9G-ue*wEIpge#H0wY^y_=GS-%hXeK`^@`Da2g?7f;N?e9doRVcL zvP^nVA_ESXrWap?YH1U`2duEPQqV=Uxgs8|m;jCY{vV_JNrFxF^sx_zpf zMKIS9jzr8TO0M+A%(AO8p8H<*T4Ijb$Q#W!?)UQ0qeo5!Dg2`9l< zS0HvCMLdIl4rwYQPAm4%&pxYS5*Y@9WLZ0i#B5=gk!NARw4glp(_md{2fhSA1NJ$3 z!=XdTqBkI9MTPGG1aFY@xoe_5gL;{WZ>2a5Z-9XsvJ9k^0D9pK3Efk9-MI!#lA=s( zaEJ2{I)dMGq%_Ya=@vY^P_BO)S3cf3OyG$BH5PK*e@4Jf64TLMgPW$*(*MsNn$5v0;XRi(2 z2`F0`lwBH@^;`$TPH^Ufdg$ZQwfs5h1Yg~{WXDPv`IiF-DkQCu@4V&I#AeNR2lF7A z5+o3Noqd|M?P7DbZMW`{U_)z@`>`y2>N2__TgU1lc~qq?Omb1%gM#V0(3<{;vgx`P zUncO!1pWj79y!tEa4Gx+<-AHDCN>9{?0~S=@LgLGU!#)Y%_=X|I`U57RAWO(c8T4u z3Ci1tgk-l!AV?(q3?d}x;RXVl5t2d3p;Urw6CDd2R1FQ>s0^l8X;ckUy*owQmz(I_ zFZ4==+L-%FtzH{_Uv6^P26s{#mJLEsD@@MQQzm-sl)-(Qk?5-Je5rT1@{~q#)h1k} zBUmBKb9zr5kOr<%?vnikp{qp8t>x|It(EQge5i`_Ug3nj){<0Q35%t3_qexpHbyw= zdO9 zza#MX1inh(9|-&-K*5q5BY9sTMH83N19&pvcg_Vu7Em-avMdw|QyI0PQ~1t)^sJm5s|0TGSB zrZOaluTzGOd!xEcU`5ivTj6|UZdS>{{4A}6_^SdbmpHWe7UDaZz7L&jEhSn(|3{)D ziIb8HY_x_S7if;ZG}uXVo#eUe8rH_RaT_llxCxuRev`$9IdPsRV77a~)0yHu21@~3 zkFD@4;MX9a&|HVP50^Y__T;U_!z9ugpz=!ZMCz0Jr!{WfM(gYE^u%e0<=ck$rTNyUMJ>bM%l2kEBw3A~w)Z7+)XbQ}n`< zOm}G6H}@Iq<1i}D@p);|Q@VIk)mK9E3$d;Cjw)_p-}X#^tKvmKcn5U2Oyfr-do#!p zzW@Ns+uO=8_oeVk!riQK2qT8HrA2y0xZCLGzfcR(3fZ8f&H-TrCcB+^dY{y@*V_-E zlm7FWJDO}F-k>4qt`;W=kS1_Pt$e2SPU|Z_4`(lNx7V#U-c<;j9<|(23t4%?2hAds zu_`QH8XlAE%)(k1ze+fDIerxNqfv zQcT*?o~GCWfdicX9P)mgNPvu>3hZWCbrD3cq)w_!(k@2ANfxmz6I6F6(|<@pgi)}_ zWLD!YG*yUv@)cK!&bR*~LZJ~QvCO|UvB0*?jR21t2IR3Mt=b=ucLZ^0VNwzY-YkL?&Uw?lGN|An2C|1EAFuu(^W%Lc;}%?y!}cH+J_h$2NP%ZR zse?oysgv9f2Vf2)rt=9&9*M3MLNfxoC*YWrXpL9~p7NMH60n00;_F#@3=Qizkq0c_ zp#kEZ`7~}fA{RIsRX)q-dPFiYV)vf!IW+zqyA@MMYzO}jq{Zt5{)fPK2oU3ycrRet z$B~Spy?7agJ374`6aPu|Ft8#p8<^_|U`LdliBrRKvk&6#|V-FUcHE)_(RH&;7gRt#9Mq8#Gg{FPXR=U z7SyDAZ7sHPPe3XpdoeQblAB_91|`4oc3!7;k`>Tp1$~6xe#aJ6x+CQyZ~2Z{zA}<; zH>817nlq$EASE(+!^$g4lA~PS2u;hCWoUrp`Ph*ADE3M5j|0%7r;KzWCe<8luvXG~ XGP#&_vd63?YsQ+j(kh#tRoVXnn?SI> literal 0 HcmV?d00001 diff --git a/utils/__pycache__/plots.cpython-38.pyc b/utils/__pycache__/plots.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6bed0fda931349c1cb3584013b482cbb4a2ccad8 GIT binary patch literal 21610 zcmbV!3vgW5dER~P6N@DPf*=4sw2~-_ph%DeNr_}=i7!ExEkQI%+49=*cClx%SYTf~ z_kskr7mgBAj*#xv-0L{=WTg*@(%7!&8@q6*Jhbp^XjpDOwwL0UQgr`lD2Efd{Ww^@+rBe>wWo5 zx2!*(mAq~F0l5$6hvdFJza95jZMeQ8zoR~qAF1!m@02$2+Gu?&KUN>lkIQqSwyVB7 zzq`ICzo))8zqkHS{-JsRY)wkx{vh#<^sfEL8TJ@`J)&4#k<*Cl3+O~$&bCw!VgSRa;c+1Hj zDId9I<&T#4sG&JWZCAs$?ffy+*r7&HV+1v()Xs8Djoz|RKBmS|eq24LcB?%|olqZC z535IzI;oyl`_yAdJy{-CA5xF21GjCA<0S?5& zQ7@@y)H$Tiq4xP(wz{B()RXwTh<2CMvnV;Q?5ozq<#l4p%!D0zrDkh|_shj#9{1$S zS6+Bl>!zM?quBHHVx=6V?W%T9>acRdu;uZUjEcYt>d^ zwz*vPqinTNGWmrTMiwQjexc}>s?{j9ymE7XYI$XOg~{a=ByV!$iMeXfDEmH=eSWD} zD;KIVeDBKnnGdgj3%|&dS9^Z7U0(0Q1VrxnN~Of9xc}$k;T$giAVAsTX2C|Wm4hX7 zRZ4j(hKckr#c`FmZDEPyDv3!=NAZizT2uQa8x4CuY(9%U^70P=D0|iZE^7|`Dff0V zv@fBDH>`pc04|s`wk7)WW+sO7@#*Q~r;eZE{q&{NPx1cL(PPuRUzj>}DIdQ$eQE0W 
+def polygons2masks_overlap(imgsz, segments, downsample_ratio=1):
+    """Return a (640, 640) overlap mask."""
+    masks = np.zeros((imgsz[0] // downsample_ratio, imgsz[1] // downsample_ratio),
+                     dtype=np.int32 if len(segments) > 255 else np.uint8)
+    areas = []
+    ms = []
+    for si in range(len(segments)):
+        mask = polygon2mask(imgsz, [segments[si].reshape(-1)], downsample_ratio=downsample_ratio, color=1)
+        ms.append(mask)
+        areas.append(mask.sum())
+    areas = np.asarray(areas)
+    index = np.argsort(-areas)
+    ms = np.array(ms)[index]
+    for i in range(len(segments)):
+        mask = ms[i] * (i + 1)
+        masks = masks + mask
+        masks = np.clip(masks, a_min=0, a_max=i + 1)
+    return masks, index
+
+
+def polygons2masks_multi(imgsz, segments, cls, downsample_ratio=1, nc=3):
+    """Return an (nc, 640, 640) stack of binary masks, one channel per class."""
+    masks = np.zeros((nc, imgsz[0] // downsample_ratio, imgsz[1] // downsample_ratio),
+                     dtype=np.int32 if len(segments) > 255 else np.uint8)
+    areas = []
+    ms = []
+    mc = []
+    for si in range(len(segments)):
+        mask = polygon2mask(imgsz, [segments[si].reshape(-1)], downsample_ratio=downsample_ratio, color=1)
+        ms.append(mask)
+        mc.append(int(cls[si][0]))
+        areas.append(mask.sum())
+    areas = np.asarray(areas)
+    index = np.argsort(-areas)
+    ms = np.array(ms)[index]
+    mc = np.array(mc)[index]  # keep class ids aligned with the area-sorted masks
+    for i in range(len(segments)):
+        cls_id = mc[i]
+        masks[cls_id] = masks[cls_id] + ms[i]
+        masks[cls_id] = np.clip(masks[cls_id], a_min=0, a_max=1)
+    return masks, index
+
+
+class Compose:
+
+    def __init__(self, transforms):
+        """Initializes the Compose object with a list of transforms."""
+        self.transforms = transforms
+
+    def __call__(self, data):
+        """Applies a series of transformations to input data."""
+        for t in self.transforms:
+            data = t(data)
+        return data
+
+    def append(self, transform):
+        """Appends a new transform to the existing list of transforms."""
+        self.transforms.append(transform)
+
+    def tolist(self):
+        """Converts list of transforms to a standard Python list."""
+        return self.transforms
+
+    def __repr__(self):
+        """Return string representation of object."""
+        format_string = f'{self.__class__.__name__}('
+        for t in self.transforms:
+            format_string += '\n'
+            format_string += f'    {t}'
+        format_string += '\n)'
+        return format_string
+
+
+class BaseMixTransform:
+    """This implementation is from mmyolo."""
+
+    def __init__(self, dataset, pre_transform=None, p=0.0) -> None:
+        self.dataset = dataset
+        self.pre_transform = pre_transform
+        self.p = p
+
+    def __call__(self, labels):
+        """Applies pre-processing transforms and mixup/mosaic transforms to labels data."""
+        if random.uniform(0, 1) > self.p:
+            return labels
+
+        # Get the indexes of one or more other images
+        indexes = self.get_indexes()
+        if isinstance(indexes, int):
+            indexes = [indexes]
+
+        # Get the image information that will be used for Mosaic or MixUp
+        mix_labels = [self.dataset.get_image_and_label(i) for i in indexes]
+
+        if self.pre_transform is not None:
+            for i, data in enumerate(mix_labels):
+                mix_labels[i] = self.pre_transform(data)
+        labels['mix_labels'] = mix_labels
+
+        # Mosaic or MixUp
+        labels = self._mix_transform(labels)
+        labels.pop('mix_labels', None)
+        return labels
+
+    def _mix_transform(self, labels):
+        """Applies MixUp or Mosaic augmentation to the label dictionary."""
+        raise NotImplementedError
+
+    def get_indexes(self):
+        """Gets a list of shuffled indexes for mosaic augmentation."""
+        raise NotImplementedError
+
+
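+# Usage sketch (illustrative only, not part of the training pipeline): concrete
+# mix transforms subclass `BaseMixTransform`, overriding `get_indexes` and
+# `_mix_transform` as `Mosaic` and `MixUp` below do, and are chained with other
+# callables through `Compose`, e.g.:
+#   transforms = Compose([Mosaic(dataset, imgsz=640, p=1.0), MixUp(dataset, p=0.1)])
+#   labels = transforms(dataset.get_image_and_label(0))
+# where `dataset` is any object exposing `get_image_and_label` (see
+# utils/dataloaders.py). The full chain used for training is built by
+# `v9_transforms` later in this file.
+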
+class Mosaic(BaseMixTransform): + """ + Mosaic augmentation. + + This class performs mosaic augmentation by combining multiple (4 or 9) images into a single mosaic image. + The augmentation is applied to a dataset with a given probability. + + Attributes: + dataset: The dataset on which the mosaic augmentation is applied. + imgsz (int, optional): Image size (height and width) after mosaic pipeline of a single image. Default to 640. + p (float, optional): Probability of applying the mosaic augmentation. Must be in the range 0-1. Default to 1.0. + n (int, optional): The grid size, either 4 (for 2x2) or 9 (for 3x3). + """ + + def __init__(self, dataset, imgsz=640, p=1.0, n=4): + """Initializes the object with a dataset, image size, probability, and border.""" + assert 0 <= p <= 1.0, f'The probability should be in range [0, 1], but got {p}.' + assert n in (4, 9), 'grid must be equal to 4 or 9.' + super().__init__(dataset=dataset, p=p) + self.dataset = dataset + self.imgsz = imgsz + self.border = (-imgsz // 2, -imgsz // 2) # width, height + self.n = n + + def get_indexes(self, buffer=True): + """Return a list of random indexes from the dataset.""" + if buffer: # select images from buffer + return random.choices(list(self.dataset.buffer), k=self.n - 1) + else: # select any images + return [random.randint(0, len(self.dataset) - 1) for _ in range(self.n - 1)] + + def _mix_transform(self, labels): + """Apply mixup transformation to the input image and labels.""" + assert labels.get('rect_shape', None) is None, 'rect and mosaic are mutually exclusive.' + assert len(labels.get('mix_labels', [])), 'There are no other images for mosaic augment.' + return self._mosaic4(labels) if self.n == 4 else self._mosaic9(labels) + + def _mosaic4(self, labels): + """Create a 2x2 image mosaic.""" + mosaic_labels = [] + s = self.imgsz + yc, xc = (int(random.uniform(-x, 2 * s + x)) for x in self.border) # mosaic center x, y + for i in range(4): + labels_patch = labels if i == 0 else labels['mix_labels'][i - 1] + # Load image + img = labels_patch['img'] + h, w = labels_patch.pop('resized_shape') + + # Place img in img4 + if i == 0: # top left + img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles + x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image) + x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image) + elif i == 1: # top right + x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc + x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h + elif i == 2: # bottom left + x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h) + x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h) + elif i == 3: # bottom right + x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h) + x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h) + + img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax] + padw = x1a - x1b + padh = y1a - y1b + + labels_patch = self._update_labels(labels_patch, padw, padh) + mosaic_labels.append(labels_patch) + final_labels = self._cat_labels(mosaic_labels) + final_labels['img'] = img4 + return final_labels + + def _mosaic9(self, labels): + """Create a 3x3 image mosaic.""" + mosaic_labels = [] + s = self.imgsz + hp, wp = -1, -1 # height, width previous + for i in range(9): + labels_patch = labels if i == 0 else labels['mix_labels'][i - 1] + # Load image + img = labels_patch['img'] + h, w = 
labels_patch.pop('resized_shape') + + # Place img in img9 + if i == 0: # center + img9 = np.full((s * 3, s * 3, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles + h0, w0 = h, w + c = s, s, s + w, s + h # xmin, ymin, xmax, ymax (base) coordinates + elif i == 1: # top + c = s, s - h, s + w, s + elif i == 2: # top right + c = s + wp, s - h, s + wp + w, s + elif i == 3: # right + c = s + w0, s, s + w0 + w, s + h + elif i == 4: # bottom right + c = s + w0, s + hp, s + w0 + w, s + hp + h + elif i == 5: # bottom + c = s + w0 - w, s + h0, s + w0, s + h0 + h + elif i == 6: # bottom left + c = s + w0 - wp - w, s + h0, s + w0 - wp, s + h0 + h + elif i == 7: # left + c = s - w, s + h0 - h, s, s + h0 + elif i == 8: # top left + c = s - w, s + h0 - hp - h, s, s + h0 - hp + + padw, padh = c[:2] + x1, y1, x2, y2 = (max(x, 0) for x in c) # allocate coords + + # Image + img9[y1:y2, x1:x2] = img[y1 - padh:, x1 - padw:] # img9[ymin:ymax, xmin:xmax] + hp, wp = h, w # height, width previous for next iteration + + # Labels assuming imgsz*2 mosaic size + labels_patch = self._update_labels(labels_patch, padw + self.border[0], padh + self.border[1]) + mosaic_labels.append(labels_patch) + final_labels = self._cat_labels(mosaic_labels) + + final_labels['img'] = img9[-self.border[0]:self.border[0], -self.border[1]:self.border[1]] + return final_labels + + @staticmethod + def _update_labels(labels, padw, padh): + """Update labels.""" + nh, nw = labels['img'].shape[:2] + labels['instances'].convert_bbox(format='xyxy') + labels['instances'].denormalize(nw, nh) + labels['instances'].add_padding(padw, padh) + return labels + + def _cat_labels(self, mosaic_labels): + """Return labels with mosaic border instances clipped.""" + if len(mosaic_labels) == 0: + return {} + cls = [] + instances = [] + imgsz = self.imgsz * 2 # mosaic imgsz + for labels in mosaic_labels: + cls.append(labels['cls']) + instances.append(labels['instances']) + final_labels = { + 'im_file': mosaic_labels[0]['im_file'], + 'ori_shape': mosaic_labels[0]['ori_shape'], + 'resized_shape': (imgsz, imgsz), + 'cls': np.concatenate(cls, 0), + 'instances': Instances.concatenate(instances, axis=0), + 'mosaic_border': self.border} # final_labels + final_labels['instances'].clip(imgsz, imgsz) + good = final_labels['instances'].remove_zero_area_boxes() + final_labels['cls'] = final_labels['cls'][good] + return final_labels + + +class MixUp(BaseMixTransform): + + def __init__(self, dataset, pre_transform=None, p=0.0) -> None: + super().__init__(dataset=dataset, pre_transform=pre_transform, p=p) + + def get_indexes(self): + """Get a random index from the dataset.""" + return random.randint(0, len(self.dataset) - 1) + + def _mix_transform(self, labels): + """Applies MixUp augmentation https://arxiv.org/pdf/1710.09412.pdf.""" + r = np.random.beta(32.0, 32.0) # mixup ratio, alpha=beta=32.0 + labels2 = labels['mix_labels'][0] + labels['img'] = (labels['img'] * r + labels2['img'] * (1 - r)).astype(np.uint8) + labels['instances'] = Instances.concatenate([labels['instances'], labels2['instances']], axis=0) + labels['cls'] = np.concatenate([labels['cls'], labels2['cls']], 0) + return labels + + +class RandomPerspective: + + def __init__(self, + degrees=0.0, + translate=0.1, + scale=0.5, + shear=0.0, + perspective=0.0, + border=(0, 0), + pre_transform=None): + self.degrees = degrees + self.translate = translate + self.scale = scale + self.shear = shear + self.perspective = perspective + # Mosaic border + self.border = border + self.pre_transform = pre_transform + + 
def affine_transform(self, img, border): + """Center.""" + C = np.eye(3, dtype=np.float32) + + C[0, 2] = -img.shape[1] / 2 # x translation (pixels) + C[1, 2] = -img.shape[0] / 2 # y translation (pixels) + + # Perspective + P = np.eye(3, dtype=np.float32) + P[2, 0] = random.uniform(-self.perspective, self.perspective) # x perspective (about y) + P[2, 1] = random.uniform(-self.perspective, self.perspective) # y perspective (about x) + + # Rotation and Scale + R = np.eye(3, dtype=np.float32) + a = random.uniform(-self.degrees, self.degrees) + # a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations + s = random.uniform(1 - self.scale, 1 + self.scale) + # s = 2 ** random.uniform(-scale, scale) + R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s) + + # Shear + S = np.eye(3, dtype=np.float32) + S[0, 1] = math.tan(random.uniform(-self.shear, self.shear) * math.pi / 180) # x shear (deg) + S[1, 0] = math.tan(random.uniform(-self.shear, self.shear) * math.pi / 180) # y shear (deg) + + # Translation + T = np.eye(3, dtype=np.float32) + T[0, 2] = random.uniform(0.5 - self.translate, 0.5 + self.translate) * self.size[0] # x translation (pixels) + T[1, 2] = random.uniform(0.5 - self.translate, 0.5 + self.translate) * self.size[1] # y translation (pixels) + + # Combined rotation matrix + M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT + # Affine image + if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed + if self.perspective: + img = cv2.warpPerspective(img, M, dsize=self.size, borderValue=(114, 114, 114)) + else: # affine + img = cv2.warpAffine(img, M[:2], dsize=self.size, borderValue=(114, 114, 114)) + return img, M, s + + def apply_bboxes(self, bboxes, M): + """ + Apply affine to bboxes only. + + Args: + bboxes (ndarray): list of bboxes, xyxy format, with shape (num_bboxes, 4). + M (ndarray): affine matrix. + + Returns: + new_bboxes (ndarray): bboxes after affine, [num_bboxes, 4]. + """ + n = len(bboxes) + if n == 0: + return bboxes + + xy = np.ones((n * 4, 3), dtype=bboxes.dtype) + xy[:, :2] = bboxes[:, [0, 1, 2, 3, 0, 3, 2, 1]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1 + xy = xy @ M.T # transform + xy = (xy[:, :2] / xy[:, 2:3] if self.perspective else xy[:, :2]).reshape(n, 8) # perspective rescale or affine + + # Create new boxes + x = xy[:, [0, 2, 4, 6]] + y = xy[:, [1, 3, 5, 7]] + return np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1)), dtype=bboxes.dtype).reshape(4, n).T + + def apply_segments(self, segments, M): + """ + Apply affine to segments and generate new bboxes from segments. + + Args: + segments (ndarray): list of segments, [num_samples, 500, 2]. + M (ndarray): affine matrix. + + Returns: + new_segments (ndarray): list of segments after affine, [num_samples, 500, 2]. + new_bboxes (ndarray): bboxes after affine, [N, 4]. + """ + n, num = segments.shape[:2] + if n == 0: + return [], segments + + xy = np.ones((n * num, 3), dtype=segments.dtype) + segments = segments.reshape(-1, 2) + xy[:, :2] = segments + xy = xy @ M.T # transform + xy = xy[:, :2] / xy[:, 2:3] + segments = xy.reshape(n, -1, 2) + bboxes = np.stack([segment2box(xy, self.size[0], self.size[1]) for xy in segments], 0) + return bboxes, segments + + def apply_keypoints(self, keypoints, M): + """ + Apply affine to keypoints. + + Args: + keypoints (ndarray): keypoints, [N, 17, 3]. + M (ndarray): affine matrix. + + Return: + new_keypoints (ndarray): keypoints after affine, [N, 17, 3]. 
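+
+        Note:
+            Keypoints that fall outside `self.size` after the transform keep
+            their coordinates but have their visibility flag zeroed.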
+        """
+        n, nkpt = keypoints.shape[:2]
+        if n == 0:
+            return keypoints
+        xy = np.ones((n * nkpt, 3), dtype=keypoints.dtype)
+        visible = keypoints[..., 2].reshape(n * nkpt, 1)
+        xy[:, :2] = keypoints[..., :2].reshape(n * nkpt, 2)
+        xy = xy @ M.T  # transform
+        xy = xy[:, :2] / xy[:, 2:3]  # perspective rescale or affine
+        out_mask = (xy[:, 0] < 0) | (xy[:, 1] < 0) | (xy[:, 0] > self.size[0]) | (xy[:, 1] > self.size[1])
+        visible[out_mask] = 0
+        return np.concatenate([xy, visible], axis=-1).reshape(n, nkpt, 3)
+
+    def __call__(self, labels):
+        """
+        Affine images and targets.
+
+        Args:
+            labels (dict): a dict of `bboxes`, `segments`, `keypoints`.
+        """
+        if self.pre_transform and 'mosaic_border' not in labels:
+            labels = self.pre_transform(labels)
+            labels.pop('ratio_pad', None)  # ratio pad is not needed past this point
+
+        img = labels['img']
+        cls = labels['cls']
+        instances = labels.pop('instances')
+        # Make sure the coord formats are right
+        instances.convert_bbox(format='xyxy')
+        instances.denormalize(*img.shape[:2][::-1])
+
+        border = labels.pop('mosaic_border', self.border)
+        self.size = img.shape[1] + border[1] * 2, img.shape[0] + border[0] * 2  # w, h
+        # M is affine matrix
+        # scale for func:`box_candidates`
+        img, M, scale = self.affine_transform(img, border)
+
+        bboxes = self.apply_bboxes(instances.bboxes, M)
+
+        segments = instances.segments
+        keypoints = instances.keypoints
+        # Update bboxes if there are segments.
+        if len(segments):
+            bboxes, segments = self.apply_segments(segments, M)
+
+        if keypoints is not None:
+            keypoints = self.apply_keypoints(keypoints, M)
+        new_instances = Instances(bboxes, segments, keypoints, bbox_format='xyxy', normalized=False)
+        # Clip
+        new_instances.clip(*self.size)
+
+        # Filter instances
+        instances.scale(scale_w=scale, scale_h=scale, bbox_only=True)
+        # Make the bboxes have the same scale with new_bboxes
+        i = self.box_candidates(box1=instances.bboxes.T,
+                                box2=new_instances.bboxes.T,
+                                area_thr=0.01 if len(segments) else 0.10)
+        labels['instances'] = new_instances[i]
+        labels['cls'] = cls[i]
+        labels['img'] = img
+        labels['resized_shape'] = img.shape[:2]
+        return labels
+
+    def box_candidates(self, box1, box2, wh_thr=2, ar_thr=100, area_thr=0.1, eps=1e-16):  # box1(4,n), box2(4,n)
+        # Compute box candidates: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio
+        w1, h1 = box1[2] - box1[0], box1[3] - box1[1]
+        w2, h2 = box2[2] - box2[0], box2[3] - box2[1]
+        ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps))  # aspect ratio
+        return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + eps) > area_thr) & (ar < ar_thr)  # candidates
+
+
+class RandomBlur:
+    def __init__(self, p) -> None:
+        self.p = p
+
+    def __call__(self, labels):
+        img = labels['img']
+        if random.random() < self.p:
+            k = 2 * random.randint(1, 10) + 1  # random odd kernel size in [3, 21]
+            img_blur = cv2.GaussianBlur(img, (k, k), 0)
+            labels['img'] = img_blur
+        return labels
+
+
+class RandomHSV:
+
+    def __init__(self, hgain=0.5, sgain=0.5, vgain=0.5) -> None:
+        self.hgain = hgain
+        self.sgain = sgain
+        self.vgain = vgain
+
+    def __call__(self, labels):
+        """Applies random HSV color-space augmentation to an image."""
+        img = labels['img']
+        if self.hgain or self.sgain or self.vgain:
+            r = np.random.uniform(-1, 1, 3) * [self.hgain, self.sgain, self.vgain] + 1  # random gains
+            hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
+            dtype = img.dtype  # uint8
+
+            x = np.arange(0, 256, dtype=r.dtype)
+            lut_hue = ((x * r[0]) % 180).astype(dtype)
+            lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)
+            lut_val = np.clip(x * r[2], 0, 255).astype(dtype)
+
+            im_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val)))
+            cv2.cvtColor(im_hsv, cv2.COLOR_HSV2BGR, dst=img)  # no return needed
+        return labels
+
+
+class RandomFlip:
+
+    def __init__(self, p=0.5, direction='horizontal', flip_idx=None) -> None:
+        assert direction in ['horizontal', 'vertical'], f'Support direction `horizontal` or `vertical`, got {direction}'
+        assert 0 <= p <= 1.0
+
+        self.p = p
+        self.direction = direction
+        self.flip_idx = flip_idx
+
+    def __call__(self, labels):
+        """Applies a random horizontal or vertical flip to the image and its instances."""
+        img = labels['img']
+        instances = labels.pop('instances')
+        instances.convert_bbox(format='xywh')
+        h, w = img.shape[:2]
+        h = 1 if instances.normalized else h
+        w = 1 if instances.normalized else w
+
+        # Flip up-down
+        if self.direction == 'vertical' and random.random() < self.p:
+            img = np.flipud(img)
+            instances.flipud(h)
+        # Flip left-right
+        if self.direction == 'horizontal' and random.random() < self.p:
+            img = np.fliplr(img)
+            instances.fliplr(w)
+            # For keypoints
+            if self.flip_idx is not None and instances.keypoints is not None:
+                instances.keypoints = np.ascontiguousarray(instances.keypoints[:, self.flip_idx, :])
+        labels['img'] = np.ascontiguousarray(img)
+        labels['instances'] = instances
+        return labels
+
+
+class RandomPoints:
+
+    def __init__(self, num_region=10, num_point=20) -> None:
+        self.num_region = num_region  # number of regions
+        self.num_point = num_point  # number of points per region
+
+    def __call__(self, labels):
+        img = labels['img']
+        h, w, c = img.shape
+
+        for i in range(self.num_region):
+            # region size in [50, 200] pixels per side; assumes the image is larger than 200 px per side
+            xx, ww = np.random.randint(0, w - 200), np.random.randint(50, 200)
+            yy, hh = np.random.randint(0, h - 200), np.random.randint(50, 200)
+            # sample num_point candidate centers in each region
+            xs = np.random.randint(xx, xx + ww, self.num_point)
+            ys = np.random.randint(yy, yy + hh, self.num_point)
+            # draw small bright ovals at the sampled centers
+            for j in range(self.num_point):
+                x, y = xs[j], ys[j]
+                ax, ay = np.random.randint(5, 8), np.random.randint(3, 10)
+                angle = np.random.randint(0, 180)
+                cc = np.random.randint(225, 255)
+                cv2.ellipse(img, (x, y), (ax, ay),
+                            angle, 0, 360,
+                            color=(cc, cc, cc),
+                            thickness=-1)
+        return labels
+
+
+class LetterBox:
+    """Resize image and padding for detection, instance segmentation, pose."""
+
+    def __init__(self, new_shape=(640, 640), auto=False, scaleFill=False, scaleup=True, stride=32):
+        """Initialize LetterBox object with specific parameters."""
+        self.new_shape = new_shape
+        self.auto = auto
+        self.scaleFill = scaleFill
+        self.scaleup = scaleup
+        self.stride = stride
+
+    def __call__(self, labels=None, image=None):
+        """Return updated labels and image with added border."""
+        if labels is None:
+            labels = {}
+        img = labels.get('img') if image is None else image
+        shape = img.shape[:2]  # current shape [height, width]
+        new_shape = labels.pop('rect_shape', self.new_shape)
+        if isinstance(new_shape, int):
+            new_shape = (new_shape, new_shape)
+
+        # Scale ratio (new / old)
+        r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
+        if not self.scaleup:  # only scale down, do not scale up (for better val mAP)
+            r = min(r, 1.0)
+
+        # Compute padding
+        ratio = r, r  # width, height ratios
+        new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
+        dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding
+        if self.auto:  # minimum rectangle
+            dw, dh = np.mod(dw, self.stride), np.mod(dh, self.stride)  # wh padding
+        elif 
self.scaleFill: # stretch + dw, dh = 0.0, 0.0 + new_unpad = (new_shape[1], new_shape[0]) + ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios + + dw /= 2 # divide padding into 2 sides + dh /= 2 + if labels.get('ratio_pad'): + labels['ratio_pad'] = (labels['ratio_pad'], (dw, dh)) # for evaluation + + if shape[::-1] != new_unpad: # resize + img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR) + top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1)) + left, right = int(round(dw - 0.1)), int(round(dw + 0.1)) + img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, + value=(114, 114, 114)) # add border + if len(labels): + labels = self._update_labels(labels, ratio, dw, dh) + labels['img'] = img + labels['resized_shape'] = new_shape + return labels + else: + return img + + def _update_labels(self, labels, ratio, padw, padh): + """Update labels.""" + labels['instances'].convert_bbox(format='xyxy') + labels['instances'].denormalize(*labels['img'].shape[:2][::-1]) + labels['instances'].scale(*ratio) + labels['instances'].add_padding(padw, padh) + return labels + + +class CopyPaste: + + def __init__(self, p=0.5) -> None: + self.p = p + + def __call__(self, labels): + """Implement Copy-Paste augmentation https://arxiv.org/abs/2012.07177, labels as nx5 np.array(cls, xyxy).""" + im = labels['img'] + cls = labels['cls'] + h, w = im.shape[:2] + instances = labels.pop('instances') + instances.convert_bbox(format='xyxy') + instances.denormalize(w, h) + if self.p and len(instances.segments): + n = len(instances) + _, w, _ = im.shape # height, width, channels + im_new = np.zeros(im.shape, np.uint8) + + # Calculate ioa first then select indexes randomly + ins_flip = deepcopy(instances) + ins_flip.fliplr(w) + + ioa = bbox_ioa(ins_flip.bboxes, instances.bboxes) # intersection over area, (N, M) + indexes = np.nonzero((ioa < 0.30).all(1))[0] # (N, ) + n = len(indexes) + for j in random.sample(list(indexes), k=round(self.p * n)): + cls = np.concatenate((cls, cls[[j]]), axis=0) + instances = Instances.concatenate((instances, ins_flip[[j]]), axis=0) + cv2.drawContours(im_new, instances.segments[[j]].astype(np.int32), -1, (1, 1, 1), cv2.FILLED) + + result = cv2.flip(im, 1) # augment segments (flip left-right) + i = cv2.flip(im_new, 1).astype(bool) + im[i] = result[i] # cv2.imwrite('debug.jpg', im) # debug + + labels['img'] = im + labels['cls'] = cls + labels['instances'] = instances + return labels + + +class Albumentations: + # YOLOv8 Albumentations class (optional, only used if package is installed) + def __init__(self, p=1.0): + """Initialize the transform object for YOLO bbox formatted params.""" + self.p = p + self.transform = None + prefix = colorstr('albumentations: ') + try: + import albumentations as A + + check_version(A.__version__, '1.0.3', hard=True) # version requirement + + T = [ + A.Blur(p=0.01), + A.MedianBlur(p=0.01), + A.ToGray(p=0.01), + A.CLAHE(p=0.01), + A.RandomBrightnessContrast(p=0.0), + A.RandomGamma(p=0.0), + A.ImageCompression(quality_lower=75, p=0.0)] # transforms + self.transform = A.Compose(T, bbox_params=A.BboxParams(format='yolo', label_fields=['class_labels'])) + + LOGGER.info(prefix + ', '.join(f'{x}'.replace('always_apply=False, ', '') for x in T if x.p)) + except ImportError: # package not installed, skip + pass + except Exception as e: + LOGGER.info(f'{prefix}{e}') + + def __call__(self, labels): + """Generates object detections and returns a dictionary with detection results.""" + im = labels['img'] + cls = 
labels['cls'] + if len(cls): + labels['instances'].convert_bbox('xywh') + labels['instances'].normalize(*im.shape[:2][::-1]) + bboxes = labels['instances'].bboxes + # TODO: add supports of segments and keypoints + if self.transform and random.random() < self.p: + new = self.transform(image=im, bboxes=bboxes, class_labels=cls) # transformed + if len(new['class_labels']) > 0: # skip update if no bbox in new im + labels['img'] = new['image'] + labels['cls'] = np.array(new['class_labels']) + bboxes = np.array(new['bboxes']) + labels['instances'].update(bboxes=bboxes) + return labels + + +# TODO: technically this is not an augmentation, maybe we should put this to another files +class Format: + + def __init__(self, + bbox_format='xywh', + normalize=True, + return_mask=False, + return_keypoint=False, + mask_ratio=4, + mask_overlap=True, + batch_idx=True, + nc=3): + self.bbox_format = bbox_format + self.normalize = normalize + self.return_mask = return_mask # set False when training detection only + self.return_keypoint = return_keypoint + self.mask_ratio = mask_ratio + self.mask_overlap = mask_overlap + self.batch_idx = batch_idx # keep the batch indexes + self.nc=nc + def __call__(self, labels): + """Return formatted image, classes, bounding boxes & keypoints to be used by 'collate_fn'.""" + img = labels.pop('img') + h, w = img.shape[:2] + cls = labels.pop('cls') + instances = labels.pop('instances') + instances.convert_bbox(format=self.bbox_format) + instances.denormalize(w, h) + nl = len(instances) + if self.return_mask: + if nl: + masks, instances, cls = self._format_segments(instances, cls, w, h) + masks = torch.from_numpy(masks) + else: + masks = torch.zeros(1 if self.mask_overlap else nl, self.nc, img.shape[0] // self.mask_ratio, + img.shape[1] // self.mask_ratio) + labels['masks'] = masks + if self.normalize: + instances.normalize(w, h) + labels['img'] = self._format_img(img) + labels['cls'] = torch.from_numpy(cls) if nl else torch.zeros(nl) + labels['bboxes'] = torch.from_numpy(instances.bboxes) if nl else torch.zeros((nl, 4)) + if self.return_keypoint: + labels['keypoints'] = torch.from_numpy(instances.keypoints) + # Then we can use collate_fn + if self.batch_idx: + labels['batch_idx'] = torch.zeros(nl) + return labels + + def _format_img(self, img): + """Format the image for YOLOv5 from Numpy array to PyTorch tensor.""" + if len(img.shape) < 3: + img = np.expand_dims(img, -1) + img = np.ascontiguousarray(img.transpose(2, 0, 1)[::-1]) + img = torch.from_numpy(img) + return img + + def _format_segments(self, instances, cls, w, h): + """convert polygon points to bitmap.""" + segments = instances.segments + if self.mask_overlap: + masks, sorted_idx = polygons2masks_multi((h, w), segments, cls, downsample_ratio=self.mask_ratio, nc=self.nc) + masks = masks[None] # (2, 640, 640) -> (1, 2, 640, 640) + instances = instances[sorted_idx] + cls = cls[sorted_idx] + else: + masks = polygons2masks((h, w), segments, color=1, downsample_ratio=self.mask_ratio) + + return masks, instances, cls + + +def v9_transforms(dataset, imgsz, hyp): + """Convert images to a size suitable for YOLOv8 training.""" + pre_transform = Compose([ + Mosaic(dataset, imgsz=imgsz, p=hyp['mosaic']), + CopyPaste(p=hyp['copy_paste']), + RandomPerspective( + degrees=hyp['degrees'], + translate=hyp['translate'], + scale=hyp['scale'], + shear=hyp['shear'], + perspective=hyp['perspective'], + pre_transform=LetterBox(new_shape=(imgsz, imgsz)), + )]) + #flip_idx = dataset.data.get('flip_idx', None) # for keypoints augmentation + #if 
dataset.use_keypoints: + # kpt_shape = dataset.data.get('kpt_shape', None) + # if flip_idx is None and hyp['fliplr'] > 0.0: + # hyp['fliplr'] = 0.0 + # LOGGER.warning("WARNING ⚠️ No 'flip_idx' array defined in data.yaml, setting augmentation 'fliplr=0.0'") + # elif flip_idx: + # if len(flip_idx) != kpt_shape[0]: + # raise ValueError(f'data.yaml flip_idx={flip_idx} length must be equal to kpt_shape[0]={kpt_shape[0]}') + # elif flip_idx[0] != 0: + # raise ValueError(f'data.yaml flip_idx={flip_idx} must be zero-index (start from 0)') + + return Compose([ + pre_transform, + MixUp(dataset, pre_transform=pre_transform, p=hyp['mixup']), + Albumentations(p=1.0), + RandomHSV(hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v']), + RandomFlip(direction='vertical', p=hyp['flipud']), + RandomFlip(direction='horizontal', p=hyp['fliplr']), + ]) # transforms + + +# Classification augmentations ----------------------------------------------------------------------------------------- +def classify_transforms(size=224, mean=(0.0, 0.0, 0.0), std=(1.0, 1.0, 1.0)): # IMAGENET_MEAN, IMAGENET_STD + # Transforms to apply if albumentations not installed + if not isinstance(size, int): + raise TypeError(f'classify_transforms() size {size} must be integer, not (list, tuple)') + if any(mean) or any(std): + return T.Compose([CenterCrop(size), ToTensor(), T.Normalize(mean, std, inplace=True)]) + else: + return T.Compose([CenterCrop(size), ToTensor()]) + + +def hsv2colorjitter(h, s, v): + """Map HSV (hue, saturation, value) jitter into ColorJitter values (brightness, contrast, saturation, hue)""" + return v, v, s, h + + +def classify_albumentations( + augment=True, + size=224, + scale=(0.08, 1.0), + hflip=0.5, + vflip=0.0, + hsv_h=0.015, # image HSV-Hue augmentation (fraction) + hsv_s=0.7, # image HSV-Saturation augmentation (fraction) + hsv_v=0.4, # image HSV-Value augmentation (fraction) + mean=(0.0, 0.0, 0.0), # IMAGENET_MEAN + std=(1.0, 1.0, 1.0), # IMAGENET_STD + auto_aug=False, +): + # YOLOv8 classification Albumentations (optional, only used if package is installed) + prefix = colorstr('albumentations: ') + try: + import albumentations as A + from albumentations.pytorch import ToTensorV2 + + check_version(A.__version__, '1.0.3', hard=True) # version requirement + if augment: # Resize and crop + T = [A.RandomResizedCrop(height=size, width=size, scale=scale)] + if auto_aug: + # TODO: implement AugMix, AutoAug & RandAug in albumentations + LOGGER.info(f'{prefix}auto augmentations are currently not supported') + else: + if hflip > 0: + T += [A.HorizontalFlip(p=hflip)] + if vflip > 0: + T += [A.VerticalFlip(p=vflip)] + if any((hsv_h, hsv_s, hsv_v)): + T += [A.ColorJitter(*hsv2colorjitter(hsv_h, hsv_s, hsv_v))] # brightness, contrast, saturation, hue + else: # Use fixed crop for eval set (reproducibility) + T = [A.SmallestMaxSize(max_size=size), A.CenterCrop(height=size, width=size)] + T += [A.Normalize(mean=mean, std=std), ToTensorV2()] # Normalize and convert to Tensor + LOGGER.info(prefix + ', '.join(f'{x}'.replace('always_apply=False, ', '') for x in T if x.p)) + return A.Compose(T) + + except ImportError: # package not installed, skip + pass + except Exception as e: + LOGGER.info(f'{prefix}{e}') + + +class ClassifyLetterBox: + # YOLOv8 LetterBox class for image preprocessing, i.e. 
T.Compose([LetterBox(size), ToTensor()])
+    def __init__(self, size=(640, 640), auto=False, stride=32):
+        """Resizes image and crops it to center with max dimensions 'h' and 'w'."""
+        super().__init__()
+        self.h, self.w = (size, size) if isinstance(size, int) else size
+        self.auto = auto  # pass max size integer, automatically solve for short side using stride
+        self.stride = stride  # used with auto
+
+    def __call__(self, im):  # im = np.array HWC
+        imh, imw = im.shape[:2]
+        r = min(self.h / imh, self.w / imw)  # ratio of new/old
+        h, w = round(imh * r), round(imw * r)  # resized image
+        hs, ws = (math.ceil(x / self.stride) * self.stride for x in (h, w)) if self.auto else (self.h, self.w)
+        top, left = round((hs - h) / 2 - 0.1), round((ws - w) / 2 - 0.1)
+        im_out = np.full((hs, ws, 3), 114, dtype=im.dtype)
+        im_out[top:top + h, left:left + w] = cv2.resize(im, (w, h), interpolation=cv2.INTER_LINEAR)
+        return im_out
+
+
+class CenterCrop:
+    # YOLOv8 CenterCrop class for image preprocessing, i.e. T.Compose([CenterCrop(size), ToTensor()])
+    def __init__(self, size=640):
+        """Crops the center square of an image and resizes it to the target size."""
+        super().__init__()
+        self.h, self.w = (size, size) if isinstance(size, int) else size
+
+    def __call__(self, im):  # im = np.array HWC
+        imh, imw = im.shape[:2]
+        m = min(imh, imw)  # min dimension
+        top, left = (imh - m) // 2, (imw - m) // 2
+        return cv2.resize(im[top:top + m, left:left + m], (self.w, self.h), interpolation=cv2.INTER_LINEAR)
+
+
+class ToTensor:
+    # YOLOv8 ToTensor class for image preprocessing, i.e. T.Compose([LetterBox(size), ToTensor()])
+    def __init__(self, half=False):
+        """Initialize YOLOv8 ToTensor object with optional half-precision support."""
+        super().__init__()
+        self.half = half
+
+    def __call__(self, im):  # im = np.array HWC in BGR order
+        im = np.ascontiguousarray(im.transpose((2, 0, 1))[::-1])  # HWC to CHW -> BGR to RGB -> contiguous
+        im = torch.from_numpy(im)  # to torch
+        im = im.half() if self.half else im.float()  # uint8 to fp16/32
+        im /= 255.0  # 0-255 to 0.0-1.0
+        return im
diff --git a/utils/dataloaders.py b/utils/dataloaders.py
index 776042999..6b5323d3e 100644
--- a/utils/dataloaders.py
+++ b/utils/dataloaders.py
@@ -25,6 +25,7 @@
 from utils.augmentations import (Albumentations, augment_hsv, classify_albumentations, classify_transforms, copy_paste,
                                  letterbox, mixup, random_perspective)
+from utils.augment import Compose, Format, Instances, LetterBox, v9_transforms
 from utils.general import (DATASETS_DIR, LOGGER, NUM_THREADS, TQDM_BAR_FORMAT, check_dataset, check_requirements,
                            check_yaml, clean_str, cv2, is_colab, is_kaggle, segments2boxes, unzip_file, xyn2xy,
                            xywh2xyxy, xywhn2xyxy, xyxy2xywhn)
@@ -168,6 +169,11 @@ def __len__(self):
     def __iter__(self):
         for _ in range(len(self)):
             yield next(self.iterator)
+    def reset(self):
+        """Reset iterator.
+        This is useful when we want to modify the dataset settings while training.
+        """
+        self.iterator = self._get_iterator()
 
 
 class _RepeatSampler:
@@ -456,7 +462,6 @@ def __init__(self,
         self.stride = stride
         self.path = path
         self.albumentations = Albumentations(size=img_size) if augment else None
-
         try:
             f = []  # image files
             for p in path if isinstance(path, list) else [path]:
@@ -496,7 +501,7 @@ def __init__(self,
             if cache['msgs']:
                 LOGGER.info('\n'.join(cache['msgs']))  # display warnings
         assert nf > 0 or not augment, f'{prefix}No labels found in {cache_path}, can not start training. 
{HELP_URL}'
-
+
         # Read cache
         [cache.pop(k) for k in ('hash', 'version', 'msgs')]  # remove items
         labels, shapes, self.segments = zip(*cache.values())
@@ -561,15 +566,16 @@ def __init__(self,
                     shapes[i] = [1, 1 / mini]
             self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(int) * stride
-
+
         # Cache images into RAM/disk for faster training
         if cache_images == 'ram' and not self.check_cache_ram(prefix=prefix):
             cache_images = False
         self.ims = [None] * n
+        self.im_hw0, self.im_hw = [None] * n, [None] * n
         self.npy_files = [Path(f).with_suffix('.npy') for f in self.im_files]
+
         if cache_images:
             b, gb = 0, 1 << 30  # bytes of cached images, bytes per gigabytes
-            self.im_hw0, self.im_hw = [None] * n, [None] * n
             fcn = self.cache_images_to_disk if cache_images == 'disk' else self.load_image
             results = ThreadPool(NUM_THREADS).imap(fcn, range(n))
             pbar = tqdm(enumerate(results), total=n, bar_format=TQDM_BAR_FORMAT, disable=LOCAL_RANK > 0)
@@ -581,7 +587,12 @@ def __init__(self,
                     b += self.ims[i].nbytes
                 pbar.desc = f'{prefix}Caching images ({b / gb:.1f}GB {cache_images})'
             pbar.close()
-
+
+        self.transforms = self.build_transforms(hyp=hyp)
+        self.ni = len(self.labels)  # number of images
+        self.buffer = []
+        self.max_buffer_length = min((n, batch_size * 8, 1000)) if self.augment else 0
+
     def check_cache_ram(self, safety_margin=0.1, prefix=''):
         # Check image caching requirements vs available memory
         b, gb = 0, 1 << 30  # bytes of cached images, bytes per gigabytes
@@ -640,85 +651,128 @@ def cache_labels(self, path=Path('./labels.cache'), prefix=''):
     def __len__(self):
         return len(self.im_files)
 
-    # def __iter__(self):
-    #     self.count = -1
-    #     print('ran dataset iter')
-    #     #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
-    #     return self
-
+    def get_image_and_label(self, index):
+        img, (h0, w0), (h, w) = self.load_image(index)
+        bboxes = self.labels[index].copy()
+        segments = self.segments[index].copy()
+        return {'img': img,
+                'im_file': self.im_files[index],
+                'cls': bboxes[:, 0],
+                'instances': Instances(bboxes[:, 1:], segments, bbox_format='xywh', normalized=True),
+                'ori_shape': (h0, w0),
+                'resized_shape': (h, w)}
+
+    def build_transforms(self, hyp=None):
+        """Builds the transform pipeline: v9 augmentations for training, plain letterboxing otherwise."""
+        if self.augment:
+            hyp['mosaic'] = hyp['mosaic'] if self.augment and not self.rect else 0.0
+            hyp['mixup'] = hyp['mixup'] if self.augment and not self.rect else 0.0
+            transforms = v9_transforms(self, self.img_size, hyp)
+        else:
+            transforms = Compose([LetterBox(new_shape=(self.img_size, self.img_size), scaleup=False)])
+
+        # shapes = (h0, w0), ((h / h0, w / w0), pad)  # for COCO mAP rescaling
+        transforms.append(
+            Format(bbox_format='xywh',
+                   normalize=True,
+                   batch_idx=True))
+        return transforms
+
+
     def __getitem__(self, index):
         index = self.indices[index]  # linear, shuffled, or image_weights
-        hyp = self.hyp
-        mosaic = self.mosaic and random.random() < hyp['mosaic']
-        if mosaic:
-            # Load mosaic
-            img, labels = self.load_mosaic(index)
-            shapes = None
-
-            # MixUp augmentation
-            if random.random() < hyp['mixup']:
-                img, labels = mixup(img, labels, *self.load_mosaic(random.randint(0, self.n - 1)))
-
-        else:
-            # Load image
-            img, (h0, w0), (h, w) = self.load_image(index)
-
-            # Letterbox
-            shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size  # final letterboxed shape
-            img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
-            shapes = (h0, w0), ((h / h0, w / w0), pad)  # for COCO mAP rescaling
-
-            labels = 
self.labels[index].copy() - if labels.size: # normalized xywh to pixel xyxy format - labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1]) - - if self.augment: - img, labels = random_perspective(img, - labels, - degrees=hyp['degrees'], - translate=hyp['translate'], - scale=hyp['scale'], - shear=hyp['shear'], - perspective=hyp['perspective']) - - nl = len(labels) # number of labels - if nl: - labels[:, 1:5] = xyxy2xywhn(labels[:, 1:5], w=img.shape[1], h=img.shape[0], clip=True, eps=1E-3) - - if self.augment: - # Albumentations - img, labels = self.albumentations(img, labels) - nl = len(labels) # update after albumentations - - # HSV color-space - augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v']) - - # Flip up-down - if random.random() < hyp['flipud']: - img = np.flipud(img) - if nl: - labels[:, 2] = 1 - labels[:, 2] - - # Flip left-right - if random.random() < hyp['fliplr']: - img = np.fliplr(img) - if nl: - labels[:, 1] = 1 - labels[:, 1] - - # Cutouts - # labels = cutout(img, labels, p=0.5) - # nl = len(labels) # update after cutout - - labels_out = torch.zeros((nl, 6)) - if nl: - labels_out[:, 1:] = torch.from_numpy(labels) - - # Convert - img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB - img = np.ascontiguousarray(img) - - return torch.from_numpy(img), labels_out, self.im_files[index], shapes + data = self.get_image_and_label(index) + data['ratio_pad'] = (data['resized_shape'][0] / data['ori_shape'][0], + data['resized_shape'][1] / data['ori_shape'][1]) # for evaluation + if self.rect: + data['rect_shape'] = self.batch_shapes[self.batch[index]] + + data = self.transforms(data) + labels_out = torch.cat([data['batch_idx'].view(-1,1), + data['cls'].view(-1,1), + data['bboxes']], dim=1) + if data.get('ratio_pad') is None: + return data['img'], labels_out, data['im_file'], (data['ori_shape'], None) + return data['img'], labels_out, data['im_file'], (data['ori_shape'], data['ratio_pad']) + + + #def __getitem__(self, index): + # t0 = time.time() + # index = self.indices[index] # linear, shuffled, or image_weights + # + # hyp = self.hyp + # mosaic = self.mosaic and random.random() < hyp['mosaic'] + # if mosaic: + # # Load mosaic + # img, labels = self.load_mosaic(index) + # shapes = None +# + # # MixUp augmentation + # if random.random() < hyp['mixup']: + # img, labels = mixup(img, labels, *self.load_mosaic(random.randint(0, self.n - 1))) +# + # else: + # # Load image + # img, (h0, w0), (h, w) = self.load_image(index) + # + # # Letterbox + # shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape + # img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment) + # shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling +# + # labels = self.labels[index].copy() + # if labels.size: # normalized xywh to pixel xyxy format + # labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1]) +# + # if self.augment: + # img, labels = random_perspective(img, + # labels, + # degrees=hyp['degrees'], + # translate=hyp['translate'], + # scale=hyp['scale'], + # shear=hyp['shear'], + # perspective=hyp['perspective']) + # + # #print(img.shape, labels) + # nl = len(labels) # number of labels + # if nl: + # labels[:, 1:5] = xyxy2xywhn(labels[:, 1:5], w=img.shape[1], h=img.shape[0], clip=True, eps=1E-3) +# + # if self.augment: + # # Albumentations + # img, labels = self.albumentations(img, labels) + # nl = len(labels) # update 
after albumentations +# + # # HSV color-space + # augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v']) +# + # # Flip up-down + # if random.random() < hyp['flipud']: + # img = np.flipud(img) + # if nl: + # labels[:, 2] = 1 - labels[:, 2] +# + # # Flip left-right + # if random.random() < hyp['fliplr']: + # img = np.fliplr(img) + # if nl: + # labels[:, 1] = 1 - labels[:, 1] +# + # # Cutouts + # # labels = cutout(img, labels, p=0.5) + # # nl = len(labels) # update after cutout + # + # labels_out = torch.zeros((nl, 6)) + # if nl: + # labels_out[:, 1:] = torch.from_numpy(labels) +# + # # Convert + # img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB + # img = np.ascontiguousarray(img) + # # print("loaded: ", (time.time()-t0)*1000) # about 800-900 + # #print(torch.from_numpy(img), labels_out) + # return torch.from_numpy(img), labels_out, self.im_files[index], shapes def load_image(self, i): # Loads 1 image from dataset index 'i', returns (im, original hw, resized hw) @@ -734,6 +788,12 @@ def load_image(self, i): if r != 1: # if sizes are not equal interp = cv2.INTER_LINEAR if (self.augment or r > 1) else cv2.INTER_AREA im = cv2.resize(im, (int(w0 * r), int(h0 * r)), interpolation=interp) + if self.augment: + self.ims[i], self.im_hw0[i], self.im_hw[i] = im, (h0, w0), im.shape[:2] # im, hw_original, hw_resized + self.buffer.append(i) + if len(self.buffer) >= self.max_buffer_length: + j = self.buffer.pop(0) + self.ims[j], self.im_hw0[j], self.im_hw[j] = None, None, None return im, (h0, w0), im.shape[:2] # im, hw_original, hw_resized return self.ims[i], self.im_hw0[i], self.im_hw[i] # im, hw_original, hw_resized @@ -775,6 +835,7 @@ def load_mosaic(self, index): # Labels labels, segments = self.labels[index].copy(), self.segments[index].copy() + #print(labels) if labels.size: labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh) # normalized xywh to pixel xyxy format segments = [xyn2xy(x, w, h, padw, padh) for x in segments] diff --git a/utils/general.py b/utils/general.py index efe78b29a..08d8b14ff 100644 --- a/utils/general.py +++ b/utils/general.py @@ -748,6 +748,65 @@ def coco80_to_coco91_class(): # converts 80-index (val2014) to 91-index (paper) 64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90] +def xywh2ltwh(x): + """ + Convert the bounding box format from [x, y, w, h] to [x1, y1, w, h], where x1, y1 are the top-left coordinates. + + Args: + x (np.ndarray) or (torch.Tensor): The input tensor with the bounding box coordinates in the xywh format + Returns: + y (np.ndarray) or (torch.Tensor): The bounding box coordinates in the xyltwh format + """ + y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) + y[:, 0] = x[:, 0] - x[:, 2] / 2 # top left x + y[:, 1] = x[:, 1] - x[:, 3] / 2 # top left y + return y + + +def xyxy2ltwh(x): + """ + Convert nx4 bounding boxes from [x1, y1, x2, y2] to [x1, y1, w, h], where xy1=top-left, xy2=bottom-right + + Args: + x (np.ndarray) or (torch.Tensor): The input tensor with the bounding boxes coordinates in the xyxy format + Returns: + y (np.ndarray) or (torch.Tensor): The bounding box coordinates in the xyltwh format. 
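+
+    Example (illustrative values):
+        >>> xyxy2ltwh(np.array([[10., 20., 50., 80.]]))
+        array([[10., 20., 40., 60.]])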
diff --git a/utils/general.py b/utils/general.py
index efe78b29a..08d8b14ff 100644
--- a/utils/general.py
+++ b/utils/general.py
@@ -748,6 +748,65 @@ def coco80_to_coco91_class():  # converts 80-index (val2014) to 91-index (paper)
         64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90]
 
 
+def xywh2ltwh(x):
+    """
+    Convert the bounding box format from [x, y, w, h] to [x1, y1, w, h], where x1, y1 are the top-left coordinates.
+
+    Args:
+        x (np.ndarray | torch.Tensor): The input tensor with the bounding box coordinates in the xywh format
+    Returns:
+        y (np.ndarray | torch.Tensor): The bounding box coordinates in the ltwh format
+    """
+    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
+    y[:, 0] = x[:, 0] - x[:, 2] / 2  # top left x
+    y[:, 1] = x[:, 1] - x[:, 3] / 2  # top left y
+    return y
+
+
+def xyxy2ltwh(x):
+    """
+    Convert nx4 bounding boxes from [x1, y1, x2, y2] to [x1, y1, w, h], where xy1=top-left and xy2=bottom-right.
+
+    Args:
+        x (np.ndarray | torch.Tensor): The input tensor with the bounding box coordinates in the xyxy format
+    Returns:
+        y (np.ndarray | torch.Tensor): The bounding box coordinates in the ltwh format.
+    """
+    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
+    y[:, 2] = x[:, 2] - x[:, 0]  # width
+    y[:, 3] = x[:, 3] - x[:, 1]  # height
+    return y
+
+
+def ltwh2xywh(x):
+    """
+    Convert nx4 boxes from [x1, y1, w, h] to [x, y, w, h], where xy1=top-left and xy=center.
+
+    Args:
+        x (np.ndarray | torch.Tensor): the input boxes
+    Returns:
+        y (np.ndarray | torch.Tensor): the xywh coordinates of the bounding boxes.
+    """
+    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
+    y[:, 0] = x[:, 0] + x[:, 2] / 2  # center x
+    y[:, 1] = x[:, 1] + x[:, 3] / 2  # center y
+    return y
+
+
+def ltwh2xyxy(x):
+    """
+    Convert the bounding box from [x1, y1, w, h] to [x1, y1, x2, y2], where xy1=top-left and xy2=bottom-right.
+
+    Args:
+        x (np.ndarray | torch.Tensor): the input boxes
+
+    Returns:
+        y (np.ndarray | torch.Tensor): the xyxy coordinates of the bounding boxes.
+    """
+    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
+    y[:, 2] = x[:, 2] + x[:, 0]  # bottom-right x = x1 + w
+    y[:, 3] = x[:, 3] + x[:, 1]  # bottom-right y = y1 + h
+    return y
+
+
 def xyxy2xywh(x):
     # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right
     y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
@@ -898,7 +957,6 @@ def non_max_suppression(
     Returns:
          list of detections, on (n,6) tensor per image [xyxy, conf, cls]
     """
-
     if isinstance(prediction, (list, tuple)):  # YOLO model in validation model, output = (inference_out, loss_out)
         prediction = prediction[0]  # select only inference output
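
A quick sanity check of the four helpers above on a single box, using plain numpy but the
same arithmetic the functions implement:

    import numpy as np

    box_xywh = np.array([[50., 40., 20., 10.]])  # center (50, 40), 20 wide, 10 tall

    ltwh = box_xywh.copy()                       # equivalent to xywh2ltwh(box_xywh)
    ltwh[:, 0] -= ltwh[:, 2] / 2                 # x1 = cx - w/2 -> 40
    ltwh[:, 1] -= ltwh[:, 3] / 2                 # y1 = cy - h/2 -> 35

    xyxy = ltwh.copy()                           # equivalent to ltwh2xyxy(ltwh)
    xyxy[:, 2] += xyxy[:, 0]                     # x2 = x1 + w -> 60
    xyxy[:, 3] += xyxy[:, 1]                     # y2 = y1 + h -> 45

    print(ltwh)  # [[40. 35. 20. 10.]]
    print(xyxy)  # [[40. 35. 60. 45.]]
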
diff --git a/utils/instance.py b/utils/instance.py
new file mode 100644
index 000000000..89d41ee67
--- /dev/null
+++ b/utils/instance.py
@@ -0,0 +1,390 @@
+# Ultralytics YOLO 🚀, AGPL-3.0 license
+
+from collections import abc
+from itertools import repeat
+from numbers import Number
+from typing import List
+
+import numpy as np
+
+from .general import ltwh2xywh, ltwh2xyxy, resample_segments, xywh2ltwh, xywh2xyxy, xyxy2ltwh, xyxy2xywh
+
+
+def _ntuple(n):
+    """From PyTorch internals."""
+
+    def parse(x):
+        """Return x unchanged if it is already iterable, else repeat it n times as a tuple."""
+        return x if isinstance(x, abc.Iterable) else tuple(repeat(x, n))
+
+    return parse
+
+
+to_4tuple = _ntuple(4)
+
+# `xyxy` means left top and right bottom
+# `xywh` means center x, center y and width, height (YOLO format)
+# `ltwh` means left top and width, height (COCO format)
+_formats = ['xyxy', 'xywh', 'ltwh']
+
+__all__ = 'Bboxes',  # tuple or list
+
+
+class Bboxes:
+    """Now only numpy is supported."""
+
+    def __init__(self, bboxes, format='xyxy') -> None:
+        assert format in _formats, f'Invalid bounding box format: {format}, format must be one of {_formats}'
+        bboxes = bboxes[None, :] if bboxes.ndim == 1 else bboxes
+        assert bboxes.ndim == 2
+        assert bboxes.shape[1] == 4
+        self.bboxes = bboxes
+        self.format = format
+
+    def convert(self, format):
+        """Converts bounding box format from one type to another, in place."""
+        assert format in _formats, f'Invalid bounding box format: {format}, format must be one of {_formats}'
+        if self.format == format:
+            return
+        elif self.format == 'xyxy':
+            bboxes = xyxy2xywh(self.bboxes) if format == 'xywh' else xyxy2ltwh(self.bboxes)
+        elif self.format == 'xywh':
+            bboxes = xywh2xyxy(self.bboxes) if format == 'xyxy' else xywh2ltwh(self.bboxes)
+        else:
+            bboxes = ltwh2xyxy(self.bboxes) if format == 'xyxy' else ltwh2xywh(self.bboxes)
+        self.bboxes = bboxes
+        self.format = format
+
+    def areas(self):
+        """Return box areas."""
+        self.convert('xyxy')
+        return (self.bboxes[:, 2] - self.bboxes[:, 0]) * (self.bboxes[:, 3] - self.bboxes[:, 1])
+
+    def mul(self, scale):
+        """
+        Args:
+            scale (tuple | list | int): the scale for four coords.
+        """
+        if isinstance(scale, Number):
+            scale = to_4tuple(scale)
+        assert isinstance(scale, (tuple, list))
+        assert len(scale) == 4
+        self.bboxes[:, 0] *= scale[0]
+        self.bboxes[:, 1] *= scale[1]
+        self.bboxes[:, 2] *= scale[2]
+        self.bboxes[:, 3] *= scale[3]
+
+    def add(self, offset):
+        """
+        Args:
+            offset (tuple | list | int): the offset for four coords.
+        """
+        if isinstance(offset, Number):
+            offset = to_4tuple(offset)
+        assert isinstance(offset, (tuple, list))
+        assert len(offset) == 4
+        self.bboxes[:, 0] += offset[0]
+        self.bboxes[:, 1] += offset[1]
+        self.bboxes[:, 2] += offset[2]
+        self.bboxes[:, 3] += offset[3]
+
+    def __len__(self):
+        """Return the number of boxes."""
+        return len(self.bboxes)
+
+    @classmethod
+    def concatenate(cls, boxes_list: List['Bboxes'], axis=0) -> 'Bboxes':
+        """
+        Concatenate a list of Bboxes objects into a single Bboxes object.
+
+        Args:
+            boxes_list (List[Bboxes]): A list of Bboxes objects to concatenate.
+            axis (int, optional): The axis along which to concatenate the bounding boxes.
+                                  Defaults to 0.
+
+        Returns:
+            Bboxes: A new Bboxes object containing the concatenated bounding boxes.
+
+        Note:
+            The input should be a list or tuple of Bboxes objects.
+        """
+        assert isinstance(boxes_list, (list, tuple))
+        if not boxes_list:
+            return cls(np.empty(0))
+        assert all(isinstance(box, Bboxes) for box in boxes_list)
+
+        if len(boxes_list) == 1:
+            return boxes_list[0]
+        return cls(np.concatenate([b.bboxes for b in boxes_list], axis=axis))
+
+    def __getitem__(self, index) -> 'Bboxes':
+        """
+        Retrieve a specific bounding box or a set of bounding boxes using indexing.
+
+        Args:
+            index (int, slice, or np.ndarray): The index, slice, or boolean array to select
+                                               the desired bounding boxes.
+
+        Returns:
+            Bboxes: A new Bboxes object containing the selected bounding boxes.
+
+        Raises:
+            AssertionError: If the indexed bounding boxes do not form a 2-dimensional matrix.
+
+        Note:
+            When using boolean indexing, make sure to provide a boolean array with the same
+            length as the number of bounding boxes.
+        """
+        if isinstance(index, int):
+            return Bboxes(self.bboxes[index].reshape(1, -1))  # reshape: np.ndarray.view() does not take a shape
+        b = self.bboxes[index]
+        assert b.ndim == 2, f'Indexing on Bboxes with {index} failed to return a matrix!'
+        return Bboxes(b)
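+
+# Usage sketch (illustrative only; the values are hypothetical):
+#     b = Bboxes(np.array([[10., 10., 30., 40.]]), format='xyxy')
+#     b.areas()          # array([600.])  ->  (30 - 10) * (40 - 10)
+#     b.convert('ltwh')  # in place: bboxes become [[10., 10., 20., 30.]]
+#     b.mul(2)           # scalar broadcast via to_4tuple -> [[20., 20., 40., 60.]]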
+
+
+class Instances:
+
+    def __init__(self, bboxes, segments=None, keypoints=None, bbox_format='xywh', normalized=True) -> None:
+        """
+        Args:
+            bboxes (ndarray): bboxes with shape [N, 4].
+            segments (list | ndarray): segments.
+            keypoints (ndarray): keypoints (x, y, visible) with shape [N, 17, 3].
+        """
+        if segments is None:
+            segments = []
+        self._bboxes = Bboxes(bboxes=bboxes, format=bbox_format)
+        self.keypoints = keypoints
+        self.normalized = normalized
+
+        if len(segments) > 0:
+            # list[np.array(1000, 2)] * num_samples
+            segments = resample_segments(segments)
+            # (N, 1000, 2)
+            segments = np.stack(segments, axis=0)
+        else:
+            segments = np.zeros((0, 1000, 2), dtype=np.float32)
+        self.segments = segments
+
+    def convert_bbox(self, format):
+        """Convert bounding box format."""
+        self._bboxes.convert(format=format)
+
+    def bbox_areas(self):
+        """Calculate and return the area of each bounding box."""
+        return self._bboxes.areas()
+
+    def scale(self, scale_w, scale_h, bbox_only=False):
+        """Similar to denormalize(), but without toggling the normalized flag."""
+        self._bboxes.mul(scale=(scale_w, scale_h, scale_w, scale_h))
+        if bbox_only:
+            return
+        self.segments[..., 0] *= scale_w
+        self.segments[..., 1] *= scale_h
+        if self.keypoints is not None:
+            self.keypoints[..., 0] *= scale_w
+            self.keypoints[..., 1] *= scale_h
+
+    def denormalize(self, w, h):
+        """Denormalizes boxes, segments, and keypoints from normalized coordinates."""
+        if not self.normalized:
+            return
+        self._bboxes.mul(scale=(w, h, w, h))
+        self.segments[..., 0] *= w
+        self.segments[..., 1] *= h
+        if self.keypoints is not None:
+            self.keypoints[..., 0] *= w
+            self.keypoints[..., 1] *= h
+        self.normalized = False
+
+    def normalize(self, w, h):
+        """Normalize bounding boxes, segments, and keypoints to image dimensions."""
+        if self.normalized:
+            return
+        self._bboxes.mul(scale=(1 / w, 1 / h, 1 / w, 1 / h))
+        self.segments[..., 0] /= w
+        self.segments[..., 1] /= h
+        if self.keypoints is not None:
+            self.keypoints[..., 0] /= w
+            self.keypoints[..., 1] /= h
+        self.normalized = True
+
+    def add_padding(self, padw, padh):
+        """Handle rect and mosaic situations; requires absolute coordinates."""
+        assert not self.normalized, 'you should add padding with absolute coordinates.'
+        self._bboxes.add(offset=(padw, padh, padw, padh))
+        self.segments[..., 0] += padw
+        self.segments[..., 1] += padh
+        if self.keypoints is not None:
+            self.keypoints[..., 0] += padw
+            self.keypoints[..., 1] += padh
+
+    def __getitem__(self, index) -> 'Instances':
+        """
+        Retrieve a specific instance or a set of instances using indexing.
+
+        Args:
+            index (int, slice, or np.ndarray): The index, slice, or boolean array to select
+                                               the desired instances.
+
+        Returns:
+            Instances: A new Instances object containing the selected bounding boxes,
+                       segments, and keypoints if present.
+
+        Note:
+            When using boolean indexing, make sure to provide a boolean array with the same
+            length as the number of instances.
+        """
+        segments = self.segments[index] if len(self.segments) else self.segments
+        keypoints = self.keypoints[index] if self.keypoints is not None else None
+        bboxes = self.bboxes[index]
+        bbox_format = self._bboxes.format
+        return Instances(
+            bboxes=bboxes,
+            segments=segments,
+            keypoints=keypoints,
+            bbox_format=bbox_format,
+            normalized=self.normalized,
+        )
+
+    def flipud(self, h):
+        """Flips the coordinates of bounding boxes, segments, and keypoints vertically."""
+        if self._bboxes.format == 'xyxy':
+            y1 = self.bboxes[:, 1].copy()
+            y2 = self.bboxes[:, 3].copy()
+            self.bboxes[:, 1] = h - y2
+            self.bboxes[:, 3] = h - y1
+        else:
+            self.bboxes[:, 1] = h - self.bboxes[:, 1]
+        self.segments[..., 1] = h - self.segments[..., 1]
+        if self.keypoints is not None:
+            self.keypoints[..., 1] = h - self.keypoints[..., 1]
+
+    def fliplr(self, w):
+        """Flips the coordinates of bounding boxes, segments, and keypoints horizontally."""
+        if self._bboxes.format == 'xyxy':
+            x1 = self.bboxes[:, 0].copy()
+            x2 = self.bboxes[:, 2].copy()
+            self.bboxes[:, 0] = w - x2
+            self.bboxes[:, 2] = w - x1
+        else:
+            self.bboxes[:, 0] = w - self.bboxes[:, 0]
+        self.segments[..., 0] = w - self.segments[..., 0]
+        if self.keypoints is not None:
+            self.keypoints[..., 0] = w - self.keypoints[..., 0]
+
+    def clip(self, w, h):
+        """Clips bounding boxes, segments, and keypoints values to stay within image boundaries."""
+        ori_format = self._bboxes.format
+        self.convert_bbox(format='xyxy')
+        self.bboxes[:, [0, 2]] = self.bboxes[:, [0, 2]].clip(0, w)
+        self.bboxes[:, [1, 3]] = self.bboxes[:, [1, 3]].clip(0, h)
+        if ori_format != 'xyxy':
+            self.convert_bbox(format=ori_format)
+        self.segments[..., 0] = self.segments[..., 0].clip(0, w)
+        self.segments[..., 1] = self.segments[..., 1].clip(0, h)
+        if self.keypoints is not None:
+            self.keypoints[..., 0] = self.keypoints[..., 0].clip(0, w)
+            self.keypoints[..., 1] = self.keypoints[..., 1].clip(0, h)
+
+    def remove_zero_area_boxes(self):
+        """Remove zero-area boxes, i.e. after clipping some boxes may have zero width or height. This removes them."""
+        good = self._bboxes.areas() > 0
+        if not all(good):
+            self._bboxes = Bboxes(self._bboxes.bboxes[good], format=self._bboxes.format)
+            if len(self.segments):
+                self.segments = self.segments[good]
+            if self.keypoints is not None:
+                self.keypoints = self.keypoints[good]
+        return good
+
+    def update(self, bboxes, segments=None, keypoints=None):
+        """Updates instance variables."""
+        self._bboxes = Bboxes(bboxes, format=self._bboxes.format)
+        if segments is not None:
+            self.segments = segments
+        if keypoints is not None:
+            self.keypoints = keypoints
+
+    def __len__(self):
+        """Return the number of instances."""
+        return len(self.bboxes)
+
+    @classmethod
+    def concatenate(cls, instances_list: List['Instances'], axis=0) -> 'Instances':
+        """
+        Concatenates a list of Instances objects into a single Instances object.
+
+        Args:
+            instances_list (List[Instances]): A list of Instances objects to concatenate.
+            axis (int, optional): The axis along which the arrays will be concatenated. Defaults to 0.
+
+        Returns:
+            Instances: A new Instances object containing the concatenated bounding boxes,
+                       segments, and keypoints if present.
+
+        Note:
+            The `Instances` objects in the list should have the same properties, such as
+            the format of the bounding boxes, whether keypoints are present, and if the
+            coordinates are normalized.
+        """
+        assert isinstance(instances_list, (list, tuple))
+        if not instances_list:
+            return cls(np.empty(0))
+        assert all(isinstance(instance, Instances) for instance in instances_list)
+
+        if len(instances_list) == 1:
+            return instances_list[0]
+
+        use_keypoint = instances_list[0].keypoints is not None
+        bbox_format = instances_list[0]._bboxes.format
+        normalized = instances_list[0].normalized
+
+        cat_boxes = np.concatenate([ins.bboxes for ins in instances_list], axis=axis)
+        cat_segments = np.concatenate([ins.segments for ins in instances_list], axis=axis)
+        cat_keypoints = np.concatenate([ins.keypoints for ins in instances_list], axis=axis) if use_keypoint else None
+        return cls(cat_boxes, cat_segments, cat_keypoints, bbox_format, normalized)
+
+    @property
+    def bboxes(self):
+        """Return bounding boxes."""
+        return self._bboxes.bboxes
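
To make the new Instances container concrete, a minimal usage sketch (standalone, assuming
the repo root is on PYTHONPATH; the box values are made up):

    import numpy as np
    from utils.instance import Instances

    boxes = np.array([[0.5, 0.5, 0.2, 0.2],
                      [0.1, 0.1, 0.1, 0.1],
                      [0.9, 0.9, 0.3, 0.3]], dtype=np.float32)  # normalized xywh
    inst = Instances(boxes, bbox_format='xywh', normalized=True)

    inst.denormalize(w=640, h=480)  # to pixel coordinates
    inst.fliplr(w=640)              # horizontal flip; boxes follow the image
    inst.convert_bbox('xyxy')
    inst.clip(w=640, h=480)         # keep coordinates inside the image
    good = inst.remove_zero_area_boxes()
    print(len(inst), inst.bboxes)   # surviving boxes in pixel xyxy
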
zw*paV!*9tHYK(5^V&6qAD%EOlcI&P9O}19U_S9NUx7pYxlNOV=nDm+aH70cVh=0jA zuqdpI#vyCyYrGXH{6Bcb6{-J-$+ww|x;X+2Ka3X~0|~VNq@OChl$*^xm0QTo6_yI8 za)p)E+!MK#T(R(oX5NWe$Kc4U69EMDo6%vw@F{H8#}BIXWe-6Q;R-*3+2xGOQ2cQovz-bXM4C)cN^;X>NbeIyRizkxE3^aJMEKKeS`+BeeB~;f8@_R^_0x@ z88Jj7uo=c&`H>*33hu8qJj2J2<}gnGdLn$GIS2fKAh4GP*%4TZ>hcG8WHoJcRP3eQ z@cB%*4t(s`nC#*2l|2wmb-~G>PU+vi!4jEW(iH$n(mR z`j46CeI$|7fmIQk(eVpP=J%RfjSbDEfnPs?rGs}V-enNVgi+P9W*=kmG#`UF=%E8a zAcQrLcysrK^B10HX>Q00^xtM)S>J*S2_d6d^HWE(b8>quHpCl(5Uc5wdAaSk;VNhu zCQKrhA9WB$5M^O4sfVGKaM`GMes`~V*{GdTa-I(=>HjVOwp=c=*s*j9#~v6ew}{T8E3{`a2zn@Ir?>R Z=cY<-E}NUr<#XlSV)-X5{(IH2|2G{Lcc%aV literal 0 HcmV?d00001 diff --git a/utils/metrics.py b/utils/metrics.py index 1229f2b10..cc6d81141 100644 --- a/utils/metrics.py +++ b/utils/metrics.py @@ -11,7 +11,7 @@ def fitness(x): # Model fitness as a weighted combination of metrics - w = [0.0, 0.0, 0.1, 0.9] # weights for [P, R, mAP@0.5, mAP@0.5:0.95] + w = [0.5, 0.5, 0.0, 0.0] # weights for [P, R, mAP@0.5, mAP@0.5:0.95] return (x[:, :4] * w).sum(1) @@ -127,23 +127,22 @@ def __init__(self, nc, conf=0.25, iou_thres=0.45): self.iou_thres = iou_thres def process_batch(self, detections, labels): - """ - Return intersection-over-union (Jaccard index) of boxes. - Both sets of boxes are expected to be in (x1, y1, x2, y2) format. - Arguments: - detections (Array[N, 6]), x1, y1, x2, y2, conf, class - labels (Array[M, 5]), class, x1, y1, x2, y2 - Returns: - None, updates confusion matrix accordingly - """ if detections is None: - gt_classes = labels.int() + if labels is None: + self.matrix[self.nc, self.nc] += 1 + return + gt_classes = labels[:,0].int() for gc in gt_classes: - self.matrix[self.nc, gc] += 1 # background FN + self.matrix[self.nc, gc] += 1 # class FN return - + detections = detections[detections[:, 4] > self.conf] gt_classes = labels[:, 0].int() + + if labels.sum()==0: + for dc in detections: + self.matrix[dc[5].int(), self.nc] += 1 # class FP + return detection_classes = detections[:, 5].int() iou = box_iou(labels[:, 1:], detections[:, :4]) @@ -159,18 +158,17 @@ def process_batch(self, detections, labels): matches = np.zeros((0, 3)) n = matches.shape[0] > 0 - m0, m1, _ = matches.transpose().astype(int) + m0, m1, _ = matches.transpose().astype(np.int16) for i, gc in enumerate(gt_classes): j = m0 == i if n and sum(j) == 1: self.matrix[detection_classes[m1[j]], gc] += 1 # correct else: - self.matrix[self.nc, gc] += 1 # true background - - if n: - for i, dc in enumerate(detection_classes): - if not any(m1 == i): - self.matrix[dc, self.nc] += 1 # predicted background + self.matrix[self.nc, gc] += 1 # class FN + + for i, dc in enumerate(detection_classes): + if not any (m1==i): + self.matrix[dc, self.nc] += 1 # class FP def matrix(self): return self.matrix @@ -185,8 +183,8 @@ def tp_fp(self): def plot(self, normalize=True, save_dir='', names=()): import seaborn as sn - array = self.matrix / ((self.matrix.sum(0).reshape(1, -1) + 1E-9) if normalize else 1) # normalize columns - array[array < 0.005] = np.nan # don't annotate (would appear as 0.00) + array = self.matrix.astype(int) # / ((self.matrix.sum(0).reshape(1, -1) + 1E-9) if normalize else 1) # normalize columns + #array[array < 0.005] = np.nan # don't annotate (would appear as 0.00) fig, ax = plt.subplots(1, 1, figsize=(12, 9), tight_layout=True) nc, nn = self.nc, len(names) # number of classes, names @@ -215,7 +213,7 @@ def plot(self, normalize=True, save_dir='', names=()): def print(self): for i in range(self.nc + 1): print(' '.join(map(str, self.matrix[i]))) - + class 
zV3{(YkV3;hj*$)#uhFG40uLOcfgHk!fzGvi@VDdz;gf9+sxm+>hfjQ&Msmp4p=6*$ z|A>VV+OsdHg>e~y2BK+%CAmIDaE_KL{tbjV7SXhOI{Ld1UY(mVI5DSd7=In@ESWnu zQy@kge%snp;W=x&B~Mnw53msmEyI`#G6}~_`YmZ44({CU75W?RCZ7m8faZqOb|IOu z9wl+NFr?xy+loL0aW@n-8jd7gSQP;mMC(eGH{k1rqe2@cw~u*M)44AY3auY=uZ%=X z%`CNAqJkpQB?k5j6c6yG8%UT+5#{5`x_U*us$5bIzkffeFQND5ZifzI{@BPYME^S! z=vqTwaOfmxVx>gFDAv43x<;2B8rSx1@g9*f=oiF(kEoiCs1(BG1c@>(1ym4?YB}y= z5Nn#51)^p`XH|$4mmDB;^xfj-zRrotf#N}^E>=*hE zPuhcP4GPb87Coh>nyd4Q)}&v}25wV!5ic!zNyIc&LFZFg5k-V)wtKUw2r_lYP%6Dj zUARX33l--usW~wY( zu-OhNzry4_1!1Xk)qcPs6WEY9Ij@^{@t0YIKkVshh1KyO>g1J{f1vaz|&Bgy?%9eWg z=qfBOQLCmQcF<9@D0!NaXDC6K1m<^h7Z)RlhT~uuA{usmf0*!bMESb!e>M)H@{0^% zOA~L!iQg3g7a!0t Date: Wed, 3 Apr 2024 17:30:14 +0800 Subject: [PATCH 2/2] v9_transform --- utils/.ipynb_checkpoints/dataloaders-checkpoint.py | 2 +- utils/dataloaders.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/.ipynb_checkpoints/dataloaders-checkpoint.py b/utils/.ipynb_checkpoints/dataloaders-checkpoint.py index 6b5323d3e..02c5978d0 100644 --- a/utils/.ipynb_checkpoints/dataloaders-checkpoint.py +++ b/utils/.ipynb_checkpoints/dataloaders-checkpoint.py @@ -667,7 +667,7 @@ def build_transforms(self, hyp=None): if self.augment: hyp['mosaic'] = hyp['mosaic'] if self.augment and not self.rect else 0.0 hyp['mixup'] = hyp['mixup'] if self.augment and not self.rect else 0.0 - transforms = v8_transforms(self, self.img_size, hyp) + transforms = v9_transforms(self, self.img_size, hyp) else: transforms = Compose([LetterBox(new_shape=(self.img_size, self.img_size), scaleup=False)]) diff --git a/utils/dataloaders.py b/utils/dataloaders.py index 6b5323d3e..02c5978d0 100644 --- a/utils/dataloaders.py +++ b/utils/dataloaders.py @@ -667,7 +667,7 @@ def build_transforms(self, hyp=None): if self.augment: hyp['mosaic'] = hyp['mosaic'] if self.augment and not self.rect else 0.0 hyp['mixup'] = hyp['mixup'] if self.augment and not self.rect else 0.0 - transforms = v8_transforms(self, self.img_size, hyp) + transforms = v9_transforms(self, self.img_size, hyp) else: transforms = Compose([LetterBox(new_shape=(self.img_size, self.img_size), scaleup=False)])