From 8a6e80d55695ecf531801e58ff55a16c885676a0 Mon Sep 17 00:00:00 2001
From: Bharath Ramaswamy
Date: Mon, 28 Dec 2020 17:25:43 -0800
Subject: [PATCH] First commit for AIMET model zoo

Signed-off-by: Bharath Ramaswamy
---
 LICENSE.pdf | Bin 0 -> 161565 bytes
 NOTICE.txt | 232 ++++
 README.md | 328 ++++++++++++
 images/logo-quic-on@h68.png | Bin 0 -> 4381 bytes
 zoo_tensorflow/Docs/EfficientNetLite.md | 49 ++
 zoo_tensorflow/Docs/MobileNetV2.md | 50 ++
 zoo_tensorflow/Docs/PoseEstimation.md | 53 ++
 zoo_tensorflow/Docs/ResNet50.md | 62 +++
 zoo_tensorflow/Docs/RetinaNet.md | 62 +++
 zoo_tensorflow/Docs/SRGAN.md | 76 +++
 zoo_tensorflow/Docs/SSDMobileNetV2.md | 142 +++++
 .../examples/efficientnet_quanteval.py | 170 ++++
 .../examples/mobilenet_v2_140_quanteval.py | 174 ++++
 .../examples/pose_estimation_quanteval.py | 474 +++++++++++++++++
 .../examples/retinanet_quanteval.py | 181 +++++++
 zoo_tensorflow/examples/srgan_quanteval.py | 297 +++++++++++
 .../examples/ssd_mobilenet_v2_quanteval.py | 447 ++++++++++++++++
 zoo_torch/Docs/DeepLabV3.md | 62 +++
 zoo_torch/Docs/DeepSpeech2.md | 51 ++
 zoo_torch/Docs/EfficientNet-lite0.md | 38 ++
 zoo_torch/Docs/MobileNetV2-SSD-lite.md | 81 +++
 zoo_torch/Docs/MobilenetV2.md | 70 +++
 zoo_torch/Docs/PoseEstimation.md | 41 ++
 zoo_torch/Docs/SRGAN.md | 73 +++
 zoo_torch/examples/deepspeech2_quanteval.py | 296 +++++++++++
 zoo_torch/examples/eval_deeplabv3.py | 133 +++++
 zoo_torch/examples/eval_efficientnetlite0.py | 240 +++++++++
 zoo_torch/examples/eval_mobilenetv2.py | 156 ++++++
 .../examples/pose_estimation_quanteval.py | 499 ++++++++++++++++++
 .../pytorch-deeplab-xception-zoo.patch | 176 ++++++
 zoo_torch/examples/srgan_quanteval.py | 197 +++++++
 zoo_torch/examples/ssd_utils.py | 82 +++
 zoo_torch/examples/torch_ssd_eval.patch | 123 +++++
 33 files changed, 5115 insertions(+)
 create mode 100644 LICENSE.pdf
 create mode 100644 NOTICE.txt
 create mode 100644 images/logo-quic-on@h68.png
 create mode 100755 zoo_tensorflow/Docs/EfficientNetLite.md
 create mode 100755 zoo_tensorflow/Docs/MobileNetV2.md
 create mode 100644 zoo_tensorflow/Docs/PoseEstimation.md
 create mode 100755 zoo_tensorflow/Docs/ResNet50.md
 create mode 100644 zoo_tensorflow/Docs/RetinaNet.md
 create mode 100644 zoo_tensorflow/Docs/SRGAN.md
 create mode 100644 zoo_tensorflow/Docs/SSDMobileNetV2.md
 create mode 100755 zoo_tensorflow/examples/efficientnet_quanteval.py
 create mode 100755 zoo_tensorflow/examples/mobilenet_v2_140_quanteval.py
 create mode 100755 zoo_tensorflow/examples/pose_estimation_quanteval.py
 create mode 100755 zoo_tensorflow/examples/retinanet_quanteval.py
 create mode 100755 zoo_tensorflow/examples/srgan_quanteval.py
 create mode 100755 zoo_tensorflow/examples/ssd_mobilenet_v2_quanteval.py
 create mode 100644 zoo_torch/Docs/DeepLabV3.md
 create mode 100755 zoo_torch/Docs/DeepSpeech2.md
 create mode 100644 zoo_torch/Docs/EfficientNet-lite0.md
 create mode 100644 zoo_torch/Docs/MobileNetV2-SSD-lite.md
 create mode 100644 zoo_torch/Docs/MobilenetV2.md
 create mode 100644 zoo_torch/Docs/PoseEstimation.md
 create mode 100644 zoo_torch/Docs/SRGAN.md
 create mode 100755 zoo_torch/examples/deepspeech2_quanteval.py
 create mode 100755 zoo_torch/examples/eval_deeplabv3.py
 create mode 100755 zoo_torch/examples/eval_efficientnetlite0.py
 create mode 100755 zoo_torch/examples/eval_mobilenetv2.py
 create mode 100644 zoo_torch/examples/pose_estimation_quanteval.py
 create mode 100644 zoo_torch/examples/pytorch-deeplab-xception-zoo.patch
 create mode 100644 zoo_torch/examples/srgan_quanteval.py
 create mode 100755 zoo_torch/examples/ssd_utils.py
 create mode 100644 zoo_torch/examples/torch_ssd_eval.patch

diff --git a/LICENSE.pdf b/LICENSE.pdf
new file mode 100644
index 0000000000000000000000000000000000000000..eb5442032dc97df082aacf1ef464438342746650
GIT binary patch
literal 161565
[base85-encoded binary data for LICENSE.pdf omitted; the encoded blob is truncated in this excerpt]
zp-?zBjK(iN_@wRibv?G$wU^zCz&n&f#k4d!42550)FXj|4mG ziJh(a+0`%!MfiqU8A4+GR3}(mEK-W$;3rLxOOQk&qutiMoklJ6BATW}_||s5|57nB zik%Qq`RR}`b0+z4s|8bcrJP^5-Qp9`Gx2>tR{wn8AK87+(UaIGnY!NHV2#~O9och# z&7OtmqM(wfIr$S-B%&1@S`&#@hoZdTNx}4Ff0XBsCW_H$G0GO9LK;Jk{+slr%b(YZa?x1RZqub)=?QPyPdAj4;|`VvnHvuS#z14HxkY@lb0FuTs8EhnJu}K zQi2H5yV%Qs^ibH>f11B_MJ8IFfmWxY)yXI;q9}sS4lfC($AnA{p(iU*t`Ze8D4RjW z1n^jj$+k+CVI z(Y4WZG#42eEKTwEUloYVft`W$%s@0b5ak7;;XrDH7)iz8$71N>F(PaS`ODnosf50d za%bXO>N7nTUm#t~mt9?X6B+3@Y=}6454Isn<&YGSNW!H^$wDL{3lUi{ z`R$k6O)_xQ%MBKG%Bf5oi5da`A)#R?mcRWK%f9@{*@STuCZ+E@L`NFC+r~b=XHiSv zEHq~IhDEDg`%w1il9V8qoSny&b<}q42xZf^qKX=0C3)Xq63o_c@S!o(;e6-(T-2U} z7N)I9qdP~e9ziciK&#`?QVy9or039z7}Obo7KEclIjWYUwKS56gX5FN#8Z?-g>F(I z6_q4U+K|K~C8dvxijRyG$5ZKCI-Nd-JiJIzGDbFfSR@rCkD{ZZRKrwZOyp?14ILI4 z=H7<#cIO}S=AXmx+=R;I{pMoO^>jnAp@gKIky|ILl#cn5@ds znr(Aa=)3ybQuLEGX%Xplx0KTxKx#3}jX5B7AnY^8Ohc#!P9Rd5FU!!rG@F254xt%_ zmIYEsh9GnzC55W+#6e3DvLj?f2+@Ho#gKVI^BXoeRgNP!%gDj9|8hRrk$4w#$u$W4 zo9Q#*>OA6pIzf#MpI7W zOGg4m5s}J>NH$_SL8S@fag0U>X><>bnrJkgM%gq=e%&mJ-v1_-aUCT*BJxhM>aq&2+?u) zbuxU&$Ge^J41Vigu!h2SjO+LxT$kkm3~bQ?T)lsEoq6^VWtcAr9T?zq{@qjlp+SC# z^;O89#x`P_vvPw1%hGzgVLOzmU6vR$WfiZF87&o$Kz_$vwJ2?@UMZj8x|!C~QLclL zsll=dsJ8D@Z+!!(vH8)y2|lbJivkijY9B;gBwXhQ7#Svj1RaK43l1hvru2O{6Ngs$ zhxl1gd{JKz2aLlST}g*r+3$ZJr4Ev%B>n=Jx4B}_IeLTZ&Xn@FsFEDiin+%?bUO#! zgQ%(b@p2K%vgj`?k^#>IsZ0smpr9afzfvKFO)U$jWqjt5(ef00`S4;!2L2HMgy`fDOu2a*wt_7RrI$= zB+Eh7S65Jyr=TN*$*e4Vy{!c(d_Z4fMARjU>Wia9Pf}QN(uOZFuOe;VxZ5J#ZLvUg z1^q3weG6>~xNW#pXbXdJ>*-6taV*kaT}C((qNH^goEDeTA42Om(h4Jox<{r$+wElJ zcr;TO8R(I<4Cq?P$O-5sexyg%5is&LGBO|Op-uWUjVL)8O4>6$?HV*qpd8|%oJzk! z-|q7&&zG(ztqR87nhCA9kyaQhv<7)vhe7Kq(hB$rt$v=?Flb%xbC}11uHwDJA zVvE^%>}K{IUq9c6MHeK=emdzY{}*I&0cYg-@=p1u{V8~|;$~$@2p6(9R24Qp?DCMu z!fPY$kGL9nGU{CP3o%Q$mxtxYr^LUUaC_p3Q_iBin7j*P-x+slLPUO+rcm=gh2x4Mila)JCoP}+rzzSB(|=E@lvX}j`D9gq zCl&k)X`LtS`)AWsYU*O`kij&V2Gjr2l<~Wy^1(Ei{sU8^cCmKrU>Zz=X)q0@!8Di# z(_k7*gK01grhg{s9`Z^*4yOO8AyMY#|mkI{%mz>wXXU|P3Zq)YN@$r+OTQw z*GATEm_B5>Zu-SKd)>mirFCoT?ylQb_h8+lbx+kDt~+*<@ut7e$eVFshHK`CnJZ?E zowaw?8?(b^SIj;=`-eHx=X_w8ZCGn~(QvYUQvH#Jv<7FxQw^UO^~R;f?Zz{vB-3P5 zPotvo*2WV}Ynr}n-qw7?Jj6VIu5RwB7QYt#zajl^{8DX~;rOoM4K4W_{~nEulfMNxV5^AvvH4!?95L7t$&c|`e<5<}ia z44}6Qd4?*aZx!;aS05j02>qgv_l5rN33(A^^z;!^YWhbZFQL}>_zHPh0PC~C{hLXV zOmUBpN0g6phmfZ!k@B37XQ*MyH-tRv)yIeOSAH+#eJN3h`?sBnsB}*sF*QW7O~^~A zq7adgmx<_*a{Q(|%K$I`$O@A8fxJ9&7RmdPdMn9`Nc}>R7nA%Nl9zy>(e&p+p2v{S z^BD4Z9z#CQdiCKk_~@DzK=FZSZ)?^W;b)@2XHQ5lq)N~f|Rr>B}J6MRphY?Kv#oKz>(MrsP6WQRQd zG(f$X^hkvk4P}AEQCg^Lg7HquK}sh0HbL+C@M$DHWspjtR1dW#ssm~&33V&*b@M3) z>YYHl9q4dC*9J7rR0HH2Am0XUb`KxUBfXl+gj}MhlufCK3K#yn z@oV!~uwM_{?gS~!Fwy}iXh=_9Bexa?A`|uiBjJc;ZXwz={!89-ZO^;`T(J)|0Uj2@ zhoh3AznN%Y^WZj|sv%f8Jo;nl%{}W|9}15kNgb!`hSbBU+)L~Rw^W(&;fm| zfCsi?BP6q+(@2874dgKs4i%)O8EP<^4q}@s;z@SWVkUD)2cP}AW6aX2Tq*}FdcB8Z zz1u;SHlii3ZzGY{N$fYBV9b%(*hw(uF>re9>h6u}Y{a`T`dB6tkmWEakePEM%sK>3&16nl$n0^?^lP!lV=N?>4E=`> z@YNer=Vka;vGtw_ZvV6s-*@|)+xKszft&mFa*grw0jvqHi<9tk$3{DuJ)OjV@ZV{$ zl89mWRZV%{GYs%5e&*YRPhKlNk7H1q5Q(s~Zfm;f;GQ_%{g#jNQQIn5rk}cdhL{DE z?Ibpu$sBeHw#9oD_bSmyX0wH8=|*{=SF4C+3?y$9{NVbCcI`|^CXpTMnMb8TGLeXk zd9;wYWg=EJKpjS?33|BO(gd2buZfbwh1uI5EgT-i+|vBttV91~+~*>%p;7Lp!9{tz zc`nrPc5-`-iL99xVZG@0?%!I=++O+X>lwCOm1pKSymkh>y?HM%3A~z!FIxq>tBB5a zVV&V)4~`55fmVOSbMk^MnvjAMdL;Uq$vAiK z8%H)*7UDLZX2pdytff9jh?c}%R_M7;w|I=YW*uL zFLgbU#U!j$?H*ft#}eL#X@bQaWR_SwGPvjUKwtXjQT1Noc`dzHnt?vuk3LyFsgfjLAy^Bl*NU`*E2IJsP~$4(D&gV?Od5i3aW!FK<8;HvmN&i8wdjU-SDv z_bStmk9#HS$H+UT3>@Vkv4Xc%y`cAv*IdJ|Sk&&(-a)+7N~rU*h+lcVSM-0?1KjId zDOE&TE2&~AO@q}^OKQrX4)0sF&{6}XLZ~f-nk49<6WWuArKXW}s1*8DlQoW~p@q*1 
zC{HIbu9)IT8J8wQ{|cam#}-kwghvrj(~+K9LVF6-mcw_E&<~F&fSPJ3<9rE;f;_(p z7{#9!%7itMmsAh+oJYffQk4;oZpo%VNelE#g;ovFE+h1?{Fr|+$ya#fEEXiy5R5Qo zOtSzaDkmjeTMge;&`(GBYlzOgv=u~$Vrb`eDIyYJ&Z&Z~d_Rm&jnIN^gyk!T)UTC> zU{Fe=>Bp}CzN5@L52Rt0X1f8U>`JC$_?^D#4aoqQ|vdrx8yqBE2+3 zW1YuCii!8~(!0Hr=TYe;3C|DP)>{_0S98BHM|djk_G-b>*W-dw*APswTsjZuU!$D5 zhs#i>XLI^y6F0?XwK+T6Ok9D@-e$8KoMxLfmD5-(oYvgb>~wHilfz`6Z!)HGGFhpq z-frsPD%(s}JswkT=(M#vIg72y+`u*1+B)rc42Q|9Gr2_km965m21{ErS8A{}*cw`( zc9N~x%9XYo9hj58+3esf-gFvmc5b4%-ePVrSU7<-K(J5{YFou=qL zln8Kzc0-5R+Qd~hHiC5ANKR|3H(R+1b3?PuVsNOqDudH*ZZI1-oq_1+;L>w*GCUkO zM|)eF#SGdt+N{o0Zn~|VYc+Io?VzUA;VC0PeBHCr1j?MCcaZs~1SODC6X9?sj&t22S`wyE6=T#dF4tHow84n)tuLj^7d>ezr4e6~B=z*UVVtS0W$Y_hZs zL=>ivwNvPh4FUuKpJsEt8DvhC$*^NJ+AJ0uaRC8p6<2R?fUGvF$DQ3qNN#pI+w#)V zOxDy6bBnpnWHcL6ZT6-#TuOtUvjxW=4(26p;lP4nGB=*7H=1E@2tCSik2f*Qb8Vm# zhQ>7CWP#a7;2t=aF_Z&lvrJZnjpHER4|)RxCK%OZHvlq571wBonF-$0&}^_bfzB9X zzzQr1!#P_$%t$K+#XzEo+t2=aEwDfahrv$Gjjd%2rRa5pTyyV+s}KjnF2T6R950tYgQu!btG)n+s| z;%^fHq^%tkax{|(3#9AY@#J;jO2JD&!!%IbVS)$^6tJxYlx`?0KjDETKWPM{i4Yyl zw$|U!5l@45yA^~n5duaVgk2)bTvLP7?O^?W49;&flZltdySSm=Hs9p6DnR_hQ-;Wc zCs|v+rwC1sW&>zdZyGRj4PJ`cF?)v-oCybBn7sT%|1ALUd@C*Dbd|;WX&P-2SEl2t zw3Rhwg++y2l12yRBo#NUOkY}At>>VJR#Ty$&Q%t3nu_V%xQQ^Xq7oLLGMI@#RA0$qh61UwA{{0+rAS*)3PsJtvhp(h zbQM=zrmw&>i-D+ytI}xoWd+sc8ZB2bRmB zC~&$`O?f$CrKtw#wM4#x%BtzwvXW9gS6W$KSOk?5i$F5X#PTAZ6)06ut|^ly1iIjIwXUdNmck-UIndJK!QP!yVP9h-XC!=Hv68b$ zJ=KY1aN?K?tJFmC%)*HO$K6uT2@S6MJHTP?y`QUZ;f!E#L=nP(aAH4QHc}0lt9S5Je(Axl!5AW0Yc-4&dp&blG z;wL`e+R0O3n1=BDI7MAeqBc@D{)dF`Wc)O@(PC{9@}UksKOTODY3!|56<5${x2U)h zyQxLRl^dK^jonbM;;wJQSLXO0ggzoX;3w)n_*U}lqV839McwHu8L^^t#ZNLMqPx4I zx}fS-8liNlTH@=I62QzCq-Sk#qGBeq+oo~l-P)kN-$UK~k{C8?4GS{nfo zFU;d{Qpb7`W0h}jPz;d=4{3VwP4wtX``&$e#6aE z7rP}dWzYZO$5)BKFREo8X^{`e)j`B$s##wJU9C%3sDp4ptniy=a5S5(O-`FNU9Jwm zl_G^mYcjUltj6?cbtJCwQ-t(iO82_ckRGd!!R?G+joh_8eD?$vV+>)HEjL z(ut-Mx8HW~l>@tOy{lKZ@9TN5EUo^_fz*KSmoJ_u-(q@QMJ@lOq@?O{mhb(s;pgtx zTg%EbE*x6l6Pp)%)#_*@DS;FFUriyYS(}m$%KmMc!OK{m7MhkAL#jX`_7Jj4|id z9g(bfx2W*MsoPKNK0jgYR+KuS@#xiwJE^B17jHY-)bm{L&d~E)Tpi5pyT%Tm`rf&9 zle_gJ4CV>)GMa%|^k5f~03tr>D1bC7fDL7p=c7B{={+$=6%@WXxOK-q>$=zOdyIHc zR2(}*9lBTlzo~$nGF6k~>Q6SuJXs}yS zTitd_ZLqbbwY8XWZJKbq$C2hSBeo(jBKTw~bWzv(ieXy%_=pf<%hi+ArS7ttUNKf+ z(9zLxBL*h>Z%}rs64^Q7eV#|SpwR>NC|1|s1mVKp94;OFk^A$h+ zrw@;RUo-oIGau-8-=@&!CI9kR(%6|_7Me00ekZQBfBJpN=K<5ku4|fq+nsawW^b5p zj>;{2{H5V%4@@6=)48VgGah>DiHN!_VY8SPcHEKG&X5b?2~S>elxdHCY*I9KGuwKf zdgPnucfPdl-E%#G=J%G)81-4rmsh^7beI?S6~DajQCHGgLxKO@5ld$JrH?x)`tV|R z@c38i=^o9#znD%=Enj#3h3z%=xPtE=JKc3tWz|>fwDO&Pt>f>0+kgBwk*n2Rz6gTY z7hXZE=Zn>s7cQy#;wlMZJ>IBEA&A}jyM&5lbs|6MW4zmqCQfH=A~$=$u=sv-I*Dc3 z>YViS3^k-td@SoPs-3@!d_p_(EA9U<8m(G$F#ee6j;)J3LwXbE^x9Xce);~vO{+E* zA9(Q8?A2*`nW@otF8Jl|3qKCFt6p*ZQ}XPzsjeU{^eWhL}~ehqO4D+%ldBKe&T=X&M*Jrt*wDE>vw;$ zEbQo8Cm)DH$NNejR@{}jDYi7@@{931sVAS;om^@ie$$dmIo8F0KYT$U{mLDU76T$T z^Wi1|ha1njTrNhQnaX&Dt5a{)FFQRa_sgqIFV1-5#NkH|98+voYjJZB3$bWdiMl9V zrk3(63p=Hx!ij<5@wK_c`r6ET>sy@qP7&$7pe$>cO8JRgFb23L~j5NaH z*J#Md$QsqyFdzh$T8*Dp`MlZnhp_DIVb8Qaa z#EprV$jDupwVuk#dY1fHyQ)?mr+Xe}kUyWFGIW_zuU0=5I2!O53LhIfqt5)x{LNV8b#;H1eWMZkqe$$mKvG% z!|bWuATg^K!q2H9P!~nxyh=#VeO0kH%A3bAm%}ecmsQBleV1A_54(9o(pb4lQPS<4 z5y^IQEp=SGrG=Sdr6tM7t$@7;WLY>UocF^`wImF7J^krkOz}Z$96)&)kyX8U=i5_? 
zbU&fIwpg)vCx!FHfUr}!*>aM_YlHD+qHt)BR&k}uO*n6k0q`J5O}mN|_-rRP7816G zcl>Ux9c$HoXl;MVM7PfHa;`K{|8_R755*Rd&@@01WuRBhO14#mpro% ztCaTq9Pvu(-&v{wTyB9%*;QtVTDJGF72Gl2S-J`AICl<`81^VIlGCz4y{MQ`eW(eQb{gRd8SF_Nh0w4<&4y7P)wb^;L_2tkN%#+(HJ+Je9Jud^y?`})JHtKau)~}Y0KDdo%gK0Z zpyW^~?#*>cWvbfN?y8`DWM7x2(IqI2$cVhESIz2ojs9Dk-TIlT%x zSO=-8j}Fu;aOyyo_(52!)hAuDr#N%3qHo^WyO1&GN(Qoxfky+EHA`;CR>`Iph1Y7} zaZs{+9I_i(V{y&Iik@9EtzdoGqBb=>oxQ2Fc0PA5`503*`9uTy0`oX8sd%8@dlUWz zzyA@+N#()&&Fff`$jV9-4&c0JeW<;{$Wn1~y~v9rhlKxhe~Qql_1+{u4&h0{S=z~# zcb1Wje7Vx<8pwIgQ?h?6C`?<12Jw#4W%X`!PsLEkxU0PJxc(>^U9usVSswawQyU89 zj1w`NuJI^7Z91H+DxJ2>7QOy_Yc;wqX-(!WQ?}Z;GgdgjnGczp8e)qGzrDeZ~Zdb}`dP5w^Hn`J)Ft68`0BWETjsxLEhH6YCDc~fv1Bg*Bi z^1e{Z6KRA?_c!w-GB){HPJ$bPPI`5^Gang?JXrwGLw|tO*rj#cAX0ttI37sg!wLQYmA3* zzc6Ua^o>CTvN_?5YnP4aX9Q2OL3tQ`NOeKAA+{a1culN%aMyJmFqyW-j6M2XhAYRx^8>)@*_? zc1vNdS;s={4R2c1B;|%3V$4}}(`##$4#gtS6!49= z_`Qz!aP9@O4BE12?D2{!(UWzE9+NG06EYMpJq#R|txfbUtk9vKRSQ zdAy;_siw3@;TJa0af+=G)*b7}6Js-P8EB$DkCY*2u4v%2+KPP;}8B1MnCluIuLJ4~l5$=ooCw zhNoY|x?quNI6wvy)y@iC)M^3(TI!7rjV{Yi9ETnd2w(>L^6v#KsSn4{ZUbj)RutAx zeM#DmAKM;Ka}4~M#nOr>dHh0&pI4NcqIF#3dWgi&E;4if7>{;@C)M=@*KsZwe*TTv z2$j1xM7CQ>7EnMbzEXwyPDTJsvDw|pi_!SVvD2$Tn7r*1r^nZIHRz^g%TWx+2x!7s zAicYr=gnQXTRfV6B!GGpF_8%qAFW%}@EXzgpHp5iqB;cu)0*iLeztIYs??|`uR6{2 zK@9vt0Iv5)_EiPhtrIBs;f{%jbM2iR8vW&phA`iYl&}#M#5WYqkT|R81`bOsRS>*N z_H7VV9Qvv9?2C03n1mq|%VB5&uUqwXw>4Z7YG4)SS*2=44dTf3PP|ikJu_`tf$~B2 zuSE?Ly$v>pu6x4QFAk#5URa)@VI# z&QpbuXlQkGLA(gmu-^_nE0nPOTro{i25Kc@0369SG8YZLg15N!bVD6>Y6_;99J0@i z8`QpM>c477e}?KwQd&KrM=ly%bliuDsJxoNS-PCLr{-J9i%HDnuRs!7GhkZ{m%zh_ z8dfXB=Za|_iX&U!hU{%Rq&1?lGm(Z&)P!K5_uj!AmnQQTF)O3zYo5SV#d!Z<-1b?v z%sEx=5MokN{{~-;X|Y9JxqJSh=q(gb=dJ{1K_Eyz~xdd-{p ztYdQ427)z27mA(SI6~-+MQMv;Edc|s;nI=?@Jkp85jaw%&DD64P0p13!%c=N-mVjSGfJFpN1RH2g~xd+w4 zDaVSNIffX^y9;Lu^;uNg`;1I1<8N7GX8`6%IxSeXzRWUQ>gRBD9OIkLn!IxmLN_4F2av|S5M`gYs)vHVNq=q2!YGe z@e{DpyO@I?O9mbIy$wHR3dA{h7ysDcvzZv^hn;~a{L(GTceP(m2O|Xw9t3*g4G#rr zOYugRYq>IcWMiNaTWo_+`#z-?Ddfl;@LEE+5!i;s+!Qc)^y&p*6Dg~ zrWqh%97J=9OFQ&IM0ik#lab2l2096LiGAhted0IiC1x*afhh^p{Z2A67OursjhVTm z7EjX@-Tet&@(R3Mq+B}7?t1o;!4 z1$j|Te%eLRv5fLGj8yQI((@R!d|WM@7uOv6r6G|9(Zoe(eCI|NQzlsmG*pDplpB>2 z;%sd`+`P7$`8{>$lSeZDb50jBaw^cyuTlRR{b`NE5B~5~%2b3)!@4JFC4lW(KVGg! 
zSgWjz#y)z6WPsCNzC~NVDg{~TTXN#u5%9?UNZb>g{lwCeD$&b(NL(3^^Bk;rb6@56 z<}tz*<9PFBTt3EXeK0n`)qfx9HS$D-@16173})(L1^irkDVXM2FpLin?DYM9paCBP0D*NF+z}G;uvr={ z%AjWYMf9~>BWbS_iUOsoXjq}7Lu<+wY*r+6i2jk8X^v|xYd&S$ykB6-9vD7pTGqQg zQJ0e8R}GHg{Nv}5*l)xZJ$h(xaBW~=_xl@woPHbs`)@uW*P~(LYDYF8kKJf;w?$kbfJpZkIY-kKmRv-bQH;;jEPU zT3SfbmVSu+HghthuJfY}1%IRofg=SjT&V`z1%u6Oe(vqp4C=OE74KO0=s#N5HVm<{W)zfU|Rpa4$?_s?ONOAvJq+w zQHhrvMbGx!6tq(}7)Qc%8*1uGtf0<3f4q?z$&xS5XC3Ev_wC-}S8t3h94CXQBRZ`K zp8*{GpFAm4u$GA=kzA^MS0y;r;Zx}wwfwWV800;#&K!{GQ$eERNDOZcx?^*vNR0a* zCw(Eg=~M^2cR`cM0Cg!TqS^JV;P&x{qvNFWw2?oVQ7zOmSgR2wZ)t6h5}vEg9KpTc zPJ^mndP%9HChj|o%PryH2mn*t+?u;GhlumCK4(<4TY!`~ZLTFjjJYkQG?*|1kD4Ns zd!xsg)RcSGw4d?KWcMI%uma=wuW(-ttqklP>GxL9*15UqgiI|Q4DINIEOZ?V1q}7A4Gifd46TeEOb8fRn0a~OzW&#(;*w@4 zJ8I=m|E=wi%43bE474r?4^b{#GvpUOQX%nQae`ZU{d9n)KubI*5DJ}l{QVv z4TD6xbXwVqM*h9Rv$E?n6Q0}tqLV zcba;}n%vXm{}@fxmfTgab!|0lc9FK44U8Q(k-=UAbgQB(UVDI%MTQdnzdSWq|5r>h zF)-7!|C`p&+nAMs`F}8b<_YVeFh75};-1M=*+zPat}DeT@tqMo@DCzrbecc0F9l)- zDW4grK)3O4a8xP*i0I#;ib$n2NTaz~jL8I~AYvX$$i}3focMDG0Cr+ z#-Kmx&MmzkA2q$(ofoPeR-*1V-OuY6HxAEWK=cGqLIkoS?Ue^&-wapc?9x5So@UStj&BSrD6(FA@XIHLIEv*r(%tPww-sX9-v93*t0O9D6JgX9i?4@TV zM^nH!?zrmE#eoI+5t4c*N5;XV{YZk|f_4=~InsmZZH|fx*UAY7u%at3hday^o9dq2 z_hZSmK&Kk&fQtF4L~v8Q%gmq1XATEr$*4}8n~Z!&fJBz*C>g3{3LIB>4qqcMWyA1M zkznU%5V_%;cLCf0csW10Hc91MjIfd3v9F1FchgsVbnA@h>uQFZOk~ni(D;kkCtUBQ z7ICiJx5s^;K3~vi@dq8^g)ZQk z3)lv^;aJRiuMBAo%Jv&y&Um(WB=Zd(PCOmBj5c-mt;xr95WG&=gKF5;R zH(5Y6#ry3UYJS=0_p=sbZsea!Vb7kA388QXST`m@@d(D7#2Kc-X|XzG zb>pdy_)j3hmyeJ+vSKyQAhY>L(^!@uR(>l)7-p$;iKvmys1}q!W%;=P!(wmRN*s_6 zGD{^d!P~eG-!^RSIM&j{X>OK4RBEbBhl^`=)lw;#KwX(E6U|=6Rwc^`i;+Wa#~h{rZuVblj?aZJf4Se`1YFZz#x8 zqr2^|Tyd0ii8Tv}trtwiMCEQ0>8^5dg_IHPntKB)gdRzO#Z~cCV;>ryD4j8?;aJZ56M)vXjrStsc*s#{@?-Kj1OlBg1}<5aI=WzH|6e7n&!ypOQFlj=C|jAGShyGCHj(oMh>1 z|1+1y8N+d@=MUVkiPR&D6P=LVZ2hWQ+mcQR;;?N!iqln7>OAEI zIovEf?X1p8iIOT?X_q9%$!sH*MN{DV7?q~Jix%H+f;kA!6 zmgp-7sIbiTF{Z7;)h{G9Y^jf1e{e=6%hW#fEGSGXMc1js_BS-tr!+dNjR%w;GnSVN z;&<688+xjf1a5pGQyQP5$I_Np-!(L+&m%SdVT_ulom=~JVNSc$wS0o{FsH%%w3?a1 zQY*#FyoHWL@Dlz`RxDtFwRRx+f=1MFfi(`{BsjO>xY3F9RPCmprn#5%Y?^=2Nk9nB zpofTdj5))a$u6*EOi>bWsY*&?C-sxhr5*x{y4Y4vO5b3HJ#Ymr+y|>D0c2k@7*+MG zUaH8tHY)!9`=z3$Q{ln`Dj{eZnY4z+EWM1r-E!xT<-nl6++DpgQb`B0kfidK& zf-2iKS=u4ohPJRMC)Q;a=%%QW?bX1QYND<8JBQ}3(^iL+DBng$HOZ>7={E)ZW1_Ws zEiF2Ax5@gx$rzgQL$!(C)AZoI_5rP^M>WRDkB}O&J|$|bQ$v`KtY(HKEtXzY7E7rJ zrD-L=)@n&Idc5zs{zN0(URyP?nE)mJ(6xeY-4F7^bmfGA6r0MO9{gCOIoNhkep3bd z@)`5WObiKg%j08j^jX6OjV~u(Jt$-YpWs<_@}0|GcW#=(M$lE!H%N6VV}?tOCQQw1 zHRqH3PtXNnaY<{McYAk+@ISXyh#Q!lr;R#GTWCTUERvy44U8ts13kX8pr2v*rq zp4!y77dRfBWnkKywVboBT_&sU?uJVj(UqQK_%@_i!5LS`jvAJi#X(4-6@?N`c&_Ok z6*`>?lna&WDPZhFXoiP#jy7`&!M3&tixS`nm8Yez%yOG^Hx41wU2uTUiEM zL91Ib6i3*WAj@SCxtU6$uRZ9op?^@Zq?I*gA4hTD3qqH{;^<@AF{KRjXqk;WFrkS+ zl%B?;C*zWXTZ2c-3oGOtM{;#TH;_mp^n|%+yWpc^)ECCQZ${X!$3tvAro;+3S?^G- zadR%z$}k?1-SxHW6Z!Q*-ECyAk&(*`QH7)a3I;!8G`s@S9};2fTFup>@2i=tHz)O zGbb*z(}0Ow;kdq?YZc+4_@gmk^9>+UJ6O-!WZ zMRhaO-RTq5M_rH$CdkdDWDe3OF(yG)*Df4ht$;O3qNCM6=Dj8Ly7?B#&>OyO-%4HZaaJr10u2rw!wjOq2bUguaZfF35%{wG4L{WA%Q!U}1ihnCeJT!cR z?s+HI3Q$I~T25FhWP`?b==RuDlffx9bH``2e`sdC#t z?ofN;Z@<6UzT<2P>l1K!M$dp(PEz;dWr~Ixd!SG&M%mzOO$=(OLYsg=A?x4^DsK*mC3o5kGQfpTAqbEWTh$RG~FsXG+i_@ zG$x(0o9i^nWmYTVSDroD`5J7_v^(%j2~BaP!4|r)pE)z-j%dyY)Arif>wj3X+1-nz zhT{;{XIcYihT^YSm{G^32gR-^ZK$5$pXt`FQMXpR0HB*zbIg9~U2MC|rl2`6!K4H! 
z-vp2mCHa^^rVR*E!l%`{zRMyA@F3pv&0z<-5uErGLFH`liMXHo6tN-B^C^JI!U@#- z6hY=-1)C9+zzMMYt;B}L{YTdMj{YT(KcPN_4H1uko6qI<6$UiIzszs`P=8IZY>3!DZhzyD4!^=B)4dEo?{%KEm-Y>y77I%+W01Aa>nGx)yLe&g2qnp z>@$;vlM7NBk{*)aljnE}c##*y-)Un3k)XXv_If(d9C>`1urcMo>cXu8NU z4^OAb>gnv3wsD-ugU*L_9}lbKs5;Xq21E;G3AG+^%7hRmaW2`v(vFQ`Ma3R>c#?g` z?HHSmq;(=74?M9NLf(SEa=J?2`e}p5j(!Hp72bk#w0MLGriu5(*k`so1;_XMUZ)TS zvgLHEEBNZhvmLnVKD+92*ZHHND?y8gsyt!Ft>L(HoZ2$)NQ|}MnQGaZ)esNwAOkZ1 z7e)FR=atau1?L&jv7>Ao(cV+lnp2EuiQ10*JEuM0L|0pW+J*gC;0kl5KDqyDVmw8? zJ^dfp@?WZOxPA_BS%Kifb}u-)1dJY$KZ-50shh9H1eya~DOJR#Vf;Fdj99+T_TrG? zB5+E5dFQ|hyK>P!l_P*}ZCSEdKVbj!%0!;X>5@b{hAbD4gtdHOEHUBAXF zP84S#*Tv|0P1cLnQu_o(&tVht;`q8-S?>I{E%PCsJ+-~Ywuy~|^IWscrTv&a&L1N= zn$qpoM~;%|ZQ`+>eS3|I6Gn+LhqaR_T-C@Q=fCghB48fWlic~-e&6`!BKUdRYiKeK z7^6B>oA3<2<-6v(Ry2=sPnnbWII`(vwGjbzNd=FOq$|cr9pL(#az>+)AN>#p)O2*_e7E`}Ucl0Bp40Yg_8TtxwCu0LqA=Pi{=P5j8_~(8CLAnEMD;2W z@=V{b%^db@_(&FYi|E@HF>yJP7?Pa(oGB|n;(c!^AqG@@JqvfCPx=fqXG2B2`013!%8p%Rt*IX)=7g2lu}93g#6!)+=&;P!pR3J6>84ey&&q@% z73JJQ4&;_KehgB^>&K5&i`{+KFAMEwyeKv2q*hSk($$i2x}nF`YvN^TZ=mkpVAg_k zcx<}Mi!bK+TAhw^P;no~HQI)xvmB?MqE4AwHEAH9_r_?KMPa(%bT^0BV4pP{d17LW znQl)bnonunT}NsC63)TGsL|BtyzwUr2eWj?gw)-^Rfi!TRyt$UV9rKuYFo6WYA97j}*AaKe9X_QB zT@+W5HJ582y?nHi)LLD&KA*9wW%Bd9b)h{R^Zi9TeSCrg*ruNdch@p@)bYIL3x1ji=inO;9l2o#zrgr~5lj7> zOjWGTvBwQAOXE;Q9>7+tl!tj}3zu1BxKA?-ba0~D+jc9-A~wc55SfQ&2y8F6@W`1u z)pTxnLgt5OZ_Khtia$9JtJ223C=+jh#hcVpkD;lRWQsba59O%I6wIBviu;N}QwUH{ z?sK@5?xShanW>d+t<#Nv-AA2AF_kG8aaYVPRqvDeCtyJiD zDDh0#$C};@XFcRSv^Y)-Y!Yfh4&G9RwM4jlat5p}GZ_>XH0-({j^A<(*M;ti&`^$s zM^w6-leGvuhd0)YboZUls49ZUrjSBj$@vTshCREN*9ILi zl_-}}He(I&3k_3fH+q6@CIDQ2>vzAGBla-;;U@>D8>QvQ&hs0O8t>3wsM3v`SOaPFr0MBS^~svzjjxK^LacjeUQ8JTxd}3q zzs51{bM8y-gWX#>wl{a50b18TvOY{dR(%dZt%;m|OnZ|m_T)MfP&P~xxk;t!58oJe|5yg870{{i?8U8b!#??4deiN-^F@IZ=PNXoT(v-Qf-A~PAl~RwaU6rua4BKnxdq1QRP%PM}5}? 
z%VHF(htI@VqgZOVmen5g8e2GG2babnI)Koo~0@ zj||0DLqXkYa!aZ9OXd84Ym6cvTZ?c^^3l9jrbO~86VI~gn1JcKpWCKelHAl4F-1qN5|=L?FK-{b|#*Ugz5 z#;4T?bRWuIMsK*!=yG(P12IWq`?`QMg~U?qdCe28`^1yfg6OLUGq-O@Kr(?^I3^W7 z>sjMFmRcRl7*mY>-VKJT_P~n6`W*sE9f~=Bp415YHVYln(W%k&tBu{INvLYJjy?{C zC@)cAOyiKGY0z+Met_cDt>V;1Bx+ljV~!D_)H%)z&TXWDz6}Gu5U1OUzkDe=>nCZx zjbMSAy*9LQwaZ7R<}ER3dj8W2~OczFyY%_{rueepheb$vsCf4`r zZZ9lAQ1cwB(eeEeIm3vX@@pmxxodv#fF!M6=a9TS~#cXVfh{(Y_KsEpKY@`Tb;JY|OJV?YR9 z?-`l9RSP;Xd;f%xCv8-?8z@m_pss>m>rmibhs{<%QFszchRxw&r80f1ocC9pYIdTr4;wipgGqx}>uh{YL9V>}-aNlJ`2f=hbb^ ztHGenj$zXB!^7W~(5V{1pIA%@wOEMqxL+5u+S8o6Tk$b?gk|Hc!k`-k1BWkq^J1Zv z@stMzmUAA1hw^`p=T#AQ|5?>fB!XxU@0MDT#?BaUVLThYs>gW%^#6x__e(%46&Lz)od5Fu%Sx zHjjQaENrpkPA*9F{m!G3WfLn}`a;bjo6;JiqQcMC@thRt>XkaQoNOzoaKb4mjv>Zu zIi_W+oQg+tt9M2}KnCI2dwHevQt5-r1BYtF6^P2ZyjlE^X1S~8o#`QE;d^-4ds_{C ztLvaZ*US+_6WT^{NVH#*AJ!UX)1V)(U8`Dt9a8eZADoC$(u^|;hMwzI`(OXidVo#6TsK7^U7fYp`xrsZ)vqvMWVIUBGBvf)J%G%O}Mh< zfWmpfM-di*Hl)gCAN6Cl-q}EfgBWTo;u7a;cbwv<3nj3oqXZownp_;=@Pp(2QWR6JxhN!e6R8KxvB@Qn$ z4u8JC<~}@0BQ^NthaIaYzmPIT2ET=jhf-^@U^1>zlR(U}uDw>+O@{*zQxh7os<{>a4lsL({p$%l=BiQ9}^uoy>A+s^S3)1m0Bv?6eULD+OiErwj}XgzJwQHc z@sjkC(uv@zSP^@4xUr_k%NIadTpBR$@!ni7Zsj(@`(yE)M7#GzSO;jPVx6Dw22;jX zOj~PotpcygTSrbrjZL$rm%W8jR@K?Zj9NJ0;M1s-krUwB23oZtJ1@u{+aXxp@a5;Nik=y(DnT3`DN58`~=Bt{oqx8SPzEQ3|RQ)krsE zAxV+b|E@1rn_C-OBVPlhP8v9_Xry-NuUWcQP$ps~wT#|8`Cb~VeAZd(Z4rgyCy}yd z*+P^*v*l0Z+sXb=&NOZJKHLW6$J$>I8194j7vC}{Jjdz&7fARR?=p|{(`#7aOV5;Xun zhT*FdPJzQCCJ*_b)5ZhoURA{xcNOpWPBZDXw|TE%{n*~AJ8dyX{{a=bUQUPevB$Um z4#Rza`d-0tnqj=yW}imWW;S!Z2wT|+ywiT$eCbS&+w^lCF&2zO%9K8#EzMa0Z(Pw3 z8^i1NCx|W4qZmhn&5^G+Dd@F3yuNn}#FNF9#V1Vpp+?m7ZT=y61DeD)2m-wo9U)t% zsX?O+Br&4`yD17IN`lyKJi-N4s4a9$pD(mCWYM0`_PsXb^O=xv4p{N#J3j{WiBBrr zaJ?w&N?{}7xigUQvthnwG0_3EKP<$$B+kidOB>D zl4v*@yt@KorKK9^RPA2Yl7NMg5Ptso!n=En988S08ZaR@CbV0tstE`0@{g~X3=aUz z=kHHNCAF9C6FknHwUa2pb>jBz zW0J#|TcgTj0WS6nUL7}}bhrECUERje%;VL|r3IJ%hHf@mIF+Syh^SRFfdj1;T%(v+ zJZDTahd6BgC<_hUrI6r?IzWYyk6Jb1i`;NoAE@sk1fK!{tB|Y!KRBrUu9uZRbg~ip zH>NqqQlL{3{=&-A(O?sGy9;?W)K ze+$NRO6T6OBjXKLCa;N=I#E)^(5ivZ{g}R&@$R+PI5*;3yziMsLiu18+9UM&5xVbH za9c9W6vi@Q@bFk^jCsl9^}==|TeQU7M^oDQouj`w43~ZEgB3>-iCn(Gh`|tK-40op zcC?|;5k-ke4?*Si-nbrfEw`sy#5j#sidhfgZy^lB91}e=^Zx4i(~%%4o^N=A`-Eo~ zRiTR0Zdw9cd6rJh+6t&6AYJ$#IOFVVpkH9EhMc?YL zpBp{)c`z|L6Un3VDA-~=QL<<+G8>O27~jO9C{vj9P$Gp0D|eIQDEfweHOdvL^=qHW zBFyC{Dqzg)aeBf)uJ2l~Kj*n_s=vTr;Ov}Cv660Fd~R1FptR|AD@ljs2v1H-wg4uH zC*-q}`VEA&lVvLU#>FsWdk40MG0_w8S@Ov!Ur={74j#gpau6kr|3^+7P)2_ul}uVnG#+rJ zWCyT@AclWa%>>Wph=@CUX5vR)`IbtL+w zzH?am8Pg8$Lt{=%_SA3*%0die0B>>f_jn4NypXLjU#?YV3WbCoc>)GCwgZT4$dF)& z?l9FT`Y(h2-Ru`ctXzvm67_{GoU`GA#d*qV} zJFKqv``36CP4vD=X0d`VH{=pzMB=Xy}E%8-*BX2d#{$8b)`0c8a z+7hu!YNgzm8WId%Yl76qaLH&|tf^wAexN4K8jL+xf53H)op!2u%5&Oo4lL1ulDuj? 
z^D5jty@8FCi`}C_RdGJU#AK*Zgg25&M5CUZw^O%Uwd1nmvVYdOSk_20k8orR9`GKC zVCXEQGTt9e%u4P?Oo0p3=~LsIUiC37`29=nEIyn6o1NSd#NHPjmD#Hi?aB420IaF> z7cpRwmxy)+azP*=- z2kmtR^-#x{r%u=JCBtKT;+@{9dTRF#-*^A9SS4Nt^VOzj>$ayWrbhTqCjmt==Ox^i zyJW}pa#rORu$!0-Iac!}e^=6FjK$PvZVY8~dGJ1m{*^Vy?DR3IL)u4LgwC;+o?7N6 zB`tF;Yt73wM8s!};wgNa1I}L^JrEhI7}sc!EHJlx{Ryv$hcpNqBoit=f*6HmfCIvzFyQOFgh_akikbddCc`^hA8PaKccVAOiPonW3^=%dn$35OMKo zA5w=e~*D9nuTBL&whij z3;?Tj+slb-;lwI+vPWfeUgOqa!e6@!f3NGna?FY@LMkt#oIzMz$l$iwx}D6NF*9bC z?zp^assXSQ^+=yix0~Rlgt4BSzeH|Ys1?C|j7#5N(iQMeYxw<&QkIj<#lWX$43`LQ z&RW+p$24tLOR5}G&iwO`#6>ij3Wzh%+B1wgN`Oki9*;oduM=Y}TsE~;gGzx?tEi!7 zplj?PqZ-XK>`=E~6hHMew|5;rk5dS9t#J);jd%vRtsG0^Fp*0qjWv_iB#mmF(sj&1 zGcH8tN|&G)K*dy8=?^x-3Jqf*rQ++|^-&?~i~Mv5$zJU;Qg46qe)s;?a!vF#gKHcY@8jS+%IlAO?Sm+guIPQZK*kN7t3u}m zHMc^RBQ#d%-b?fyVg>M}y%dha;B>;X<7gg4zy_K&s(1Hhz}UVv%xXI^*^1&tga=7B zb+7oKc)MVT+cDD9s;J)N;&*OZ3x0#-c-d_Jz7}5-$Ra6;# zWuyn*J9p3E{&#u)CIUHgF+rixTuHd??da!%<>>u_)gEjK=K%cJ$E1fq+%J2Q9!XJ8 z2ARXSXgx^F0)LECly4#A!`e)uog2~J5q6g)zP$N;on!kAnlF`r=~L8;ag0zf5*!$1Z*LkWBhI)EG3;ejXF}yswc3&SWgr2QPx~0Z zcERLs?|70?O)?e*CFC5uA2(?8FCr91}?s<*d9201R)}G^ zcWelCtGj|@26xFy$-n>1PZ@*dSD2UrBd;^lA4j&qzUWG&@8)FyDmW>SN_nI87GXpu zIp7j2Pgt?X-=jG~GJHwY65ORfV)CSZZsv__bJ*^Ayn7l2ZjHij6?=?nsznfdjnRQ1 z`k_gpMFOKNK3k?g3to6@$Ak#M2#&0T}SKNcvH){^L{BHeBwI9*#gbG9=rt(ujE22JFchlnf+_TuA$2+=0bedp?DF zR9~fN_!~?PQRri#P|`|0uBFMPSiV7>3cTNFe9-Vj0o@McIXHxS$S>uppPTCOF0SD` zK4|p3@&E*06RXdvY0V+~@xFZFf-sS)1<{359{3_|nDPtLDK!3$gZUc}9A_%)&f7ky z(>wXwb4Bz?X%~IZdK_Dh);ON=u7`{$B{+Do2?6`o{{dz|nZL~)U68Z=^8LOlK<1}4 ziLTlBAW{2j46Sk09yYmiQzQt9091}w{ftX0W(pieJxPAWJt;q`9wA-a5&3}nI+4mqm`lh(^+9q!_n^E*6|<`|A~KukroAq6 zC__FZC;;&b(MR6ZmEnZDG#6!vH`1EIvkVf55&tyiOg|QAxTC;xFDZlzcK4Qd@jwE* zdn>h>ETabdX@JagT%{sA6-AbKK?sVHQxqklsA`rj!k4frgAq|#PF4g-shZTt_mcdja{PO1e5Uvx}o#{+{L363VM{ROH}DV_~=N`&{w<% zisv&WCUGeW_QTm}fxm393p`vbMJP_l93Zkp^8mK*}AJGrvG(M^qqYu(GX*2p>=Ztl4=-qUXy~zCT5E!7}XhW5%V=^Kv z;aGuigyk~jAVwe)WZVhz0;Lg%2*BuJMz9!oI>X5#j6^q{3wDjl3k1f9@)7KT&*rh` z0C4&sgS)ZkAi4zj=5$F^K6LHdXFcV$rZXA%q^x$HKSh<(UTZVd(w2=|i2Y-PfMDy^ ziF5%TfywHNrrEGNeHvYZn$RRPHT?x{WhbUzzzr9Fo!$W&=LR2p)QC&Wq3hLW4_Imi zjOeh!+!gQ)53;$$2 zzGsp_XF;Xpi%<2rM!0BHHEt{pW%mAjX{3DyKO2rf(1}1WWk9_MU4HMdRcUAhKLOo{ z7V&Lp72knG91Inu1P$5&mwIf!txBQItE|4%IH6g%0k;Z^@e1MR!U23lkaDqAD8)gc z9+zih3KN98aEH86xq#0Kmh5Uabtg2+8BfBn0I)zImz%}usiflKr|cC-VGmyT@?wOp zKTYjDL+Z&;z!#ru6Q^66>`m@@mR5VKdxdwoZyVmGzG{8d`+@a@_bdJ@@hityt_!@Q z#!=%MWgq2k^fsxjsxZi2?XLE+OSz?%b=+FZ2H#%$9`_;p^KQvZ2hkfynGN^pOqr7= zddHWSGG$q#=`bRUjBnBGHiglkDWgFPq|{U(oHJ#znO0s z-@0Pef(dh6$QeKP)>r!9kh|xlFYwXEK2<5C79X-38-)nWqngXcGny6 zg&by+R%&nJo8%_@H1{-bQ_gl_hjc-b+VVQ`@E|s&4sxY@qu2&@l&itFLz0|yk2|OM zA=Au?V79t(~|{6v3sew3lxC~p@b`=M2WoX@)qUo&Ler2kf@m~ zDG+#TMy29ESi$e1CmWnzt;kvL)$%Nm1gwzc*y;_BP@gualpP)dS{Q*3Xk_Hjj^_LO znx4zLZ{rONykM`T553<*vwdBXJk+QT3)LGAzUdUTY|X~kn>CP9W4=zd$ z0BY?XUDPn(teW=7_o_%$RFhJonqp?R$5m=lf&6(3s0JJ`G{6{Wv&|4vm!f9!qLPdP zL<%!`MJl5Rk}HsEp=UWcI^psyF`Aj>QmP1wOYr*ml42$5_tHrx`Fwu=#tKvcQ?Of; znPfr8?km4E`_5(OpS8!DHpp8K^*raYaoRp;&lnG9eEELxD8}$z4p`BP`b{Hn1%qVF zTT836wQz3noOo*j<}K6g;fz(r>Vee*tEylMX9v(z1*{90RuJg1!+CiwFGs_~+@fg< zs|y{bmB&u3n1#@9f7gM8p>0Pt>i_!7#XDN&-n)L@>^mA`HLkpnyCQP!gHIjU_&!pQ z|JPeCUiHG^`E`fxHRC%Tf9RLLc(n5&5Y|eD;kU!s#?atHOfg8Y*(TS6E~zfwt)|$N znDV9~4fsm&N^e6%4YG;i$x=&kNAd3BNBBL$W9k9^fV!{v-QttQW~Mk%JON(#ZSiNt ze6i-wN!3HW!+4PsLPYTAy6KR}LWmAY9ucfITP%>16OGA0+$`3p-KNc`YO$e3He~C@ zjhe+Di00W6c{#w_!_?!)5WJbL1Jj`0~@aMt7)@2u^voOBC zZy#S~AJt+&m zvx-QXV9eaiZp|w=JdPRSJdt!01fykb5F0FCSvj7HIM*fs0aFzP91ccNH=_j$QqKTV zfe(Rs08(>@3sRfBonGALJ?q8ac#&6DqNiAzn)K` 
zyPg|9Z_Q7s<(ULtXead41ej%;HsTCAEq?3x)i*Y%bNVp(c*l)H@x+}fwpD3lj z@%Q`mZ|_3EtqT`!*|KoqRvf+u@ptL3ee+NKLL`S=frrW=qs+HQPd+{W5W@P!L(?~ohaq;~J7?DIYj2pl9)@Uy zsBvogEA~*=lMi+6d3ZM{VqPv{B&$>|wMj(sud?s9uNpLngvHghYkomRH7m*95^I^W&G}ScQv_;uw-xEHh3d15=K|^$I_{Qiu z`#R^^@ErwGb@WQUQ5j_#Z)pr&5f;J)v1qkg6{0cNsse$RIhz#nnqq1=9F7PDVQs)q z)Md`)u3L+jl&p8HDcR}TQu18*xrnI)ZS>sZ-BI#*$-V)+C*;;bk(67@$xr3G(PwVt zPKu!kg&PZTq2|p^75WDli|Al=Ck#O415jcB8ju$%w<2p2zkCZY79+5O`aQ3tLHqDkp~lhD2$67 zCKVU?uOo;w7AX__$lvG?>eJ2eKSK;NS95!ShP{7JaYLv(lZTCEfdX1T*_B_Asr&p! zT?;^Afe8%^H-@*H9tgh@J|5;nVbw&4pUENt-(jTlO7E|*Q3FQdbFzQlt+t~K~h)@ z34E@_lkd6Cv&cg{Q~erPZ;KyI@b~(0w(~~t-}s|DP@CK7Uev{zYDO5@XuT1CWZL1c zIkRJ|p!W?;Qtb7XB1k|Ee}CRot2)(MTHLSF`2W|>Dz!{@V@(s-!%QR}fD5>~Sb-7A zr{1rBAfNuYCEZL_j{Nq8H{4uZ=yZ+LpSot{$6tJWyh#7ncHQ(v<-wdNda-Hxxoz_{p1AYf>!A|>Pm16cPuuoBQhEAp~YkA14E-nrZPjFW9~b~raV-*ujKa*WgJ z3_8o51jy;K&OR_PdS5lziNVHBbZ74|x)^XVvld`I=cA^j$A}+5(^CK=Y)Kl7G7hA; zA~vTXQxBgFvfCn6$*MvdzU`P2%ZZKh&idJy+iDeQ^=jlN(Uba=+v7Qbk4ushudLXD z-Z}Z+Bl-rA&(FciOeRsl2fH-S)wa2|tsE=yKE4ju*+%2hw$oU!(5~8u;$~!*(% zL{5=G@^CUvewm14{zLDH4FGDNj2gp|8u>3yxM7}7SSRx3L>?T1{A}2|`+(;xB z@yUbaaq?HQkk1oI=j}cqcT&gXFh%68qc!o?gqM-1~^Tnm9 zUSkW`NzLLeNU_n&kznpk=p zClz|zel|mpGY55MtFLrs1E%f9We zK~d&ulcPX&FE*Pzrl{KDoBIdjz!mhU$9l?oK22f#*d8PPX-%K+X*Lpy=`)Z%L>DAs z0`fxg2jz$K)A3toR8JX+*Q58Lyrjt}v-2(;)6q}A@zZ-QpV(<-o7i?^FR+JMsma-X z@Q641AbhRVFcv$q(^}l|wWkKOS?$-4n*RN$399AlHvoGGAfCZp-bZiE9BKQHJ9YW6 zQU5ZYj(nMppmloDhn)@$?Wkzecd+;6>vkbF5 z6Lw|kkImpyte;~}Y3L|GCIY5;9y$yVi*A(jd;0?Opk)1ybfIP;f|A|4+1a}X^7J!$ z52He~6<5Kxc#kqHyAXh(0N3JYIh3%TV~pKIbYIj~y#Qu=A+l!PL%#g~zlVJJ-EnTf z<@X@wKlmO-Ka3hLzeoHJ-Xk*K9v1uFa|9Jy|AY5fnQx9*uQc9cW#%w5$TegfBQW5z zn0zM2B$#TZp1Fz{&s@!1$IN9GF-w_M%^KP0jWy;Ly%YIf<*LF+MfR-Bz#*9$K z28}Qx{L2Xz)D0*o7*NMf&qH={f`>|ul67TXCzF2DLa}{0vypHa zu1XEXbPrz{)XZ2wU`x!b3)&?eM9 zW$l^|cQ4;pkDP;Vn$a+Obd?=X9D8i;#90Gvks(tm$KHF>Fp|IER|{*ZZs}g5FJ9ht zPkVLQm}snVU~tgQyRM)0(5;hwj-YMgEu$iyfithvKlhZ`&E-Q!M9W50_`_pYHfMK3 zt_FGJFj3}OZHQ3lDa2yX8^nUwI?X+mbYBz|QgcN)J z8%;!SzcRCS-sWe2fAdvW-y-#t9v|?`$meEtjd%a-$N;4I*UDw_4R; z&|GfDW}8szW-S<*RHpuw`ud(E9n;DL9Z~~u(O66wrq7j$#Tdg&T-;dw!_u*>bu(9t z4eK8wd;X*acTTFlWznqBadzeGJI4j<{ttWK0^dZHJv?`2^6n#P(kE#

6s~+q9uC zo(XN*2KpjxDHRdYCT#<4(oIq-tSeYVKmpN3UOvFr`dV>OTolwrt%@Qmt1hg&Dz2{X zqM)n0Dhdt%bMH*rl!yQQ_V@GqcE4Y}HaTMmyFi-q z-P6j`MVF`bCk9%pT%KnQyROkt226*v}dHX`PR2gS>|0-ogMr` zuFY9)Us{`f2@Yb5udFVaH(lW5+!g$g;)Sc{;Jq&A|0b#s9RL}d%-Sp&a1+HvM5NLj zmuyU?lQBCor8E-JVIs!zH5Fhk@T)R{A7CzuiNmnP>rqxnz$h#Dd6cisW)iDv@$Bxke_FyAhH@ z5|`76qvRSGMit7glhHC+gal+j_BTstMkW>GJ=?rWhlXOx%RvsqLn>3j0#m`7TqX7V z8WDbFaKV}cW$qd&l#L6(XLS7A%=p={XlHP6cxUh(lu6sswZj4W$goMYf4Ga@K3q$@ zc%e9ly!)C#tuohYbSO3%Ns>|01QZ_?9T(jN3llCK94{jyHBre$N6RyfiRQ#jiA-W@ zrjZh!;l{;fWTeMW%u`24E0wA!wM?5QAz{yiZLk0=uc!#ViEk56A<^$=YlenU0u)df z5Z=HDe`#4FmLMH@^E4wq^u8&6hq)Amy}tasREA=MJ8oOH8NbBL^liKfzry^MzdKWM@&>C6q_uo$L~_XVnaMnyjDz47S9sX*-*nu=xC)1 zsSuRg5)FPmsa1j`v8o6rS|X0Zi_5(g5 zlsbAgN^OW%M=8Yn_OZU;Z?(C3#roeJ&nV8#Ny2-$w*h~ffWIgzi84{mW-c>&qE0H! z(Q$FcICI=4{K}!J7)9$jlhH(*GO>K0h>ngTcZyB&sQq++n!r3tQUdmO!Qev~L4|AV zSp1+_c>h(O#>5JHry&&}9eyYf7psAMDSScJ)S;)Ynb90$m{O3ZZm)MAxLT(#YcOmJ zT4+(~-vFN=IE`HYjx}6{Y)?4q0?_bhqg4*0KB+UtLC6%Px3aDEwt+Ol?zJh`#IGA%E38Xk6pk1{T*}m z?90EhHm&jcMXq&C>CRqkmsoE<2fg9nEGsp~%XFO9s5NUhX<4mSuhw%h#uz$=JYSEI ztFe+OK}jJx@sB)U>8CLGU1bQ!WIr;lV2UB7U`1_1Lp6a+`--`F^J^#6EF72>EI{0? zwgu%}L|lBHB`1EqmG#LL@$EN1-MjVOnxvdm6|-)5kt(euy<)|@{#kmNNW|~LVJ8En z3aZ|$l}O|a6WTo?ry@mCJw?{u427S|JRy}*ES53?YGoj02(+PMW7M!rAn*cV->%TM zG3$pn)0W}i(1Wb+1zD3P@1E%!Q$)KZEZS1oq-v z80>ywc9@o*G&JjiF>|W6F}1^QqZ2_L%y;zDV4+}FFN0lWQyu0^T=ZlkltSxwWurOS zw`9{?wlUkB&17firs(+nim`FTouFVyd={pS0n^H&(m-d76s!x%O~UywUxW(A1z#%O zz>0OM(Zcg08v{)i(>rKGc3|DsSvP-pS9ATg6So|LU>W=<>FRlveGLXfqj#pRw#FQX zcAN+r^fQZ+Tkm{l@S5M>dBvlTnQB|InpU(fSl4EVOyJaAf*zL>J(|t&Vm&L9fiKAQ zL%&PA*p{u~$eGh!8mA^y?tq(+h}n0Ug=rO5Wtn%t&EK#X{>)>Xap0=Rf@t zLMld)l$(~BA17vg!*RX0cNDz-a&lfx(S!-6>Rf?l_U}YWtIX9>2`iK6#UhPZERu>? z87q>B#VpORP&hCkK@BU@v4t#6u}Y{P*hrbaP=P1~tW*??LPjHrST1K7p3ty@V!>%# zP%MCY0zJP5?^uNtkx&p*4w;!9iW*uYg5p9-?jWc~w|DBWpkG4sgOku-g1-npwI%og z$h#OF4wekhL!WI4J|cL`cR;-eD(S>UEV8h3SembHSP2u-z^gWscD_sgT?QW07> zA9;U8<|r(Pc3;F|>_AlNlt}%}GIAiZR8U!la~RYjnb!qSz;* z6=^v7i69JxXA0sLe6@$;4E`+?>>&AcMyRG2)*x5T+4_eyH@vgGscHN1>({)q-HtNz z>K9L+?yfWB%wA%iv82vGn>M_+tyk89Vxvvtm#t;aXCUEi9U+q!yw+p3oAthv{R zavSzx4K)$gP+GZ8spTNEFzaj{w--%g7X8e z)^V{8)^a3FT???;n0P0EI-cCQ5Hy$}(d#quvxQL72>q}PBC0IaYp8&omzta zI{${YTzPz6?B@7sX}SJcA%*k$J`MWjr~<0jTo4}{tJlP6G%?DURENf2UK76mqo)k5K%S=*ls-Bx< z$TsCFn=RW}U-ZP%tgNd{d5cV$kP(rPnPGZSzoeteQ56$>Om@Ml#yUp?{nz5xikwY30;M}Us53@O;#6@{xc2pxSGSpd@mPe# zY|K$7CnZK}vSSU~dJ7k}RL12z|1!?+kfFXJ>oL7K`VAJbV!aptD1<~VMwnR!Yadqs zsNlB?hL0>DzeC9HDY`cJ2=i+2m(048C(jK4rcDqnuLMjPl+mnK>y^q(C^0gO88pgJ zWzZQ?T$v>+R6_P8{>9(YAZb+-7thP6lz%pd#lTl%Ag@uWZUQS3Ax;{X zk(^w8rLTJVgPv*8iUpc82 zEO^Fhpiu>WVTx49Wsx$4UM|zfWpa6q49QfiSOQid*5h64QmJYutTiPR2kTEtrhw`J zXn$NPlauU%9RekS+M!l}<>cty2)w`~tIJSFC`}@~r2^6;Jx3&qjEOpg3WJrwv%xA< z`bJbt6bRfyR+Dt`*^R)@OTwBGLENUqQ;B5nFn7#N>`dH}$h5_|~!U9zn*x>|~A z5PFb8+bPsdq3RS=oPzWzC?!KjQSs?IerskzjS@{(qHHBn%5^fCf=$rHD|7`BGax#0 z@y2*MUYWu&vb2bJ%q11#k}m@-+!lHWT6Yc>(C zF~_Eo63jo#J{=v+u#7tL$>3MPw~@}HN)*#5B05U66{X$EMn@}P<(a7Rpyc3DdSh|9 zD%w^s3`O$rnWj~G)sz{Nm2|=I@062gOi}4qHPKSOMvVcjZXgxIV)JylSV7AqVg=47 zBK%ViB3dR=h)14gilIu7>7|TD$|!MVK}F)74Khk07mH{{C=H2vQGtov(}cnhKaj*K zRfcwSK&T6Q-WY?n1vjATV@HuSxE|m=UwlC~(i4M^qx#|F!~2jsxE0eW1v+mBI;B*z zIg9MQ(Br)q60t;&@V~^#;-BI~8gM_n3k*ppfg-*<9}1PL?}gp?N>C2NAzz%1Y?eR= zgEgoMNfC`Iz`EY2HwW9$1H+4h>6nUC@c46N71v_cFO{HjsEc7rJ--b^DlHTvN~{!f zVn!@R3Wh=Wr-K4d$mC+0uV!#MfR$9yhxLhuJ%6L` zK6!X}CKRxc)%mu8dT+F$iKvUTozR{`Cok3LIQq)b#w0RF*v0G#(R9&zae;Wd_&te1 zs*=u-x#ivRQ;OyYUBp`u#OtG;i=MB%Mm1UWWX!b}Hnmp0S+hg4BX;d*i))WN9^Vju zB>qUkjKmLwT}@l8-8rVcIo`(GcpGm&dOP)Fv}Eyk8*l%CjZ2=K+&tdK z+jtvq<88c+xA8XK#@l!sZ{zJ>X(c1HG- 
zIkKGEob}^vyp6ZDjiQ54n0d`jV+MR@yE64{W8Vf;H=Ndin$Qu+=7W~k}( zwF1mi8hVoei>L(p837gp{I&o~DCYEUmP*n#6IcZOa_w>fW~n%>hrnX!7i%{OFiXW~uP3ks%T0Ta0E4Ww zTL>(L{s`@_1(>B0v=0+l227^Vrv#XnKM(Wr=V4y{Jj~0Vhk5z)FfV@|mQd~57X+A> zKM(Wr=i$gGO>#Pc@xRv*m0KymzfLSUfmzPxprY-jx0S0<<`v@Eb z{mR@O0?bm{+>HcQ580(bRT`C!G7hpWT ziooeur-p6;2AvwNBybLvk71<%gM17D0_PDtn*|u-S&Lz5nEa(-@|TA3B@N>%B5Zs_ z*!YMry+nlc@({&QMU;^$q)Gv5rn)H?eAiK4$^(A^Y8ll>dMe<^2QdD0K);)e$cHl) zss|cJ*`cost`AUta^!+<7mQvCpH4C|5?T!$wL`Cq8i1Y#g53l2hUTdS?8^XmKj7g2 zuNQE-sSbcU0PcfxzL9yj5$cVU2_XH*Q7L60G&lf5AB^Q-UI)yBadc2igt4>Ws2h6m z*?#h`#)o*qa&QwqdM-;(Cy@n5&4lB2ID>l~L=t0p%gNukz5ln^`^9!Js-;R|2?H?J1M8(q;$`9*N`K57{UmwLOAnMzM!H#4SkK zM`EL!_;5hbEv{EWS)!A8a}VJ%B;~PIZ6KO)5ZEc$!Nn2nLQlvdksb3nnJR$hA`uzS zv4q4e7tyK%`mlt$U_|I#fq--21yM3v@ZQmA;UAG$h?@VCJoGQdeNKA;j@l3oE_uY7 z7egPfr;ydSNY3mL^2Mli|4=RqS>@&P8P;6mi0AmjY6M>2ycM{Fxw?ogdj!232+uwt z&+xGaM+OIxKd3J4DxLwKeKBqnRkALj#HTUNTGP$gwNTLU7#THRK0uqZyW` zx}DJC5;9f)h%Uop39rKfLF0bnB|RfFg#3D}E&c0K4bSjAm*JUatWA$fVt~l8_utfG zC>!8v&?8936-I%ReBwDr<+2z?bcE%Ozz@V@KJq&W2ce8Nc}x^{07fr~IG3)k`T8%E zWk%%_%4DN53Xdscukw>v!RxAB;QP`!*Krw*`bN0-6D#!)?7SE8nKwM6|GO3t%5OE4 zm7Hy$s^F*ttUKG!Fiq+E1>|-oG8I@lJd^ZB+7=JaKZ6+C_0j{hhoHh~ZSp}J4ZfnWWC|A6uDhM}Ns#pe<0N24^ z^@!~3{`kB(NR2q7U8pkNUnh#@!F^$N6kc6_^d(DrJe9`;jFF160KyE zg>c+7Vj)$;dU@(Y*2&M&5Jm|CtxA(X^96g*cBaHFFq@3T|@9S{ES7%_r z;d62Q9;eI41u!4BX0F!V;qv%hQ#ilN#kqRhT~4RV$@TEPoYUp+@VWaigJce;E8uYV z`1373H%tUGIJkh%;dJ#nd`mcQ=jEjn>YU79In&$YCnD|@cX=f`uD<{OP< zL>?Yd*F1uYh_2G-7;t;KxQ5P7ppVPr?A~^_hpTsYbbEUoegoI&2>9F`ZU@)oAYA&n z!t%1Bk!d)8e_vmZ8~E$=dII@eo423qbu8ohfztq%DemS1UarIEas*rk&gu5|ft(GT z!{g-od~P_~0YhBy?eKGbE?=)Z5CC-T%ZNNfQVYNd5WX)2bmAEd_?t+5gq%K~*V*3@ zFmTvf;93K|CNu#sHqZ^6g;6#Dle#?}J^fB>S|RGao}Oh~mOGo*Uf6KJ@I%z{%Et2a zx%^meSW%-B;LAsFO(7DFbES1;D8&keIWy#t;eufsW3dJbMxU}V6K7p8*G{y-lX ztJ8(q#ACW$J$+*(3J&C1CJe_40fZo*Zg;yIXwHv}#HQ8h?dkCn6A(mg;MyI2pw;Uc zG3byIvbqC-zR3jzE>He|dx^Wx<#appy}qsjd{h7<7Ye4I4eBLk;m3qwG?(_(OZnKF z!iZWt;w>!Z#a`eEi^jFo)dT)V#68xVu_(v*b7W*AR*s*z0PqbWaKTkwJ_ks~Y2Z42 z;G1AQ9o-IJ7x0W_46*`^!sVQ|9emP*h2kLbBxGm*Iv1EAhu`l7&%yTL^mg?3f<_#? 
z+q-)}GFceo7;d>HA)vgKO(=4@a75tsaLIApfIHBQyTi=QAebAbK6JXr4R*@UjdA(- zkP0&pC&C;WxL&W*-HE?lL?C_rz>vS2I4q!U@5j#T$DM+e0EY#@xZeda8Zcm83sSly zsl3C(l)Tdjk|sn9bbEV$fJf{G{XP#6<01&0UI@H|mc_1)K*+#G?HG*T=_ZahnKyAq zyLYK8EH6O(!!ASU!A{mUYAM1gf42j;YIlwCTt^tAK0LiY0LFwvFE}sn=szR?>~A$z zuBoA_xz%E~a<(R}(cUoER%xx|GA&JToN3@%ZOt_eEzKN^uv_Yz+qi}*&QjmT&9>E7 z8aV5`M!U7CiEFTPwz|ezn-%(O^%b=(mA3k7ZYEq;-vH^y2A&8=n;SShp@7O}ZNkXv ztoDi;IJC^P)!Lfd3|y71xgO)J0;CqM(PD45RkYMv>|A4uy|JOm3e#5t+Im}kl^tfW z)>-SD^I=x#&aIN^#0f&{LsxAIu9sJSs3fg$*~rO7%d; zJ~^s~l|~m?Z{U?o_?ZCJk0N0Wb8<{?Cs`qd_f(N<1L1wlTIN}1i1`hC?in-o$9hUN zem{Qve*F0T`2Y9!biBVKe;O|K~HQHIszVx&(Ut+EV1od9E0-Xt0z< z?5-x`9HSwuM|)4oN-Z^&v<8w?yrj&-MaXpAjJhyntfsy%YwPYAWw*s2_*%x)Cw!cB zN8wew1{0c%gX{~&LFVCI42@`7Z2}UXyD$3dSMQJdvyl+|oH25QS|kFx1_+r<3oBOB zElq`LqY58L)$&${zuWEU3V1z*N@Eo6l&B?km$TRFaTcZ+wYW#Fjvall5ccGyP-jfV z=NWav==o-MuPd)9;OOn+8Y?Wul=#TP5@Wfsw6L_eY*P6Za99?0XuR(6ABD0Aqa2@C zs2NK`MPa5fgFi~~RJi-_Q-I1QD`#!0pIkP{RGDWgtt!c@Dl|?i%rK_$3~4XPP!s*o~PS@h!Shvs~B+Na#GHZE>S#3TKG{^N^B3ZMNt z*Sn8F4TgR1PncL!Ar7wtv{Ob%RTor#M|`S)9?H0!9P{*ojm1%U)WA$(MwKb z$Q$0=cw1M#<^F{>+lhi#{?JtN71Q-IapRO1-#@zYv3GYpbkoY4A4#1Z^U()02bV0` z{nK|h%+}BSsBYo?U-V0+?Js$8L)<@~wI_Yve8n@7-bdOVu2&tL{VL6X8{IpIWFQfd zF&RXf9L2`58m%XC!96`Q@0@-j`PuyZzrL__(xw%}gp$+P1Y_JvP1>Y0@7SyQI^%4kZCCX!>kSpcfvG-^z>5^z1pMTB$eWCV1O!d3#gR+b_9K{YE{ryyN8Z zzV;S<#wUT{^+~aJ8(+Bo1$Fhciyk}w;tuWcCyaw)1X1i{SQHyNx$eyJ>l#m^c3SbH25P@Bex4*0o!z_T77U;kts!ru>v!uKxa- zhf)X8lgke$J;S_K_1R0?zdg$)f3;eEexm2@uezqalnL1<^=o#srQ_5``(xLiR_`c& zzpSs>JLS})R+-WE{Bt)Ow?`aa`s%m-+u{a(chiB*uSi#Or&As*{_-a;d=Q{!-}vUc zw|sW&>fna&A6>L|+Otom?rz`uo0qQNvuXFS$8z6lK3n|9BR{$Ila%wPezN573hC0o z2g*4$Z+}T0s;RwKQv7jS9tj!?D^MyQRZ3sj{~RFiqAsPXeCJG8a~`);BYgNm}{kI#8a8D zaCP`t`}J=uD*yC+*9%v^dFa3+`(9LUGurWK6$`QGzG|bjFw&^tQwv+y&}c85WGupm zB6V(&$!IJr%IzpKPHHc9Ir1izw@=EORAeg4D>Ic8oJYyLNfsU{c z*kXiYB`gG5|JMmZQSb+*2pA(U;cA4=v$T=Az@3>v8d2i7)IYVp(0C}V;hs5HA3uF& z_{g)rKltT$6Xu?7I^?bv{eI~1sSnR>pMTpzRaw?SkyZV{j%90~>3rl*2R@@)()LYD zyV}yb`^=Zr6`Qx;sC`{_+tD4`O5;QK$Gx(@dj8kBlWy91^Ssg*>$Q(fd`)@e4};2w zivPZQ;-Q<yVtt@!)$}dXTWter>wyZ$joa?;J?P1N`AlZ&q0`($ialTcRKl%J_vW2565r9R`|F0C-RWz)r#$q^%6k1@ zrMlXV;X8gFTW5N5?xKd{vz{*7cs~EVJqz!vSn}%CZ|pg+9e#@ii(xKp#uXp=P z#gET@Bk_NVJM(y`_CAg?_OWJ=eH$^hab{3dV{bCqmn~b@juoi#xCnPypl+NJV*IJyJy? zHiq)R`taWiq5n$XbBG>ggRitxq|Um-d4_6+N1D@(poYaA1Kj#ztTSDOUAo17fbgyl zbk9sLabi;WG*WJ-Sp(uPDcs+mshNtUWBtrRoi-QyMCh@+SajC>f-}F&>Y4Evfyr_G ztHgS!NqfTbp{ER;$4fd(HK_BJ3Op`4KZm_Wo0LX%j=|7yiCa+yM~#_Asbp5%l9B+g zXp$w6wS2CRQ1M=baBi_1LZZKK;%R*6P*RRI#gW5KyCfu?t`kOkXv2==EkzXWI?Ta< z&xx2miv2>FAz(-!Nx=r7r|-XkqN|!@O>;^FvFeDxCz*o>!Y>l-Kz9Y0%T_;UmQ!|! 
z>X?3AqG@apX8f-2MdTGJ1X#BehX$Y`|LoB7pGKGK@-4thz5Z>Cq`d=hWlh`nn@lFQ z?POxxnAp~iZQB#univz?wr$(C{blaweO}c$r@pFFd#&BwS6|)L{jcg>tJdzme&oK9 zN!!@S>}(xL0>AzH*gDl7>-xXpe!qaguoeUciQz~n{auQe|8i<~yemQHwairlDwdj+@e?i1SneV)92+9qf_IXn*)S#mU#dJ=I7X zqnYTJZZ$JFcWOEq`YF5!e^@QGmGxu0`AGONol2p&@E19peC>^&^oUXlSOh#=CjuP0 zvdN7cxx3HRP2XGR+%%fSN4R_FLXC06NUDvCThk- zsN>1Xm78aF0+fS~U1KF>5k$WskelfYEQsUJEsD{9ZOyZE(YtWlzkq+SBODK#8LV9J zE`l?jW1qFyp<)E7Q`N&QFDG~L=TK|WvH#=6{gf6j99XvYq+mFQOkDe@&RdMvnBe5W zw{31?51x^bK0cbDlnuMbvmAtUhtf02z#CWWUZT#>z>MnYvbW;I*DvvYpeW~j4K(2Y zweq8KSU5_kC?)RG0S?R^K|4Z%wH&~Ok634eZh%^0VNMpZKV&6nby-7q55zy6`t$4h zEu*DV$I;rvC(So6wUB>oi?&+);QzyltV5^gUO5JfDhX57x+@%jGPl9aJ$TXXjCU;m zVKi05g12&@6i6Li1EDlqfx_D`yWTYFy%n%8C)nt2MdzPI{F=0kSo$@m#DY#%BSrX%mCC==DXnRJ?tal)-Q~_DrKchD4@LgC$?LB-1v^6`mRO1TCAkkj+z_;Aw}n4|4iQ z=TP9?QheQJ>QKF}_m#@DV_IG!KERRxS2LsDKV=^im#Fm+xKlP%_c{J;VvzujHU=p25Scv3Z1j zx8R~_S%sFd!M)mL^Fhh2Cjk$Ff~JL$UNb_Wj!W)@RY4Yz@%tie0dr6cO;8s!uzMos zaw4m5C23Tco2^q{3md}!-DIT}F^N3Bs9`eOn_PjaAylBTnN3G=;&R5MQn;)t5T@^- zFCPXKjn95IkyF~8c) z0#SFdZJp8*j~fqysS~&obTnm7ftu+z?y1XUydf)iHx-1h367DY z-RtZHf80d@O%Wsdc{a%=Pszb0m)+;!=J15wVo*^(vGLALBsf;b zOM7KGix5rL)LRaCa|OYZ$2@~~Mlai`5K>CGxwoeZ0(-y&#ACMnNII_q0ta^p>Xdrj z8cHnKOj8&OwUV^9lI`=A^4DnS9|U14vt?ey$Nmu6WJCnJorY(=mv=%o5Cgu;Ht^W8=zREt@fwZ&uuhN)SMvx~e+h0{rx zx}b3L&N{vObBnI-E%lgLLG<95A5mS3$=_U000a^e1ZOjyuoYR!AdMcx&3e+OJGbK@ zm_-OuT-bDbrC+g8iPU^EJUD=l9)!%ltFLUt;W-$TEIF}Y3HiPJ6l!TB{R0jhjH{S} zVC-vFp>#Bo(k3nhvIkNTRGX%%*R?fg)GYu63=*=20bO)8AQlRMdC z80pxW&Wzh=(0@F zaqL%kFv?)N;%nKgix*VrotHY8jSKX(3l&=BePhcTDLVy{*GOv+-`;MpL8zu7O;DDp{O63Ck zWT8G;!98L zyZpS<4~wD_L*EKMpDm_o<8til`32CvS%9ut!An+7VneRz0H%Z>thB2U5#D#=U^4yH zPeht;@H-?nEA@J*=pXe$Z-_CI>`uKwC+Tw`p z4`kt*de|=*lbU#y;$rOm(EcuJCM0wpF^uJ_BOSe&wypdQ)I4N*%!M!42}HKGdb$&F zxx_NLqmdimGB>BTs5L4PP2;8$IbT^%@#b}ZL^BJ;WXB^Y4@xMNFvTIIiZ4WX-n1(I z`c1gnyAXE55Bla{oVB{Vt)18>90j8mZd8i!-iN@mG5Y7KVOI=~x!~&;Xa$YB)h4FHZ|T*$iQTHh zokE)f3(=EBk!Q+axqa&_y#FYIl?HT~POFCrMGB!pR3xKt+Oe=HW8lJ0*ppmKgQsPs zw>QFS&&eD!4XapV1HhT?`VnM;{6wn%2%ANPg7OKPZs+9l|H8s%`!@?4KtK;5U}K^G z0=_eS@!kKS)w2KynAw>BBH$VR2Lb-e{x1gxwtsT}QDXcboOq@$)crr8^)I0OztjIj z+kZjd|JuHK`3I){H~)pU{|8e4kMEcLuP@tw`2A3<6FAAOe0(*5K6k8uB;TF>!i{zs?( zZ)!c`|6;3a#dMqZ(!&H@dPCu^{h5i%NMTmy9CONIv{aK~6~bnp7a7>RcgFN}+4`nQ<#)NYSC506$BUe^ z5o)&)IEfgXZsd(cl%HctynK-2HdO!5Ymxb{*#8$=v9r=M|0kE8m5r5!k(U=1%E8gz zNY4ru$~FDW6VfwbzW#~h(Yw}!F@B8Xw^WkN52ko4B%~S}`k$;2gh;I7zWhBMLwM-9 zXhJA3ass6b`F>8o`ANf7uxUc4+6c*o^%_*l8s#h%VgSP1_sK_TAh-Mb`%mr9&W_F3 zL(k?@j>ip$N$1N*0e+y60x4e>1F}8Tw|d=6aiDbB_VZda7iy2|V-_bM;W1W{^3=ilUz5|^3>O*15h_0RZp0yB_1D{!Lw+*jz@Q_ zH;VkY=Q%_#qq`reo6=T~Z~YmI8RBmbs&n3m+1Y0$`L&!Y(5cqIO_j(5$7efHr%CE( zU>eY7u(s5hv&+|#n{ntsys?<6HGd4QagP1{mDYVw`M2{%lPb;Idy96v_qED5er)M%*(?XGybGJW07ASte2Z9l2yv?#1zT+XLlR=M|Y z8h!|KzTuCPdP2ba_;lXw_zZw=!jOLBM@wuz3%%-RQRgtEZ~zjL=TR=M{LWPmO?wSd z0lX5?nUS2p4DqRi!2fd7$!u-pN|jIZY3OX&e)u81iQIIZ)jmN0)AN6MeWD>`1~q@0 zw9+lqOS$I;$F@6QP2Wj z^#by#z5B*T!c*EJ%t^gQ#6y)HY=(ZQM$SW)%jZ-+kNToC^Bf3MFxU-&9Jgq&pe}f9zY+Q>DWP!`GIG z!)=ValNTpFtg;SfU`DiOQKn5$L3@FGC0O~-?(~~Z{W#fS9AOEl1j^?sXq0&|&1IKrmydBB6egEKC!gW>r|7L}DdUHEQ9`O!oX{0Qvag^r4(=y-=cd12lD{KLd&MhA-a(-L7D1x5TVd`wH zXiq9QWjM0XWO2A_=9tQ3lbh$S=l8QvhyMX1n%{$TLg|iS8ri4Rbx~3dP10g+1ov%C zagl**in}7ef_3=Va(o<8cbs~045+CDiyzOm;2HB$L3_c+C%B)f055iB-_r8nRLla5of{H)G zNbrKG&Ue<*Edgxk!nos_XDKY442IyDiV@YADoAIXyT<+yJl(dH z1251TxYOzG{1{GQL8nI#ZfqU$e58cKN1rOtd1Ls!OXGA)^$p|GWy*o*X#lcfz)n#E z$7%f>q{b@URh`O@t~CU^2$Y(IhC38?P7O)5$}n`67HKpxsmOOL2fto_xb`O&&F|b9 z|EL$=W-lcYn|ZE`&#mm3yFhLU3R+lfT_(Bh_BJgy^;gD++!hW7>OZvup^7c7_sQEQ 
z6labui>)~K>*wpZgl6MX$-QSMyhG7f#OtFcD|DvJB)BUnC`Frd5@{*XsD|Y*B_5hh zdYaMrY*HyR_ES>Z-o1TE1V2?~wbxg6e`t;hfD3EnCl9yV7p~b3!I}y&-Vw}F-id?1 z16iPyWjm6es}p|Dm-Wbj*HZ}*ei@4moJkCPbL5|df<%n?{8rEJwIa5e3gL7BRG&m| zjOhSD9_Q~D;fq6Q{-eQj5f6pTVjLF{6!{tZj8qIbwo_p9^o^w`_r*fMDa8*2l4mHfQGF)oha{$_*6$qDoWA#sCH0&{GPlAWB`@8Qlb zuPwP90+T>tP_g@o1k{09O^Zj4f+Ae$!SE2RjZbhgy5}$fgKs1&*IyO0p3Fh48=dfD zj(070BUcYRpXIE7Yf8{3q`ir(xvGnsC{)F?ur2}#9C^Q$f&oI#TcUV8+p|?+G;dAR zjM`(mGkz4E;4`#l>01!S%B?MUuc0#+R{M5Yg(oy=gL=@(ZmWgmC_~ihEyix!T2|? z502y}TdvIkms1e?4&UQ(GF*J#6QJZfc))gkhY z!agxSx?f_$qGn*EzuHANCGXnB6l#eG{NRqM0|UN?sa7*+e1>2+Q}IIZ((lldMkhNo zpja1h3rtksc8w2I+OTRn)Y=8aqRzI@3|UG{?Bj4HtnJ9!V&gktU)p0YwnR#<4-{J) zw6CTdB!RQ=Q^typa`2Y#!BagueIzLDE-URO7Z1QIL|*1&>4>vvXP9dynWx8_<7An0 zkCi!^u zf85Z-!a|0*gb_PZgA3istxE1EDj8F~izJ15x4?x-_4P~mP@+KHG$rV6o2AlFsImKn zJ!Lbg!fey|Nf#^OK(nrk_bYZ6Anzb@muC-}7!8`i5#Y^yL|w2Gn_U|hmIanEYZe-ut){-??zT%urEJvYW!8l&cIuQEh$2ItXoe91z|$?f`w0&<=f*rZ>EY7}qH0$L|FQ`f_iu zh#n82mtkamE+{kaJC%~w$eZ>-S0mX^#`y~ArV3%Z(!oaB+eUZb_$WG@2Dd{rl3GJ4pK9&8F50rgx|ht@J&%UF{_H_0&PX z!d#UhkRfwvtdYs&@wJR9B}M!-c^n68>P~D7gtxI~+r^Fy)Qr(=M~dqKhIh zdxEw>9+j*>3TD6U$HBtYBy|_ZVO0EW_y?1bOu$az7?Bl|`3Wum{(~@e)7;q{Z%(rq zvw~t~&!UF;7%O@pr`p`0-q@;+Ribp!yxzP#+el+d|EWbf;{U8N)_fXeXN4JedgGR;7edNVWBjH344f*B*QM}{wU=mHp}TG$;yqE z)>gu)-lTmxyS2~y!Gj5li{7#eyfGehzs5Zv6?+^?MFK$&S*gzmm3k^47tZDuYr5-6L zB5%kOEAOgS8B={sFk(2zr}cxFM)XJ`;W%4gm-C!QE_W)?^iuxldj$1qa9GPqW*Af; z22E-A_=v5#;eqn>dmr>CLrrfF&AScOZUmce{3s7BMcDo1iO?Oi8*q5N=VSq#) zMunz&}^T9u3MByh(|D!>CCW3#og*_+8S@!FDx|kjzp5PT2 zb1(Wmc|ce-+`h<{YU^jJ3C35f(ETD8Wuw@8@+2AvJ<+;f}q!wXcT36@IJOFP0)o7ab%t%I?*vb!ajt%6(e{hRpZr8?@a+7GB_!uUTRqfkr4!U+A8^mmc zs2W*o=6l$K(ziTQ(OF%JGJmdZTC7%NZ!lnXAkl1eQ0i`lvR?e__jk5Huk2q!D-NVm zq#V59o&KG=+{U_aXxn0O+Sqv8UV?=s{4AQ^j`3Prew!ZjS4EIGV_OE`UNNWVGpH|- zmdCKd99gs1A&uL|jY{9)5|+QAm1Br%^7`6T<3sqxP;0@%&4}>Wt$bJ1<<-JbiIK4h z3f@L_h2{0-ots+DjN;V{DSUGLpx^9{F5OV^Li7f-$hnC7EjF|kIl)S_vaQM@9q_WoLRW4+vRxeX2w?4~P2+Cbz*n<%bOwNf~raST}P zeT<8sQOIOgI03;vUJ{*A7h3SaUE^OZ;VFw@mo^n+Ia zs8*j8j_iHJQ>uTt31x9xWz8S_n&l1m1CP&yvcTA^EI_$t+w!}G$zs*((JQ1!lGgwY zx4*};L`Nw8&g9i1XuNGWWmKI`wz-^B(S^ch=|=JJRK(2KAH_FUALmb--U?eQSM8^Z zt4WeZX2H}SPGay9@RHy|s+sZHNgMGU7*C{SXk&GShBtzAZe6xBp2QKs$9FV@bj_K} zKG2TwI%~juKPWB>yzs~**}smbh+dI;3Udj~5T4?a&9DB zYcj;RVbE!iRYTYGTlI>wk;5X!t`WTr_q-~b%gppixC9OPnH7EZ2 zh!eUKiF+j{vG;0`@@k8Cf1<>6qlQMs@8;PVbs%xgTt_lP9w}_S1A=>pa5PXtWZMgcSu7&P)kYVD_jo>ELBzo3)(tFMrxl-Fr2LE+PeH+oeAi7E=~6`p5%{7a&r znX#%>UNq@%R8yiXPpQ+od`31*D*f;_h-^T}`Ej3Tt=p0y%b@WMeTYJRMk=@{c-N{^!z2&n;U=q9;W)IvbI*bAc08Y` zVEQxefR;iBk4diR!DQ4z>54ccd(4$$Nn70*0Kd`#u--bJ$LW~~XeAeE7JDF^SwJ`ZdPT3jR{SDm)qA)t!VX+{jXh)?-E)0QJo(tg*|7UY z7r1abjhXR#RtX?y-J!&kOjaiDvUt`-^%+Nr z1e5ds)RFWc^uvC=1lg($Hg_xY(lgS|Ur?Lcw|eGF!|-x7ObpLdFzv4=5L`Xb{^fniq(e@N;_>x1H#BpM*vR;(VuA^^f5oZrWA8P@LglH_Gz z(4!-({&d1RpF7ihs&l@H`r$k{i^`6F*l}}H2)_53laqjPq0iB$2*dm$E$%0&N@0t; z-bwv}v96~t1mdpQ%m;Pr*{2gS&Yk_6n-waL6#%yl`SuMZ^d6DUc;^_2FWnFgfCbP@ zkcVQZ*_y-euUKoSZE`Ndj8sHxYoJ+E=KN4kf8pLK#+^(E_QC1++#B_YNu^uKB2nb4<&t=egWG>c_(_;-=%y9dq;jCH?&R;7wyMT%uU}_jKh;;vh@lh zyf{I5RYH&MCUNv27(Wh+B;=Xo>C#e}&@!`bhTB6A0aFcmey^_P9=47SV>I0rHaQau zKMLdMYXm-g{vcO%&~V6{J=8qkn?RUc-Ch2OJ(IFBnMglpdVKtiN|iIYhz{6D5~Z!@ zbq{fAx81D`Q%_C?LEUH0g&%K^^=yA0Fv%-bnxy1(FgNV(Q{P42Nw1lZsE(nMj+~*U zps6Ro$IE1`=2qPFoCZe^tx4bS3pLxRA1Yj z&Ntj3*Yvpv9j>nLRmO`}!Ch|=>iN0tKEv8CG0)g?d$>CZ=co8G(n&CKR{IB2iZ^&9 zXJU5GEKPa*1Tz&=qr4dic}AsJOp^e@e;au_ua2)}(tL;MbbKMV4~8NBnE!CWD1{L;USy7X-c~xK`Ay!~DhzDS*r_$O zG!afIzvw+woZUihtLC|hLx)G>)Uh!s zv@O{|ZcltZQR`f2a@3T#VTHPhoZn^>)U_^#4SWk*qaSXJc6-tF(Q9#aJfWobXza-8 
zKktp$R@&}Z6V9_Q?=bHk+_Dk@?P1q+HlwHU(tehCvJ*vEN#)<#wdW%bOjwkawXr#* zl(7$%TgQB=_cNmQ49On|y>vecV2J}Q39L36_NlTX2Z-pX$th^|D&lr>?;;!-J!;|u zo_fBqIdMutn#j}V%Ulq7a&BK@*7%499uCs3hve1?tCmHEy3}3t5H1>7@w#ZutsNgL zXo|NAkY}Bt#nrI!X7_wluC%^%k~=3Yq}^@blq3Y0o-T0prg2NN-Gag{AFE@@J`FFc ze~g5{UK1PsWPN(yI8svN2>R3buEcpZ?%XgIrCX__sxE3SqJh05GRU^Nxa2fVcsvKJ zD22#LekwouiQ8+_5P}uS6Z;d2F^{AF59sDIs(xI&~K)GQz-l)T^<^u`7jQ>Ge82;y2@@8^hzf_e6jrUff>Xx zJKHHuu4v@SJzhG=M=$LNI3L?J(-<{jGjXdCcMb+-?j%_@+P>xqRT+yDb_p(C8@-8e zJD^m_FJ_E;R)gLR+a2YWwoE9PUNXh9z_TikwzwMe7d|s@%GAPc9BYl`nRpPe&jsBj z98SSV#-KZr8-x+jYU^SVgi`%Z)`Cl+UzhYf*<4aw{JzNwQYBm^Zybe*sg|3SA7vN0 z?0U#lN28otUO0ukRm^eC*K>EL46hMzoVyKapFE!At8X0g*_Q>DPU|ZrB^?6tvM@5sa;4Lm` z5n8F1qC+&JK`hHz?2l+e-A$(rc&ulaJ}lUpkr(53)xD`7)QScwx)rhakT8wwv= z789!k%fTD-M~KM|>>)lz*6}bYAk~&f$NfTJyXAH<*&QlKzV1+8o$S+4tR^_>Fqopn zhU8SDPteD$OaRdBc>-l6tm)rCXkBBSsh4@I;bn5?-U}DO3NTO@`=Q1BoFPM|v0G+W zqg}qho+Ng1t?JKuL7{6LzhFKYQJeZK_He6)&oiej3<(N;unVK}`$x z`-o2Zp`L6fwT~v0IdZObF;gL-h)d|Du_pK3IfJF!d@*>6H(Oo8q?&HB)e}^Bn#m32 ztjbs;d^3QNiac01;cF=E7^_;bTaibsva=aycS|FuF(u4 z6z_J9OPH39OJ@4T_Iz@?N-qTHq;?NcEU{Pua4+ z_EhHCDxhv(Gc^ouyTwi^g5K;mx}!wH@p#`<$?!gKhM&nrBUH|Nd)zlB1FVdBsM*Me zrG1L%-EL0R5VM{nR`?9-csvr&`u-p;ACg^@uW@#59NsSmpM_iUFo%`*gD1e~10xZ5 zgTJ#|Ex-p0JNg>ccx5zGs#KQt-S}kFc*Ba<_`uksfl8FjMFjaFr9ySkF;u-GFqVqh zJ?*6;yJ#no%F3k8G5+-SemeF|6p-opF_a^)f&V_#dP_1T9GOXY2N&vuuI-+X5Z69s zmoDOB!-&b5$5G%c)p@-@fnDl5$IyYV+Pm!DP6oAQCou ztIEGtbWy5ugmPFaDx4+7qA-B3UGHFi{zm7{NW3tRwFUT)AJytPliEUMzbcnlI0s6Z zvjz5dbE2FdZ#3QNAucF!b$2b$!eIHh_UXEu8(==^jAo-8)le(xe0jA!S~cJbh|3hx z5WEeXN~fXC;G=KO#97!)PF2`EEb`vAU7(yzWgo5@B2%pwyYZAETlAfHEiL2g#Kbm` zwrtrvvB#f5(YRKx|yNc&nF@nFC*8T1rqy*MDyGo*XyYv@uCmo0x4X*3XT@wXrIps{jTggN5yKcs44lImSh$6Q zL02uF!M#j3A9d`>7BhJz!iRVo2sZSCBszK=r`V}%ZWkV~q*|(*fo79ZwtH_*8U-|w zCNQU+PHMkr@jL9aJ`e^mG{s*gNMAbx&-i|GZz20AC$fKm)1Orebm`5@z`#|*Q!4Dt zv|0qMa;)pr$LplfOCw2>7ew;^R41?zX2C6~{`#3f*7tyS1vsMLoUVv_Cyl~mwK+2M znSQ6Bgq=buKL(XK(8FuOhB#o-d_3N~ObhBtX zQ(+;|o$_i&&D|sjV3i8Z6m;w(CfB&c@;#r8+!kH>$-<5YH#F>v)rz2yoSN{{T>Nrj zJ*F^Vk0uw5^j_Z!dKtGWrrK0*G#F>FbhX?`Tt@xX4@;%NxneF1@SaC%a~5*mO;`~Z z$F5xN*h0j)_q?qNYB{D-EMFd2Q-4$<<1%j{#|!>?TY4WdMVrZ&1Ym@Bq>Gv#SC(Xn=fRXOIya@Lk4AhjF9hSq1|M{jI01Er^ybb%W%i z(}Qr+4oTDLA$bA{Z+Osdqa9vBsjMYF*L?W->F_~W&+~3;?PfigZDQnRG&qm@c=Ct^ zr-la-?bdPTR(e9PVs$Qd$FEkS0h7B(thN^G7bg*k*r`;cAPSkV%qUMAEwKGO6N26h zA%MtD%Di^I&5gm>xQfh|m__3c=Xk~0PfFBCb^-+Z{>4MHtF4b{MY?TL_tkzux=VM$ zrc7c`aNE%046Iug>{oW6vqJ`74_+r*t88YmR_z~erJ8#~ot>`pnhrS4?eY!*fTELy zANKXXI4hG#XEVCnwFKq*zSR5TLTW5nC2}o2PS;9~zXgBH0+wj4l1p6VB1QZv=i|(b z1;IBX7)jjrY8*-cGgz~!LS6#05M0Th_waCK8l0tbNr`ow{(Gh_t5B8rFw`l?%P-y4 zu`Kh3Wf;0r89ZqoQA2<7V{O`Io~Cm)l-j6hUcEHh7czfme0a<>A`iTLY%f1&^k1}{ zon)Jiwq+o5rMbm7L5Cpvtv8W5Wj&P4Z4)CeYD?xUCiC+H4?UVoz@Z~pk5w? 
zv_WRX8sS~wO%du}FuyCHVhC2{Y|q(=IR0jwHDOZuSf;VQUhh7iaahheVs2FmitpCq z5_?a84oMHkq?yyM=j8He{i*NA-g zclv&!?+Kbj;-JI?0;#6JLeqHC)xO_s4dzCK2}3j-LM`;jD7ww=Mc!U%-)Zqfxxwd4Ix3%av)ardU^!&GypS^i4*B<&{K=Ro^)OB(}Gdk z9RK)mktPL?UeWN)Ok@;{+TPMgp)whZx>YmaLqPT zNL2l>*yycoVsidCc4 zgOAF#E7uX1%2!C&j1;@Ig3 z6!)yAek2@PWkPSP=RyDl=8HSDC2@{Vu~H1w?wOfWD3B_^(7B~C<7S1&p8-~@k<12) zvGTJ@c&lF8zbl>Rl)$Z(KBt_S4o5O7IT3|P?sN=<=O)%vFHamP_tt-?hl85hakimi z(f&NyzLut~9Y-o(#XFb2KkOYH9z<{X4lOFeiCA+nif?)+Mv9^reG^j7<=q;DaaSou zO%q#}C1WJXmdLD^V|l`M-yeM28)yML2os&3$;h4IzI;;~r%r794N;baA3*tA3gF5H zx=Rm-)C2kfD`*!XMc!aY82f4UVmUKUdn2xGS;*QhIAL+GKX>)KR7qaz=a3Yi)dxyX*to^vCE0R$;Ers@T$8BI`7rXV1D^LI%cv!myC4=R|MNjVa~uZX~KsR;!t=yzJo2KnWaGKUSY}c=Bg_oOGG5jTyJ~+~8|FA2@QoPghYNEj%$0v~BLDQpF{y~3hd-I$CQJwFTulZ)ogVoU|;9%*2cm|kKHbO~&2xv|4mWk;}94w*u+jyM+J00uC z^O6_5&Fe{P`09R#2^Wz(MZJq#CO5CmXIy+_WfoWS?N(<;Q~SEA#h$jb3uF#o@7RSt z=ph;=Pz?wn3jd2$1&xS6Cmj`hoQ39MY+Cd9#CXN(kX97K4qyuaTpKSs@Y}_s?GN;o zr?{J&)(WCFvy)E~15DN3`3l6x)PH{MS@zT-y_!t8nI1W8@1LD(zGUW=eC zELv>lpe&H4RfvfjgR8P0$Pb}~!Y7DL8?D0cs&b}DU=RM`_;c0DGTH&uaT?}a{!HXn zp5xg|@1}a)fLz&gjxuFQ(ehIDqh-nDllm3M=Bk|&AcXO>pn_p5+vmke(I>1fY1r?4 zaui$U6*Qg>(v1&!@%^XCXQ-$}2jfrHHCVGNyWgr{nSqyiZfCnM6risO4HiW z*vTZV0QT>Ru^4I>S%aw{tRQS`ZfF*5OY6t&gS(Mg%(&p`@Cfl3^7!pfOY$qUcaL~w za9uWjvPqjro2V0ZVAv)cy$kf3XNRZeJBvGyE_wJk$QrG{cerH9@~0k%W)!z{=#igE z$Y|ffEre&HkK|zyanh#@!}5&fse`BOBKPaY?x6Jwv4K&?X zx`hn7S;bKN!`<^ z#3fK+K3`)I+?=$!I5{hge5F^6_gcn7QAIipn@F&p7NL!-tOE*w# z*}!jo8nC~oowqF((3koAU}U;}J@^hh9A=q+yMk^c?)3Qnye{*nU!^B5E<;$rK2%Co zGtrSx6H?XQq86;M>vq%>SDZnqx?O3(@U_KQIKM#-?36ZuXIc3-%PEdL&?rBga3mMB zSdF2BEo+(q<|G-DI&J(gQ6MPAna3E678i?;Gicd&cTqTOfUNg(jgeah9}q}kTrH2( zKm+ALl@XJe@A5=9n*bPlZ1;Yx36lb;W0cZVl#v|!sLez~EMkAt$bg9}z!;SUjA^nw zr46ggo87K@-(nfQKuBnIT_zP^X_Hp+O(!nH{IgVBRhO;=>=CIHDy#tP5Spei5Vg_S zk+ES&7Qx}63l&bS@=&AA6m!e|7}+C0Eg$^GNP4FkMRM8lSVPyf$VA+#>s@JgXS@{u zqwxLbsC&2-C|5p1Dzcl-Bs|=U%0qBv`@u)@eFU-AW7gcWPxE&9G#iPrq=mwgCuk-g`~xJIJ}m%V+8zINaf%q(6m-W*L!UL&m7{ydO`$_O#InlnIdW_L1nu;$;ZrNwRo` zOO+QemPN(iNI}(y-I8e8;{BowPnAaEdGRekU()^*qPj|6j;BlX5L-(n86|J`jNo=3 zhmN#qT#i69=|ItLtS~9A>2j#KPQxw5_kO+D&X*trY?+o% z2OS1ZQdgNdm4(*7B$1a&7#lKmJT|TOEhAer51Ly!FkK_|rQ-Fa>hMmdg*g@G15KKW zpXWY&3jc5+0fRfoi;*r6Y0%5TsO%k8N3c7ncvXf}eqmKQrZL~Vp^`w^M~5AsCTsyg zIhGL(#%=_D5Tjc)(~wQ2c=^Vv7ht}e{^u)71$Zzn_Mkn^U;`W>t=f3qtDi~ZBJ^Du z#;)4hH?Bf8jvHo|FfV218g43uo3P_kQFOX$WW~iGTRvf$T$({&vd5o>%mwb``0&R* zi>!pySu?Z(IS<9sND%PmM1AV#MEw3u`g8AyKunXZ!wgNgWz$Aj{&`OWMDo5yuLCoRvtwXL`Ao4<&j77h^tG<^3tNya+X03ug z1Kw1w=(rfjr&5K3-*8qHU6kh1FOHSY2?Y7kiz5sIbF_UQ+@g>pR}zkMhgJ)r*xl;- zQO>s6X6LY3w(gQ}187`IA5}$eF`qQQgQ(CUYhGNhHd=3?afwOuPautz5%o(GI9R_; z9D8n@*!4>tHe70)s68x4tsBl9Ok?tyx1*octhHet)M0qqj1-NrwMX#T{ zXL46`?mQ=+Qbp@Lb9?r`%6j8HJ2$9};q`~}q1p#(LfEj3xZT}025MIDQxA?pvTX>g z@vu1WNHwHPmilkITvrd!Ykxq(^M*AJ8JNZjPhwr7Ks9IOl3gLzRbzE7XP5$a#!lAB z!eo~R*De!n+rXM1O%GIFkA8?U0HpFFsXiL`;WRtAmv2cNfj+}azF*j#&XC7%LPh!V zQT}|~Y&E!iICtR)Xx-9U7mN(%zNE*Fglil zempT!MP4r5o>#RgyFcU1#gR{#84|HJG~9K9#pN$LZ%+1HZ z%L|9t4yAI(-c{1!UmP3JSA^q-b-Pf_W-5-^!IXN^VQFPWT%R_A1y!Lm%r6>-Zg4(P! 
zwQciaC-oJk1H+r%EXS|#;AeAOiaF!K!AQ4U(r^Y|Za=4PhvnKfWheg+dv6&XSCX^| zi0>KM5)?Mu|eVC}re)Txc_lc*oQseTuK&G^=j=4Do7f-9n#%^&% zIPxeE@9XT4^`R353S}}U`^~caRdPyFSkIK6`OTttONZV08c&~~==jX|%yHo)&&t6; z_W0T4xxc4~?P9z04XkL4;>_i?XwDjw(xF`2 z(}vd{!Fb8kqmw50Q`@efGe2&LFy8pzy3?3;Tr2ns$5vFqr_rd57NrPbyllTUu7R1} zeq6N%U*cDfz643UeBIy^b>dTPb*~q`sgecbjXn@)Cv8dK#ofNsG>`pJ+R*&qCfeQM z=-uYJwMl+rvV6sH*LI$*?Lc8IRK#$6DES&1qghB1%aI+g-ebgYfN>paOOE=k3=Aa3 z0`e9`+zlTSFkP+6q0R|@n$pF{`;dE9I(Bqe+?suKL{)*mHCKs01&$w$f7;eNkA@BZ>Mu$X^9BaR8=wIt}qLxniJR~`^vvjp|byjsUejaE?xYA^6>y6bs z-!@-Gce*xwy4G>utpf^^yvkL%+V~tvkRyBLM!@cJQWeDv{{_)+wT<xMIrPW|2BRh7SRI+ISv8{|K&3VPbP*tEX+&|Mv%W`qKpHnj#q)tRTrNEb`Dl3) zzBj6@qSOC*2~(i)twO+;fJj5JaqV>X)8j`{JkxP9fq-+>~#%8Hz*NseU{CPte= zq$ANL?7*RX8qJFv*s1?8|BSUVT5l+@?7xZT!TU9aJL{i3KKQ7yLT921o%34+6 zz`dO6dOq(hTDEQ>$bN}Nx}R&Lkr->emBdB8e(k{ZcwM^-ZpmBT7awXAoqO=ut)M$O zuDfJSMO?r4MF1Lgxx(KeW75mam(#~`_$=e6Pq0)b@lwQ&9J}wQniyYDATn;w>{0i# z;C#ADB(EeC`8#4XsiogR-VGT;pvgPr!3o{2W$M-bHN7G~xwZ9})ih3Lw(9d4MMrK9 z1+S&2I7`5;pGJ(c2bcg+d1iUXgI-45u1pISy7Yeiz?}`r%j~V}>Xh{4S(ffwF&Ft8 zcxOJ-!w#se3N1#BqjnP(qX(@0RxXLz<@3%IOAcWYW`E`5l+3Pe7BA{}jP4*p|2GoH z-qGg(796KD;S0fCR%X0V!lSwK~WE+!n{!nlV!o`v~Xr0CTcbErI(9n zK_XR)uQkL(84{2bp=wf%at1ktU_PYU3YaRdY9{g>omQql9X|@Zc9X-{3sK_p?9fLUJG0R&nugpqn5WeY5xjyY z2w%A6rQCfbuA|R$Y$Lc|oZ*Jqdy+1~D+F1Og(vQ0iy%>NU?nbc@ z_OLt9Re-7Ll^fg$o|gsuBiVRTjTK?rBT2-mK_%m-aIbz>;=Z*ChLKgH6xksrN0%qyegE zk}TKt{6aW4tF7PQG%2JFL-VF(16;r@-tnZzs&=h6$3w(VhuqH!i`Ia*zw;?fI()Z& z2@#0|)}-eXOsAJ91TPht!Z3QKioVYmSV4!WdHa&XZg4w=Rk2=Advsw~FLy`g z6EkrY2rrSDwLKxzvn_r^DN|K!r9wR`T2Ta*9idJpQFz1<+p^@ajp7j7K4e9uSErKs=5&s?wx{N^~!b5uB zDMZUP>}pTG{n970&vJI$ukd*=$dsvoL!)&IxCf4LVB`Abi(NDtTH-FMaq()Dm8I~ z6J$!H1vZH|jhbLgi5E^P_gI^z_!BHqFRegyi+c(f8bP93HlQ6Xc?gB_QbxAHXb%Nh z{8LyzeSidmyPE{0cnB)K2H1kzSLLP&3>|f9EH3m`52m-cOn#SMM&3l;Z`lH%RJVKA z^4wy9RB7F0h*%;&@-iT2FCm#Nl5rt#OW6jE`(%=JKi;B*>Qp@o^|FwIr;yoxuE~wx z5syvCBYQI&*|)k++QWgHmycqHkL3&2oQa=9j`ps@s94a~x2;QA5Y z$R1(B0(K@h`MEYd5+pEbEv z9T5f7$p6Tar?WmOHv?ckzAFrxhFwwYwqhm z#)5bX_*&N=6N&@zIv^5_qx)J$49Vmt2rWj|WZKRbydNB8GG=x#GQB@*R^jUDQ3UDX zeh5b{N|7Pc3{H>DeNfIMT-jax`h)xYjQwa}9x@jI&r5rUz!vgfk` zfpyK11ImiHRt446Z9CPEA}DRS4k3LHEDVFks3nG8_@A6_y)&d|OxTfQ(O=5h6LnY3 zM-#La{dE~cmIXm>;Tq5e@`?-k?mPx^Rnf-?8FipGpmeiGx-a zM7gUey=Le+RVD)v9gJgxdk-Uj__TgJ0@eLPR$tjjGft}GTcb&D-$dj~PN zZD`wuV^nC2v61^&x2R!(?G||1IG$iPatG*lz^_Y!jA(#E;?KO{IUoo~dvFTV^ztHu z(O^;Gm>EBXK$+*NY)>E|qi~9qkPwwnbryCa6CPeS^p_Rs72{KG><7#6RIGggVM;YA z>DWWup~*-Ap3#ejz2?a%s+q}6oNiCiZQ7qSozr!^Dyp0sT$S~*zW7FGb)H`0y3W3k z>Rf@J)?nRCx2+)zhnVfPk+N$&dmLN2{BX11^eIZY5ifILt?6~J53&Ayf!B07?`zKa zLM`O+a+m0Bv0oYzDENizkccb`DuVfITm-)_DULYAs}}u%t0<~G@sr-3TZkJOR1RD( zWMCY8uM|CM&?!QUVf-~%jT~I*pNFWEn?GJ-ikl}C{ym%#g zFlUpmSd-fkQ%(Zk2tG5g`1nvg6cs>y`5cn6C0GcCyyd6C8N_x)15#5O;r{xgU$ZQ- zu!-yPgMx}=3zg=^D5s!nboK)eI!(~ZjOA0U0CH>I^IXx?uub`?<4sD*fFb!FM- z4Ed+K7scSBrC)zmOHm?^Hvgtkm-idJr!#RYw`HcYR0VOjJCOO4u+KMhNb^G-9NDAQR@lAA5} z2_j>H9;-lB3F0PG6K8wC!53~vWt&`a!kkJsNRzO-C#7eIZ|eTupybz2xR0O^jqchV z${?B6Dh@;ag!0=Js5H;ZmojF^?VN}Nb#!)Or({uVNLpr9)K2`7nQ>}_k~t7j#&==rM|!=m>- z58n~IHTk^Ub**Go6>Ch$&tuy*^pxG<)JzG@XLjVgs3YB{w@I!@KgB#Eu^=lK@ZJOJ z9dIN_@iZZ-{j0K6QFx70*KUdL@AidC>abjp`o8cf9vj~_`y-(f7t8S-?C$ir*sRFw zHFCVu>Xt+I;SiS%YpzVun-%?G$mCqK7YSVj_Bpvw#P5{*`e2uOAHC)SK5XdW2le$J z=^cVhP0t$7Wt>Eru`*)^l7I*XRIIs2{g55ozVb(=s^%M;_w~x|2x33X{W zin9WmbHzOu@Dz;Xb)EYJFVO(&PZ@}&%t5~xmUc9`Sg*mg{I1MXY54hcX=Qbm5e_2U`)K+#d;zM5KML81Uppzlv+?Bb+XTP24WyW0w{?@TM$iiD#w)? 
zlPcE7YmF8aWesy0s%3M^6?2oEM^@2bw zR%%7@BW1&sM_ZWaI~YhKR}plhBa=>%akQPBb!R(8&>uc?r$~}>&&`fD>{{$YJEkU2 z!0La8Wf&P-CL=sXB-Yv0m57YZcNdwo`r6HrjR6cpK+c%3w+F+6qU7H8riRRIjs!X- z+3HFT@^X5r=0c-_gnTUi-GN_zLj={mUtN7=;k!>|8uf+N`(Vu58$Jp42bb3au+&kQ z@Y>?BI~7p{Cpy?ux0G*Cs7nOW<6e@jZAjiquoc!aAtf;h#j+ufB@%CTI%S-GXe@%$$h2>U5=C>Z@tEh^y?qrEj4>ens3K zxA`{XNAB8gU|cF|v+VZx$gzjfxbsxl+~&!(>{g8Z+#m&A=q_vwiwt6jqBlxx8>EJo0pgmNn;eK=dpJ1E8 zlieuy){YJ3y@z?$UXZ%jbn&C+f#cmbUB7hsHF%eDB4M&cHmB<+gVf zRMQnso*?5(+CZxz(qPYg(AcZcs@QT$O{jsnVMf$7a_Z1pkQW4 zqGDWZPH}8(PEKrS8`lf5EmGv^PlIW-av(y&#t%%4tc~>|I%tDu8H4-x=gR7o(4(Wy zhTzQ{&Nr8OZHth#izp-OiENbj@4+{$#B6C6Fm*;6L zeDjbtpe6gZ!JTwQLLud`66Cll(I;?7@RDB{6(LHU)~YO9gCRXUn=Zc8KbwHv3U^N5 zOGlJ33#_)v_c`T@2IU@Hr?BEWdf2O=osGP;2fAAVUVjYyJv1uiw8t2B60joyWS^BY zD`bs(!K<3nIQ#^`m_-evb%v!siF030PmZNcrrxD56qaA$wp;-8z3dSy_G8P=WX44-?gK~(%KL9nfIA33{x1AB1;*UiIC?6t5W40nV~ENz1ljjX^>&BRMfOJjaU$)7$2x)COK(Gc4FrZS$n zc+hX~Kum;`k9jV5?-1okz_fWnYsLEH(aV2gEem}KSduHO5>+odfH1Tj7PBsAbi180GL zY9!M#6JIua;tRKCr^VqB{+2qk@rhcE!|ust=}8!ntLJk}Ht^Xda(bZ2u*IBjb;-S- zux%`Q@~HTAL9zB#Zp-vojtY%Mc|;67eVsMCY@?I!Rgw&(rn65*Df1W){m3fM#B?|& z&MZ$0YW7l-rAQYV#&D!?&Qcb^PL4*pEVZC+i+J9W*NKDW*Zzr+mktp`a$J&Fi>|oQ zfih{X=T)E5KAHu*X}sw#5?SfFtB%-^nU?96>Ym1d*Qj>%^bB-47Ot4?X`~gpw1QRc zY7KVw5?`MG!^){|VV)-+ONopDdj!n5N0EX_?7em+6aVH%q^8#q%n@Qpdz$al`^ zRJm&SJ;^TVo<=p|zU+Y0>K+X4JFFgx0zF8pc4=Q27klF+Eynk^oV_jkL@2j{56rj)E9;9R<^g`O+0Oe5BgUp zrmE~63mBKcVv7|FM6D>_=W0|e#NE3*fYnjy&(%yx!EIpy{xmD{P%gsT#S~aEz7ZQS zdhMv=fq+=Z?mq?aoEY(LHFP!G-l&pdkaH?}PkJ}y{Pa(`yZ$=hSjzghdIeDW^B}DzoR2`BpxxS# z7aKrT2pdXr;HhKv}d*fBa!p< zhy>T%^&6M=gJn@O2h)fHF2ShisS%LCfKJnlWHwKcjA;(y%@gA0r?iM`PktZF$<9iD z(conSF8_=YiTsW;#Ea)A8ND;BUXK@kE0n>aQ6LM%ocE(%;e&a-0>wZ9*0?}_h6u3Q z?K>`C%s~}T$eXIcfjggBg97VSkO#dg><#d#8gxcRLOA4G0aPl+hJwFxfhu;~Cn6QI ztZcK+@ciu99aUnVydrQllHzZ_@ILFWT-Oe#Y)U6x)|bI;KRhEb)%Y6 zch;DaZKy67_eL1|BBAS>q|+7(MKff4o&F$A~Cdhj!!Qm>G)B1yF?gU9MnO5*uuNO&maX4=I} zZ<7iv&}4DYIQA%fRqi==fZ-{=mVSn)yPL)|BzJ!(Zf%GGsk6n2ThEN!MZ@UwjR!M1 z8oy39*aekCQNTdWHyw+FK;wWP^xf#XEfQW7cvBD;Ur%hhq+&dBo}s2hhwgj%T3m~? z<5K-`NsQZ$Y{a?hN}kcsl5)7-fK4uDY+!uEH24}w&~X5 zQ*9JpkHipS_PEKo2neLHGleN5251J1v7y*kh+P`=+HZJKpW&7LvG+_F(8jJz8I4HM zJ1Q1?%_h^*GrJT6*XZ}R%T4WeGVM34BH0kZ3=6VGbiNAQDhSR-sGR#K<1=~g4qIZ^ zYt?*oh*EimoLVcuETPa)9!zaWT5SHXuUm{2@0wEdL`w)j(EDf^ zF}FqOP~MQjmpT3huBpaMDq1VpQCg~1RbZeDTs^PAFxA@CBQHEH$J@tn{ot@{P<_-; zy4SrFufnO2PxaM2(KI{t68%B$PWh|z-Xh(Y&5CJlruzMOyo*C*_jQLl!xdX29| z4Jhf>gTjwGp(RQhPgK;1{_c_Wamad>71gBk=wX=X%B4p%jrC(63*ZeYB5~!D%xJjN zwp%OcI$BgJsJST%oeRy6rzAU`k; zoV21x0X7jU-A5i&C@ewP^>_bRG7!&By3v4i=2SLh>;eaZ@V^T-+mA58Yh>Ns%RfM+suUJ~GI<1%)KSWDJI1 z7UW`IUX}^ShM^boG4#d}Ny}`LDRhe&^bpD*guM<(^sxj|RI=NXA&p%4hAD1@v?*9D z1n=TYl5h~aU(#bz`3`qUPWB9{8Ofk7uUUnHF%kaG2OEvsRPeP(;C&?DPi|C#==o5? 
zqAH}OCi%B=A{5rVM-39f8dR|2pbaq2aqt@;7paz$SzsLr?PBHC4YS&m9&4PDNpyi8 zqXHS!0z6YV>LuiYjRCSPNwgouVoXa@h-K08)Um}3O8W&J;_eex06YHzd82-4am1y$ z49q&!?W!LUe<~_tDc!oo>l7yUusmKtXlFssAAFmE z(aM`ot2ZHJm9NYi&c3zIxoBGD5~gSN^xQTxK768e%`IesC*QpTUNI3&77mCD$c0{p`HWyzKwXgO>c2a~^PwB$j| zD|c$i?)B`L+8Z4)OK4dofG)8Y^H66wGpGe)2eV9$CK zzhT;Dg*XUy3;zCsZ^0sLCBVvsl{#gUKa;H+$jOz#+mXY*f);dJW{D%VYm^2U^m5En zMR!(+d@eA@EjgSZ0{So^+t4Zi@3z<0%s*Z>W6sRE1C;cw#VvRR%5B!1HrtjJqSoFS$R|tcLv=f4pbt<{_gRbSRV|LMk52CgnQhQGU5?U@d7Of zNdvVyV_HR3^3&T0Xcx8mvn!l3dA!L`PokmPZZ{VPw{Ox zgo=o1hG7D|nnEq+n2P_xB2Qt5;xg^^WVFx^?q{?}mH=(&Tiy)ik%hnTj**X?rJo5q zk|D0SJ!P_cj4*Lb5|y8v^wMVV^@T@6DGq{W3vQ|d+aLh;Pz3%ZsQ}&{dvFDoB9|xt z#oC2f;fS4#j`71y&GH)5@O6W6ksg)6e?yG*2-}spL=~LANyox$H zQrxTFAd!z=+@U5nk~Oia&Bqep1hxPna|KXT|Ba8JugWlfmnr1Eg~A-Q zQ*hx>n39(U;_|ZgjTE@CgK+^qV3>61!&ea!M(k-Heo`de_C`8fMD^;px!UWq0v>IF zE-9oysYBY+58nT1fQ*HTX|kJY6bb8iDOFur4K+i_5GLfOq!J_;fJRj#1{04SMyl?f zn;B0zhB@qEe~DL9j zHt07!ILuoxe65Vs_?%cV<0PQ7k6~Tq`CB?D_BPpYT^Ko=jO;rbRF?x~^ZQ?%3B`Ad(`sBJ%>74bG95TWO~Cs=f)-iu$=! z7^8vtiXp(4<03g2FQblpFQMS^g+~+Wpn%8m(_Qbw!&8vI<69n6MC<0>Ayg%&V}F-2 zE~2kX%SsO)aWYXgaWz4~x`iY;9D$=J^P=^}zI_}5KRNg5Ot?m5p2N4uB=e(>u6|G% z#VrqPkVZ5~c*en(jv2#QV!ME)U4ksBIo`<**bC7%3i2ziiY0Ym3nSqO&6nr6&HPf_ z#JhOQjI|ut@!-pRlm+Tb`*p^wjACI~thaO{Hd()9*5R@oA}}O!A4iEri(A)bikA^z&4@{T4yoM(0JgN4`HL+B@FE)Pf4HfXr$f@w1l^4^{~vZdtD z_f0)eEft3Kw9(?061mf9qYC+uJqj^dV7&irAgH*Jk`7N5L7OdO|OYGj%xJL z1g19(Nr7k%7Ca$g-dhN@BC#RBoE7V9_#80kcFYYK4!owb;miXa0Zl3OE!BmWou0wZ zZT#1ifhpH|dXUI{0+ry{(RD}tsX)ScEnnaCq-6T`L{eJ&1%G=`m-aT#@^M+vv_7N5 z`wUxxCoPtjI!MX))`!Ph+^?M#8`+Tr@eQAP%_}yB$aQrd4en;X0cSRiCLgOVoIb=9 z#l)C1&}i4~y)w)?%KW&=o+`c+I+)3uzq8@AH!0utJ`4r!nsijf=8+;q)OEQz8k^tcu|DD%d97)>Sv}esCr@@siR*U`dN6bAt0cuwQl&q*bW%Z3= z`*3C`T{e~Pc+cwNO0#zOx^GrbyU!i}gLn=h&&gUVx1HhY=Ax>U&xztoUu8h_@&O!@ z2qE(%o}Y*n6ssKNa`NHmO?$zm?A5!4$l}14F{-wXg%pS!aGSy4g%47?y`J@^Vk?Bj zq#PbUa1Uc4_?D15Tx4HD5Q5-jpbRjOR-x@3ZR*UtFq{QT=*=D)Ot>!5TbHoBqj6#e zn~d)4E~XqLVE!j_T;VY*841; z<}FpzVHIt~_sioP_>{?~`?rR8R2%6T;ZbNx0XA?2GX zTC5dc5?kyo`wGhz@7LBh+osYc)c}un7rb5&97|S3m2M<5!cwiMT_Bi$9G0!*yN-(l z(3TY({xqytx{QA8;){>0^1OC;@(ZMb!+EG=O=S|=6y0g`LMN|4vUK6MA z))k%k$7v|a0OM4hL}n{tD{}qg&x55Z|G)(==OR2{#yWGiN))$c zpWA|2&N^MYB76(|z2Aw;&U7|=st9x!51BdHt`1$-zs+-S!9O${rPcsn!)DLyk@-4X zq-yz?raT*9w#`OtB;N1UWFt_Tx3|=9QmrYSJ#3Se=0vF+G3r0ftoHe)MiYAHZ8;?} zu(ZFQI^BO2Uk_!{8jway;^LHMF#RgePtrH&gbFboT}Z#bJWUfnR4t)8%<}{eE=$!i z)cn{fyMOFzgu4AYhs3jC$VMiHMYxBwIF6RVE&%1pN-){07Jvlx6+K)?OSoNPEK>4Y z*mgg(A*y?LO;#|su|a#T!Y8zWAIelqM%~;MR}M9!HAS*sC@7rP-(C;+@xAAUnKoci zlVog##F5-!?F=`*cl+i&r}SYG#CMaR$U`J)8h4|BoG6zw<{mVFjfh{MlS&0K8Lb40 zcn%eyE-+1wnB`Ab2XF}6llab$t6f~*zpY|6N0&pdw=v(WJ)Ugm7DLcCFBwJ)XOv9j0cO|M;l? zV_fUdRFtnfT(OWeTAhXZ=oEBR^)ykPwzO}mp9yv?C744;=RSFR^fR>X*!lY7+ttX4 z=rVb^^CR(_(nHWyv2qi8QD`D`(elo>z}0O?IJnMQ({HP3Z`Z0ccxY(YXB#drKGQW{ zvrdUsb-kNhmMBh+W1m*IN0fLpPBf@^+)U33+9KOcPMTKB;sy_e3*F0~V~!B?GydnN9&F)FI;j$37Nc1YhMx|o31%H7_yKGw-p z<(*X7H5EEY$ou4GKbugKk?eVNAvm)F5H{k8ehjd%cGitV}`hq?n9m5UPFuG6e|^77s^ zXp_~97{U1$#z9ZI`?$-FJF+W4Ug ze8-FMRvbe`F$m`&vKjWFWd)D!Onz)IFHkwGfe09;;5Jy&MWYjQ^^}u`{AU2j_DUt{ z9WwN6dDCxm*fT{CY0KeY#4%y$P}_#k&GP6la%?GntM4laPtJ$llLn)$*TvUs(KtGN z_ifeNTjS4rw@{Qi&IjeT`evb-qA7$wb&ZMbnsiS)Q%A2*#^DCNTyIx8g&tznMrZZ8 zd@_yZH}j|+&rZc513Savv#Kw~I;b%2FRQ6h7w4`D! 
zZ|udaah>JnZ*MnwAq3#{&R)~GybWUn)Ck_ZCs`FQ+h>w%-zpX~B;JdV?kTceUCf^P zr>rf9)(o}vrU$B4Sa-&f`nn1yY;;~;@I^CU6|X<7rFdk~YFwTbQY%9G@OjMB0l>cp zq71b%3DB3s-w%4WB?b2`wF#EAXKe3jwH{oezv|D;^^3!Pb9?q^)~Pzk+~aYD$#$6> z(|y?g{JE(opmGwT_;vnPJ155RhP`W9{_XA*Rnyyt_6wthu9wq0qRU&WcaY*<^|pfG zh4fZ;XS`>KalOFe73Y3t&u=H<@eCA-J*brGwl$G%vpOHJ5&co=&`!jl={tGV1Y z4)yG1HY+tvxdvbPz3EB#p0Dfo#8g!#t@roq?>@yFed_c1Rn~2(u5q3(1$It8Zb~|4 z?&rrmwzOPqwwwM&t1$ymi51JEJw06eY^M=%);es}#|8a?ySNRJJSnFyr1y<&XFrYd zOuUC537*$s9$OuL2;70We(a#Om9BDa)Pd=2H$MgLANatSxsCm@mCQ8%3BT4r@cM-F zUGZGzqU0EjYE>*K; ze5PhC{2uI-a3OC|X5uRK558L&o9B!#Z{2Z)D4*#48J5ySVx3XRCi(pi?baK)?aE*V@7_%HZQlUo z7YXfqm9DbHh=Hedb-d~0sd}`vqlH>;O$E)MAFTAPm6~09tKZ{AF=O`D=uS9z*mqf21|CWghiwT{PP{Dlf=+e8GJA)-IWiGN>`?ml$efQ~Rxg~- zD@}rF-kY!3+$XwW@h*F>!ZG`v@L|ugsTSKxnQ4>kp9d#+8P-Ez%FpNXL}gw>_^M%- zRi`qCWPLXq^hLWb6V6lyzM@V2opCOTN&+avw_V^NUJF@K%oZGA_-{ z?nl(#hX3f6EP^dE_C7LKblZHossa-gAnQl&__+na$4_hV$?S|*=cS4F8rU8-9|Npx zS586ZdkSoD$;fRJl_N@i8bv*oW4f)%o{s;z5c8*ik?oNI4xw$SeEU{maW9BT_7E|q z#IMushPtjJ3P)%nMCq+6HfC}M(JvN?wDC5d`N_9Hg4uD0Mep%0uHPJ-mZjJ1nEkTj zpVmT#u1?W;$9yI~mmiHi6gv7QusmGHW-%)}XPh%Hp9XBLx~JEP;#u2eH%cmx?H9e( z{IDxyW|N*y~a#^JL5p7doaA#v;+f$wNN%ehH%5AEk> zgRF&*OvAS5*n6R6_tbXg!YxzcCmltftX1d1iMTcbk`!sC=yZ?sOgj&mnX{kxP2LYR z(bq4yv%bpLrYxd`!m~-?=${cRtj}BO=%QQh`?*{l^T&B)INC|xQdq5ScUh3x5OO+> zTTKW|$5ue*D`i|y$X}7_MGrhL?|9epc+YLo$T@rto1cf3%jl~3oR=&O5areL?c{>q zOPueC-`Iah56n8w-)AUQ%+jtKEG60)wnbs|i(5-vQF`7}F9Wf-q}|Je4zIX^2r>PmmJ)*q?Ig`3A{J&k_h_NFXu3hf?&c!Y(^5M>+F zu{hWGWa@D;l`ni<_ASZBT`F1kby>S*^SGVxxvt4m*7v5hUf^hy`Z_%sW$nGnT4jvF zyYX#4=WKojzKXG8{++>Xcx&4A-N)SKN@zLKvG3*Oo-K%)wj7CBq1@wquwnA;K7vWo z0P)jF8omzJ-u|XX_u`7PhHR9Rm=qM+)63vJvy4_rLxN+cT)L|Sl5s|=w zQc!JseL{F4sc0HAZV)Q3-0-x-#W+a!HShx%u-N6eaUgY-8Gh`%__DiwUP)+jPTZ3xp^<;T+OcNOn2Isq^$xNaT$ z=;|I}I5_^YqAn1C1A(h!;IHK*`A1NIu+(`|vJVU#SeJDW4C^ zG06oTU1+7U9nRZIwKn2USAOz}tixjvxXe&!I`!GyPmfpKSv`z+XLx?yZSm%OhQXI~ zJzdi=ZYBc2 zSUN@Oy5_7a>PW9&^Duo_?CaPI?VNd6(A)E&xK$~Xv)$EHp00X1Ijz!U%X%LtcImtR z;AkQ5wFP*z-gb?AB{l0Z96tz$xC6-^EVh42pRpSJW6$qV=%7%x@w=3^Qi`6SK zt{zG{r^!~S-1;eX=G%_TDO6R~bzACa%}4GNwaw+WXz=5%6RnF0H63q#{j+aEusWGv z)@47vYutIAMYu21swbs-+1~ElYMRuW`R4a|g+8u7KQC8~eY<+b?w#B8Q_8 z%QNH6Ve8jUI<`0I_|Bij9rG`<cW^)?Yop?(BdW`v?DbjbC&4 zk39X>1IW@ZAk*L0!k;5&0SxgEz)K}}J7YR&V<&wheJ6c7MP~!2Uko*AsXw4DiJ0iw z+5Zf5$<60xr*CL!>_lW>Y-(=9Lww%WMNDLF#6zsgB26!CCunSDF6QB2tmq-5WawdO z$YDgx$II=;>1J(b4KPFGW^HBT$mzyItZ!s%V9W`if3fL^iGCAtvg9FV`ouuXM$bUZ z3&ZVTWWuQ+B=Uz6pv6OM=Hz6@Nk`}E>iWr*>65L4DIEg`2L~NJBON0nEkJ_S(cQ*L z-;LJBk>npE2pKyXI+)uzncLbB{Tflp=8RCK2F=(A>Y2|E0i9 z_sj5~9Qh--KXLv?*NhDR8L^$SgVk^G8X3|VTNzs$+c-G_ykq#yJ0n9*LjVw|ldS{q zZ%I=$w*Q^5GUpYsvUNA2ArjPgu(kT#4*>Ib;x{q6KP&%V`U#lbe>(baDRk4f12{?l zo09+`PQl;3RI;_T`ajCzUv;?u$uJhucQWQ>q-Uh3WniRbpjTp`=Va#Kq-UU|XXm7+ z=cfCMc2=LbU7*LPEly1sEAb*f@R*^WSxU1q$GZ+c-Mu+ZY=E z-;DMr-TxtYqW=dM%>M%$ME^DNA7%N!>H2TF{-X^1N5cPIUH?tjf0Tj$Ncg|2>px4^ z?|=3IdvCx_@t>P_-hXZWjcousepkTO{(n(C|0-erQDX?1JK9<4y92g2O6E>h#zamI z&c?q=3cwh@i7+t#DkJ|P{aZz%reJJBr)chBOa$npQ`ID5AYvwB)Fz@+u(fppH0X&G z=)`SIYylPa@3fF6_h%LsHbD_~K)u4w&M3$%^lPv5=dBX&|GxylfQY%3ld%Jxh!tRW zDP(MDYh?V-N{EB;&xMfN7nW%*Su}mLfy?w@;Thu=x6i^Ukt)2^(tIKL(yNqWKr&=if3PU?vpyg^?8hq zNgZq3VzI7osx{{u9A!blM^qzTUFZnC5FZ8W65lC*k2GinGG^Eg(Pl@lBTx$^B#!rk zkAI<1_z;iKE&D*t2-`imh#$Xw_fkMX6M7XoMW0s1|WqwLHP z@M!G-SgPE<2&H*+h=& zg#!Cv#8QK9m*ECtNDsVC51vSGIN#xXM2r*y@|_4~8Y#4x9v1q!+V%;($tUvE7}v3q zbg#i}?->2kL>KP`HFOEDuH2hGR%fvp@C;~ot#+thrb|ZZy?eNAyr36F+y8L^x;Yq| zz{Ai3Ru4Pi_Q%Df01M5_&bmNZ~0?j=lFH}^zXVHj0}J0 zG5tES`HLJA+uwOCOn=v9rT?4H9PGc&PyTH@MtXXtzs3tFBH8{f$4viM8;tbKf3wR- 
+## TensorFlow Models
+### Model Zoo
+
+| Network | Model Source [1] | Floating Pt (FP32) Model [2] | Quantized Model [3] | Results [4] | Documentation |
+| :---: | :---: | :---: | :---: | :---: | :---: |
+| ResNet-50 (v1) | GitHub Repo | Pretrained Model | See Documentation | (ImageNet) Top-1 Accuracy<br>FP32: 75.21%<br>INT8: 74.96% | ResNet50.md |
+| MobileNet-v2-1.4 | GitHub Repo | Pretrained Model | Quantized Model | (ImageNet) Top-1 Accuracy<br>FP32: 75%<br>INT8: 74.21% | MobileNetV2.md |
+| EfficientNet Lite | GitHub Repo | Pretrained Model | Quantized Model | (ImageNet) Top-1 Accuracy<br>FP32: 74.93%<br>INT8: 74.99% | EfficientNetLite.md |
+| SSD MobileNet-v2 | GitHub Repo | Pretrained Model | See Example | (COCO) Mean Avg. Precision (mAP)<br>FP32: 0.2469<br>INT8: 0.2456 | SSDMobileNetV2.md |
+| RetinaNet | GitHub Repo | Pretrained Model | See Example | (COCO) mAP<br>FP32: 0.35<br>INT8: 0.349<br>Detailed Results | RetinaNet.md |
+| Pose Estimation | Based on Ref. | Based on Ref. | Quantized Model | (COCO) mAP<br>FP32: 0.383<br>INT8: 0.379<br>Mean Avg. Recall (mAR)<br>FP32: 0.452<br>INT8: 0.446 | PoseEstimation.md |
+| SRGAN | GitHub Repo | Pretrained Model | See Example | (BSD100) PSNR/SSIM<br>FP32: 25.45/0.668<br>INT8: 24.78/0.628<br>INT8W/INT16Act.: 25.41/0.666<br>Detailed Results | SRGAN.md |
+
+*[1]* Original FP32 model source
+*[2]* FP32 model checkpoint
+*[3]* Quantized Model: For models quantized with post-training techniques, this refers to the FP32 model, which can then be quantized using AIMET. For models optimized with QAT, this refers to the model checkpoint with fine-tuned weights. 8-bit weights and activations are typically used. For some models, 8-bit weights and 16-bit activations (INT8W/INT16Act.) are used to further improve performance of post-training quantization.
+*[4]* Results comparing float and quantized performance
+*[5]* Script for quantized evaluation using the model referenced in the “Quantized Model” column
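+
+Most of the TensorFlow example scripts listed above follow the same basic flow: fold batch norms, wrap the TensorFlow session with AIMET's `QuantizationSimModel`, compute quantizer encodings over a few hundred calibration batches, and then evaluate the simulated-quantized session. The snippet below is a minimal sketch of that flow rather than the exact code of any one script; the op names (`'input'`, `'logits'`), the `eval_func` callback, and the calibration batch count are placeholders, so refer to the scripts under `zoo_tensorflow/examples` for the names each network actually uses.
+
+```python
+from aimet_tensorflow.batch_norm_fold import fold_all_batch_norms
+from aimet_tensorflow.quantsim import QuantizationSimModel
+
+def evaluate_quantized(sess, eval_func, calib_batches=500):
+    """Minimal sketch of quantized evaluation; 'input'/'logits' are placeholder op names."""
+    # Fold batch-norm parameters into the preceding convolution weights
+    sess, _ = fold_all_batch_norms(sess, ['input'], ['logits'])
+
+    # Insert quantization simulation ops: 8-bit weights and activations
+    sim = QuantizationSimModel(session=sess,
+                               starting_op_names=['input'],
+                               output_op_names=['logits'],
+                               quant_scheme='tf_enhanced',  # or 'tf'
+                               default_output_bw=8,
+                               default_param_bw=8)
+
+    # Calibrate the quantizer encodings, then evaluate the quantized graph
+    sim.compute_encodings(eval_func, forward_pass_callback_args=calib_batches)
+    return eval_func(sim.session, -1)  # -1: run over the full validation set
+```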
+
+### Detailed Results
+#### RetinaNet
+(COCO dataset)
+
+| Average Precision/Recall | IoU | area | maxDets | FP32 | INT8 |
+| :--- | :---: | :---: | :---: | :---: | :---: |
+| Average Precision | 0.50:0.95 | all | 100 | 0.350 | 0.349 |
+| Average Precision | 0.50 | all | 100 | 0.537 | 0.536 |
+| Average Precision | 0.75 | all | 100 | 0.374 | 0.372 |
+| Average Precision | 0.50:0.95 | small | 100 | 0.191 | 0.187 |
+| Average Precision | 0.50:0.95 | medium | 100 | 0.383 | 0.381 |
+| Average Precision | 0.50:0.95 | large | 100 | 0.472 | 0.472 |
+| Average Recall | 0.50:0.95 | all | 1 | 0.306 | 0.305 |
+| Average Recall | 0.50:0.95 | all | 10 | 0.491 | 0.490 |
+| Average Recall | 0.50:0.95 | all | 100 | 0.533 | 0.532 |
+| Average Recall | 0.50:0.95 | small | 100 | 0.345 | 0.341 |
+| Average Recall | 0.50:0.95 | medium | 100 | 0.577 | 0.577 |
+| Average Recall | 0.50:0.95 | large | 100 | 0.681 | 0.679 |
+
+#### SRGAN
+
+| Model | Dataset | PSNR | SSIM |
+| :---: | :---: | :---: | :---: |
+| FP32 | Set5/Set14/BSD100 | 29.17/26.17/25.45 | 0.853/0.719/0.668 |
+| INT8/ACT8 | Set5/Set14/BSD100 | 28.31/25.55/24.78 | 0.821/0.684/0.628 |
+| INT8/ACT16 | Set5/Set14/BSD100 | 29.12/26.15/25.41 | 0.851/0.719/0.666 |
+
+
+## PyTorch Models
+### Model Zoo
+
+| Network | Model Source [1] | Floating Pt (FP32) Model [2] | Quantized Model [3] | Results [4] | Documentation |
+| :---: | :---: | :---: | :---: | :---: | :---: |
+| MobileNetV2 | GitHub Repo | Pretrained Model | See Example | (ImageNet) Top-1 Accuracy<br>FP32: 71.67%<br>INT8: 71.14% | MobileNetV2.md |
+| EfficientNet-lite0 | GitHub Repo | Pretrained Model | See Example | (ImageNet) Top-1 Accuracy<br>FP32: 75.42%<br>INT8: 74.49% | EfficientNet-lite0.md |
+| DeepLabV3+ | GitHub Repo | Pretrained Model | See Example | (PascalVOC) mIOU<br>FP32: 72.32%<br>INT8: 72.08% | DeepLabV3.md |
+| MobileNetV2-SSD-Lite | GitHub Repo | Pretrained Model | See Example | (PascalVOC) mAP<br>FP32: 68.7%<br>INT8: 68.6% | MobileNetV2-SSD-lite.md |
+| Pose Estimation | Based on Ref. | Based on Ref. | Quantized Model | (COCO) mAP<br>FP32: 0.364<br>INT8: 0.359<br>mAR<br>FP32: 0.436<br>INT8: 0.432 | PoseEstimation.md |
+| SRGAN | GitHub Repo | Pretrained Model (older version from here) | N/A | (BSD100) PSNR/SSIM<br>FP32: 25.51/0.653<br>INT8: 25.5/0.648<br>Detailed Results | SRGAN.md |
+| DeepSpeech2 | GitHub Repo | Pretrained Model | See Example | (Librispeech Test Clean) WER<br>FP32: 9.92%<br>INT8: 10.22% | DeepSpeech2.md |
+
+*[1]* Original FP32 model source
+*[2]* FP32 model checkpoint
+*[3]* Quantized Model: For models quantized with post-training techniques, this refers to the FP32 model, which can then be quantized using AIMET. For models optimized with QAT, this refers to the model checkpoint with fine-tuned weights. 8-bit weights and activations are typically used. For some models, 8-bit weights and 16-bit activations are used to further improve performance of post-training quantization.
+*[4]* Results comparing float and quantized performance
+*[5]* Script for quantized evaluation using the model referenced in the “Quantized Model” column
+
+### Detailed Results
+
+#### SRGAN Pytorch
+
+| Model | Dataset | PSNR | SSIM |
+| :---: | :---: | :---: | :---: |
+| FP32 | Set5/Set14/BSD100 | 29.93/N/A/25.51 | 0.851/N/A/0.653 |
+| INT8 | Set5/Set14/BSD100 | 29.86/N/A/25.55 | 0.845/N/A/0.648 |
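+
+The PyTorch example scripts under `zoo_torch/examples` follow the same quantized-evaluation pattern through `aimet_torch`. The snippet below is only an illustrative sketch and not the exact code of those scripts: the `QuantizationSimModel` constructor arguments (for example `input_shapes`) have varied across AIMET releases, and the model, the input shape, and the `eval_func` callback are placeholders.
+
+```python
+from aimet_torch.quantsim import QuantizationSimModel
+
+def evaluate_quantized(model, eval_func, calib_batches=500):
+    """Illustrative sketch only; argument names may differ between AIMET releases."""
+    model.eval()
+    sim = QuantizationSimModel(model,
+                               input_shapes=(1, 3, 224, 224),  # placeholder input shape
+                               quant_scheme='tf_enhanced',
+                               default_output_bw=8,
+                               default_param_bw=8)
+    # Calibrate the quantizer encodings over a limited number of batches, then evaluate
+    sim.compute_encodings(eval_func, forward_pass_callback_args=calib_batches)
+    return eval_func(sim.model, -1)
+```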
+ + +## Examples + +### Install AIMET +Before you can run the example script for a specific model, you need to install the AI Model Efficiency ToolKit (AIMET) software. Please see this [Getting Started](https://github.com/quic/aimet#getting-started) page for an overview. Then install AIMET and its dependencies using these [Installation instructions](https://github.com/quic/aimet/blob/1.13.0/packaging/INSTALL.txt). + +> **NOTE:** To obtain the exact version of AIMET software that was used to test this model zoo, please install release [1.13.0](https://github.com/quic/aimet/releases/tag/1.13.0) when following the above instructions. + +### Running the scripts +Download the necessary datasets and code required to run the example for the model of interest. The examples run quantized evaluation and if necessary apply AIMET techniques to improve quantized model performance. They generate the final accuracy results noted in the table above. Refer to the Docs for [TensorFlow](zoo_tensorflow/Docs) or [PyTorch](zoo_torch/Docs) folder to access the documentation and procedures for a specific model. + +## Team +AIMET Model Zoo is a project maintained by Qualcomm Innovation Center, Inc. + +## License +Please see the [LICENSE file](LICENSE.pdf) for details. diff --git a/images/logo-quic-on@h68.png b/images/logo-quic-on@h68.png new file mode 100644 index 0000000000000000000000000000000000000000..a83b3d27bea607223fc3adef046242b07cb38165 GIT binary patch literal 4381 zcmb`L_ct5f|HrA((5OvfuNn=ltu<@cD*CQXHHa-WiYh53h&H4~>{0VgRZt_bs;WZm z5frtGjzpr>psL=!`Th@|AMQB!dG5LQzRrEV&VAerhg+6hoFbe|OiWx>H_dJ{F)?Q_ z`ep!jM*ZN^2VX|R5q{GxiiwF`@&606UUAGS6BEyrm6`FKho#gquRz~n!D0JlhaBCE zj?)np`36B`$?VmT%H5py7QUgT^;fC0y#mf0$h^*bDo^wabFe3O&Otk_UTWudwwCxa z-J*_&_?fa(pxJ}OiOo(MUW4dBzuQSebRtBS^Hi$;V)P`|ySj6lL&FZwQed&Q@n0v) zu18IdUk|XruM#3m0n*6liiQKu(ny}qDHh2*CMaPM4ikX%-9R~p^sq8piXo)_?}VMh z!QkTDv4RUSKL2f-%gM^P@fHbWM|;i+W#^+buS}E$wv7R#x4(hOQw8OoZ$g2_=4PWZ z>TAPIqB6I8G-InC)9uUv()cT;YJ3U>S4*%aPG(=YT}k;$=|slvv(iYJ??3E}5z9zP z5G9rTiQw(oPZ<|?kUSC~_9eS;uDT`*FRL;8k_%hSm&@|vcyJn2mKt!;nunE$>Ks%J&9p9qYbQ&o?z;g7 zTh^Vff+ME!u$rY65Q4Np0HaAdb6&-IGs*8((;}x7w0H>ph#(r$F%9DmtAR_QBLK;7 zNtEs#V(@3oo53L|z_Kxqz-+=HGXXU(1Qwfn{B`N^DP#ROBqg6a`9;RVK#q9a17$q) z8m=%TS!yKf2KN5twAV$GAkJ5cx{jj=Wfx72!|};4?(xbl<9nevYTFNa>cRH@PF*uP zC22lIDc;>oXkzpda1b$-bOS8B* zhq%}Jk|*UoHo`%f1XZF1)j*xhY|x)2JO6pre26nK(!4r6@v=-s>R#gc zf&_Q7HNI?qnSHV=&Q)?uF#Y@OhI?#|)|cg=pnPeRjVCV0gR440jopIX_K+NIIV=KT zTNG%qI;wM=wVaqnBOU3!T~*&vE>I@4^zUR;?=zdaA4hnz?LC=DJgPS(L_%~-(8IUO zCtBIX)=tT^WfGja=aElzFh(PKgw1wu87~mlatG%e%R{%36@)R{kHC zWFLz!{S9gyHPUbA?S}7Fl8Mb2Sv1==SyW>` z;1T&{g1u4Ds`OwS1!2eY@k$+=8zvX(LGxL$SpiR}$OQ#(`ztqd&mM&vlnrUD>dIq1 zdiGXhgSvAa@*0vn-fxw{z&T8BaI4fYMrNtx^Cu!B`7mCyr|l1MDg#Af$nV3@_GIHx}bE zA`jW7D@nnHZegbNbsfriGp)GAOiZ@|O#GZ-)=vBqZ!f2Mau&4c@#r$3;b4!Q#j*2l zTWa@iTyuTr6ZH~I&w1nixW>NrNlLinp%_R6$u;;xo1&rD*aXKGMXXPKH_Ek(661ut zynC`dJNKWcs=(I6vmDqp4~JK4Kt0>L`|Cg17TYw9i^C(4g|*pcQwz;;X#(tj`QB znK8?IUfM1;HwXn`1h(mCL+b2rQgYmkg8BBa%swYBjvHQQ@74(N1<_KC)z1FIp!5}F z?5UwR0|Ud3>Ec4KCcAN53{Q=2p!oUnXm{(onYP3wk8A)WNnTZg7%upNU`xIaW=ZUI zId)a|MyiTiy2UBd%>h|qrJCXEb{g7K@7pwZv{TrR1)5CxQ$92Q0_uNdhJak(_G=L2 zFKimIGgqYN#56z`?vUcAg#se%x!9}Fx&g0rjmR9Ghc?XAKyoZ_r)SUQX<%-*(-C?u z->rQIE~!gR6rxt(Y2T&1+WfGIqrw+V;{r&-S%w53*oj~1;U`%Siw~dms3$Vyn#9mc zq5bKjUSs4CY>x<^fhWu;>j($^T=lvj^Lh2Pc9q*4`U)V->U`-c=ArP!tCwcIHlj-O zFKwNj3N)_=%dT8VgM7(w;=WtE*IYw-ptA_sU2J3fnQIzvZef~wKO8vpoAmBPuO#Hl z&SN>5e!*-}fO8^>at4v9;LjE|-_~JBLyXXSqwQ6;GY6iqmGAwkkC4p$mLxk^9E5Le z2*E@SD!+X0XN1Id5`fIB;+g<$k<{2_I)^h$Rng^#hvmlTZUK@OuQa-DJg0AWh)EGD 
z-^FfzI~+Ij_TTIEt$dDfAq_3ON^pK<$*>l@jN-66m&LbN@Q?tMx1s$?rhSb4o^Sc^yRu3Go3@)Du@N($CeLj&bW}RO$ZveAcSYYiOY2i5yF< z&qSb%vEgC$2=`Q#eXybT6>^QSvCDVjyjB0RPf$#~oNrg{6@{;jU(<^w<6Vus zbvgRK&Sxym6B*_SEQKZnR|W%7YI3TefCuwGe&}RO`kj_eK|jm}M-I;{cEY9&j!^r9H4 z;29&s(p}MBv@S}_@i9O5D7U|Sj8Xhys?PS3u=ZU2ZOG(pk34C89M3XXEG6ej|1}}M za(Z`Y_f*0jF59M1ZQd$~M;q5bSwao@O0Za-+yUk;zu~FLq#}90s7fAYUF25%)3XLXf%-yIlwDHktNC*trUbsTvpJ8KAp8j~a zBI!|kKfHbAX<*{2hU1OUtCz(ej#;N?US3!5IDO9@%(0D2!yc&y5S`E&m)1cK$ajnv zw!7ie4~V`sRRdF;?J|c5;JdT{!8+gTxV>@s?^`*scDdPVeEx!)L=_36(;>L+mR1NX zo1F(e*R8;$L0v6k6@W>w(9r&q1e(&gKl(ns_h;s^wIr<}KR_GD`3`W`E%lsta zPv32IDkQz+5T*j+>P;mMYv20plUA$m`3Zv`&nx=+*RI_1jar0zsdSa3f8wG_8QA<6 z4J3nFhM^J_9of)0X`iF+H@Uhg}9LW8D7p3Fl?Ew@GD*KfrG_} zL>5DLJ^vZ&mf@PMztlm5!qJ(dTvy$kB{B2lN*U`8zU+}4Vm<4`gO)#@r%#mxYrKx! zQ;_yJDG&Q#ninbW%E8xtp42yCM5iM~0tH>C2n{-Uc~BP3ocho0v~Nzxj~`tqbrh+S!H0)HzWFaLe%zr%t14V{U_ zujjX}AASTcNBybCsLUbehl4Z*v(X%Q36@g#GLaz3A1=|SW&RaN4l~P|w=~A(JGhs; z$Zao4x!b=#yMlv$FK12+gMD#0jaaCR9In5?A&nPdkR;3}MvJ8&LxQ^Di^)a}h*>%G zcdAFp8Sy2%A V&hOLO#`y1GvNFGAhBLXB{C_skKt=!n literal 0 HcmV?d00001 diff --git a/zoo_tensorflow/Docs/EfficientNetLite.md b/zoo_tensorflow/Docs/EfficientNetLite.md new file mode 100755 index 0000000..6fb4b51 --- /dev/null +++ b/zoo_tensorflow/Docs/EfficientNetLite.md @@ -0,0 +1,49 @@ +# EfficientNet Lite-0 + +## Setup AI Model Efficiency Toolkit (AIMET) +Please [install and setup AIMET](../../README.md#install-aimet) before proceeding further. + +## Additional Dependencies +### Setup TensorFlow TPU repo +- Clone the [TensorFlow TPU repo](https://github.com/tensorflow/tpu) + `git clone https://github.com/tensorflow/tpu.git` +- Append the repo location to your `PYTHONPATH` with the following: + `export PYTHONPATH=$PYTHONPATH:/tpu/models/official/efficientnet` + +## Obtaining model checkpoint and dataset +- The original EfficientNet Lite-0 checkpoint can be downloaded here: + - https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet/lite +- Optimized EfficientNet Lite-0 checkpoint can be downloaded from [Releases](/../../releases). 
+- ImageNet can be downloaded here: + - http://www.image-net.org/ + + +## Usage +- To run evaluation with QuantSim in AIMET, use the following +```bash +python efficientnet_quanteval.py + --model-name=efficientnet-lite0 + --checkpoint-path= + --imagenet-eval-glob= + --imagenet-eval-label= + --quantsim-config-file= +``` + +- If you are using a model checkpoint which has Batch Norms already folded (such as the optimized model checkpoint), please specify the `--ckpt-bn-folded` flag: +```bash +python efficientnet_quanteval.py + --model-name=efficientnet-lite0 + --checkpoint-path= + --imagenet-eval-glob= + --imagenet-eval-label= + --quantsim-config-file= + --ckpt-bn-folded +``` + +## Quantizer Op Assumptions +In the evaluation script included, we have used the default config file, which configures the quantizer ops with the following assumptions: +- Weight quantization: 8 bits, asymmetric quantization +- Bias parameters are not quantized +- Activation quantization: 8 bits, asymmetric quantization +- Model inputs are not quantized +- Operations which shuffle data such as reshape or transpose do not require additional quantizers \ No newline at end of file diff --git a/zoo_tensorflow/Docs/MobileNetV2.md b/zoo_tensorflow/Docs/MobileNetV2.md new file mode 100755 index 0000000..4db23cd --- /dev/null +++ b/zoo_tensorflow/Docs/MobileNetV2.md @@ -0,0 +1,50 @@ +# Mobilenetv2 1.4 + +## Setup AI Model Efficiency Toolkit (AIMET) +Please [install and setup AIMET](../../README.md#install-aimet) before proceeding further. + +## Additional Dependencies + +### Setup TensorFlow Models repo +- Clone the [TensorFlow Models repo](https://github.com/tensorflow/models) + `git clone https://github.com/tensorflow/models.git` + +- checkout this commit id: + `git checkout 104488e40bc2e60114ec0212e4e763b08015ef97` + +- Append the repo location to your `PYTHONPATH` with the following: + `export PYTHONPATH=$PYTHONPATH:/research/slim` + +## Obtaining model checkpoint and dataset +- The optimized Mobilenet v2 1.4 checkpoint can be downloaded from [Releases](/../../releases). 
+- ImageNet can be downloaded here: + - http://www.image-net.org/ + +## Usage +- To run evaluation with QuantSim in AIMET, use the following: +```bash +python mobilenet_v2_140_quanteval.py \ + --model-name=mobilenet_v2_140 \ + --checkpoint-path= \ + --dataset-dir= \ + --quantsim-config-file= +``` + +- If you are using a model checkpoint which has Batch Norms already folded (such as the optimized model checkpoint), please specify the `--ckpt-bn-folded` flag: + +```bash +python mobilenet_v2_140_quanteval.py \ + --model-name=mobilenet_v2_140 \ + --checkpoint-path= \ + --dataset-dir= \ + --quantsim-config-file= + --ckpt-bn-folded +``` + +## Quantizer Op Assumptions +In the evaluation script included, we have used the default config file, which configures the quantizer ops with the following assumptions: +- Weight quantization: 8 bits, asymmetric quantization +- Bias parameters are not quantized +- Activation quantization: 8 bits, asymmetric quantization +- Model inputs are not quantized +- Operations which shuffle data such as reshape or transpose do not require additional quantizers \ No newline at end of file diff --git a/zoo_tensorflow/Docs/PoseEstimation.md b/zoo_tensorflow/Docs/PoseEstimation.md new file mode 100644 index 0000000..da4a076 --- /dev/null +++ b/zoo_tensorflow/Docs/PoseEstimation.md @@ -0,0 +1,53 @@ +# Pose Estimation + +## Setup AI Model Efficiency Toolkit (AIMET) +Please [install and setup AIMET](../../README.md#install-aimet) before proceeding further. + +## Additional Dependencies + +| Package | Version | +| :---------: | :-----: | +| pycocotools | 2.0.2 | +| scipy | 1.1.0 | + +### Adding dependencies within Docker Image + +- If you are using a docker image, e.g. AIMET development docker, please add the following lines to the Dockerfile and rebuild the Docker image + +```dockerfile +RUN pip install git+https://github.com/cocodataset/cocoapi.git#subdirectory=PythonAPI +RUN pip install scipy==1.1.0 +``` + +## Obtaining model weights and dataset + +- The pose estimation model can be downloaded here: + - + pose_estimation.tar.gz + +- This model has been compressed and its weights are optimized by applying DFQ + (Data Free Quantization). + +- coco dataset can be downloaded here: + - COCO 2014 Val images + - + COCO 2014 Train/Val annotations + + + +## Usage + +- The program requires two arguments to run: model_meta_file_dir, coco_path. These are positional + arguments so you must specify the arguments in order. + + ```bash + python ./examples/pose_estimation_quanteval.py + ``` + +- We only support evaluation on COCO 2014 val images with person keypoints. + +- The results reported was evaluation on the whole dataset, which contains over 40k + images and takes 15+ hours on a single RTX 2080Ti GPU. So in case you want to run + a faster evaluation, specifiy num_imgs argument to the second call with a + small number to evaluate_session so that you run evaluation only on a + partial dataset. \ No newline at end of file diff --git a/zoo_tensorflow/Docs/ResNet50.md b/zoo_tensorflow/Docs/ResNet50.md new file mode 100755 index 0000000..c7721bc --- /dev/null +++ b/zoo_tensorflow/Docs/ResNet50.md @@ -0,0 +1,62 @@ +# ResNet 50 + +## Setup AI Model Efficiency Toolkit (AIMET) +Please [install and setup AIMET](../../README.md#install-aimet) before proceeding further. 
+ +## Additional Dependencies + +### Setup TensorFlow Models repo + +- Clone the [TensorFlow Models repo](https://github.com/tensorflow/models) + + `git clone https://github.com/tensorflow/models.git` + +- checkout this commit id: + + `git checkout 104488e40bc2e60114ec0212e4e763b08015ef97` + +- Append the repo location to your `PYTHONPATH` with the following: + + `export PYTHONPATH=$PYTHONPATH:/research/slim` + + + +## Obtaining model checkpoint and dataset + +- The optimized ResNet 50 checkpoint can be downloaded from [Releases](/../../releases). + +- ImageNet can be downloaded here: + - http://www.image-net.org/ + + + +## Usage + +- To run evaluation with QuantSim in AIMET, use the following + +```bash +python resnet_v1_50.py \ + --model-name=resnet_v1_50 \ + --checkpoint-path= \ + --dataset-dir= \ + --quantsim-config-file= +``` + +- If you are using a model checkpoint which has Batch Norms already folded (such as the optimized model checkpoint), please specify the `--ckpt-bn-folded` flag: + +```bash +python resnet_v1_50.py \ + --model-name=resnet_v1_50 \ + --checkpoint-path= \ + --dataset-dir= \ + --quantsim-config-file= + --ckpt-bn-folded +``` + +## Quantizer Op Assumptions +In the evaluation script included, we have used the default config file, which configures the quantizer ops with the following assumptions: +- Weight quantization: 8 bits, asymmetric quantization +- Bias parameters are not quantized +- Activation quantization: 8 bits, asymmetric quantization +- Model inputs are not quantized +- Operations which shuffle data such as reshape or transpose do not require additional quantizers diff --git a/zoo_tensorflow/Docs/RetinaNet.md b/zoo_tensorflow/Docs/RetinaNet.md new file mode 100644 index 0000000..ce8db8b --- /dev/null +++ b/zoo_tensorflow/Docs/RetinaNet.md @@ -0,0 +1,62 @@ +# RetinaNet + +## Setup AI Model Efficiency Toolkit (AIMET) +Please [install and setup AIMET](../../README.md#install-aimet) before proceeding further. + +## Additional Dependencies + +| Package | +| :-------------: | +| keras-retinanet | +| pycocotools | + +### Setup RetinaNet Repo + +- Clone the RetinaNet repository from github: https://github.com/fizyr/keras-retinanet + + ```git clone https://github.com/fizyr/keras-retinanet.git ``` + + Within the cloned repository, checkout the commit corresponding to pre-tf2.0. The included example scripts only works for TF 1.x. + + ```git checkout 08af308d01a8f22dc286d62bc26c8496e1ff6539``` + + Install keras-retinanet and dependencies using by running, + + ```pip install . --user``` + +### Pip install pycocotools + +- Install pycocotools by running the following: + ```bash + pip install --user git+https://github.com/cocodataset/cocoapi.git#subdirectory=PythonAPI + ``` +### Adding dependencies within Docker Image + +- If you are using a docker image, e.g. AIMET development docker, please add the following lines to the Dockerfile and rebuild the Docker image + +```dockerfile +RUN git clone https://github.com/fizyr/keras-retinanet.git /tmp/keras-retinanet/ +RUN cd /tmp/keras-retinanet/ && git checkout 08af308d01a8f22dc286d62bc26c8496e1ff6539 +RUN cd /tmp/keras-retinanet/ && pip install . 
+RUN pip install git+https://github.com/cocodataset/cocoapi.git#subdirectory=PythonAPI +``` + + + +## Obtaining model weights and dataset + +- The original pre-trained keras retinanet model can be downloaded here: + - RetinaNet pre-trained model +- coco dataset can be downloaded here: + - http://cocodataset.org + + + +## Usage +- The example script requires paths to coco dataset and keras retinanet model (look at the above *Obtaining model weights and dataset* instructions to download). +- There are two actions ```retinanet_quanteval.py``` can perform, ```eval_original``` will evaluate the accuracy of the original model, while ```eval_quantized``` will quantize the original model and evaluate the accuracy on the quantized model +``` +python ./examples/retinanet_quanteval.py coco --action eval_original + +python ./examples/retinanet_quanteval.py coco --action eval_quantized +``` diff --git a/zoo_tensorflow/Docs/SRGAN.md b/zoo_tensorflow/Docs/SRGAN.md new file mode 100644 index 0000000..ab12fb1 --- /dev/null +++ b/zoo_tensorflow/Docs/SRGAN.md @@ -0,0 +1,76 @@ +# SRGAN (Super Resolution) + +## Setup AI Model Efficiency Toolkit (AIMET) +Please [install and setup AIMET](../../README.md#install-aimet) before proceeding further. + +## Additional Dependencies + +| Package | Version | +| :----------: | :-----: | +| scikit-image | 0.16.2 | +| mmcv | 1.2.0 | + +### Setup Super-resolution repo + +- Clone the krasserm/super-resolution repo + + `git clone https://github.com/krasserm/super-resolution.git` + +- Append the repo location to your `PYTHONPATH` with the following: + + `export PYTHONPATH=/super-resolution:$PYTHONPATH` + + + +### Adding dependencies within Docker Image + +- If you are using a docker image, e.g. AIMET development docker, please add the following lines to the Dockerfile and rebuild the Docker image + +```dockerfile +RUN pip install scikit-image==0.16.2 +RUN pip install mmcv==1.2.0 +``` + + + +## Obtaining model weights and dataset + +- The SRGAN model can be downloaded from: + - krasserm/super-resolution +- Three benchmark dataset can be downloaded here: + - [Set5](https://uofi.box.com/shared/static/kfahv87nfe8ax910l85dksyl2q212voc.zip) + - [Set14](https://uofi.box.com/shared/static/igsnfieh4lz68l926l8xbklwsnnk8we9.zip) + - [BSD100](https://uofi.box.com/shared/static/qgctsplb8txrksm9to9x01zfa4m61ngq.zip) +- If you want to use custom high resolution images, one way to generate corresponding low resolution images can be found at this issue + - with a Python version of MATLAB `imresize` function available here + + + +## Usage + +- The `srgan_quanteval.py` script requires two arguments to run: weights_path, images_path. + These are positional arguments so you just have to specify the arguments in order. + + ```bash + python ./zoo_tensorflow/examples/srgan_quanteval.py [--options] + ``` + +- we only support 4x super resolution on .png images. So make sure you high resolution images are 4x the dimension of you low resolution images. If you are using one of the benchmark dataset, please use images under `image_SRF_4` directory + +- We assume low and high resolution images are both present under the same directory, + + with images following naming conventions: + + - low resolution images will have file name suffix: `LR.png` + - e.g. `people_LR.png` + - high resolution images will have file name suffix: `HR.png` + - e.g. 
`people_HR.png` + + +## Quantizer Op Assumptions +In the evaluation script included, we have modified activation bitwidth, the configuration looks like below: +- Weight quantization: 8 bits, asymmetric quantization +- Bias parameters are not quantized +- Activation quantization: 16 bits, asymmetric quantization +- Model inputs are not quantized +- Bias Correction and Cross Layer Equalization have been applied diff --git a/zoo_tensorflow/Docs/SSDMobileNetV2.md b/zoo_tensorflow/Docs/SSDMobileNetV2.md new file mode 100644 index 0000000..671c3b6 --- /dev/null +++ b/zoo_tensorflow/Docs/SSDMobileNetV2.md @@ -0,0 +1,142 @@ +# SSD MobileNet v2 + +## Setup AI Model Efficiency Toolkit (AIMET) +Please [install and setup AIMET](../../README.md#install-aimet) before proceeding further. + +## Obtaining model checkpoint and dataset + +- SSD MobileNet v2 checkpoint used for AIMET quantization can be downloaded from release page +- Or you could follow the steps below to obtain the checkpoint + +### export inference graph + +The following steps are need to have a model ready for AIMET quantization + +- ```bash + git clone https://github.com/tensorflow/models.git + cd models && git checkout r1.12.0 + cd research && protoc object_detection/protos/*.proto --python_out=. + ``` + +- Download [ssd_mobilenet_v2](http://download.tensorflow.org/models/object_detection/ssd_mobilenet_v2_quantized_300x300_coco_2019_01_03.tar.gz) + + - `tar xfvz ssd_mobilenet_v2_quantized_300x300_coco_2019_01_03.tar.gz` + + - **remove** following parameters from `pipeline.config` that come with the tarball + + ``` + graph_rewriter { + quantization { + delay: 48000 + weight_bits: 8 + activation_bits: 8 + } + ``` +- Add the following code snippet to [Line 147, models/research/object_detection/export_inference_graph.py](https://github.com/tensorflow/models/blob/r1.12.0/research/object_detection/export_inference_graph.py#L147) + + ```python + import os + saver = tf.train.Saver() + with tf.Session() as sess: + saver.restore(sess, os.path.join(FLAGS.output_directory, "model.ckpt")) + aimet_model_output_dir = os.path.join(FLAGS.output_directory, "AIMET") + os.mkdir(aimet_model_output_dir) + saver.save(sess, os.path.join(aimet_model_output_dir, "model.ckpt") + ``` + +- tensorflow v1.10 is need to run the script, we could use the offical tensorflow 1.10.1 docker image + + - ```bash + docker pull tensorflow/tensorflow:1.10.1-devel-py3 + export WORKSPACE= + docker run -it --rm -v $WORKSPACE:$WORKSPACE tensorflow/tensorflow:1.10.1-devel-py3 + ``` + +- run `export_inference_graph.py` to obtain model checkpoint ready for AIMET quantization + + ```bash + cd models/research + export PYTHONPATH=$PYTHONPATH:`pwd`:`pwd`/slim + python ./object_detection/export_inference_graph.py \ + --input_type image_tensor \ + --pipeline_config_path \ + --trained_checkpoint_prefix \ + --output_directory \ + ``` + + - model checkpoint will be available at `/AIMET/model.ckpt` + +### COCO dataset TFRecord + +TFRecord format of COCO dataset is need + +- [download_and_preprocess_mscoco.sh](https://github.com/tensorflow/models/blob/master/research/object_detection/dataset_tools/download_and_preprocess_mscoco.sh) can be used to download and convert coco dataset into TFRecord + + ```bash + git clone https://github.com/tensorflow/models.git + git checkout master + cd models/research/object_detection/dataset_tools + ./download_and_preprocess_mscoco.sh + ``` + +- If COCO dataset is already available or you want to download COCO dataset separately + - COCO dataset can be download 
here: [COCO](https://cocodataset.org/#download) + - Please download the 2017 Version + - [create_coco_tf_record.py](https://github.com/tensorflow/models/blob/master/research/object_detection/dataset_tools/create_coco_tf_record.py) can be used to convert dataset into TFRecord + + + +## Additional Dependencies + +| Package | +| :---------------: | +| tensorflow/models | +| pycocotools | + +### Setup models Repo + +- Clone the tensorflow models repository from github: + + ```bash + git clone https://github.com/tensorflow/models.git + cd models && git checkout r1.12.0 + ``` + +- Append the repo location to your `PYTHONPATH` by doing the following: + + `export PYTHONPATH=/models/research:$PYTHONPATH` + +### Pip install pycocotools + +- Install pycocotools by running the following: + + ```bash + pip install --user git+https://github.com/cocodataset/cocoapi.git#subdirectory=PythonAPI + ``` + +### Adding dependencies within Docker Image + +- If you are using a docker image, e.g. AIMET development docker, please add the following lines to the Dockerfile and rebuild the Docker image + +```dockerfile +RUN pip install git+https://github.com/cocodataset/cocoapi.git#subdirectory=PythonAPI +``` + + + +## Usage +- `ssd_mobilenet_v2_quanteval.py` has four required arguments, an example usage is shown below +```bash +./ssd_mobilenet_v2_quanteval.py --model-checkpoint /model.ckpt --dataset-dir --TFRecord-file-pattern 'coco_val.record-*-of-00010' --annotation-json-file /instances_val2017.json +``` + +- `--quantsim-output-dir` option can be used if want to save the quantized graph + + + +## Quantizer Op Assumptions +In the evaluation script included, we have manually configured the quantizer ops with the following assumptions: +- Weight quantization: 8 bits, asymmetric quantization +- Bias parameters are not quantized +- Activation quantization: 8 bits, asymmetric quantization +- Model inputs are not quantized \ No newline at end of file diff --git a/zoo_tensorflow/examples/efficientnet_quanteval.py b/zoo_tensorflow/examples/efficientnet_quanteval.py new file mode 100755 index 0000000..2ea64c8 --- /dev/null +++ b/zoo_tensorflow/examples/efficientnet_quanteval.py @@ -0,0 +1,170 @@ +#!/usr/bin/env python3.6 +# -*- mode: python -*- +# ============================================================================= +# @@-COPYRIGHT-START-@@ +# +# Copyright (c) 2020 of Qualcomm Innovation Center, Inc. All rights reserved. 
+# +# @@-COPYRIGHT-END-@@ +# ============================================================================= + +import os +import sys +import json +import argparse + +import numpy as np +import tensorflow as tf + +import aimet_common.defs +from aimet_tensorflow import quantsim +from aimet_tensorflow.quantsim import save_checkpoint, QuantizationSimModel +from aimet_tensorflow.batch_norm_fold import fold_all_batch_norms + +import model_builder_factory +import preprocessing +import utils +import eval_ckpt_main + +class EvalCkptDriver(eval_ckpt_main.EvalCkptDriver): + + def build_dataset(self, filenames, labels, is_training): + """Wrap build_dataset function to create an initializable iterator rather than a one shot iterator.""" + make_one_shot_iterator = tf.data.Dataset.make_one_shot_iterator + tf.data.Dataset.make_one_shot_iterator = tf.data.Dataset.make_initializable_iterator + r = super().build_dataset(filenames, labels, is_training) + tf.data.Dataset.make_one_shot_iterator = make_one_shot_iterator + + return r + + def run_inference(self, + ckpt_path, + image_files, + labels, + enable_ema=True, + export_ckpt=None): + """Build and run inference on the target images and labels.""" + label_offset = 1 if self.include_background_label else 0 + with tf.Graph().as_default(): + sess = tf.Session() + images, labels = self.build_dataset(image_files, labels, False) + probs = self.build_model(images, is_training=False) + if isinstance(probs, tuple): + probs = probs[0] + + if not self.ckpt_bn_folded: + saver = tf.train.Saver() + saver.restore(sess, ckpt_path) + else: + sess.run(tf.global_variables_initializer()) + + # Fold all BatchNorms before QuantSim + sess, folded_pairs = fold_all_batch_norms(sess, ['IteratorGetNext'], ['logits']) + + if self.ckpt_bn_folded: + with sess.graph.as_default(): + checkpoint = ckpt_path + saver = tf.train.Saver() + saver.restore(sess, checkpoint) + + sess.run('MakeIterator') + + # Define an eval function to use during compute encodings + def eval_func(sess, iterations): + sess.run('MakeIterator') + for _ in range(iterations): + out_probs = sess.run('Squeeze:0') + + # Select the right quant_scheme + if self.quant_scheme == 'range_learning_tf': + quant_scheme = aimet_common.defs.QuantScheme.training_range_learning_with_tf_init + elif self.quant_scheme == 'range_learning_tf_enhanced': + quant_scheme = aimet_common.defs.QuantScheme.training_range_learning_with_tf_enhanced_init + elif self.quant_scheme == 'tf': + quant_scheme = aimet_common.defs.QuantScheme.post_training_tf + elif self.quant_scheme == 'tf_enhanced': + quant_scheme = aimet_common.defs.QuantScheme.post_training_tf_enhanced + else: + raise ValueError("Got unrecognized quant_scheme: " + self.quant_scheme) + + # Create QuantizationSimModel + sim = QuantizationSimModel( + session=sess, + starting_op_names=['IteratorGetNext'], + output_op_names=['logits'], + quant_scheme=quant_scheme, + rounding_mode=self.round_mode, + default_output_bw=self.default_output_bw, + default_param_bw=self.default_param_bw, + config_file=self.quantsim_config_file, + ) + + # Run compute_encodings + sim.compute_encodings(eval_func, + forward_pass_callback_args=500 + ) + + # Run final evaluation + sess = sim.session + sess.run('MakeIterator') + prediction_idx = [] + prediction_prob = [] + for _ in range(len(image_files) // self.batch_size): + out_probs = sess.run('Squeeze:0') + idx = np.argsort(out_probs)[::-1] + prediction_idx.append(idx[:5] - label_offset) + prediction_prob.append([out_probs[pid] for pid in idx[:5]]) + + # Return the top 5 
predictions (idx and prob) for each image. + return prediction_idx, prediction_prob + + +def run_evaluation(args): + print("Running evaluation") + driver = EvalCkptDriver( + model_name=args.model_name, + batch_size=1, + image_size=model_builder_factory.get_model_input_size(args.model_name), + include_background_label=args.include_background_label, + advprop_preprocessing=args.advprop_preprocessing) + + driver.quant_scheme = args.quant_scheme + driver.round_mode = args.round_mode + driver.default_output_bw = args.default_output_bw + driver.default_param_bw = args.default_param_bw + driver.quantsim_config_file = args.quantsim_config_file + driver.ckpt_bn_folded = args.ckpt_bn_folded + + driver.eval_imagenet(args.checkpoint_path, args.imagenet_eval_glob, + args.imagenet_eval_label, 50000, + args.enable_ema, args.export_ckpt) + +def parse_args(args): + """ Parse the arguments. + """ + parser = argparse.ArgumentParser(description='Evaluation script for an Efficientnet network.') + + parser.add_argument('--model-name', help='Name of model to eval.', default='efficientnet-lite0') + parser.add_argument('--checkpoint-path', help='Path to checkpoint to load from.') + parser.add_argument('--imagenet-eval-glob', help='Imagenet eval image glob, such as /imagenet/ILSVRC2012*.JPEG') + parser.add_argument('--imagenet-eval-label', help='Imagenet eval label file path, such as /imagenet/ILSVRC2012_validation_ground_truth.txt') + parser.add_argument('--include-background-label', help='Whether to include background as label #0', action='store_true') + parser.add_argument('--advprop-preprocessing', help='Whether to use AdvProp preprocessing', action='store_true') + parser.add_argument('--enable-ema', help='Enable exponential moving average.', default=True) + parser.add_argument('--export-ckpt', help='Exported ckpt for eval graph.', default=None) + + parser.add_argument('--ckpt-bn-folded', help='Use this flag to specify whether checkpoint has batchnorms folded already or not.', action='store_true') + parser.add_argument('--quant-scheme', help='Quant scheme to use for quantization (tf, tf_enhanced, range_learning_tf, range_learning_tf_enhanced).', default='tf') + parser.add_argument('--round-mode', help='Round mode for quantization.', default='nearest') + parser.add_argument('--default-output-bw', help='Default output bitwidth for quantization.', type=int, default=8) + parser.add_argument('--default-param-bw', help='Default parameter bitwidth for quantization.', type=int, default=8) + parser.add_argument('--quantsim-config-file', help='Quantsim configuration file.', default=None) + + return parser.parse_args(args) + +def main(args=None): + args = parse_args(args) + run_evaluation(args) + +if __name__ == '__main__': + main() \ No newline at end of file diff --git a/zoo_tensorflow/examples/mobilenet_v2_140_quanteval.py b/zoo_tensorflow/examples/mobilenet_v2_140_quanteval.py new file mode 100755 index 0000000..6f50d8d --- /dev/null +++ b/zoo_tensorflow/examples/mobilenet_v2_140_quanteval.py @@ -0,0 +1,174 @@ +#!/usr/bin/env python3.6 +# -*- mode: python -*- +# ============================================================================= +# @@-COPYRIGHT-START-@@ +# +# Copyright (c) 2020 of Qualcomm Innovation Center, Inc. All rights reserved. 
+# +# @@-COPYRIGHT-END-@@ +# ============================================================================= + +import os +import sys +import json +import argparse +from tqdm import tqdm +from glob import glob + +import numpy as np +import tensorflow as tf + +import aimet_common.defs +from aimet_tensorflow import quantsim +from aimet_tensorflow.quantsim import save_checkpoint, QuantizationSimModel +from aimet_tensorflow.batch_norm_fold import fold_all_batch_norms + +from nets import nets_factory +from preprocessing import preprocessing_factory +from deployment import model_deploy +from datasets import dataset_factory + +def wrap_preprocessing(preprocessing, height, width, num_classes, labels_offset): + '''Wrap preprocessing function to do parsing of TFrecords. + ''' + def parse(serialized_example): + features = tf.parse_single_example(serialized_example, features={ + 'image/class/label': tf.FixedLenFeature([], tf.int64), + 'image/encoded': tf.FixedLenFeature([], tf.string) + }) + + image_data = features['image/encoded'] + image = tf.image.decode_jpeg(image_data, channels=3) + label = tf.cast(features['image/class/label'], tf.int32) + label = label - labels_offset + + labels = tf.one_hot(indices=label, depth=num_classes) + image = preprocessing(image, height, width) + return image, labels + return parse + +def run_evaluation(args): + # Build graph definition + with tf.Graph().as_default(): + # Create iterator + tf_records = glob(args.dataset_dir + '/validation*') + preprocessing_fn = preprocessing_factory.get_preprocessing(args.model_name, is_training=False) + parse_function = wrap_preprocessing(preprocessing_fn, height=args.image_size, width=args.image_size, num_classes=(1001 - args.labels_offset), labels_offset=args.labels_offset) + + dataset = tf.data.TFRecordDataset(tf_records).repeat(1) + dataset = dataset.map(parse_function, num_parallel_calls=1).apply(tf.contrib.data.batch_and_drop_remainder(args.batch_size)) + iterator = dataset.make_initializable_iterator() + images, labels = iterator.get_next() + + network_fn = nets_factory.get_network_fn(args.model_name, num_classes=(1001 - args.labels_offset), is_training=False) + with tf.device('/cpu:0'): + images = tf.placeholder_with_default(images, + shape=(None, args.image_size, args.image_size, 3), + name='input') + labels = tf.placeholder_with_default(labels, + shape=(None, 1001 - args.labels_offset), + name='labels') + logits, end_points = network_fn(images) + confidences = tf.nn.softmax(logits, axis=1, name='confidences') + categorical_preds = tf.argmax(confidences, axis=1, name='categorical_preds') + categorical_labels = tf.argmax(labels, axis=1, name='categorical_labels') + correct_predictions = tf.equal(categorical_labels, categorical_preds) + top1_acc = tf.reduce_mean(tf.cast(correct_predictions, tf.float32), name='top1-acc') + top5_acc = tf.reduce_mean(tf.cast(tf.nn.in_top_k(predictions=confidences, + targets=tf.cast(categorical_labels, tf.int32), + k=5), tf.float32), name='top5-acc') + + saver = tf.train.Saver() + sess = tf.Session() + + # Load model from checkpoint + if not args.ckpt_bn_folded: + saver.restore(sess, args.checkpoint_path) + else: + sess.run(tf.global_variables_initializer()) + + # Fold all BatchNorms before QuantSim + sess, folded_pairs = fold_all_batch_norms(sess, ['IteratorGetNext'], [logits.name[:-2]]) + + if args.ckpt_bn_folded: + with sess.graph.as_default(): + saver = tf.train.Saver() + saver.restore(sess, args.checkpoint_path) + + + # Define eval_func to use for compute encodings in QuantSim + def 
eval_func(session, iterations): + cnt = 0 + avg_acc_top1 = 0 + session.run('MakeIterator') + while cnt < iterations or iterations == -1: + try: + avg_acc_top1 += session.run('top1-acc:0') + cnt += 1 + except: + return avg_acc_top1 / cnt + + return avg_acc_top1 / cnt + + # Select the right quant_scheme + if args.quant_scheme == 'range_learning_tf': + quant_scheme = aimet_common.defs.QuantScheme.training_range_learning_with_tf_init + elif args.quant_scheme == 'range_learning_tf_enhanced': + quant_scheme = aimet_common.defs.QuantScheme.training_range_learning_with_tf_enhanced_init + elif args.quant_scheme == 'tf': + quant_scheme = aimet_common.defs.QuantScheme.post_training_tf + elif args.quant_scheme == 'tf_enhanced': + quant_scheme = aimet_common.defs.QuantScheme.post_training_tf_enhanced + else: + raise ValueError("Got unrecognized quant_scheme: " + args.quant_scheme) + + # Create QuantizationSimModel + sim = QuantizationSimModel( + session=sess, + starting_op_names=['IteratorGetNext'], + output_op_names=[logits.name[:-2]], + quant_scheme=quant_scheme, + rounding_mode=args.round_mode, + default_output_bw=args.default_output_bw, + default_param_bw=args.default_param_bw, + config_file=args.quantsim_config_file, + ) + + # Run compute_encodings + sim.compute_encodings(eval_func, forward_pass_callback_args=args.encodings_iterations) + + # Run final evaluation + sess = sim.session + + top1_acc = eval_func(sess, -1) + print('Avg accuracy Top 1: {}'.format(top1_acc)) + + +def parse_args(args): + """ Parse the arguments. + """ + parser = argparse.ArgumentParser(description='Evaluation script for an MobileNetv2 network.') + + parser.add_argument('--model-name', help='Name of model to eval.', default='mobilenet_v2_140') + parser.add_argument('--checkpoint-path', help='Path to checkpoint to load from.') + parser.add_argument('--dataset-dir', help='Imagenet eval dataset directory.') + parser.add_argument('--labels-offset', help='Offset for whether to ignore background label', type=int, default=0) + parser.add_argument('--image-size', help='Image size.', type=int, default=224) + parser.add_argument('--batch-size', help='Batch size.', type=int, default=32) + + parser.add_argument('--ckpt-bn-folded', help='Use this flag to specify whether checkpoint has batchnorms folded already or not.', action='store_true') + parser.add_argument('--quant-scheme', help='Quant scheme to use for quantization (tf, tf_enhanced, range_learning_tf, range_learning_tf_enhanced).', default='tf') + parser.add_argument('--round-mode', help='Round mode for quantization.', default='nearest') + parser.add_argument('--default-output-bw', help='Default output bitwidth for quantization.', type=int, default=8) + parser.add_argument('--default-param-bw', help='Default parameter bitwidth for quantization.', type=int, default=8) + parser.add_argument('--quantsim-config-file', help='Quantsim configuration file.', default=None) + parser.add_argument('--encodings-iterations', help='Number of iterations to use for compute encodings during quantization.', default=500) + + return parser.parse_args(args) + +def main(args=None): + args = parse_args(args) + run_evaluation(args) + +if __name__ == '__main__': + main() \ No newline at end of file diff --git a/zoo_tensorflow/examples/pose_estimation_quanteval.py b/zoo_tensorflow/examples/pose_estimation_quanteval.py new file mode 100755 index 0000000..5436ba3 --- /dev/null +++ b/zoo_tensorflow/examples/pose_estimation_quanteval.py @@ -0,0 +1,474 @@ +#!/usr/bin/env python3.6 +# -*- mode: python -*- +# 
============================================================================= +# @@-COPYRIGHT-START-@@ +# +# Copyright (c) 2020 of Qualcomm Innovation Center, Inc. All rights reserved. +# +# @@-COPYRIGHT-END-@@ +# ============================================================================= + +import os +import math +import argparse +from functools import partial + +import cv2 +from scipy.ndimage.filters import gaussian_filter +import numpy as np +import tensorflow as tf +from pycocotools.coco import COCO +from pycocotools.cocoeval import COCOeval + +from aimet_tensorflow import quantsim +from aimet_tensorflow.utils import graph_saver + + +def non_maxium_suppression(map, thresh): + map_s = gaussian_filter(map, sigma=3) + + map_left = np.zeros(map_s.shape) + map_left[1:, :] = map_s[:-1, :] + map_right = np.zeros(map_s.shape) + map_right[:-1, :] = map_s[1:, :] + map_up = np.zeros(map_s.shape) + map_up[:, 1:] = map_s[:, :-1] + map_down = np.zeros(map_s.shape) + map_down[:, :-1] = map_s[:, 1:] + + peaks_binary = np.logical_and.reduce((map_s >= map_left, map_s >= map_right, map_s >= map_up, map_s >= map_down, + map_s > thresh)) + + peaks = zip(np.nonzero(peaks_binary)[1], np.nonzero(peaks_binary)[0]) # note reverse + peaks_with_score = [x + (map[x[1], x[0]],) for x in peaks] + + return peaks_with_score + + +def pad_image(img, stride, padding): + h = img.shape[0] + w = img.shape[1] + + pad = 4 * [None] + pad[0] = 0 # up + pad[1] = 0 # left + pad[2] = 0 if (h % stride == 0) else stride - (h % stride) # down + pad[3] = 0 if (w % stride == 0) else stride - (w % stride) # right + + img_padded = img + pad_up = np.tile(img_padded[0:1, :, :] * 0 + padding, (pad[0], 1, 1)) + img_padded = np.concatenate((pad_up, img_padded), axis=0) + pad_left = np.tile(img_padded[:, 0:1, :] * 0 + padding, (1, pad[1], 1)) + img_padded = np.concatenate((pad_left, img_padded), axis=1) + pad_down = np.tile(img_padded[-2:-1, :, :] * 0 + padding, (pad[2], 1, 1)) + img_padded = np.concatenate((img_padded, pad_down), axis=0) + pad_right = np.tile(img_padded[:, -2:-1, :] * 0 + padding, (1, pad[3], 1)) + img_padded = np.concatenate((img_padded, pad_right), axis=1) + + return img_padded, pad + + +def encode_input(image, scale, stride, padding): + image_scaled = cv2.resize(image, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC) + image_scaled_padded, pad = pad_image(image_scaled, stride, padding) + + return image_scaled_padded, pad + + +def decode_output(data, stride, padding, input_shape, image_shape): + output = np.transpose(np.squeeze(data), (1, 2, 0)) + output = cv2.resize(output, (0, 0), fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC) + output = output[:input_shape[0] - padding[2], :input_shape[1] - padding[3], :] + output = cv2.resize(output, (image_shape[1], image_shape[0]), interpolation=cv2.INTER_CUBIC) + + return output + + +def preprocess(image, transforms): + mean_bgr = [34.282957728666474, 32.441979567868017, 24.339757511312481] + + image = image.astype(np.float32) + + if 'bgr' in transforms: + if image.shape[0] == 3: + image = image[::-1, :, :] + elif image.shape[2] == 3: + image = image[:, :, ::-1] + + if 'tr' in transforms: + image = image.transpose((2, 0, 1)) + + if 'mean' in transforms: + image[0, :, :] -= mean_bgr[0] + image[1, :, :] -= mean_bgr[1] + image[2, :, :] -= mean_bgr[2] + + if 'addchannel' in transforms: + image = image[np.newaxis, :, :, :] + + if 'normalize' in transforms: + image = image / 256 - 0.5 + + return image + + +def run_session(session, output_names, input_name, image, 
fast=False): + scale_search = [1.] + crop = 368 + stride = 8 + padValue = 128 + + if fast: + scales = scale_search + else: + scales = [x * crop / image.shape[0] for x in scale_search] + + heatmaps, pafs = [], [] + for scale in scales: + if fast: + horiz = image.shape[0] < image.shape[1] + sz = (496, 384) if horiz else (384, 496) + image_encoded = cv2.resize(image, dsize=(int(sz[0] * scale), int(sz[1] * scale))) + else: + image_encoded, pad = encode_input(image, scale, stride, padValue) + image_encoded_ = preprocess(image_encoded, ['addchannel', 'normalize', 'bgr']) + + paf, heatmap = session.run(output_names, + feed_dict={session.graph.get_tensor_by_name(input_name): image_encoded_}) + + if fast: + paf = cv2.resize(paf[0], (image.shape[1], image.shape[0])) + heatmap = cv2.resize(heatmap[0], dsize=(image.shape[1], image.shape[0])) + else: + paf = paf.transpose((0, 3, 1, 2)) + heatmap = heatmap.transpose((0, 3, 1, 2)) + paf = decode_output(paf, stride, pad, image_encoded.shape, image.shape) + heatmap = decode_output(heatmap, stride, pad, image_encoded.shape, image.shape) + + pafs.append(paf) + heatmaps.append(heatmap) + + return np.asarray(heatmaps).mean(axis=0), np.asarray(pafs).mean(axis=0) + + +def get_keypoints(heatmap): + thre1 = 0.1 + keypoints_all = [] + keypoints_cnt = 0 + + for part in range(19 - 1): + keypoints = non_maxium_suppression(heatmap[:, :, part], thre1) + + id = range(keypoints_cnt, keypoints_cnt + len(keypoints)) + keypoints = [keypoints[i] + (id[i],) for i in range(len(id))] + + keypoints_all.append(keypoints) + keypoints_cnt += len(keypoints) + + return keypoints_all + + +def get_limb_consistancy(paf, start_keypoint, end_keypoint, image_h, div_num=10): + vec_key = np.subtract(end_keypoint[:2], start_keypoint[:2]) + vec_key_norm = math.sqrt(vec_key[0] * vec_key[0] + vec_key[1] * vec_key[1]) + if vec_key_norm == 0: + vec_key_norm = 1 + vec_key = np.divide(vec_key, vec_key_norm) + + vec_paf = list(zip(np.linspace(start_keypoint[0], end_keypoint[0], num=div_num).astype(int), + np.linspace(start_keypoint[1], end_keypoint[1], num=div_num).astype(int))) + + vec_paf_x = np.array([paf[vec_paf[k][1], vec_paf[k][0], 0] for k in range(div_num)]) + vec_paf_y = np.array([paf[vec_paf[k][1], vec_paf[k][0], 1] for k in range(div_num)]) + + vec_sims = np.multiply(vec_paf_x, vec_key[0]) + np.multiply(vec_paf_y, vec_key[1]) + vec_sims_prior = vec_sims.mean() + min(0.5 * image_h / vec_key_norm - 1, 0) + + return vec_sims, vec_sims_prior + + +def connect_keypoints(image_shape, keypoints, paf, limbs, limbsInds): + thre2 = 0.05 + connections = [] + + for k in range(len(limbsInds)): + paf_limb = paf[:, :, limbsInds[k]] + limb_strs = keypoints[limbs[k][0]] + limb_ends = keypoints[limbs[k][1]] + + if len(limb_strs) != 0 and len(limb_ends) != 0: + cands = [] + for i, limb_str in enumerate(limb_strs): + for j, limb_end in enumerate(limb_ends): + sims, sims_p = get_limb_consistancy(paf_limb, limb_str, limb_end, image_shape[0]) + + if len(np.where(sims > thre2)[0]) > int(0.8 * len(sims)) and sims_p > 0: + cands.append([i, j, sims_p]) + cands = sorted(cands, key=lambda x: x[2], reverse=True) + + connection = np.zeros((0, 3)) + visited_strs, visited_ends = [], [] + for cand in cands: + i, j, s = cand + if i not in visited_strs and j not in visited_ends: + connection = np.vstack([connection, [limb_strs[i][3], limb_ends[j][3], s]]) + visited_strs.append(i) + visited_ends.append(j) + + if len(connection) >= min(len(limb_strs), len(limb_ends)): + break + + connections.append(connection) + else: + 
connections.append([]) + + return connections + + +def create_skeletons(keypoints, connections, limbs): + # last number in each row is the total parts number of that person + # the second last number in each row is the score of the overall configuration + skeletons = -1 * np.ones((0, 20)) + keypoints_flatten = np.array([item for sublist in keypoints for item in sublist]) + + for k in range(len(limbs)): + if connections[k] != []: + detected_str = connections[k][:, 0] + detected_end = connections[k][:, 1] + limb_str, limb_end = np.array(limbs[k]) + + for i in range(len(connections[k])): + found = 0 + subset_idx = [-1, -1] + for j in range(len(skeletons)): + if skeletons[j][limb_str] == detected_str[i] or skeletons[j][limb_end] == detected_end[i]: + subset_idx[found] = j + found += 1 + + if found == 1: + j = subset_idx[0] + if skeletons[j][limb_end] != detected_end[i]: + skeletons[j][limb_end] = detected_end[i] + skeletons[j][-1] += 1 + skeletons[j][-2] += keypoints_flatten[detected_end[i].astype(int), 2] + connections[k][i][2] + elif found == 2: # if found 2 and disjoint, merge them + j1, j2 = subset_idx + + membership = ((skeletons[j1] >= 0).astype(int) + (skeletons[j2] >= 0).astype(int))[:-2] + if len(np.nonzero(membership == 2)[0]) == 0: # merge + skeletons[j1][:-2] += (skeletons[j2][:-2] + 1) + skeletons[j1][-2:] += skeletons[j2][-2:] + skeletons[j1][-2] += connections[k][i][2] + skeletons = np.delete(skeletons, j2, 0) + else: # as like found == 1 + skeletons[j1][limb_end] = detected_end[i] + skeletons[j1][-1] += 1 + skeletons[j1][-2] += keypoints_flatten[detected_end[i].astype(int), 2] + connections[k][i][2] + + # if find no partA in the subset, create a new subset + elif not found and k < 17: + row = -1 * np.ones(20) + row[limb_str] = detected_str[i] + row[limb_end] = detected_end[i] + row[-1] = 2 + row[-2] = sum(keypoints_flatten[connections[k][i, :2].astype(int), 2]) + connections[k][i][2] + skeletons = np.vstack([skeletons, row]) + + # delete some rows of subset which has few parts occur + deleteIdx = [] + for i in range(len(skeletons)): + if skeletons[i][-1] < 4 or skeletons[i][-2] / skeletons[i][-1] < 0.4: + deleteIdx.append(i) + skeletons = np.delete(skeletons, deleteIdx, axis=0) + + return {'keypoints': skeletons[:, :18], 'scores': skeletons[:, 18]} + + +def estimate_pose(image_shape, heatmap, paf): + # limbs as pair of keypoints: [start_keypoint, end_keypoint] keypoints index to heatmap matrix + limbs = [[1, 2], [1, 5], [2, 3], [3, 4], [5, 6], [6, 7], [1, 8], [8, 9], [9, 10], [1, 11], [11, 12], [12, 13], + [1, 0], [0, 14], [14, 16], [0, 15], [15, 17], [2, 16], [5, 17]] + # index where each limb stands in paf matrix. 
Two consecuitive indices for x and y component of paf + limbsInd = [[12, 13], [20, 21], [14, 15], [16, 17], [22, 23], [24, 25], [0, 1], [2, 3], [4, 5], [6, 7], [8, 9], + [10, 11], [28, 29], [30, 31], [34, 35], [32, 33], [36, 37], [18, 19], [26, 27]] + + keypoints = get_keypoints(heatmap) + + connections = connect_keypoints(image_shape, keypoints, paf, limbs, limbsInd) + + skeletons = create_skeletons(keypoints, connections, limbs) + + return skeletons, np.array([item for sublist in keypoints for item in sublist]) + + +def parse_results(skeletons, points): + coco_indices = [0, -1, 6, 8, 10, 5, 7, 9, 12, 14, 16, 11, 13, 15, 2, 1, 4, 3] + + skeletons_out, scores = [], [] + for score, keypoints in zip(skeletons['scores'], skeletons['keypoints']): + skeleton = [] + for p in range(len(keypoints)): + if p == 1: + continue + ind = int(keypoints[p]) + if ind >= 0: + point = {'x': points[ind, 0], 'y': points[ind, 1], 'score': points[ind, 2], 'id': coco_indices[p]} + skeleton.append(point) + + skeletons_out.append(skeleton) + scores.append(score) + + return {'skeletons': skeletons_out, 'scores': scores} + + +class COCOWrapper: + def __init__(self, coco_path, num_imgs=None): + self.coco_path = coco_path + self.num_imgs = num_imgs + # sys.path.append(self.coco_apth + "codes/PythonAPI") + + def get_images(self): + imgs = self.cocoGT.imgs.values() + + image_ids = sorted(map(lambda x: x['id'], self.cocoGT.imgs.values())) + if self.num_imgs: + image_ids = image_ids[:self.num_imgs] + imgs = list(filter(lambda x: x['id'] in image_ids, imgs)) + + return imgs + + def evaluate_json(self, obj): + # initialize COCO detections api + cocoDT = self.cocoGT.loadRes(obj) + + imgIds = sorted(self.cocoGT.getImgIds()) + if self.num_imgs: + imgIds = imgIds[:self.num_imgs] + + # running evaluation + cocoEval = COCOeval(self.cocoGT, cocoDT, 'keypoints') + cocoEval.params.imgIds = imgIds + cocoEval.evaluate() + cocoEval.accumulate() + cocoEval.summarize() + return cocoEval.stats[0::5] + + def get_results_json(self, results, imgs): + results_obj = [] + for img, result in list(zip(imgs, results)): + for score, skeleton in list(zip(result['scores'], result['skeletons'])): + obj = {'image_id': img['id'], 'category_id': 1, 'keypoints': np.zeros(shape=(3, 17))} + + for keypoint in skeleton: + obj['keypoints'][0, keypoint['id']] = keypoint['x'] - 0.5 + obj['keypoints'][1, keypoint['id']] = keypoint['y'] - 0.5 + obj['keypoints'][2, keypoint['id']] = 1 + obj['keypoints'] = list(np.reshape(obj['keypoints'], newshape=(51,), order='F')) + obj['score'] = score / len(skeleton) + + results_obj.append(obj) + + return results_obj + + @property + def cocoGT(self): + annType = 'keypoints' + prefix = 'person_keypoints' + print('Initializing demo for *%s* results.' 
% (annType)) + + # initialize COCO ground truth api + dataType = 'val2014' + annFile = os.path.join(self.coco_path, 'annotations/%s_%s.json' % (prefix, dataType)) + cocoGT = COCO(annFile) + + if not cocoGT: + raise AttributeError('COCO ground truth demo failed to initialize!') + + return cocoGT + + +def evaluate_session(session, + coco_path, + input_name, + output_names, + num_imgs=None, + fast=False): + coco = COCOWrapper(coco_path, num_imgs) + + results = [] + image_path = os.path.join(coco.coco_path, 'images/val2014/') + imgs = coco.get_images() + + for i, img in enumerate(imgs): + image = cv2.imread(image_path + img['file_name']) # B,G,R order + + heatmap, paf = run_session(session, output_names, input_name, image, fast) + + skeletons, keypoints = estimate_pose(image.shape, heatmap, paf) + results.append(parse_results(skeletons, keypoints)) + + try: + ans = coco.evaluate_json(coco.get_results_json(results, imgs)) + return ans + except: + return [0, 0] + + +def parse_args(): + parser = argparse.ArgumentParser(prog='pose_estimation_quanteval', + description='Evaluate the post quantized SRGAN model') + + parser.add_argument('model_dir', + help='The location where the the meta checkpoint is saved,' + 'should have .meta as file suffix', + type=str) + parser.add_argument('coco_path', + help='The location coco images and annotations are saved. ' + 'It assumes a folder structure containing two subdirectorys ' + '`images/val2014` and `annotations`. Right now only val2014 ' + 'dataset with person_keypoints are supported', + type=str) + parser.add_argument('--representative-datapath', + '-reprdata', + help='The location where representative data are stored. ' + 'The data will be used for computation of encodings', + type=str) + parser.add_argument('--quant-scheme', + '-qs', + help='Support two schemes for quantization: [`tf` or `tf_enhanced`],' + '`tf_enhanced` is used by default', + default='tf_enhanced', + choices=['tf', 'tf_enhanced'], + type=str) + + return parser.parse_args() + + +def pose_estimation_quanteval(args): + # load the model checkpoint from meta + sess = graph_saver.load_model_from_meta(args.model_dir) + + # create quantsim object which inserts quant ops between layers + sim = quantsim.QuantizationSimModel(sess, + starting_op_names=['input'], + output_op_names=['node184', 'node196'], + quant_scheme=args.quant_scheme) + + partial_eval = partial(evaluate_session, + input_name='input:0', + output_names=['node184_quantized:0', 'node196_quantized:0'], + num_imgs=500 + ) + sim.compute_encodings(partial_eval, args.coco_path) + + eval_num = evaluate_session(sim.session, + args.coco_path, + input_name='input:0', + output_names=['node184_quantized:0', 'node196_quantized:0'] + ) + print(f'The [mAP, mAR] results are: {eval_num}') + + +if __name__ == '__main__': + args = parse_args() + pose_estimation_quanteval(args) diff --git a/zoo_tensorflow/examples/retinanet_quanteval.py b/zoo_tensorflow/examples/retinanet_quanteval.py new file mode 100755 index 0000000..d9a1808 --- /dev/null +++ b/zoo_tensorflow/examples/retinanet_quanteval.py @@ -0,0 +1,181 @@ +#!/usr/bin/env python3.6 +# -*- mode: python -*- +# ============================================================================= +# @@-COPYRIGHT-START-@@ +# +# Copyright (c) 2020 of Qualcomm Innovation Center, Inc. All rights reserved. 
+# +# @@-COPYRIGHT-END-@@ +# ============================================================================= + +import os +import sys +import argparse +import progressbar +from glob import glob +from tqdm import tqdm + + +import tensorflow as tf +from keras import backend as K + +# Keras RetinaNet +from keras_retinanet.utils.image import read_image_bgr, preprocess_image, resize_image +from keras_retinanet.utils.coco_eval import evaluate_coco +from keras_retinanet import models + +# AIMET +from aimet_tensorflow import quantsim +from aimet_tensorflow.batch_norm_fold import fold_all_batch_norms +from aimet_tensorflow.quantsim import save_checkpoint, load_checkpoint + + + + +def quantize_retinanet(model_path, cocopath, action): + """ + Quantize the original RetinaNet model. + Loads the keras model. + Retrieve the back-end TF session and saves a checkpoint for quantized evaluatoin by AIMET + Invoke AIMET APIs to quantize the and save a quantized checkpoint - which includes quantize ops + :param model_path: Path to the downloaded keras retinanet model - read the docs for download path + :param cocopath: Path to the top level COCO dataset + :param action: eval_original or eval_quantized + :return: + """ + + model_path = os.path.join(model_path, 'resnet50_coco_best_v2.1.0.h5') + model = models.load_model(model_path, backbone_name='resnet50') + + # Note that AIMET APIs need TF session. So retrieve the TF session from the backend + session = K.get_session() + if action=="eval_original": + saver = tf.train.Saver() + saver.save(session, "./original_model.ckpt") + else: + in_tensor="input_1:0" + out_tensor = ['filtered_detections/map/TensorArrayStack/TensorArrayGatherV3:0', + 'filtered_detections/map/TensorArrayStack_1/TensorArrayGatherV3:0', + 'filtered_detections/map/TensorArrayStack_2/TensorArrayGatherV3:0'] + selected_ops = ["P" + str(i) + "/BiasAdd" for i in range(3, 8)] + session, folded_pairs = fold_all_batch_norms(session, [in_tensor.split(":")[0]], selected_ops) + sim = quantsim.QuantizationSimModel(session, [in_tensor.split(":")[0]], selected_ops) + def forward_pass(session2: tf.Session, args): + images_raw = glob(cocopath+"/images/train2017/*.jpg") + for idx in tqdm(range(10)): + image = read_image_bgr(images_raw[idx]) + image = preprocess_image(image) + image, scale = resize_image(image) + session2.run(out_tensor, feed_dict={in_tensor: [image]}) + + sim.compute_encodings(forward_pass, None) + save_checkpoint(sim, './quantzied_sim.ckpt', 'orig_quantsim_model') + + +assert(callable(progressbar.progressbar)), "Using wrong progressbar module, install 'progressbar2' instead." 
+ +def evaluate(generator, action, threshold=0.05): + + """ + Evaluate the model and saves results + :param generator: generator for validation dataset + :param action: eval the original or quantized model + :param threshold: Score Threshold + :return: + """ + in_tensor = "input_1:0" + out_tensor = ['filtered_detections/map/TensorArrayStack/TensorArrayGatherV3:0', + 'filtered_detections/map/TensorArrayStack_1/TensorArrayGatherV3:0', + 'filtered_detections/map/TensorArrayStack_2/TensorArrayGatherV3:0'] + + + with tf.Session() as new_sess: + if action=='eval_original': + saver = tf.train.import_meta_graph('./original_model.ckpt.meta') + saver.restore(new_sess, './original_model.ckpt') + else: + + new_quantsim = load_checkpoint('./quantzied_sim.ckpt', 'orig_quantsim_model') + new_sess = new_quantsim.session + + model = TFRunWrapper(new_sess, in_tensor, out_tensor) + + evaluate_coco(generator, model, threshold) + + +def create_generator(args, preprocess_image): + """ + Create generator to use for eval for coco validation set + :param args: args from commandline + :param preprocess_image: input preprocessing + :return: + """ + common_args = { + 'preprocess_image': preprocess_image, + } + + + from keras_retinanet.preprocessing.coco import CocoGenerator + + validation_generator = CocoGenerator( + args.coco_path, + 'val2017', + image_min_side=args.image_min_side, + image_max_side=args.image_max_side, + config=args.config, + shuffle_groups=False, + **common_args + ) + + return validation_generator + + +def parse_args(args): + """ Parse the arguments. + """ + parser = argparse.ArgumentParser(description='Evaluation script for a RetinaNet network.') + subparsers = parser.add_subparsers(help='Arguments for specific dataset types.', dest='dataset_type') + subparsers.required = True + + coco_parser = subparsers.add_parser('coco') + coco_parser.add_argument('coco_path', help='Path to dataset directory (ie. /tmp/COCO).') + coco_parser.add_argument('model_path', help='Path to the RetinaNet model.') + + parser.add_argument('--action', help='action to perform - eval_quantized|eval_original', default='eval_quantized', choices={"eval_quantized", "eval_original"}) + parser.add_argument('--convert-model', help='Convert the model to an inference model (ie. 
the input is a training model).', action='store_true') + parser.add_argument('--backbone', help='The backbone of the model.', default='resnet50') + parser.add_argument('--gpu', help='Id of the GPU to use (as reported by nvidia-smi).', type=int) + parser.add_argument('--score-threshold', help='Threshold on score to filter detections with (defaults to 0.05).', default=0.05, type=float) + parser.add_argument('--iou-threshold', help='IoU Threshold to count for a positive detection (defaults to 0.5).', default=0.5, type=float) + parser.add_argument('--max-detections', help='Max Detections per image (defaults to 100).', default=100, type=int) + parser.add_argument('--save-path', help='Path for saving images with detections (doesn\'t work for COCO).') + parser.add_argument('--image-min-side', help='Rescale the image so the smallest side is min_side.', type=int, default=800) + parser.add_argument('--image-max-side', help='Rescale the image if the largest side is larger than max_side.', type=int, default=1333) + parser.add_argument('--config', help='Path to a configuration parameters .ini file (only used with --convert-model).') + + return parser.parse_args(args) + +# The coco_eval in keras-retinanet repository needs a model as input for prediction +# We have a TF back-end session - so we wrap it in a Wrapper and implement predict to call session run +class TFRunWrapper(): + def __init__(self, tf_session, in_tensor, out_tensor): + self.sess = tf_session + self.in_tensor = in_tensor + self.out_tensor = out_tensor + + def predict_on_batch(self, input): + return self.sess.run(self.out_tensor, feed_dict={self.in_tensor: input}) + + +def main(args=None): + args = parse_args(args) + action = args.action + backbone = models.backbone("resnet50") + modelpath = args.model_path + cocopath= args.coco_path + generator = create_generator(args, backbone.preprocess_image) + quantize_retinanet(modelpath, cocopath, action) + evaluate(generator, action, args.score_threshold) + +if __name__ == '__main__': + main() diff --git a/zoo_tensorflow/examples/srgan_quanteval.py b/zoo_tensorflow/examples/srgan_quanteval.py new file mode 100755 index 0000000..1249c17 --- /dev/null +++ b/zoo_tensorflow/examples/srgan_quanteval.py @@ -0,0 +1,297 @@ +#!/usr/bin/env python3.6 +# -*- mode: python -*- +# ============================================================================= +# @@-COPYRIGHT-START-@@ +# +# Copyright (c) 2020 of Qualcomm Innovation Center, Inc. All rights reserved. 
+# +# @@-COPYRIGHT-END-@@ +# ============================================================================= + +import glob +import os +import warnings +import argparse +from functools import partial + +import tensorflow as tf +import numpy as np +from skimage.metrics import peak_signal_noise_ratio as psnr +from skimage.metrics import structural_similarity as ssim +from aimet_tensorflow import quantsim +from aimet_tensorflow.cross_layer_equalization import equalize_model +from aimet_tensorflow.bias_correction import QuantParams, BiasCorrectionParams, BiasCorrection + +from mmcv.image.colorspace import rgb2ycbcr +from model.srgan import generator + + +def make_dataset(filenames): + ds = tf.data.Dataset.from_tensor_slices(filenames) + ds = ds.map(tf.io.read_file) + ds = ds.map(lambda x: tf.image.decode_png(x, channels=3)) + + return ds + + +def evaluate_session(sess, + image_files, + input_name, + output_name, + mode='y_channel', + output_dir=None): + ''' + :param sess: a tensorflow session on which we run evaluation + :param image_files: a sequence containing sequence of image filenames as strings + :param input_name: a string indicating the input tensor's name + :param output_name: a string indicating the output tensor's name + :param mode: a string indicating on which space to evalute the PSNR & SSIM metrics. + Accepted values are ['y_channel', 'rgb'] + :param output_dir: If specified, super resolved images will be saved under the path + :return: a tuple containing the computed values of (PSNR, SSIME) sequences + ''' + # TODO: factor out the image_files but take a custom dataset instead + # TODO: support multiple inputs and outputs + if mode == 'rgb': + print('Testing on RGB channels...') + elif mode == 'y_channel': + print('Testing on Y channel...') + else: + raise ValueError('evaluation mode not supported!' 
+ 'Must be one of `RGB` or `y_channel`') + # batch size needed to align with input shape (?, ?, ?, 3) + batch_size = 1 + + with sess.graph.as_default(): + lr_image_files, hr_image_files = image_files + # make a dataset from input and reference images + lr_valid_ds = make_dataset(lr_image_files) + lr_valid_ds = lr_valid_ds.map(lambda x: tf.cast(x, dtype=tf.float32)) + + hr_valid_ds = make_dataset(hr_image_files) + + valid_ds = tf.data.Dataset.zip((lr_valid_ds, hr_valid_ds)) + # make an iterator from the dataset, batch applied here + valid_ds = valid_ds.batch(batch_size) + valid_ds_iter = valid_ds.make_one_shot_iterator() + imgs = valid_ds_iter.get_next() + + # crop border width 4 as suggested in https://arxiv.org/abs/1609.04802 + crop_border = 4 + psnr_values = [] + ssim_values = [] + + for lr_image_file in lr_image_files: + lr_img, hr_img = sess.run(imgs) + # get inference images + sr_img = sess.run(sess.graph.get_tensor_by_name(output_name), + {sess.graph.get_tensor_by_name(input_name): lr_img}) + sr_img = tf.clip_by_value(sr_img, 0, 255) + sr_img = tf.round(sr_img) + sr_img = tf.cast(sr_img, tf.uint8) + + sr_img = sess.run(sr_img) + + if output_dir: + sr_img_png = tf.image.encode_png(sr_img[0]) + # use the input image's name as output image's name by default + _, filename = os.path.split(lr_image_file) + filename = os.path.join(output_dir, filename) + + save_img = tf.io.write_file(filename, sr_img_png) + sess.run(save_img) + + if mode == 'y_channel': + sr_img = rgb2ycbcr(sr_img ,y_only=True) + hr_img = rgb2ycbcr(hr_img, y_only=True) + + sr_img = np.expand_dims(sr_img, axis=-1) + hr_img = np.expand_dims(hr_img, axis=-1) + + sr_img = sr_img[:, crop_border:-crop_border, crop_border:-crop_border, :] + hr_img = hr_img[:, crop_border:-crop_border, crop_border:-crop_border, :] + + psnr_value = psnr(hr_img[0], sr_img[0], data_range=255) + ssim_value = ssim(hr_img[0, :, :, 0], sr_img[0, :, :, 0], + multichannel=False, data_range=255.) + + psnr_values.append(psnr_value) + ssim_values.append(ssim_value) + + return psnr_values, ssim_values + + +def parse_args(): + parser = argparse.ArgumentParser(prog='srgan_quanteval', + description='Evaluate the pre and post quantized SRGAN model') + + parser.add_argument('weights_path', + help='The location where weight file for SRGAN model is saved', + type=str) + parser.add_argument('images_path', + help='The location where .png images are saved', + type=str) + parser.add_argument('--representative-datapath', + '-repr', + help='The location where representative data are stored. 
' + 'The data will be used for bias correction and ' + 'computation of encodings', + type=str) + parser.add_argument('--cross-layer-equalization', + '-cle', + action='store_true', + help='Applying cross layer equalization') + parser.add_argument('--bias-correction', + '-bc', + action='store_true', + help='Applying bias correction') + parser.add_argument('--use-cuda', + '-cuda', + help='Whether to use cuda, True by default', + default=True, + type=bool) + parser.add_argument('--quant-scheme', + '-qs', + help='Support two schemes for quantization: [`tf` or `tf_enhanced`],' + '`tf_enhanced` is used by default', + default='tf_enhanced', + choices=['tf', 'tf_enhanced'], + type=str) + parser.add_argument('--default-output-bw', + '-bout', + help='Default bitwidth (4-31) to use for quantizing layer inputs and outputs', + default=8, + choices=range(4, 32), + type=int) + parser.add_argument('--default-param-bw', + '-bparam', + help='Default bitwidth (4-31) to use for quantizing layer parameters', + default=8, + choices=range(4, 32), + type=int) + parser.add_argument('--num-quant-samples', + help='Number of quantization samples for Bias Correction, 10 by default', + default=10, + type=int) + parser.add_argument('--num-bias-correct-samples', + help='Number of samples for Bias Correction, 500 by default', + default=500, + type=int) + parser.add_argument('--output-dir', + '-outdir', + help='If specified, output images of quantized model ' + 'will be saved under this directory', + default=None, + type=str) + + return parser.parse_args() + +def main(args): + # configuration for efficient use of gpu + config = tf.ConfigProto() + config.gpu_options.allow_growth = True + + print('Loading srgan generator...') + gen_graph = tf.Graph() + with gen_graph.as_default(): + gen_sess = tf.Session(config=config, graph=gen_graph) + with gen_sess.as_default(): + srgan_generator = generator() + srgan_generator.load_weights(args.weights_path) + + # sort files by filenames, assuming names match in both paths + lr_images_files = sorted(glob.glob(os.path.join(args.images_path, '*LR.png'))) + hr_images_files = sorted(glob.glob(os.path.join(args.images_path, '*HR.png'))) + + # check if number of images align + if len(lr_images_files) != len(hr_images_files): + raise RuntimeError('length of image files doesn`t match,' + 'need same number of images for both' + 'low resolution and high resolution!') + + image_files = (lr_images_files, hr_images_files) + + # two list of metrics on all images + psnr_vals, ssim_vals = evaluate_session(gen_sess, image_files, + srgan_generator.input.name, + srgan_generator.output.name) + psnr_val = np.mean(psnr_vals) + ssim_val = np.mean(ssim_vals) + print(f'Mean PSNR and SSIM for given images on original model are: [{psnr_val}, {ssim_val}]') + + # TODO: use a better default dataset for compute encodings when not given by users + # use low resolution images if no representative lr data are provided + + # use low and high resolution images if no representative lr and hr data are provided + if args.representative_datapath: + bc_lr_data = glob.glob(os.path.join(args.representative_datapath, '*LR.png')) + comp_encodings_lr_data = glob.glob(os.path.join(args.representative_datapath, '*LR.png')) + comp_encodings_hr_data = glob.glob(os.path.join(args.representative_datapath, '*HR.png')) + else: + warnings.warn('No representative input data are given,' + 'bias correction and computation of encodings will be done' + 'on part of all of the low resolution images!') + bc_lr_data = lr_images_files + + 
warnings.warn('No representative reference data are given,' + 'computation of encodings will be done' + 'on part of all of the high resolution images!') + comp_encodings_lr_data = lr_images_files + comp_encodings_hr_data = hr_images_files + + comp_encodings_data = (comp_encodings_lr_data, comp_encodings_hr_data) + + if args.cross_layer_equalization: + print('Applying cross layer equalization (CLE) to session...') + gen_sess = equalize_model(gen_sess, + start_op_names=srgan_generator.input.op.name, + output_op_names=srgan_generator.output.op.name) + + if args.bias_correction: + print('Applying Bias Correction (BC) to session...') + # the dataset being evaluated might have varying image sizes + # so right now only use batch size 1 + batch_size = 1 + num_imgs = len(bc_lr_data) + + quant_params = QuantParams(use_cuda=args.use_cuda, quant_mode=args.quant_scheme) + bias_correction_params = BiasCorrectionParams(batch_size=batch_size, + num_quant_samples=min(num_imgs, args.num_quant_samples), + num_bias_correct_samples=min(num_imgs, args.num_bias_correct_samples), + input_op_names=[srgan_generator.input.op.name], + output_op_names=[srgan_generator.output.op.name]) + + ds = make_dataset(bc_lr_data) + ds = ds.batch(batch_size) + + gen_sess = BiasCorrection.correct_bias(gen_sess, bias_correction_params, quant_params, ds) + + # creating quantsim object which inserts quantizer ops + sim = quantsim.QuantizationSimModel(gen_sess, + starting_op_names=[srgan_generator.input.op.name], + output_op_names=[srgan_generator.output.op.name], + quant_scheme=args.quant_scheme, + default_output_bw=args.default_output_bw, + default_param_bw=args.default_param_bw) + + # compute activation encodings + # usually achieves good results when data being used for computing + # encodings are representative of its task + partial_eval = partial(evaluate_session, + input_name=srgan_generator.input.name, + output_name='lambda_3/mul_quantized:0') + sim.compute_encodings(partial_eval, comp_encodings_data) + + psnr_vals, ssim_vals = evaluate_session(sim.session, image_files, + srgan_generator.input.name, + 'lambda_3/mul_quantized:0', + output_dir=args.output_dir) + psnr_val = np.mean(psnr_vals) + ssim_val = np.mean(ssim_vals) + + print(f'Mean PSNR and SSIM for given images on quantized model are: [{psnr_val}, {ssim_val}]') + + +if __name__ == '__main__': + args = parse_args() + main(args) diff --git a/zoo_tensorflow/examples/ssd_mobilenet_v2_quanteval.py b/zoo_tensorflow/examples/ssd_mobilenet_v2_quanteval.py new file mode 100755 index 0000000..d411bcf --- /dev/null +++ b/zoo_tensorflow/examples/ssd_mobilenet_v2_quanteval.py @@ -0,0 +1,447 @@ +#!/usr/bin/env python3.6 +# -*- mode: python -*- +# ============================================================================= +# @@-COPYRIGHT-START-@@ +# +# Copyright (c) 2020 of Qualcomm Innovation Center, Inc. All rights reserved. +# +# @@-COPYRIGHT-END-@@ +# ============================================================================= + +# ============================================================================== +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +import os +import json +import argparse +import logging +import tensorflow as tf +from pycocotools.coco import COCO +from pycocotools.cocoeval import COCOeval +from tensorflow.contrib.slim import tfexample_decoder as slim_example_decoder +from tensorflow.contrib.quantize.python import quantize +from tensorflow.contrib.quantize.python import fold_batch_norms + +from object_detection.core import standard_fields as fields +from object_detection.data_decoders.tf_example_decoder import TfExampleDecoder +from aimet_tensorflow import quantizer as q +from aimet_tensorflow import quantsim +from aimet_tensorflow.batch_norm_fold import fold_all_batch_norms + +logger = logging.getLogger(__file__) + + +def load_graph(graph, meta_graph, checkpoint=None): + """ + Load a TF graph given the meta and checkpoint files + :param graph: Graph to load into + :param meta_graph: Meta file + :param checkpoint: Checkpoint file + :return: Newly created TF session + """ + gpu_options = tf.GPUOptions(allow_growth=True) + config = tf.ConfigProto(allow_soft_placement=True, gpu_options=gpu_options) + sess = tf.Session(config=config, graph=graph) + # Open the graph and restore the parameters + saver = tf.train.import_meta_graph(meta_graph, clear_devices=True) + if checkpoint is None: + checkpoint = meta_graph.split('.meta')[0] + saver.restore(sess, checkpoint) + return sess, saver + + +def initialize_uninitialized_vars(sess): + """ + Some graphs have variables created after training that need to be initialized. + However, in pre-trained graphs we don't want to reinitialize variables that are already + which would overwrite the values obtained during training. Therefore search for all + uninitialized variables and initialize ONLY those variables. 
+ :param sess: TF session + :return: + """ + from itertools import compress + global_vars = tf.global_variables() + is_not_initialized = sess.run([~(tf.is_variable_initialized(var)) for var in global_vars]) + uninitialized_vars = list(compress(global_vars, is_not_initialized)) + if uninitialized_vars: + sess.run(tf.variables_initializer(uninitialized_vars)) + +class CocoParser: + def __init__(self, data_inputs=None, validation_inputs=None, batch_size=1): + """ + Constructor + :param data_inputs: List of input ops for the model + :param validation_inputs: List of validation ops for the model + :param batch_size: Batch size for the data + """ + self._validation_inputs = validation_inputs + self._data_inputs = data_inputs + self._batch_size = batch_size + + if data_inputs is None: + self._data_inputs = ['image_tensor'] + else: + self._data_inputs = data_inputs + self.keys_to_features = TfExampleDecoder().keys_to_features + + self.items_to_handlers = { + fields.InputDataFields.image: ( + slim_example_decoder.Image(image_key='image/encoded', format_key='image/format', channels=3)), + fields.InputDataFields.source_id: (slim_example_decoder.Tensor('image/source_id')), + } + + def get_data_inputs(self): + return self._data_inputs + + def get_validation_inputs(self): + return self._validation_inputs + + def get_batch_size(self): + return self._batch_size + + def parse(self, serialized_example, is_trainning): + """ + Parse one example + :param serialized_example: + :param is_trainning: + :return: tensor_dict + """ + decoder = slim_example_decoder.TFExampleDecoder(self.keys_to_features, + self.items_to_handlers) + keys = decoder.list_items() + tensors = decoder.decode(serialized_example, items=keys) + tensor_dict = dict(zip(keys, tensors)) + + tensor_dict[fields.InputDataFields.image].set_shape([None, None, 3]) + tensor_dict[fields.InputDataFields.original_image_spatial_shape] = tf.shape( + tensor_dict[fields.InputDataFields.image])[:2] + + tensor_dict[fields.InputDataFields.image] = tf.image.resize_images( + tensor_dict[fields.InputDataFields.image], tf.stack([300, 300]), + method=0) + + if fields.InputDataFields.image_additional_channels in tensor_dict: + channels = tensor_dict[fields.InputDataFields.image_additional_channels] + channels = tf.squeeze(channels, axis=3) + channels = tf.transpose(channels, perm=[1, 2, 0]) + tensor_dict[fields.InputDataFields.image_additional_channels] = channels + + if fields.InputDataFields.groundtruth_boxes in tensor_dict: + is_crowd = fields.InputDataFields.groundtruth_is_crowd + tensor_dict[is_crowd] = tf.cast(tensor_dict[is_crowd], dtype=tf.bool) + + def default_groundtruth_weights(): + shape = tf.shape(tensor_dict[fields.InputDataFields.groundtruth_boxes])[0] + return tf.ones([shpae], dtype=tf.float32) + + shape = tf.shape(tensor_dict[fields.InputDataFields.groundtruth_weights])[0] + tensor_dict[fields.InputDataFields.groundtruth_weights] = tf.cond( + tf.greater(shape,0), + lambda: tensor_dict[fields.InputDataFields.groundtruth_weights], + default_groundtruth_weights) + + return tensor_dict + + def get_batch(self, iterator, next_element, sess): + """ + Get the next batch of data + :param next_element: + :param iterator: Data iterator + :return: Inputs in feed_dict form + """ + try: + keys = next_element.keys() + tensors = [] + for key in keys: + tensors.append(next_element[key]) + tensors_np = sess.run(tensors) + except tf.errors.OutOfRangeError: + tf.logging.error('tf.errors.OutOfRangeError') + raise + return dict(zip(keys, tensors_np)) + + +class 
TfRecordGenerator: + """ Dataset generator for TfRecords""" + + def __init__(self, dataset_dir, parser, file_pattern=None, is_trainning=False, num_gpus=1, num_epochs=None): + """ + Constructor + :param dataset_dir: The directory where the dataset files are stored. + :param file_pattern: The file pattern to use for matching the dataset source files. + :param parser: parser function to read tfrecords. + :param num_gpus: The number of GPUs being used. Data batches must be generated for each GPU device + :param num_epochs: How many times to repeat the dataset. Default is forever. Then the + amount of data generated is determined by the number of iterations the model is run and the batch + size. If set to a specific number the dataset will only provide the amount of the total dataset + 'num_epochs' times. + :return: A new TfRecord generator used to generate data for model analysis + """ + self._parser = parser + self._num_gpus = num_gpus + + # Setup the Dataset reader + if not file_pattern: + if not is_trainning: + file_pattern = 'validation-*-of-*' + else: + file_pattern = 'train-*-of-*' + file_pattern = os.path.join(dataset_dir, file_pattern) + tfrecords = tf.data.Dataset.list_files(file_pattern, shuffle=False) + self._dataset = tf.data.TFRecordDataset(tfrecords).repeat(num_epochs) + batch_size = self._parser.get_batch_size() + parse_fn = lambda x: self._parser.parse(x, is_trainning) + self._dataset = self._dataset.map(parse_fn) + self._dataset = self._dataset.batch(batch_size) + + # Initialize the iterator. This must be allocated during init when the + # generator is to be used manually. Otherwise the generator will generate a + # new iterator each time it's used as an iterator + with self._dataset._graph.as_default(): + self._iterator = self._dataset.make_one_shot_iterator() + self._next_element = self._iterator.get_next() + self.sess = tf.Session() + + def __iter__(self): + """ + Iter method for the generator + :return: + """ + with self._dataset._graph.as_default(): + self._iterator = self._dataset.make_one_shot_iterator() + self._next_element = self._iterator.get_next() + self.sess = tf.Session() + return self + + def __next__(self): + """ + Return the next set of batched data + + **NOTE** This function will not return new batches until the previous batches have + actually been used by a call to tensorflow. Eg used in a graph with a call to + 'run' etc. If it's unused the same tensors will be returned over and over again. 
+ + :return: + """ + return self._parser.get_batch(self._iterator, self._next_element, self.sess) + + # Map next for python27 compatibility + next = __next__ + + def get_data_inputs(self): + return self._parser.get_data_inputs() + + def get_validation_inputs(self): + return self._parser.get_validation_inputs() + + def get_batch_size(self): + return self._parser.get_batch_size() + + @property + def dataset(self): + return self._dataset + + +class MobileNetV2SSDRunner: + + def __init__(self, generator, checkpoint, annotation_file, graph=None, network=None, + is_train=False, + fold_bn=False, quantize=False): + self._generator = generator + self._checkpoint = checkpoint + self._annotation_file = annotation_file + self._graph = graph + self._network = network + self._is_train = is_train + self._fold_bn = fold_bn + self._quantize = quantize + if is_train is False: + self._eval_session, self._eval_saver = self.build_eval_graph() + + @staticmethod + def post_func(tensors_dict, annotation_file): + json_list = [] + # t_bbox [ymin,xmin,ymax,xmax] + # gt [xmin,ymin,width,height] + for i in range(len(tensors_dict)): + result_dict = tensors_dict[i] + for j in range(len(result_dict[fields.DetectionResultFields.detection_scores])): + t_score = result_dict[fields.DetectionResultFields.detection_scores][j] + t_bbox = result_dict[fields.DetectionResultFields.detection_boxes][j] + t_class = result_dict[fields.DetectionResultFields.detection_classes][j] + image_id = int(result_dict[fields.InputDataFields.source_id][j]) + Height = result_dict[fields.InputDataFields.original_image_spatial_shape][j][0] + Width = result_dict[fields.InputDataFields.original_image_spatial_shape][j][1] + for index, conf in enumerate(t_score): + top_conf = float(t_score[index]) + top_ymin = t_bbox[index][0] * Height + top_xmin = t_bbox[index][1] * Width + top_h = (t_bbox[index][3] - t_bbox[index][1]) * Width + top_w = (t_bbox[index][2] - t_bbox[index][0]) * Height + top_cat = int(t_class[index]) + json_dict = {'image_id': image_id, 'category_id': top_cat, + 'bbox': [top_xmin, top_ymin, top_h, top_w], 'score': top_conf} + json_list.append(json_dict) + + cocoGt = COCO(annotation_file) + cocoDt = cocoGt.loadRes(json_list) + cocoEval = COCOeval(cocoGt, cocoDt, 'bbox') + cocoEval.evaluate() + cocoEval.accumulate() + cocoEval.summarize() + dict_map_result = {'IoU[0.50:0.95]': cocoEval.stats[0], 'IoU[0.50]': cocoEval.stats[1], + 'IoU[0.75]': cocoEval.stats[2]} + return dict_map_result + + @property + def eval_session(self): + return self._eval_session + + def evaluate(self, session, iterations, loginfo=None, generator=None, post_func=None, eval_names=None): + generator = generator if generator is not None else self._generator + post_func = post_func if post_func is not None else self.post_func + eval_names = eval_names if eval_names is not None else self.eval_names + if loginfo is not None: + logger.info(loginfo) + return self.run_graph(session, generator, eval_names, post_func, iterations) + + def build_eval_graph(self): + g = tf.Graph() + with g.as_default(): + sess, saver = load_graph(g, self._graph, self._checkpoint) + if self._fold_bn: + fold_batch_norms.FoldBatchNorms(graph=sess.graph, freeze_batch_norm_delay=None, + is_training=False) + if self._quantize: + quantize.Quantize( + graph=sess.graph, + is_training=False, + quant_delay=0, + weight_bits=8, + activation_bits=8, + scope=None) + return sess, saver + + def run_graph(self, session, generator, eval_names, post_func, iterations): + """ + Evaluates the graph's performance by running 
data through the network + and calling an evaluation function to generate the performance metric. + :param session: The tensorflow session that contains the graph + :param generator: The data generator providing the network with batch data + :param eval_names: The names providing the nodes on which the network's performance should be judged + :param post_func: The customized post processing function to evaluate the network performance + :param iterations: The number of iterations (batches) to run through the network + :return: + """ + + initialize_uninitialized_vars(session) + image_tensor = session.graph.get_tensor_by_name('image_tensor:0') + eval_outputs = [] + for name in eval_names: + op = session.graph.get_operation_by_name(name) + eval_outputs.append(op.outputs[0]) + counters = {'skipped': 0, 'success': 0} + result_list = [] + try: + for _, input_dict in zip(range(iterations), generator): + # Setup the feed dictionary + feed_dict = {image_tensor: input_dict[fields.InputDataFields.image]} + try: + output_data = session.run(eval_outputs, feed_dict=feed_dict) + counters['success'] += 1 + export_dict = { + fields.InputDataFields.source_id: + input_dict[fields.InputDataFields.source_id], + fields.InputDataFields.original_image_spatial_shape: + input_dict[fields.InputDataFields.original_image_spatial_shape] + } + export_dict.update(dict(zip(eval_names, output_data))) + result_list.append(export_dict) + except tf.errors.InvalidArgumentError: + counters['skipped'] += 1 + except tf.errors.OutOfRangeError: + logger.info("Completed evaluation iterations: %i, success: %i, skipped: %i", + iterations, counters['success'], counters['skipped']) + finally: + if post_func is not None: + perf = post_func(result_list, self._annotation_file) + logger.info("%s", perf) + else: + perf = result_list + return perf + + def forward_func(self, sess, iterations): + return self.run_graph(sess, self._generator, self.eval_names, None, iterations) + + @property + def eval_names(self): + return [fields.DetectionResultFields.detection_scores, fields.DetectionResultFields.detection_boxes, + fields.DetectionResultFields.detection_classes] + + +def parse_args(): + """ Parse the arguments. + """ + parser = argparse.ArgumentParser(description='Evaluation script for SSD MobileNet v2.') + + parser.add_argument('--model-checkpoint', help='Path to model checkpoint', required=True) + parser.add_argument('--dataset-dir', help='Dir path to dataset (TFRecord format)', required=True) + parser.add_argument('--TFRecord-file-pattern', help='Dataset file pattern, e.g. 
coco_val.record-*-of-00010', + required=True) + parser.add_argument('--annotation-json-file', help='Path to ground truth annotation json file', required=True) + parser.add_argument('--eval-batch-size', help='Batch size to evaluate', default=1, type=int) + parser.add_argument('--eval-num-examples', help='Number of examples to evaluate, total 5000', default=5000, + type=int) + parser.add_argument('--quantsim-output-dir', help='Use this flag if want to save the quantized graph') + + return parser.parse_args() + + +def ssd_mobilenet_v2_quanteval(args): + parser = CocoParser(batch_size=args.eval_batch_size) + generator = TfRecordGenerator(dataset_dir=args.dataset_dir, file_pattern=args.TFRecord_file_pattern, + parser=parser, is_trainning=False) + + # Allocate the runner related to model session run + runner = MobileNetV2SSDRunner(generator=generator, checkpoint=args.model_checkpoint, + annotation_file=args.annotation_json_file, graph=args.model_checkpoint + '.meta', + fold_bn=False, quantize=False, is_train=False) + float_sess = runner.eval_session + + iterations = int(args.eval_num_examples / args.eval_batch_size) + runner.evaluate(float_sess, iterations, 'original model evaluating') + + # Fold BN + after_fold_sess, _ = fold_all_batch_norms(float_sess, generator.get_data_inputs(), ['concat', 'concat_1']) + # + # Allocate the quantizer and quantize the network using the default 8 bit params/activations + sim = quantsim.QuantizationSimModel(after_fold_sess, ['FeatureExtractor/MobilenetV2/MobilenetV2/input'], + output_op_names=['concat', 'concat_1'], + quant_scheme='tf', + default_output_bw=8, default_param_bw=8, + use_cuda=False) + # Compute encodings + sim.compute_encodings(runner.forward_func, forward_pass_callback_args=50) + # Export model for target inference + if args.quantsim_output_dir: + sim.export(os.path.join(args.quantsim_output_dir, 'export'), 'model.ckpt') + # Evaluate simulated quantization performance + runner.evaluate(sim.session, iterations, 'quantized model evaluating') + + +if __name__ == '__main__': + args = parse_args() + ssd_mobilenet_v2_quanteval(args) diff --git a/zoo_torch/Docs/DeepLabV3.md b/zoo_torch/Docs/DeepLabV3.md new file mode 100644 index 0000000..6783af5 --- /dev/null +++ b/zoo_torch/Docs/DeepLabV3.md @@ -0,0 +1,62 @@ +# PyTorch-DeepLabV3+ + +## Setup AI Model Efficiency Toolkit (AIMET) +Please [install and setup AIMET](../../README.md#install-aimet) before proceeding further. + +## Additional Dependencies +1. Install pycocotools as follows +``` +sudo -H pip install pycocotools +``` + +## Model modifications & Experiment Setup +1. Clone the [DeepLabV3+ repo](https://github.com/jfzhang95/pytorch-deeplab-xception) +``` +git clone https://github.com/jfzhang95/pytorch-deeplab-xception.git +cd pytorch-deeplab-xception +git checkout 9135e104a7a51ea9effa9c6676a2fcffe6a6a2e6 +``` +2. Apply the following patch to the above repository +``` +git apply ../aimet-model-zoo/zoo_torch/examples/pytorch-deeplab-xception-zoo.patch +``` +3. Place modeling directory & dataloaders directory & metrics.py & mypath.py to aimet-model-zoo/zoo_torch/examples/ +``` +mv modeling ../aimet-model-zoo/zoo_torch/examples/ +mv dataloaders ../aimet-model-zoo/zoo_torch/examples/ +mv utils/metrics.py ../aimet-model-zoo/zoo_torch/examples/ +mv mypath.py ../aimet-model-zoo/zoo_torch/examples/ +``` +4. Download Optimized DeepLabV3+ checkpoint from [Releases](/../../releases). +5. 
Change data location as located in mypath.py + +## Obtaining model checkpoint and dataset + +- The original DeepLabV3+ checkpoint can be downloaded here: + - https://drive.google.com/file/d/1G9mWafUAj09P4KvGSRVzIsV_U5OqFLdt/view +- Optimized DeepLabV3+ checkpoint can be downloaded from [Releases](/../../releases). +- Pascal Dataset can be downloaded here: + - http://host.robots.ox.ac.uk/pascal/VOC/voc2012/ + +## Usage + +- To run evaluation with QuantSim in AIMET, use the following +```bash +python eval_deeplabv3.py \ + --checkpoint-path \ + --base-size \ + --crop-size \ + --num-classes \ + --dataset \ + --quant-scheme \ + --default-output-bw \ + --default-param-bw +``` + +## Quantization Configuration +- Weight quantization: 8 bits, asymmetric quantization +- Bias parameters are not quantized +- Activation quantization: 8 bits, asymmetric quantization +- Model inputs are not quantized +- TF_enhanced was used as quantization scheme +- Data Free Quantization and Quantization aware Training has been performed on the optimized checkpoint diff --git a/zoo_torch/Docs/DeepSpeech2.md b/zoo_torch/Docs/DeepSpeech2.md new file mode 100755 index 0000000..32087b6 --- /dev/null +++ b/zoo_torch/Docs/DeepSpeech2.md @@ -0,0 +1,51 @@ +# DeepSpeech + +## Setup AI Model Efficiency Toolkit (AIMET) +Please [install and setup AIMET](../../README.md#install-aimet) before proceeding further. + +## Additional Dependencies + +### Setup SeanNaren DeepSpeech2 Repo + +- Clone the [SeanNaren DeepSpeech2 Repo](https://github.com/SeanNaren/deepspeech.pytorch) + `git clone https://github.com/SeanNaren/deepspeech.pytorch.git` + +- checkout this commit id: +`cd deepspeech.pytorch` +`git checkout 78f7fb791f42c44c8a46f10e79adad796399892b` + +- Install the requirements from the SeanNaren repo as detailed in the repository. + +- Append the repo location to your `PYTHONPATH` with the following: + `export PYTHONPATH=$PYTHONPATH:/deepspeech.pytorch` + + +## Obtaining model checkpoint and dataset + +- The SeanNaren DeepSpeech2 checkpoint can be downloaded from [here](https://github.com/SeanNaren/deepspeech.pytorch/releases/download/v2.0/librispeech_pretrained_v2.pth). Please point the `model-path` flag in to this file in the run script. Please note that this script is only compatible with release V2. + +- LibriSpeech __test clean__ set can be downloaded here: + - http://www.openslr.org/12 + + +Please see the [Datasets Section in the SeanNaren Repo](https://github.com/SeanNaren/deepspeech.pytorch#datasets) for the format of the test manifest used in the script. The [download script](https://github.com/SeanNaren/deepspeech.pytorch/blob/v2.0/data/librispeech.py) from this repository will download and format the csv to be used in the `test-manifest` flag. 
+ + +## Usage + +- To run evaluation with QuantSim in AIMET, use the following + +```bash +python deepspeech2_quanteval.py \ + --model-path= \ + --test-manifest= +``` + +## Quantizer Op Assumptions +In the evaluation script included, we have manually configured the quantizer ops with the following assumptions: +- Weight quantization: 8 bits, asymmetric quantization +- Bias parameters are not quantized +- Activation quantization: 8 bits, asymmetric quantization + - Inputs to Conv layers are quantized + - Input and recurrent activations for LSTM layers are quantized +- Operations which shuffle data such as reshape or transpose do not require additional quantizers diff --git a/zoo_torch/Docs/EfficientNet-lite0.md b/zoo_torch/Docs/EfficientNet-lite0.md new file mode 100644 index 0000000..20f261d --- /dev/null +++ b/zoo_torch/Docs/EfficientNet-lite0.md @@ -0,0 +1,38 @@ +# PyTorch-EfficientNet-lite0 + +## Setup AI Model Efficiency Toolkit (AIMET) +Please [install and setup AIMET](../../README.md#install-aimet) before proceeding further. + +## Additional Dependencies +1. Install geffnet using pip install +``` +sudo -H pip install geffnet +``` +## Obtaining model checkpoint and dataset + +- The original EfficientNet-lite0 checkpoint can be downloaded from here: + - https://github.com/rwightman/gen-efficientnet-pytorch +- ImageNet can be downloaded from here: + - http://www.image-net.org/ + +## Usage +- To run evaluation with QuantSim in AIMET, use the following +```bash +python eval_efficientnetlite0.py \ + --images-dir \ + --quant-scheme \ + --quant-tricks \ + --default-output-bw \ + --default-param-bw \ + --num-iterations \ + --num-batches \ +``` + +## Quantization Configuration +- Weight quantization: 8 bits, asymmetric quantization +- Bias parameters are not quantized +- Activation quantization: 8 bits, asymmetric quantization +- Model inputs are not quantized +- TF_enhanced was used as quantization scheme +- Batch norm folding and Adaround has been applied on efficientnet-lite in the eval script +- [Conv - Relu6] layers has been fused as one operation via manual configurations diff --git a/zoo_torch/Docs/MobileNetV2-SSD-lite.md b/zoo_torch/Docs/MobileNetV2-SSD-lite.md new file mode 100644 index 0000000..2d5a172 --- /dev/null +++ b/zoo_torch/Docs/MobileNetV2-SSD-lite.md @@ -0,0 +1,81 @@ +# PyTorch-MobileNetV2-SSD-lite + +## Setup AI Model Efficiency Toolkit (AIMET) +Please [install and setup AIMET](../../README.md#install-aimet) before proceeding further. + +## Model modifications +1. Clone the original repository +``` +git clone https://github.com/qfgaohao/pytorch-ssd.git +cd pytorch-ssd +git checkout f61ab424d09bf3d4bb3925693579ac0a92541b0d +git apply ../aimet-model-zoo/zoo_torch/examples/torch_ssd_eval.patch +``` +2. Place the model definition & eval_ssd.py to aimet-model-zoo/zoo_torch/examples/ +``` +mv vision ../aimet-model-zoo/zoo_torch/examples/ +mv eval_ssd.py ../aimet-model-zoo/zoo_torch/examples/ +``` +3. 
Change __init__ function from line #27 in vision/ssd/ssd.py as follows: +``` +self.config = None #############Change 1 + +self.image_size = 300 +self.image_mean = np.array([127, 127, 127]) # RGB layout +self.image_std = 128.0 +self.iou_threshold = 0.45 +self.center_variance = 0.1 +self.size_variance = 0.2 + +self.specs = [box_utils.SSDSpec(19, 16, box_utils.SSDBoxSizes(60, 105), [2, 3]), + box_utils.SSDSpec(10, 32, box_utils.SSDBoxSizes(105, 150), [2, 3]), + box_utils.SSDSpec(5, 64, box_utils.SSDBoxSizes(150, 195), [2, 3]), + box_utils.SSDSpec(3, 100, box_utils.SSDBoxSizes(195, 240), [2, 3]), + box_utils.SSDSpec(2, 150, box_utils.SSDBoxSizes(240, 285), [2, 3]), + box_utils.SSDSpec(1, 300, box_utils.SSDBoxSizes(285, 330), [2, 3])] + +self.gen_priors = box_utils.generate_ssd_priors(self.specs, self.image_size) + +# register layers in source_layer_indexes by adding them to a module list +self.source_layer_add_ons = nn.ModuleList([t[1] for t in source_layer_indexes + if isinstance(t, tuple) and not isinstance(t, GraphPath)]) + +if device: + self.device = device +else: + self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") +if is_test: + self.priors = self.gen_priors.to(self.device) +``` +4. Change line #93 in vision/ssd/ssd.py as follows: +``` +boxes = box_utils.convert_locations_to_boxes( + locations.cpu(), self.priors.cpu(), self.center_variance, self.size_variance +) +``` + +## Obtaining model checkpoint and dataset +- The original MobileNetV2-SSD-lite checkpoint can be downloaded here: + - https://storage.googleapis.com/models-hao/mb2-ssd-lite-mp-0_686.pth +- Optimized checkpoint can be downloaded from the [Releases](/../../releases). +- Pascal VOC2007 dataset can be downloaded here: + - http://host.robots.ox.ac.uk/pascal/VOC/voc2007/index.html + +## Usage +- To run evaluation with QuantSim in AIMET, use the following +```bash +python eval_ssd.py \ + --net \ + --trained_model \ + --dataset \ + --label_file \ + --eval_dir +``` + +## Quantization Configuration +- Weight quantization: 8 bits, asymmetric quantization +- Bias parameters are not quantized +- Activation quantization: 8 bits, asymmetric quantization +- Model inputs are not quantized +- TF_enhanced was used as quantization scheme +- Cross-layer-Equalization and Adaround have been applied on optimized checkpoint diff --git a/zoo_torch/Docs/MobilenetV2.md b/zoo_torch/Docs/MobilenetV2.md new file mode 100644 index 0000000..2ace385 --- /dev/null +++ b/zoo_torch/Docs/MobilenetV2.md @@ -0,0 +1,70 @@ +# PyTorch-MobileNetV2 + +## Setup AI Model Efficiency Toolkit (AIMET) +Please [install and setup AIMET](../../README.md#install-aimet) before proceeding further. + +## Model modifications +1. Clone the [MobileNetV2 repo](https://github.com/tonylins/pytorch-mobilenet-v2) +``` +git clone https://github.com/tonylins/pytorch-mobilenet-v2 +cd pytorch-mobilenet-v2/ +git checkout 99f213657e97de463c11c9e0eaca3bda598e8b3f +``` +2. Place model definition under model directory +``` +mkdir ../aimet-model-zoo/zoo_torch/examples/model +mv MobileNetV2.py ../aimet-model-zoo/zoo_torch/examples/model/ +``` +3. Download Optimized MobileNetV2 checkpoint from [Releases](/../../releases) and place under the model directory. +4. Replace all ReLU6 activations with ReLU +5. 
Following changes has been made or appended in original model definition for our suite + - Change line #87 as follows in MobileNetV2.py +``` +self.last_channel = int(last_channel * width_mult) if width_mult > 1.0 else last_channel +``` + - Change line #91 as follows in MobileNetV2.py +``` +output_channel = int(c * width_mult) +``` + - Append line #100 as follows in MobileNetV2.py +``` +self.features.append(nn.AvgPool2d(input_size // 32) +``` + - Change line #104 as follows in MobileNetV2.py +``` +self.classifier = nn.Sequential( + nn.Dropout(dropout), + nn.Linear(self.last_channel, n_class), + ) +``` + - Change line #110 as follows in MobileNetV2.py +``` +x = x.squeeze() +``` +## Obtaining model checkpoint and dataset + +- The original MobileNetV2 checkpoint can be downloaded here: + - https://github.com/tonylins/pytorch-mobilenet-v2 +- Optimized MobileNetV2 checkpoint can be downloaded from releases +- ImageNet can be downloaded here: + - http://www.image-net.org/ + +## Usage +- To run evaluation with QuantSim in AIMET, use the following +```bash +python eval_mobilenetv2.py \ + --model-path \ + --images-dir \ + --quant-scheme \ + --input-shape \ + --default-output-bw \ + --default-param-bw +``` + +## Quantization Configuration +- Weight quantization: 8 bits, asymmetric quantization +- Bias parameters are not quantized +- Activation quantization: 8 bits, asymmetric quantization +- Model inputs are not quantized +- TF_enhanced was used as quantization scheme +- Data Free Quantization and Quantization aware Training has been performed on the optimized checkpoint diff --git a/zoo_torch/Docs/PoseEstimation.md b/zoo_torch/Docs/PoseEstimation.md new file mode 100644 index 0000000..1b27c4d --- /dev/null +++ b/zoo_torch/Docs/PoseEstimation.md @@ -0,0 +1,41 @@ +# Pose Estimation + +## Setup AI Model Efficiency Toolkit (AIMET) +Please [install and setup AIMET](../../README.md#install-aimet) before proceeding further. + +## Additional Dependencies + +| Package | Version | +| :---------: | :-----: | +| pycocotools | 2.0.2 | +| scipy | 1.1.0 | + +### Adding dependencies within Docker Image + +- If you are using a docker image, e.g. AIMET development docker, please add the following lines to the Dockerfile and rebuild the Docker image + +```dockerfile +RUN pip install git+https://github.com/cocodataset/cocoapi.git#subdirectory=PythonAPI +RUN pip install scipy==1.1.0 +``` + +## Obtaining model weights and dataset +- The pose estimation model can be downloaded here: + - + Pose Estimation pytorch model + +- coco dataset can be downloaded here: + - COCO 2014 Val images + - + COCO 2014 Train/Val annotations + + +## Usage +- The program requires two arguments to run: model_dir, coco_path. These are positional arguments so you must specify the arguments in order. + ```bash + python ./examples/pose_estimation_quanteval.py + ``` + +- We only support evaluation on COCO 2014 val images with person keypoints. + +- The results reported was evaluation on the whole dataset, which contains over 40k images and takes 15+ hours on a single RTX 2080Ti GPU. So in case you want to run a faster evaluation, specifiy num_imgs argument to the second call with a small number to evaluate_session so that you run evaluation only on a partial dataset. 
diff --git a/zoo_torch/Docs/SRGAN.md b/zoo_torch/Docs/SRGAN.md new file mode 100644 index 0000000..1c3c7c0 --- /dev/null +++ b/zoo_torch/Docs/SRGAN.md @@ -0,0 +1,73 @@ +# SRGAN (Super Resolution) + +## Setup AI Model Efficiency Toolkit (AIMET) +Please [install and setup AIMET](../../README.md#install-aimet) before proceeding further. + +### Setup Super-resolution repo + +- Clone the mmsr repo + `git clone https://github.com/andreas128/mmsr.git` + `git checkout a73b318f0f07feb6505ef5cb1abf0db33e33807a` + +- Append the repo location to your `PYTHONPATH` with the following: + `export PYTHONPATH=:/codes:$PYTHONPATH` + + Note that here we add both mmsr and the subdirectory mmsr/codes to our path. + + - Find mmsr/codes/models/archs/arch_util.py and do the following changes: + 1. In \_\_init__ append one line ```self.relu=nn.ReLU()``` after + ```self.conv2 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)``` like below: + + ```python + super(ResidualBlock_noBN, self).__init__() + self.conv1 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True) + self.conv2 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True) + self.relu = nn.ReLU() + ``` + + 2. In forward replace ```out = F.relu(self.conv1(x), inplace=True)``` + with ```out = self.relu(self.conv1(x))``` like below: + + ```python + identity = x + # out = F.relu(self.conv1(x), inplace=True) + out = self.relu(self.conv1(x)) + out = self.conv2(out) + ``` + + These changes are necessary since AIMET currently doesn't run on some pytorch + functionals. + +## Obtaining model weights and dataset + +- The SRGAN model can be downloaded from: + - mmediting + +- Three benchmark dataset can be downloaded here: + - [Set5](https://uofi.box.com/shared/static/kfahv87nfe8ax910l85dksyl2q212voc.zip) + - [Set14](https://uofi.box.com/shared/static/igsnfieh4lz68l926l8xbklwsnnk8we9.zip) + - [BSD100](https://uofi.box.com/shared/static/qgctsplb8txrksm9to9x01zfa4m61ngq.zip) + + Our benchmark results use images under **image_SRF_4** directory which tests 4x + super-resolution as the suffix number indicates. You can also use other scales. + See instructions for usage below. + +## Usage + +- The `srgan_quanteval.py` script requires you to specify a .yml file which contains locations to your dataset and .pth model together with some config parameters. You can just pass the mmsr/codes/options/test/test_SRGAN.yml as your .yml file. Remember to edit the file s.t. + - dataroot_GT points to your directory of HR images + - dataroot_LQ points to your directory of LR images + - pretrain_model_G points to where you store your srgan .pth file + - scale has to match the super-resolution images' scale + +Run the script as follows: + ```bash + python ./zoo_torch/examples/srgan_quanteval.py [--options] -opt + ``` + +## Quantizer Op Assumptions +In the evaluation script included, we have used the default config file, which configures the quantizer ops with the following assumptions: +- Weight quantization: 8 bits, asymmetric quantization +- Bias parameters are not quantized +- Activation quantization: 8 bits, asymmetric quantization +- Model inputs are not quantized diff --git a/zoo_torch/examples/deepspeech2_quanteval.py b/zoo_torch/examples/deepspeech2_quanteval.py new file mode 100755 index 0000000..9977b92 --- /dev/null +++ b/zoo_torch/examples/deepspeech2_quanteval.py @@ -0,0 +1,296 @@ +#!/usr/bin/env python3.6 +# -*- mode: python -*- +# ============================================================================= +# @@-COPYRIGHT-START-@@ +# +# Copyright (c) 2020 of Qualcomm Innovation Center, Inc. All rights reserved. 
+# +# @@-COPYRIGHT-END-@@ +# ============================================================================= + +''' +This script will run AIMET QuantSim and evaluate WER using the DeepSpeech2 model +from the SeanNaren repo with manual configuration of quantization ops. +''' + +import os +import sys +import json +import argparse + +import torch +from tqdm import tqdm + +from deepspeech_pytorch.configs.inference_config import EvalConfig, LMConfig +from deepspeech_pytorch.decoder import GreedyDecoder +from deepspeech_pytorch.loader.data_loader import SpectrogramDataset, AudioDataLoader +from deepspeech_pytorch.utils import load_model, load_decoder +from deepspeech_pytorch.testing import run_evaluation + +import aimet_torch +from aimet_common.defs import QuantScheme +from aimet_torch.pro.quantsim import QuantizationSimModel + +def run_quantsim_evaluation(args): + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + + import deepspeech_pytorch.model + + def wrapped_forward_function(self, x, lengths=None): + if lengths is None: + lengths = torch.IntTensor([_x.shape[0] for _x in x]) + return self.infer(x, lengths) + + deepspeech_pytorch.model.DeepSpeech.infer = deepspeech_pytorch.model.DeepSpeech.forward + deepspeech_pytorch.model.DeepSpeech.forward = wrapped_forward_function + + model = load_model(device=device, + model_path=args.model_path, + use_half=False) + + + + decoder = load_decoder(labels=model.labels, + cfg=LMConfig) + + target_decoder = GreedyDecoder(model.labels, + blank_index=model.labels.index('_')) + + def eval_func(model, iterations=None, device=device): + test_dataset = SpectrogramDataset(audio_conf=model.audio_conf, + manifest_filepath=args.test_manifest, + labels=model.labels, + normalize=True) + + if iterations is not None: + test_dataset.size = iterations + + test_loader = AudioDataLoader(test_dataset, + batch_size=args.batch_size, + num_workers=args.num_workers) + + wer, cer, output_data = run_evaluation(test_loader=test_loader, + device=device, + model=model, + decoder=decoder, + target_decoder=target_decoder, + save_output=False, + verbose=True, + use_half=False) + return wer, cer, output_data + + + quant_scheme = QuantScheme.post_training_tf_enhanced + + sim = QuantizationSimModel(model.cpu(), + input_shapes=tuple([1, 1, 161, 500]), + quant_scheme=quant_scheme, + default_param_bw=args.default_param_bw, + default_output_bw=args.default_output_bw, + config_file=args.quantsim_config_file + ) + + manually_configure_quant_ops(sim) + + sim.model.to(device) + sim.compute_encodings(eval_func, forward_pass_callback_args=args.encodings_iterations) + + wer, cer, output_data = eval_func(sim.model, None) + print('Average WER {:.4f}'.format(wer)) + +def manually_configure_quant_ops(sim): + ''' + Manually configure Quantization Ops. Please see documentation for further explanation of quant op placement. 
+ ''' + + manual_config = { + 'conv.seq_module.0': { # Conv2d + 'input_quantizer': True, + 'output_quantizer': False, + 'weight_quantizer': True, + 'bias_quantizer': False, + }, + 'conv.seq_module.1': { # BatchNorm + 'input_quantizer': False, + 'output_quantizer': False, + 'weight_quantizer': False, + 'bias_quantizer': False, + }, + 'conv.seq_module.2': { # HardTanh + 'input_quantizer': True, + 'output_quantizer': False, + }, + 'conv.seq_module.3': { # Conv2d + 'input_quantizer': True, + 'output_quantizer': False, + 'weight_quantizer': True, + 'bias_quantizer': False, + }, + 'conv.seq_module.4': { # BatchNorm + 'input_quantizer': False, + 'output_quantizer': False, + 'weight_quantizer': False, + 'bias_quantizer': False, + }, + 'conv.seq_module.5': { # HardTanh + 'input_quantizer': True, + 'output_quantizer': False, + }, + 'rnns.0.rnn': { + 'input_l0_quantizer': True, + 'initial_h_l0_quantizer': False, + 'initial_c_l0_quantizer': False, + 'h_l0_quantizer': True, + 'c_l0_quantizer': False, + 'weight_ih_l0_quantizer': True, + 'weight_hh_l0_quantizer': True, + 'bias_ih_l0_quantizer': False, + 'bias_hh_l0_quantizer': False, + 'weight_ih_l0_reverse_quantizer': True, + 'weight_hh_l0_reverse_quantizer': True, + 'bias_ih_l0_reverse_quantizer': False, + 'bias_hh_l0_reverse_quantizer': False, + }, + 'rnns.1.batch_norm.module': { + 'input_quantizer': False, + 'output_quantizer': False, + 'weight_quantizer': False, + 'bias_quantizer': False, + }, + 'rnns.1.rnn': { + 'input_l0_quantizer': True, + 'initial_h_l0_quantizer': False, + 'initial_c_l0_quantizer': False, + 'h_l0_quantizer': True, + 'c_l0_quantizer': False, + 'weight_ih_l0_quantizer': True, + 'weight_hh_l0_quantizer': True, + 'bias_ih_l0_quantizer': False, + 'bias_hh_l0_quantizer': False, + 'weight_ih_l0_reverse_quantizer': True, + 'weight_hh_l0_reverse_quantizer': True, + 'bias_ih_l0_reverse_quantizer': False, + 'bias_hh_l0_reverse_quantizer': False, + }, + 'rnns.2.batch_norm.module': { + 'input_quantizer': False, + 'output_quantizer': False, + 'weight_quantizer': False, + 'bias_quantizer': False, + }, + 'rnns.2.rnn': { + 'input_l0_quantizer': True, + 'initial_h_l0_quantizer': False, + 'initial_c_l0_quantizer': False, + 'h_l0_quantizer': True, + 'c_l0_quantizer': False, + 'weight_ih_l0_quantizer': True, + 'weight_hh_l0_quantizer': True, + 'bias_ih_l0_quantizer': False, + 'bias_hh_l0_quantizer': False, + 'weight_ih_l0_reverse_quantizer': True, + 'weight_hh_l0_reverse_quantizer': True, + 'bias_ih_l0_reverse_quantizer': False, + 'bias_hh_l0_reverse_quantizer': False, + }, + 'rnns.3.batch_norm.module': { + 'input_quantizer': False, + 'output_quantizer': False, + 'weight_quantizer': False, + 'bias_quantizer': False, + }, + 'rnns.3.rnn': { + 'input_l0_quantizer': True, + 'initial_h_l0_quantizer': False, + 'initial_c_l0_quantizer': False, + 'h_l0_quantizer': True, + 'c_l0_quantizer': False, + 'weight_ih_l0_quantizer': True, + 'weight_hh_l0_quantizer': True, + 'bias_ih_l0_quantizer': False, + 'bias_hh_l0_quantizer': False, + 'weight_ih_l0_reverse_quantizer': True, + 'weight_hh_l0_reverse_quantizer': True, + 'bias_ih_l0_reverse_quantizer': False, + 'bias_hh_l0_reverse_quantizer': False, + }, + 'rnns.4.batch_norm.module': { + 'input_quantizer': False, + 'output_quantizer': False, + 'weight_quantizer': False, + 'bias_quantizer': False, + }, + 'rnns.4.rnn': { + 'input_l0_quantizer': True, + 'initial_h_l0_quantizer': False, + 'initial_c_l0_quantizer': False, + 'h_l0_quantizer': True, + 'c_l0_quantizer': False, + 'weight_ih_l0_quantizer': True, + 
'weight_hh_l0_quantizer': True, + 'bias_ih_l0_quantizer': False, + 'bias_hh_l0_quantizer': False, + 'weight_ih_l0_reverse_quantizer': True, + 'weight_hh_l0_reverse_quantizer': True, + 'bias_ih_l0_reverse_quantizer': False, + 'bias_hh_l0_reverse_quantizer': False, + }, + 'fc.0.module.0': { + 'input_quantizer': True, + 'output_quantizer': False, + 'weight_quantizer': False, + 'bias_quantizer': False, + }, + 'fc.0.module.1': { + 'input_quantizer': True, + 'output_quantizer': False, + 'weight_quantizer': True, + }, + 'inference_softmax': { + 'input_quantizer': False, + 'output_quantizer': True, + } + } + + quant_ops = QuantizationSimModel._get_qc_quantized_layers(sim.model) + for name, op in quant_ops: + mc = manual_config[name] + if isinstance(op, aimet_torch.qc_quantize_op.QcPostTrainingWrapper): + op.input_quantizer.enabled = mc['input_quantizer'] + op.output_quantizer.enabled = mc['output_quantizer'] + for q_name, param_quantizer in op.param_quantizers.items(): + param_quantizer.enabled = mc[q_name + '_quantizer'] + elif isinstance(op, aimet_torch.pro.qc_quantize_recurrent.QcQuantizeRecurrent): + for q_name, input_quantizer in op.input_quantizers.items(): + input_quantizer.enabled = mc[q_name + '_quantizer'] + for q_name, output_quantizer in op.output_quantizers.items(): + output_quantizer.enabled = mc[q_name + '_quantizer'] + for q_name, param_quantizer in op.param_quantizers.items(): + param_quantizer.enabled = mc[q_name + '_quantizer'] + + +def parse_args(args): + """ Parse the arguments. + """ + parser = argparse.ArgumentParser(description='Evaluation script for an DeepSpeech2 network.') + + parser.add_argument('--model-path', help='Path to .pth to load from.') + parser.add_argument('--test-manifest', help='Path to csv to do eval on.') + parser.add_argument('--batch-size', help='Batch size.', type=int, default=20) + parser.add_argument('--num-workers', help='Number of workers.', type=int, default=1) + + parser.add_argument('--quant-scheme', help='Quant scheme to use for quantization (tf, tf_enhanced, range_learning_tf, range_learning_tf_enhanced).', default='tf') + parser.add_argument('--round-mode', help='Round mode for quantization.', default='nearest') + parser.add_argument('--default-output-bw', help='Default output bitwidth for quantization.', type=int, default=8) + parser.add_argument('--default-param-bw', help='Default parameter bitwidth for quantization.', type=int, default=8) + parser.add_argument('--quantsim-config-file', help='Quantsim configuration file.', default=None) + parser.add_argument('--encodings-iterations', help='Number of iterations to use for compute encodings during quantization.', type=int, default=500) + + return parser.parse_args(args) + +def main(args=None): + args = parse_args(args) + run_quantsim_evaluation(args) + +if __name__ == '__main__': + main() diff --git a/zoo_torch/examples/eval_deeplabv3.py b/zoo_torch/examples/eval_deeplabv3.py new file mode 100755 index 0000000..b4cdbb0 --- /dev/null +++ b/zoo_torch/examples/eval_deeplabv3.py @@ -0,0 +1,133 @@ +#!/usr/bin/env python3.6 +# -*- mode: python -*- +# ============================================================================= +# @@-COPYRIGHT-START-@@ +# +# Copyright (c) 2020 of Qualcomm Innovation Center, Inc. All rights reserved. 
+# +# @@-COPYRIGHT-END-@@ +# ============================================================================= + +''' AIMET Quantsim code for DeepLabV3 ''' + +import random +import numpy as np +import torch +from modeling.deeplab import DeepLab +from tqdm import tqdm +import argparse +from metrics import Evaluator +from dataloaders import make_data_loader + +def work_init(work_id): + seed = torch.initial_seed() % 2**32 + random.seed(seed + work_id) + np.random.seed(seed + work_id) + +def model_eval(args, data_loader): + def func_wrapper(model, arguments): + evaluator = Evaluator(args.num_classes) + evaluator.reset() + model.eval() + model.cuda() + threshold, use_cuda = arguments[0], arguments[1] + total_samples = 0 + for sample in tqdm(data_loader): + images, label = sample['image'], sample['label'] + images, label = images.cuda(), label.cpu().numpy() + output = model(images) + pred = torch.argmax(output, 1).data.cpu().numpy() + evaluator.add_batch(label, pred) + total_samples += images.size()[0] + if total_samples > threshold: + break + mIoU = evaluator.Mean_Intersection_over_Union()*100. + print("mIoU : {:0.2f}".format(mIoU)) + return mIoU + return func_wrapper + + + + +def arguments(): + parser = argparse.ArgumentParser(description='Evaluation script for PyTorch ImageNet networks.') + + parser.add_argument('--checkpoint-path', help='Path to optimized checkpoint directory to load from.', default = None, type=str) + parser.add_argument('--base-size', help='Base size for Random Crop', default=513) + parser.add_argument('--crop-size', help='Crop size for Random Crop', default=513) + parser.add_argument('--num-classes', help='Number of classes in a dataset', default=21) + parser.add_argument('--dataset', help='dataset used for evaluation', default='pascal') + + parser.add_argument('--seed', help='Seed number for reproducibility', default=0) + parser.add_argument('--use-sbd', help='Use SBD data for data augmentation during training', default=False) + + parser.add_argument('--quant-scheme', help='Quant scheme to use for quantization (tf, tf_enhanced, range_learning_tf, range_learning_tf_enhanced).', default='tf', choices = ['tf', 'tf_enhanced', 'range_learning_tf', 'range_learning_tf_enhanced']) + parser.add_argument('--round-mode', help='Round mode for quantization.', default='nearest') + parser.add_argument('--default-output-bw', help='Default output bitwidth for quantization.', default=8) + parser.add_argument('--default-param-bw', help='Default parameter bitwidth for quantization.', default=8) + parser.add_argument('--config-file', help='Quantsim configuration file.', default=None, type=str) + parser.add_argument('--cuda', help='Enable cuda for a model', default=True) + + parser.add_argument('--batch-size', help='Data batch size for a model', default=16) + args = parser.parse_args() + return args + +def seed(args): + torch.backends.cudnn.deterministic = True + torch.backends.cudnn.benchmark = False + torch.manual_seed(args.seed) + torch.cuda.manual_seed(args.seed) + torch.cuda.manual_seed_all(args.seed) + + +def main(): + args = arguments() + seed(args) + + model = DeepLab(backbone='mobilenet', output_stride=16, num_classes=21, + sync_bn=False) + model.eval() + + from aimet_torch import batch_norm_fold + from aimet_torch import utils + args.input_shape = (1,3,513,513) + batch_norm_fold.fold_all_batch_norms(model, args.input_shape) + utils.replace_modules_of_type1_with_type2(model, torch.nn.ReLU6, torch.nn.ReLU) + # from IPython import embed; embed() + if args.checkpoint_path: + 
model.load_state_dict(torch.load(args.checkpoint_path)) + else: + raise ValueError('checkpoint path {} must be specified'.format(args.checkpoint_path)) + + data_loader_kwargs = { 'worker_init_fn':work_init, 'num_workers' : 0} + train_loader, val_loader, test_loader, num_class = make_data_loader(args, **data_loader_kwargs) + eval_func_quant = model_eval(args, val_loader) + eval_func = model_eval(args, val_loader) + + from aimet_common.defs import QuantScheme + from aimet_torch.pro.quantsim import QuantizationSimModel + if hasattr(args, 'quant_scheme'): + if args.quant_scheme == 'range_learning_tf': + quant_scheme = QuantScheme.training_range_learning_with_tf_init + elif args.quant_scheme == 'range_learning_tfe': + quant_scheme = QuantScheme.training_range_learning_with_tf_enhanced_init + elif args.quant_scheme == 'tf': + quant_scheme = QuantScheme.post_training_tf + elif args.quant_scheme == 'tf_enhanced': + quant_scheme = QuantScheme.post_training_tf_enhanced + else: + raise ValueError("Got unrecognized quant_scheme: " + args.quant_scheme) + kwargs = { + 'quant_scheme': quant_scheme, + 'default_param_bw': args.default_param_bw, + 'default_output_bw': args.default_output_bw, + 'config_file': args.config_file + } + print(kwargs) + sim = QuantizationSimModel(model.cpu(), input_shapes=args.input_shape, **kwargs) + sim.compute_encodings(eval_func_quant, (1024, True)) + post_quant_top1 = eval_func(sim.model.cuda(), (99999999, True)) + print("Post Quant mIoU :", post_quant_top1) + +if __name__ == '__main__': + main() diff --git a/zoo_torch/examples/eval_efficientnetlite0.py b/zoo_torch/examples/eval_efficientnetlite0.py new file mode 100755 index 0000000..8e45717 --- /dev/null +++ b/zoo_torch/examples/eval_efficientnetlite0.py @@ -0,0 +1,240 @@ +#!/usr/bin/env python3.6 +# -*- mode: python -*- +# ============================================================================= +# @@-COPYRIGHT-START-@@ +# +# Copyright (c) 2020 of Qualcomm Innovation Center, Inc. All rights reserved. 
+# +# @@-COPYRIGHT-END-@@ +# ============================================================================= + +''' AIMET Post Quantization code for EfficientNet-Lite0 ''' + +import random +import numpy as np +import torch +import geffnet +from torch.utils.data import DataLoader +from torchvision import transforms, datasets +from tqdm import tqdm +import argparse + +from aimet_torch import utils +from aimet_torch import cross_layer_equalization +from aimet_torch import batch_norm_fold +from aimet_common.defs import QuantScheme +from aimet_torch.pro.quantsim import QuantizationSimModel +from aimet_torch.adaround.adaround_weight import Adaround, AdaroundParameters +from aimet_torch.onnx_utils import onnx_pytorch_conn_graph_type_pairs +from aimet_common.utils import AimetLogger +import logging +AimetLogger.set_level_for_all_areas(logging.DEBUG) +onnx_pytorch_conn_graph_type_pairs.append([["Clip"], ["hardtanh"]]) + +def work_init(work_id): + seed = torch.initial_seed() % 2**32 + random.seed(seed + work_id) + np.random.seed(seed + work_id) + +def model_eval(data_loader, image_size, batch_size=64, quant = False): + def func_wrapper_quant(model, arguments): + top1_acc = 0.0 + total_num = 0 + idx = 0 + iterations , use_cuda = arguments[0], arguments[1] + if use_cuda: + model.cuda() + for sample, label in tqdm(data_loader): + total_num += sample.size()[0] + if use_cuda: + sample = sample.cuda() + label = label.cuda() + logits = model(sample) + pred = torch.argmax(logits, dim = 1) + correct = sum(torch.eq(pred, label)).cpu().numpy() + top1_acc += correct + idx += 1 + if idx > iterations: + break + avg_acc = top1_acc * 100. / total_num + print("Top 1 ACC : {:0.2f}".format(avg_acc)) + return avg_acc + + def func_wrapper(model, arguments): + top1_acc = 0.0 + total_num = 0 + iterations , use_cuda = arguments[0], arguments[1] + if use_cuda: + model.cuda() + for sample, label in tqdm(data_loader): + total_num += sample.size()[0] + if use_cuda: + sample = sample.cuda() + label = label.cuda() + logits = model(sample) + pred = torch.argmax(logits, dim = 1) + correct = sum(torch.eq(pred, label)).cpu().numpy() + top1_acc += correct + avg_acc = top1_acc * 100. 
/ total_num + print("Top 1 ACC : {:0.2f}".format(avg_acc)) + return avg_acc + if quant: + func = func_wrapper_quant + else: + func = func_wrapper + return func + +def seed(args): + torch.backends.cudnn.deterministic = True + torch.backends.cudnn.benchmark = False + torch.manual_seed(args.seed) + torch.cuda.manual_seed(args.seed) + torch.cuda.manual_seed_all(args.seed) + + +def load_model(pretrained = True): + model = getattr(geffnet, 'efficientnet_lite0')(pretrained) + return model + +def run_pytorch_bn_fold(config, model): + folded_pairs = batch_norm_fold.fold_all_batch_norms(model.cpu(), config.input_shape) + conv_bn_pairs = {} + for conv_bn in folded_pairs: + conv_bn_pairs[conv_bn[0]] = conv_bn[1] + return model, conv_bn_pairs + +def run_pytorch_cross_layer_equalization(config, model): + cross_layer_equalization.equalize_model(model.cpu(), config.input_shape) + return model + +def run_pytorch_adaround(config, model, data_loaders): + if hasattr(config, 'quant_scheme'): + if config.quant_scheme == 'range_learning_tf': + quant_scheme = QuantScheme.post_training_tf + elif config.quant_scheme == 'range_learning_tfe': + quant_scheme = QuantScheme.post_training_tf_enhanced + elif config.quant_scheme == 'tf': + quant_scheme = QuantScheme.post_training_tf + elif config.quant_scheme == 'tf_enhanced': + quant_scheme = QuantScheme.post_training_tf_enhanced + else: + raise ValueError("Got unrecognized quant_scheme: " + config.quant_scheme) + + params = AdaroundParameters(data_loader = data_loaders, num_batches = config.num_batches, default_num_iterations = config.num_iterations, + default_reg_param = 0.01, default_beta_range = (20, 2)) + ada_model = Adaround.apply_adaround(model.cuda(), params, default_param_bw= config.default_param_bw, + default_quant_scheme = quant_scheme, + default_config_file = config.config_file + ) + return ada_model + + +def arguments(): + parser = argparse.ArgumentParser(description='Evaluation script for PyTorch EfficientNet-lite0 networks.') + + parser.add_argument('--images-dir', help='Imagenet eval image', default='./ILSVRC2012_PyTorch/', type=str) + parser.add_argument('--input-shape', help='Model to an input image shape, (ex : [batch, channel, width, height]', default=(1,3,224,224)) + parser.add_argument('--seed', help='Seed number for reproducibility', default=0) + + parser.add_argument('--quant-tricks', help='Preprocessing prior to Quantization', choices=['BNfold', 'CLE', 'adaround'], nargs = "+") + parser.add_argument('--quant-scheme', help='Quant scheme to use for quantization (tf, tf_enhanced, range_learning_tf, range_learning_tf_enhanced).', default='tf', choices = ['tf', 'tf_enhanced', 'range_learning_tf', 'range_learning_tf_enhanced']) + parser.add_argument('--round-mode', help='Round mode for quantization.', default='nearest') + parser.add_argument('--default-output-bw', help='Default output bitwidth for quantization.', default=8) + parser.add_argument('--default-param-bw', help='Default parameter bitwidth for quantization.', default=8) + parser.add_argument('--config-file', help='Quantsim configuration file.', default=None, type=str) + parser.add_argument('--cuda', help='Enable cuda for a model', default=True) + + parser.add_argument('--batch-size', help='Data batch size for a model', default=64) + parser.add_argument('--num-workers', help='Number of workers to run data loader in parallel', default=16) + + parser.add_argument('--num-iterations', help='Number of iterations used for adaround optimization', default=10000, type = int) + 
parser.add_argument('--num-batches', help='Number of batches used for adaround optimization', default=16, type = int) + + args = parser.parse_args() + return args + + +def main(): + args = arguments() + seed(args) + + model = load_model() + model.eval() + + image_size = args.input_shape[-1] + + data_loader_kwargs = { 'worker_init_fn':work_init, 'num_workers' : args.num_workers} + normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], + std=[0.229, 0.224, 0.225]) + val_transforms = transforms.Compose([ + transforms.Resize(image_size + 24), + transforms.CenterCrop(image_size), + transforms.ToTensor(), + normalize]) + val_data = datasets.ImageFolder(args.images_dir + '/val/', val_transforms) + val_dataloader = DataLoader(val_data, args.batch_size, shuffle = False, pin_memory = True, **data_loader_kwargs) + + eval_func_quant = model_eval(val_dataloader, image_size, batch_size=args.batch_size, quant = True) + eval_func = model_eval(val_dataloader, image_size, batch_size=args.batch_size) + + if 'BNfold' in args.quant_tricks: + print("BN fold") + model, conv_bn_pairs = run_pytorch_bn_fold(args, model) + if 'CLE' in args.quant_tricks: + print("CLE") + model = run_pytorch_cross_layer_equalization(args, model) + print(model) + if 'adaround' in args.quant_tricks: + model = run_pytorch_adaround(args, model, val_dataloader) + + if hasattr(args, 'quant_scheme'): + if args.quant_scheme == 'range_learning_tf': + quant_scheme = QuantScheme.training_range_learning_with_tf_init + elif args.quant_scheme == 'range_learning_tfe': + quant_scheme = QuantScheme.training_range_learning_with_tf_enhanced_init + elif args.quant_scheme == 'tf': + quant_scheme = QuantScheme.post_training_tf + elif args.quant_scheme == 'tf_enhanced': + quant_scheme = QuantScheme.post_training_tf_enhanced + else: + raise ValueError("Got unrecognized quant_scheme: " + args.quant_scheme) + kwargs = { + 'quant_scheme': quant_scheme, + 'default_param_bw': args.default_param_bw, + 'default_output_bw': args.default_output_bw, + 'config_file': args.config_file + } + print(kwargs) + sim = QuantizationSimModel(model.cpu(), input_shapes=args.input_shape, **kwargs) + + # Manually Config Super group, AIMET currently does not support [Conv-ReLU6] in a supergroup + from aimet_torch.qc_quantize_op import QcPostTrainingWrapper + for quant_wrapper in sim.model.modules(): + if isinstance(quant_wrapper, QcPostTrainingWrapper): + if isinstance(quant_wrapper._module_to_wrap, torch.nn.Conv2d): + quant_wrapper.output_quantizer.enabled = False + + sim.model.blocks[0][0].conv_pw.output_quantizer.enabled = True + sim.model.blocks[1][0].conv_pwl.output_quantizer.enabled = True + sim.model.blocks[1][1].conv_pwl.output_quantizer.enabled = True + sim.model.blocks[2][0].conv_pwl.output_quantizer.enabled = True + sim.model.blocks[2][1].conv_pwl.output_quantizer.enabled = True + sim.model.blocks[3][0].conv_pwl.output_quantizer.enabled = True + sim.model.blocks[3][1].conv_pwl.output_quantizer.enabled = True + sim.model.blocks[3][2].conv_pwl.output_quantizer.enabled = True + sim.model.blocks[4][0].conv_pwl.output_quantizer.enabled = True + sim.model.blocks[4][1].conv_pwl.output_quantizer.enabled = True + sim.model.blocks[4][2].conv_pwl.output_quantizer.enabled = True + sim.model.blocks[5][0].conv_pwl.output_quantizer.enabled = True + sim.model.blocks[5][1].conv_pwl.output_quantizer.enabled = True + sim.model.blocks[5][2].conv_pwl.output_quantizer.enabled = True + sim.model.blocks[5][3].conv_pwl.output_quantizer.enabled = True + 
sim.model.blocks[6][0].conv_pwl.output_quantizer.enabled = True + + sim.compute_encodings(eval_func_quant, (32, True)) + print(sim) + post_quant_top1 = eval_func(sim.model.cuda(), (0, True)) + print("Post Quant Top1 :", post_quant_top1) + +if __name__ == '__main__': + main() diff --git a/zoo_torch/examples/eval_mobilenetv2.py b/zoo_torch/examples/eval_mobilenetv2.py new file mode 100755 index 0000000..acfe366 --- /dev/null +++ b/zoo_torch/examples/eval_mobilenetv2.py @@ -0,0 +1,156 @@ +#!/usr/bin/env python3.6 +# -*- mode: python -*- +# ============================================================================= +# @@-COPYRIGHT-START-@@ +# +# Copyright (c) 2020 of Qualcomm Innovation Center, Inc. All rights reserved. +# +# @@-COPYRIGHT-END-@@ +# ============================================================================= + +''' AIMET Quantsim code for MobileNetV2 ''' + +import random +import numpy as np +import torch +from model.MobileNetV2 import mobilenet_v2 +from torch.utils.data import DataLoader +from torchvision import transforms, datasets +from tqdm import tqdm +import argparse +def work_init(work_id): + seed = torch.initial_seed() % 2**32 + random.seed(seed + work_id) + np.random.seed(seed + work_id) + +def model_eval(images_dir, image_size, batch_size=64, num_workers=16, quant = False): + + data_loader_kwargs = { 'worker_init_fn':work_init, 'num_workers' : num_workers} + normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], + std=[0.229, 0.224, 0.225]) + val_transforms = transforms.Compose([ + transforms.Resize(image_size + 24), + transforms.CenterCrop(image_size), + transforms.ToTensor(), + normalize]) + val_data = datasets.ImageFolder(images_dir, val_transforms) + val_dataloader = DataLoader(val_data, batch_size, shuffle = False, pin_memory = True, **data_loader_kwargs) + def func_wrapper_quant(model, arguments): + top1_acc = 0.0 + total_num = 0 + idx = 0 + iterations , use_cuda = arguments[0], arguments[1] + if use_cuda: + model.cuda() + for sample, label in tqdm(val_dataloader): + total_num += sample.size()[0] + if use_cuda: + sample = sample.cuda() + label = label.cuda() + logits = model(sample) + pred = torch.argmax(logits, dim = 1) + correct = sum(torch.eq(pred, label)).cpu().numpy() + top1_acc += correct + idx += 1 + if idx > iterations: + break + avg_acc = top1_acc * 100. / total_num + print("Top 1 ACC : {:0.2f}".format(avg_acc)) + return avg_acc + + def func_wrapper(model, arguments): + top1_acc = 0.0 + total_num = 0 + iterations , use_cuda = arguments[0], arguments[1] + if use_cuda: + model.cuda() + for sample, label in tqdm(val_dataloader): + total_num += sample.size()[0] + if use_cuda: + sample = sample.cuda() + label = label.cuda() + logits = model(sample) + pred = torch.argmax(logits, dim = 1) + correct = sum(torch.eq(pred, label)).cpu().numpy() + top1_acc += correct + avg_acc = top1_acc * 100. 
/ total_num + print("Top 1 ACC : {:0.2f}".format(avg_acc)) + return avg_acc + if quant: + func = func_wrapper_quant + else: + func = func_wrapper + return func + + +def arguments(): + parser = argparse.ArgumentParser(description='Evaluation script for PyTorch ImageNet networks.') + + parser.add_argument('--model-path', help='Path to checkpoint directory to load from.', default = "./model/mv2qat_modeldef.pth", type=str) + parser.add_argument('--images-dir', help='Imagenet eval image', default='./ILSVRC2012/', type=str) + parser.add_argument('--input-shape', help='Model to an input image shape, (ex : [batch, channel, width, height]', default=(1,3,224,224)) + parser.add_argument('--seed', help='Seed number for reproducibility', default=0) + + parser.add_argument('--quant-tricks', help='Preprocessing prior to Quantization', choices=['BNfold', 'CLS', 'HBF', 'CLE', 'BC', 'adaround'], nargs = "+") + parser.add_argument('--quant-scheme', help='Quant scheme to use for quantization (tf, tf_enhanced, range_learning_tf, range_learning_tf_enhanced).', default='tf', choices = ['tf', 'tf_enhanced', 'range_learning_tf', 'range_learning_tf_enhanced']) + parser.add_argument('--round-mode', help='Round mode for quantization.', default='nearest') + parser.add_argument('--default-output-bw', help='Default output bitwidth for quantization.', default=8) + parser.add_argument('--default-param-bw', help='Default parameter bitwidth for quantization.', default=8) + parser.add_argument('--config-file', help='Quantsim configuration file.', default=None, type=str) + parser.add_argument('--cuda', help='Enable cuda for a model', default=True) + + parser.add_argument('--batch-size', help='Data batch size for a model', default=64) + + + args = parser.parse_args() + return args + +def seed(args): + torch.backends.cudnn.deterministic = True + torch.backends.cudnn.benchmark = False + torch.manual_seed(args.seed) + torch.cuda.manual_seed(args.seed) + torch.cuda.manual_seed_all(args.seed) + + +def main(): + args = arguments() + seed(args) + + if args.model_path: + model = torch.load(args.model_path) + else: + raise ValueError('Model path {} must be specified'.format(args.model_path)) + + model.eval() + image_size = args.input_shape[-1] + eval_func_quant = model_eval(args.images_dir + '/val/', image_size, batch_size=args.batch_size, num_workers=0, quant = True) + eval_func = model_eval(args.images_dir + '/val/', image_size, batch_size=args.batch_size, num_workers=16) + + from aimet_common.defs import QuantScheme + from aimet_torch.pro.quantsim import QuantizationSimModel + if hasattr(args, 'quant_scheme'): + if args.quant_scheme == 'range_learning_tf': + quant_scheme = QuantScheme.training_range_learning_with_tf_init + elif args.quant_scheme == 'range_learning_tfe': + quant_scheme = QuantScheme.training_range_learning_with_tf_enhanced_init + elif args.quant_scheme == 'tf': + quant_scheme = QuantScheme.post_training_tf + elif args.quant_scheme == 'tf_enhanced': + quant_scheme = QuantScheme.post_training_tf_enhanced + else: + raise ValueError("Got unrecognized quant_scheme: " + args.quant_scheme) + kwargs = { + 'quant_scheme': quant_scheme, + 'default_param_bw': args.default_param_bw, + 'default_output_bw': args.default_output_bw, + 'config_file': args.config_file + } + print(kwargs) + sim = QuantizationSimModel(model.cpu(), input_shapes=args.input_shape, **kwargs) + sim.compute_encodings(eval_func_quant, (32, True)) + post_quant_top1 = eval_func(sim.model.cuda(), (0, True)) + print("Post Quant Top1 :", post_quant_top1) + +if 
__name__ == '__main__': + main() diff --git a/zoo_torch/examples/pose_estimation_quanteval.py b/zoo_torch/examples/pose_estimation_quanteval.py new file mode 100644 index 0000000..fc4af80 --- /dev/null +++ b/zoo_torch/examples/pose_estimation_quanteval.py @@ -0,0 +1,499 @@ +#!/usr/bin/env python3.6 +# -*- mode: python -*- +# ============================================================================= +# @@-COPYRIGHT-START-@@ +# +# Copyright (c) 2020 of Qualcomm Innovation Center, Inc. All rights reserved. +# +# @@-COPYRIGHT-END-@@ +# ============================================================================= + +""" +This script applies and evaluates a compressed pose estimation model which has a similar +structure with https://github.com/CMU-Perceptual-Computing-Lab/openpose. Evaluation is +done on 2014 val dataset with person keypoints only. This model is quantization-friendly +so no post-training methods or QAT were applied. For instructions please refer to +zoo_torch/Docs/PoseEstimation.md +""" + + +import os +import math +import argparse +from functools import partial +from tqdm import tqdm + +import cv2 +from scipy.ndimage.filters import gaussian_filter +import torch +import numpy as np +from pycocotools.coco import COCO +from pycocotools.cocoeval import COCOeval + +from aimet_torch import quantsim + + +def non_maximum_suppression(map, thresh): + map_s = gaussian_filter(map, sigma=3) + + map_left = np.zeros(map_s.shape) + map_left[1:, :] = map_s[:-1, :] + map_right = np.zeros(map_s.shape) + map_right[:-1, :] = map_s[1:, :] + map_up = np.zeros(map_s.shape) + map_up[:, 1:] = map_s[:, :-1] + map_down = np.zeros(map_s.shape) + map_down[:, :-1] = map_s[:, 1:] + + peaks_binary = np.logical_and.reduce((map_s >= map_left, map_s >= map_right, map_s >= map_up, + map_s >= map_down, + map_s > thresh)) + + peaks = zip(np.nonzero(peaks_binary)[1], np.nonzero(peaks_binary)[0]) # note reverse + peaks_with_score = [x + (map[x[1], x[0]],) for x in peaks] + + return peaks_with_score + + +def pad_image(img, stride, padding): + h = img.shape[0] + w = img.shape[1] + + pad = 4 * [None] + pad[0] = 0 # up + pad[1] = 0 # left + pad[2] = 0 if (h % stride == 0) else stride - (h % stride) # down + pad[3] = 0 if (w % stride == 0) else stride - (w % stride) # right + + img_padded = img + pad_up = np.tile(img_padded[0:1, :, :] * 0 + padding, (pad[0], 1, 1)) + img_padded = np.concatenate((pad_up, img_padded), axis=0) + pad_left = np.tile(img_padded[:, 0:1, :] * 0 + padding, (1, pad[1], 1)) + img_padded = np.concatenate((pad_left, img_padded), axis=1) + pad_down = np.tile(img_padded[-2:-1, :, :] * 0 + padding, (pad[2], 1, 1)) + img_padded = np.concatenate((img_padded, pad_down), axis=0) + pad_right = np.tile(img_padded[:, -2:-1, :] * 0 + padding, (1, pad[3], 1)) + img_padded = np.concatenate((img_padded, pad_right), axis=1) + + return img_padded, pad + + +def encode_input(image, scale, stride, padding): + image_scaled = cv2.resize(image, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC) + image_scaled_padded, pad = pad_image(image_scaled, stride, padding) + + return image_scaled_padded, pad + + +def decode_output(data, stride, padding, input_shape, image_shape): + output = np.transpose(np.squeeze(data), (1, 2, 0)) + output = cv2.resize(output, (0, 0), fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC) + output = output[:input_shape[0] - padding[2], :input_shape[1] - padding[3], :] + output = cv2.resize(output, (image_shape[1], image_shape[0]), interpolation=cv2.INTER_CUBIC) + + return output + + +def 
preprocess(image, transforms): + mean_bgr = [34.282957728666474, 32.441979567868017, 24.339757511312481] + + image = image.astype(np.float32) + + if 'bgr' in transforms: + if image.shape[0] == 3: + image = image[::-1, :, :] + elif image.shape[2] == 3: + image = image[:, :, ::-1] + + if 'tr' in transforms: + image = image.transpose((2, 0, 1)) + + if 'mean' in transforms: + image[0, :, :] -= mean_bgr[0] + image[1, :, :] -= mean_bgr[1] + image[2, :, :] -= mean_bgr[2] + + if 'addchannel' in transforms: + image = image[np.newaxis, :, :, :] + + if 'normalize' in transforms: + image = image / 256 - 0.5 + + return image + + +def run_model(model, image, fast=False): + scale_search = [1.] + crop = 368 + stride = 8 + padValue = 128 + + if fast: + scales = scale_search + else: + scales = [x * crop / image.shape[0] for x in scale_search] + + heatmaps, pafs = [], [] + for scale in scales: + if fast: + horiz = image.shape[0] < image.shape[1] + sz = (496, 384) if horiz else (384, 496) + image_encoded = cv2.resize(image, dsize=(int(sz[0] * scale), int(sz[1] * scale))) + else: + image_encoded, pad = encode_input(image, scale, stride, + padValue) + image_encoded_ = preprocess(image_encoded, + ['addchannel', 'normalize', 'bgr']) + image_encoded_ = np.transpose(image_encoded_, (0, 3, 1, 2)) + with torch.no_grad(): + input_image = torch.FloatTensor(torch.from_numpy(image_encoded_).float()) + if next(model.parameters()).is_cuda: + input_image = input_image.to(device='cuda') + output = model(input_image) + paf = output[2].cpu().data.numpy().transpose((0, 2, 3, 1)) + heatmap = output[3].cpu().data.numpy().transpose((0, 2, 3, 1)) + if fast: + paf = cv2.resize(paf[0], (image.shape[1], image.shape[0])) + heatmap = cv2.resize(heatmap[0], dsize=(image.shape[1], image.shape[0])) + else: + # paf = paf.transpose((0, 3, 1, 2)) + # heatmap = heatmap.transpose((0, 3, 1, 2)) + paf = decode_output(paf, stride, pad, image_encoded.shape, + image.shape) + heatmap = decode_output(heatmap, stride, pad, image_encoded.shape, + image.shape) + + pafs.append(paf) + heatmaps.append(heatmap) + + return np.asarray(heatmaps).mean(axis=0), np.asarray(pafs).mean(axis=0) + + +def get_keypoints(heatmap): + thre1 = 0.1 + keypoints_all = [] + keypoints_cnt = 0 + for part in range(19 - 1): + keypoints = non_maximum_suppression(heatmap[:, :, part], thre1) + id = range(keypoints_cnt, keypoints_cnt + len(keypoints)) + keypoints = [keypoints[i] + (id[i],) for i in range(len(id))] + keypoints_all.append(keypoints) + keypoints_cnt += len(keypoints) + return keypoints_all + + +def get_limb_consistency(paf, start_keypoint, end_keypoint, image_h, div_num=10): + vec_key = np.subtract(end_keypoint[:2], start_keypoint[:2]) + vec_key_norm = math.sqrt(vec_key[0] * vec_key[0] + vec_key[1] * vec_key[1]) + if vec_key_norm == 0: + vec_key_norm = 1 + vec_key = np.divide(vec_key, vec_key_norm) + + vec_paf = list(zip(np.linspace(start_keypoint[0], end_keypoint[0], num=div_num).astype(int), + np.linspace(start_keypoint[1], end_keypoint[1], num=div_num).astype(int))) + + vec_paf_x = np.array([paf[vec_paf[k][1], vec_paf[k][0], 0] for k in range(div_num)]) + vec_paf_y = np.array([paf[vec_paf[k][1], vec_paf[k][0], 1] for k in range(div_num)]) + + # To see how well the direction of the prediction over the line connecting the limbs aligns + # with the vec_key we compute the integral of the dot product of the "affinity vector at point + # 'u' on the line" and the "vec_key". 
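+    # (roughly: E = integral over u in [0, 1] of PAF(p(u)) . v_key du, taken along the
+    #  segment p(u) joining start_keypoint and end_keypoint)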
+ # In discrete form, this integral is done as below: + vec_sims = np.multiply(vec_paf_x, vec_key[0]) + np.multiply(vec_paf_y, vec_key[1]) + + # this is just a heuristic approach to punish very long predicted limbs + vec_sims_prior = vec_sims.mean() + min(0.5 * image_h / vec_key_norm - 1, 0) + + return vec_sims, vec_sims_prior + + +def connect_keypoints(image_shape, keypoints, paf, limbs, limbsInds): + thre2 = 0.05 + connections = [] + small_limb_list = [1, 15, 16, 17, 18] + for k in range(len(limbsInds)): + paf_limb = paf[:, :, limbsInds[k]] + limb_strs = keypoints[limbs[k][0]] + limb_ends = keypoints[limbs[k][1]] + + if len(limb_strs) != 0 and len(limb_ends) != 0: + cands = [] + for i, limb_str in enumerate(limb_strs): + for j, limb_end in enumerate(limb_ends): + # for each potential pair of keypoints which can have a limb in between we + # measure a score using the get_limb_consistency function + if limbs[k][0] in small_limb_list or limbs[k][1] in small_limb_list: + sims, sims_p = get_limb_consistency(paf_limb, limb_str, limb_end, + image_shape[0], div_num=10) + else: + sims, sims_p = get_limb_consistency(paf_limb, limb_str, limb_end, + image_shape[0], div_num=10) + if len(np.where(sims > thre2)[0]) > int(0.80 * len(sims)) and sims_p > 0: + cands.append([i, j, sims_p]) + cands = sorted(cands, key=lambda x: x[2], reverse=True) + connection = np.zeros((0, 3)) + visited_strs, visited_ends = [], [] + for cand in cands: + i, j, s = cand + if i not in visited_strs and j not in visited_ends: + connection = np.vstack([connection, [limb_strs[i][3], limb_ends[j][3], s]]) + visited_strs.append(i) + visited_ends.append(j) + + if len(connection) >= min(len(limb_strs), len(limb_ends)): + break + connections.append(connection) + else: + connections.append([]) + return connections + + +def create_skeletons(keypoints, connections, limbs): + # last number in each row is the total parts number of that person + # the second last number in each row is the score of the overall configuration + skeletons = -1 * np.ones((0, 20)) + keypoints_flatten = np.array([item for sublist in keypoints for item in sublist]) + + for k in range(len(limbs)): + if len(connections[k]) > 0: + detected_str = connections[k][:, 0] + detected_end = connections[k][:, 1] + limb_str, limb_end = np.array(limbs[k]) + + for i in range(len(connections[k])): + found = 0 + subset_idx = [-1, -1] + for j in range(len(skeletons)): + if skeletons[j][limb_str] == detected_str[i] or \ + skeletons[j][limb_end] == detected_end[i]: + subset_idx[found] = j + found += 1 + + if found == 1: + j = subset_idx[0] + if skeletons[j][limb_end] != detected_end[i]: + skeletons[j][limb_end] = detected_end[i] + skeletons[j][-1] += 1 + skeletons[j][-2] += keypoints_flatten[detected_end[i].astype(int), 2] + \ + connections[k][i][2] + elif found == 2: # if found 2 and disjoint, merge them + j1, j2 = subset_idx + + membership = ((skeletons[j1] >= 0).astype(int) + + (skeletons[j2] >= 0).astype(int))[:-2] + if len(np.nonzero(membership == 2)[0]) == 0: # merge + skeletons[j1][:-2] += (skeletons[j2][:-2] + 1) + skeletons[j1][-2:] += skeletons[j2][-2:] + skeletons[j1][-2] += connections[k][i][2] + skeletons = np.delete(skeletons, j2, 0) + else: # as like found == 1 + skeletons[j1][limb_end] = detected_end[i] + skeletons[j1][-1] += 1 + skeletons[j1][-2] += keypoints_flatten[detected_end[i].astype(int), 2] + \ + connections[k][i][2] + + # if find no partA in the subset, create a new subset + elif not found and k < 17: + row = -1 * np.ones(20) + row[limb_str] = detected_str[i] 
+ row[limb_end] = detected_end[i] + row[-1] = 2 + row[-2] = sum(keypoints_flatten[connections[k][i, :2].astype(int), 2]) + \ + connections[k][i][2] + skeletons = np.vstack([skeletons, row]) + + # delete some rows of subset which has few parts occur + deleteIdx = [] + for i in range(len(skeletons)): + if skeletons[i][-1] < 4 or skeletons[i][-2] / skeletons[i][-1] < 0.4: + deleteIdx.append(i) + skeletons = np.delete(skeletons, deleteIdx, axis=0) + return {'keypoints': skeletons[:, :18], 'scores': skeletons[:, 18]} + + +def estimate_pose(image_shape, heatmap, paf): + # limbs as pair of keypoints: [start_keypoint, end_keypoint] keypoints index to heatmap matrix + limbs = [[1, 2], [1, 5], [2, 3], [3, 4], [5, 6], [6, 7], [1, 8], [8, 9], [9, 10], [1, 11], + [11, 12], [12, 13], + [1, 0], [0, 14], [14, 16], [0, 15], [15, 17], [2, 16], [5, 17]] + # index where each limb stands in paf matrix. Two consecutive indices for x and y component + # of paf + limbsInd = [[12, 13], [20, 21], [14, 15], [16, 17], [22, 23], [24, 25], [0, 1], [2, 3], [4, 5], + [6, 7], [8, 9], + [10, 11], [28, 29], [30, 31], [34, 35], [32, 33], [36, 37], [18, 19], [26, 27]] + + # Computing the keypoints using non-max-suppression + keypoints = get_keypoints(heatmap) + + # Computing which pairs of joints should be connected based on the paf. + connections = connect_keypoints(image_shape, keypoints, paf, limbs, limbsInd) + + skeletons = create_skeletons(keypoints, connections, limbs) + + return skeletons, np.array([item for sublist in keypoints for item in sublist]) + + +def parse_results(skeletons, points): + coco_indices = [0, -1, 6, 8, 10, 5, 7, 9, 12, 14, 16, 11, 13, 15, 2, 1, 4, 3] + + skeletons_out, scores = [], [] + for score, keypoints in zip(skeletons['scores'], skeletons['keypoints']): + skeleton = [] + for p in range(len(keypoints)): + if p == 1: + continue + ind = int(keypoints[p]) + if ind >= 0: + point = {'x': points[ind, 0], 'y': points[ind, 1], 'score': points[ind, 2], + 'id': coco_indices[p]} + skeleton.append(point) + + skeletons_out.append(skeleton) + scores.append(score) + return {'skeletons': skeletons_out, 'scores': scores} + + +class COCOWrapper: + def __init__(self, coco_path, num_imgs=None): + self.coco_path = coco_path + self.num_imgs = num_imgs + # sys.path.append(self.coco_apth + "codes/PythonAPI") + + def get_images(self): + imgs = self.cocoGT.imgs.values() + + image_ids = sorted(map(lambda x: x['id'], self.cocoGT.imgs.values())) + if self.num_imgs: + image_ids = image_ids[:self.num_imgs] + imgs = list(filter(lambda x: x['id'] in image_ids, imgs)) + + return imgs + + def evaluate_json(self, obj): + # initialize COCO detections api + cocoDT = self.cocoGT.loadRes(obj) + + imgIds = sorted(self.cocoGT.getImgIds()) + if self.num_imgs: + imgIds = imgIds[:self.num_imgs] + + # running evaluation + cocoEval = COCOeval(self.cocoGT, cocoDT, 'keypoints') + cocoEval.params.imgIds = imgIds + cocoEval.evaluate() + cocoEval.accumulate() + cocoEval.summarize() + return cocoEval.stats[0::5] + + def get_results_json(self, results, imgs): + results_obj = [] + for img, result in list(zip(imgs, results)): + for score, skeleton in list(zip(result['scores'], result['skeletons'])): + obj = {'image_id': img['id'], 'category_id': 1, 'keypoints': np.zeros(shape=(3, 17))} + + for keypoint in skeleton: + obj['keypoints'][0, keypoint['id']] = keypoint['x'] - 0.5 + obj['keypoints'][1, keypoint['id']] = keypoint['y'] - 0.5 + obj['keypoints'][2, keypoint['id']] = 1 + obj['keypoints'] = list(np.reshape(obj['keypoints'], newshape=(51,), 
order='F'))
+                obj['score'] = score / len(skeleton)
+
+                results_obj.append(obj)
+
+        return results_obj
+
+    @property
+    def cocoGT(self):
+        annType = 'keypoints'
+        prefix = 'person_keypoints'
+        print('Initializing demo for *%s* results.' % (annType))
+
+        # initialize COCO ground truth api
+        dataType = 'val2014'
+        annFile = os.path.join(self.coco_path, 'annotations/%s_%s.json' % (prefix, dataType))
+        cocoGT = COCO(annFile)
+
+        if not cocoGT:
+            raise AttributeError('COCO ground truth demo failed to initialize!')
+
+        return cocoGT
+
+
+def evaluate_model(model,
+                   coco_path,
+                   num_imgs=None,
+                   fast=True):
+    coco = COCOWrapper(coco_path, num_imgs)
+
+    results = []
+    image_path = os.path.join(coco.coco_path, 'images/val2014/')
+    imgs = coco.get_images()
+    print("Running extended evaluation on the validation set")
+    for i, img in tqdm(enumerate(imgs)):
+        image = cv2.imread(image_path + img['file_name'])  # B,G,R order
+
+        heatmap, paf = run_model(model, image, fast)
+
+        skeletons, keypoints = estimate_pose(image.shape, heatmap, paf)
+        results.append(parse_results(skeletons, keypoints))
+
+    try:
+        ans = coco.evaluate_json(coco.get_results_json(results, imgs))
+        return ans
+    except:
+        return [0, 0]
+
+
+def parse_args():
+    parser = argparse.ArgumentParser(prog='pose_estimation_quanteval',
+                                     description='Evaluate the post-quantized pose estimation model')
+
+    parser.add_argument('model_dir',
+                        help='The location where the .pth file is saved, '
+                             'the whole model should be saved by torch.save()',
+                        type=str)
+    parser.add_argument('coco_path',
+                        help='The location where COCO images and annotations are saved. '
+                             'It assumes a folder structure containing two subdirectories '
+                             '`images/val2014` and `annotations`. Right now only the val2014 '
+                             'dataset with person_keypoints is supported',
+                        type=str)
+    parser.add_argument('--representative-datapath',
+                        '-reprdata',
+                        help='The location where representative data are stored. 
' + 'The data will be used for computation of encodings', + type=str) + parser.add_argument('--quant-scheme', + '-qs', + help='Support two schemes for quantization: [`tf` or `tf_enhanced`],' + '`tf_enhanced` is used by default', + default='tf_enhanced', + choices=['tf', 'tf_enhanced'], + type=str) + + return parser.parse_args() + + +def pose_estimation_quanteval(args): + # load the model checkpoint from meta + model = torch.load(args.model_dir) + + # create quantsim object which inserts quant ops between layers + sim = quantsim.QuantizationSimModel(model, + input_shapes=(1, 3, 128, 128), + quant_scheme=args.quant_scheme) + + evaluate = partial(evaluate_model, + num_imgs=100 + ) + sim.compute_encodings(evaluate, args.coco_path) + + eval_num = evaluate_model(sim.model, + args.coco_path + ) + print(f'The [mAP, mAR] results are: {eval_num}') + + +if __name__ == '__main__': + args = parse_args() + pose_estimation_quanteval(args) diff --git a/zoo_torch/examples/pytorch-deeplab-xception-zoo.patch b/zoo_torch/examples/pytorch-deeplab-xception-zoo.patch new file mode 100644 index 0000000..de7f477 --- /dev/null +++ b/zoo_torch/examples/pytorch-deeplab-xception-zoo.patch @@ -0,0 +1,176 @@ +diff --git a/modeling/aspp.py b/modeling/aspp.py +index 5a97879..770e60f 100644 +--- a/modeling/aspp.py ++++ b/modeling/aspp.py +@@ -68,7 +68,7 @@ class ASPP(nn.Module): + x3 = self.aspp3(x) + x4 = self.aspp4(x) + x5 = self.global_avg_pool(x) +- x5 = F.interpolate(x5, size=x4.size()[2:], mode='bilinear', align_corners=True) ++ x5 = F.interpolate(x5, size=x4.size()[2:], mode='nearest', align_corners=None) + x = torch.cat((x1, x2, x3, x4, x5), dim=1) + + x = self.conv1(x) +diff --git a/modeling/backbone/mobilenet.py b/modeling/backbone/mobilenet.py +index 6fff541..9edce54 100644 +--- a/modeling/backbone/mobilenet.py ++++ b/modeling/backbone/mobilenet.py +@@ -5,22 +5,21 @@ import math + from modeling.sync_batchnorm.batchnorm import SynchronizedBatchNorm2d + import torch.utils.model_zoo as model_zoo + ++from aimet_torch.defs import PassThroughOp + def conv_bn(inp, oup, stride, BatchNorm): + return nn.Sequential( + nn.Conv2d(inp, oup, 3, stride, 1, bias=False), + BatchNorm(oup), + nn.ReLU6(inplace=True) + ) +- +- +-def fixed_padding(inputs, kernel_size, dilation): +- kernel_size_effective = kernel_size + (kernel_size - 1) * (dilation - 1) +- pad_total = kernel_size_effective - 1 +- pad_beg = pad_total // 2 +- pad_end = pad_total - pad_beg +- padded_inputs = F.pad(inputs, (pad_beg, pad_end, pad_beg, pad_end)) +- return padded_inputs +- ++def _make_divisible(v, divisor=8, min_value=None): ++ if min_value is None: ++ min_value = divisor ++ new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) ++ # Make sure that round down does not go down by more than 10%. ++ if new_v < 0.9 * v: ++ new_v += divisor ++ return new_v + + class InvertedResidual(nn.Module): + def __init__(self, inp, oup, stride, dilation, expand_ratio, BatchNorm): +@@ -33,10 +32,15 @@ class InvertedResidual(nn.Module): + self.kernel_size = 3 + self.dilation = dilation + ++ # More generally: padding = (ks // 2) * dilation for odd kernel sizes. 
ks is fixed to 3, ++ # ks // 2 == 1, so (ks // 2) * dilation = dilation ++ padding = dilation + if expand_ratio == 1: + self.conv = nn.Sequential( + # dw +- nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 0, dilation, groups=hidden_dim, bias=False), ++ nn.Conv2d( ++ hidden_dim, hidden_dim, 3, stride, ++ padding, dilation, groups=hidden_dim, bias=False), + BatchNorm(hidden_dim), + nn.ReLU6(inplace=True), + # pw-linear +@@ -46,11 +50,13 @@ class InvertedResidual(nn.Module): + else: + self.conv = nn.Sequential( + # pw +- nn.Conv2d(inp, hidden_dim, 1, 1, 0, 1, bias=False), ++ # It is stupid to pad here, but we need it for backwards compatibility ++ nn.Conv2d(inp, hidden_dim, 1, 1, padding, 1, bias=False), + BatchNorm(hidden_dim), + nn.ReLU6(inplace=True), + # dw +- nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 0, dilation, groups=hidden_dim, bias=False), ++ nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 0, dilation, ++ groups=hidden_dim, bias=False), + BatchNorm(hidden_dim), + nn.ReLU6(inplace=True), + # pw-linear +@@ -59,14 +65,12 @@ class InvertedResidual(nn.Module): + ) + + def forward(self, x): +- x_pad = fixed_padding(x, self.kernel_size, dilation=self.dilation) + if self.use_res_connect: +- x = x + self.conv(x_pad) ++ x = x + self.conv(x) + else: +- x = self.conv(x_pad) ++ x = self.conv(x) + return x + +- + class MobileNetV2(nn.Module): + def __init__(self, output_stride=8, BatchNorm=None, width_mult=1., pretrained=True): + super(MobileNetV2, self).__init__() +@@ -87,7 +91,8 @@ class MobileNetV2(nn.Module): + + # building first layer + input_channel = int(input_channel * width_mult) +- self.features = [conv_bn(3, input_channel, 2, BatchNorm)] ++ # self.features = [conv_bn(3, input_channel, 2, BatchNorm)] ++ features = [conv_bn(3, input_channel, 2, BatchNorm)] + current_stride *= 2 + # building inverted residual blocks + for t, c, n, s in interverted_residual_setting: +@@ -102,18 +107,24 @@ class MobileNetV2(nn.Module): + output_channel = int(c * width_mult) + for i in range(n): + if i == 0: +- self.features.append(block(input_channel, output_channel, stride, dilation, t, BatchNorm)) ++ features.append(block(input_channel, output_channel, stride, dilation, t, BatchNorm)) ++ # self.features.append(block(input_channel, output_channel, stride, dilation, t, BatchNorm)) + else: +- self.features.append(block(input_channel, output_channel, 1, dilation, t, BatchNorm)) ++ features.append(block(input_channel, output_channel, 1, dilation, t, BatchNorm)) ++ # self.features.append(block(input_channel, output_channel, 1, dilation, t, BatchNorm)) ++ + input_channel = output_channel +- self.features = nn.Sequential(*self.features) ++ # self.features = nn.Sequential(*self.features) + self._initialize_weights() + ++ ++ # self.low_level_features = self.features[0:4] ++ # self.high_level_features = self.features[4:] + if pretrained: + self._load_pretrained_model() ++ self.low_level_features = nn.Sequential(*features[0:4]) + +- self.low_level_features = self.features[0:4] +- self.high_level_features = self.features[4:] ++ self.high_level_features = nn.Sequential(*features[4:]) + + def forward(self, x): + low_level_feat = self.low_level_features(x) +@@ -141,8 +152,8 @@ class MobileNetV2(nn.Module): + m.bias.data.zero_() + elif isinstance(m, nn.BatchNorm2d): + m.weight.data.fill_(1) +- m.bias.data.zero_() +- ++ m.bias.data.zero_() ++ + if __name__ == "__main__": + input = torch.rand(1, 3, 512, 512) + model = MobileNetV2(output_stride=16, BatchNorm=nn.BatchNorm2d) +diff --git a/modeling/decoder.py 
b/modeling/decoder.py +index 5ed41d0..ec4485e 100644 +--- a/modeling/decoder.py ++++ b/modeling/decoder.py +@@ -36,7 +36,7 @@ class Decoder(nn.Module): + low_level_feat = self.bn1(low_level_feat) + low_level_feat = self.relu(low_level_feat) + +- x = F.interpolate(x, size=low_level_feat.size()[2:], mode='bilinear', align_corners=True) ++ x = F.interpolate(x, size=low_level_feat.size()[2:], mode='nearest', align_corners=None) + x = torch.cat((x, low_level_feat), dim=1) + x = self.last_conv(x) + +diff --git a/modeling/deeplab.py b/modeling/deeplab.py +index 91907f8..8308934 100644 +--- a/modeling/deeplab.py ++++ b/modeling/deeplab.py +@@ -28,7 +28,7 @@ class DeepLab(nn.Module): + x, low_level_feat = self.backbone(input) + x = self.aspp(x) + x = self.decoder(x, low_level_feat) +- x = F.interpolate(x, size=input.size()[2:], mode='bilinear', align_corners=True) ++ x = F.interpolate(x, size=input.size()[2:], mode='nearest', align_corners=None) + + return x + diff --git a/zoo_torch/examples/srgan_quanteval.py b/zoo_torch/examples/srgan_quanteval.py new file mode 100644 index 0000000..929e752 --- /dev/null +++ b/zoo_torch/examples/srgan_quanteval.py @@ -0,0 +1,197 @@ +#!/usr/bin/env python3.6 +# -*- mode: python -*- +# ============================================================================= +# @@-COPYRIGHT-START-@@ +# +# Copyright (c) 2020 of Qualcomm Innovation Center, Inc. All rights reserved. +# +# @@-COPYRIGHT-END-@@ +# ============================================================================= + +""" +This script applies and evaluates a pre-trained srgan model taken from +https://github.com/open-mmlab/mmediting/tree/master/configs/restorers/srresnet_srgan. +Metrics for evaluation are based on y-channel by default. This model is quantization- +friendly so no post-training methods or QAT were applied. For instructions please refer +to zoo_torch/Docs/SRGAN.md +""" + +import os +import argparse +from functools import partial +from collections import OrderedDict + +import torch +import numpy as np +from aimet_torch import quantsim + +import codes.options.options as option +import codes.utils.util as util +from codes.data.util import bgr2ycbcr +from codes.data import create_dataset, create_dataloader +from codes.models import create_model + + +def evaluate_generator(generator, + test_loader, + options, + mode='y_channel', + output_dir=None): + ''' + :param generator: an srgan model`s generator part, must be an nn.module + :param test_loader: a pytorch dataloader + :param options: a dictionary which contains options for dataloader + :param mode: a string indicating on which space to evalute the PSNR & SSIM metrics. + Accepted values are ['y_channel', 'rgb'] + :param output_dir: If specified, super resolved images will be saved under the path + :return: a tuple containing the computed values of (PSNR, SSIME) sequences + ''' + if mode == 'rgb': + print('Testing on RGB channels...') + elif mode == 'y_channel': + print('Testing on Y channel...') + else: + raise ValueError('evaluation mode not supported!' 
+ 'Must be one of `RGB` or `y_channel`') + + device = torch.device('cuda' if options['gpu_ids'] is not None else 'cpu') + + psnr_values = [] + ssim_values = [] + + for data in test_loader: + need_GT = False if test_loader.dataset.opt['dataroot_GT'] is None else True + var_L = data['LQ'].to(device) + if need_GT: + real_H = data['GT'].to(device) + img_path = data['GT_path'][0] if need_GT else data['LQ_path'][0] + img_name = os.path.splitext(os.path.basename(img_path))[0] + + generator.eval() + with torch.no_grad(): + fake_H = generator(var_L) + generator.train() + + out_dict = OrderedDict() + out_dict['LQ'] = var_L.detach()[0].float().cpu() + out_dict['rlt'] = fake_H.detach()[0].float().cpu() + if need_GT: + out_dict['GT'] = real_H.detach()[0].float().cpu() + visuals = out_dict + + sr_img = util.tensor2img(visuals['rlt']) # uint8 + + # save images if output_dir specified + if output_dir: + save_img_path = os.path.join(output_dir, img_name + '.png') + util.save_img(sr_img, save_img_path) + + + # calculate PSNR and SSIM + if need_GT: + gt_img = util.tensor2img(visuals['GT']) + sr_img, gt_img = util.crop_border([sr_img, gt_img], options ['scale']) + + if mode == 'rgb': + psnr = util.calculate_psnr(sr_img, gt_img) + ssim = util.calculate_ssim(sr_img, gt_img) + psnr_values.append(psnr) + ssim_values.append(ssim) + + if mode == 'y_channel' and gt_img.shape[2] == 3: # RGB image + sr_img_y = bgr2ycbcr(sr_img / 255., only_y=True) + gt_img_y = bgr2ycbcr(gt_img / 255., only_y=True) + + psnr = util.calculate_psnr(sr_img_y * 255, gt_img_y * 255) + ssim = util.calculate_ssim(sr_img_y * 255, gt_img_y * 255) + psnr_values.append(psnr) + ssim_values.append(ssim) + + return psnr_values, ssim_values + + +def parse_args(): + parser = argparse.ArgumentParser(prog='srgan_quanteval', + description='Evaluate the pre and post quantized SRGAN model') + + parser.add_argument('--options-file', + '-opt', + help='The location where the yaml file is saved', + required=True, + type=str) + parser.add_argument('--quant-scheme', + '-qs', + help='Support two schemes for quantization: [`tf` or `tf_enhanced`],' + '`tf_enhanced` is used by default', + default='tf_enhanced', + choices=['tf', 'tf_enhanced'], + type=str) + parser.add_argument('--default-output-bw', + '-bout', + help='Default bitwidth (4-31) to use for quantizing layer inputs and outputs', + default=8, + choices=range(4, 32), + type=int) + parser.add_argument('--default-param-bw', + '-bparam', + help='Default bitwidth (4-31) to use for quantizing layer parameters', + default=8, + choices=range(4, 32), + type=int) + parser.add_argument('--output-dir', + '-outdir', + help='If specified, output images of quantized model ' + 'will be saved under this directory', + default=None, + type=str) + + return parser.parse_args() + + +def main(args): + # parse the options file + print(f'Parsing file {args.options_file}...') + opt = option.parse(args.options_file, is_train=False) + opt = option.dict_to_nonedict(opt) + + print('Loading test images...') + test_loaders = [] + for phase, dataset_opt in sorted(opt['datasets'].items()): + test_set = create_dataset(dataset_opt) + test_loader = create_dataloader(test_set, dataset_opt) + test_loaders.append(test_loader) + + model = create_model(opt) + generator = model.netG.module + + for test_loader in test_loaders: + test_set_name = test_loader.dataset.opt['name'] + print(f'Testing on dataset {test_set_name}') + psnr_vals, ssim_vals = evaluate_generator(generator, test_loader, opt) + psnr_val = np.mean(psnr_vals) + ssim_val = 
+    # The input shape is chosen arbitrarily to generate dummy input for creating the quantsim object
+    input_shapes = (1, 3, 24, 24)
+    sim = quantsim.QuantizationSimModel(generator,
+                                        input_shapes=input_shapes,
+                                        quant_scheme=args.quant_scheme,
+                                        default_output_bw=args.default_output_bw,
+                                        default_param_bw=args.default_param_bw)
+
+    evaluate_func = partial(evaluate_generator, options=opt)
+    sim.compute_encodings(evaluate_func, test_loaders[0])
+
+    for test_loader in test_loaders:
+        test_set_name = test_loader.dataset.opt['name']
+        print(f'Testing on dataset {test_set_name}')
+        psnr_vals, ssim_vals = evaluate_generator(sim.model, test_loader, opt, output_dir=args.output_dir)
+        psnr_val = np.mean(psnr_vals)
+        ssim_val = np.mean(ssim_vals)
+        print(f'Mean PSNR and SSIM for {test_set_name} on quantized model are: [{psnr_val}, {ssim_val}]')
+
+
+if __name__ == '__main__':
+    args = parse_args()
+    main(args)
\ No newline at end of file
diff --git a/zoo_torch/examples/ssd_utils.py b/zoo_torch/examples/ssd_utils.py
new file mode 100755
index 0000000..8552327
--- /dev/null
+++ b/zoo_torch/examples/ssd_utils.py
@@ -0,0 +1,82 @@
+#!/usr/bin/env python3.6
+# -*- mode: python -*-
+# =============================================================================
+# @@-COPYRIGHT-START-@@
+#
+# Copyright (c) 2020 of Qualcomm Innovation Center, Inc. All rights reserved.
+#
+# @@-COPYRIGHT-END-@@
+# =============================================================================
+
+''' AIMET Quantsim helper functions '''
+''' Calibration wrapper functions for range estimation '''
+
+from tqdm import tqdm
+from torch.utils.data import Dataset
+from vision.ssd.data_preprocessing import PredictionTransform
+from torch.utils.data import DataLoader
+import torch
+import random
+import numpy as np
+
+class VoCdataset(Dataset):
+    def __init__(self, data_dict):
+        """
+        Args:
+            data_dict: a VOC dataset object exposing `ids`, `get_image()` and `get_annotation()`
+        """
+        self.data = data_dict
+
+    def __len__(self):
+        return len(self.data.ids)
+
+    def __getitem__(self, idx):
+        image = self.data.get_image(idx)
+        label = self.data.get_annotation(idx)
+        return image, label
+
+def work_init(work_id):
+    seed = torch.initial_seed() % 2**32
+    random.seed(seed + work_id)
+    np.random.seed(seed + work_id)
+
+def model_eval(args, predictor, dataset):
+    import copy
+    aimet_dataset = copy.deepcopy(dataset)
+    aimet_dataset.ids = aimet_dataset.ids[:1000]
+    calib_dataset = VoCdataset(aimet_dataset)
+    data_loader_kwargs = {'worker_init_fn': work_init, 'num_workers': 0}
+    batch_size = 1
+    calib_dataloader = DataLoader(calib_dataset, batch_size, shuffle=False, pin_memory=True, **data_loader_kwargs)
+    calib = tqdm(calib_dataloader)
+    def func_quant(model, iterations, use_cuda=True):
+        for i, samples in enumerate(calib):
+            image = samples[0]
+            image = predictor.transform(image.squeeze(0).numpy())
+            image = image.unsqueeze(0).cuda()
+            model(image)
+    return func_quant
+
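+# For reference, torch_ssd_eval.patch (added below) wires these helpers into eval_ssd.py
+# roughly as follows, so the calibration loop above runs on the quantization-simulated
+# model before mAP is computed:
+#     sim = get_simulations(net, args)
+#     eval_func = model_eval(args, predictor, dataset)
+#     sim.compute_encodings(eval_func, (sim.model, 3000, True))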
+def get_simulations(model, args):
+    from aimet_common.defs import QuantScheme
+    from aimet_torch.pro.quantsim import QuantizationSimModel
+    if hasattr(args, 'quant_scheme'):
+        if args.quant_scheme == 'range_learning_tf':
+            quant_scheme = QuantScheme.training_range_learning_with_tf_init
+        elif args.quant_scheme == 'range_learning_tfe':
+            quant_scheme = QuantScheme.training_range_learning_with_tf_enhanced_init
+        elif args.quant_scheme == 'tf':
+            quant_scheme = QuantScheme.post_training_tf
+        elif args.quant_scheme == 'tf_enhanced':
+            quant_scheme = QuantScheme.post_training_tf_enhanced
+        else:
+            raise ValueError("Got unrecognized quant_scheme: " + args.quant_scheme)
+    kwargs = {
+        'quant_scheme': quant_scheme,
+        'default_param_bw': args.default_param_bw,
+        'default_output_bw': args.default_output_bw,
+        'config_file': args.config_file
+    }
+    print(kwargs)
+    sim = QuantizationSimModel(model.cpu(), input_shapes=args.input_shape, **kwargs)
+    return sim
\ No newline at end of file
diff --git a/zoo_torch/examples/torch_ssd_eval.patch b/zoo_torch/examples/torch_ssd_eval.patch
new file mode 100644
index 0000000..8baed03
--- /dev/null
+++ b/zoo_torch/examples/torch_ssd_eval.patch
@@ -0,0 +1,123 @@
+diff --git a/eval_ssd.py b/eval_ssd.py
+index 5923915..e09c7e4 100644
+--- a/eval_ssd.py
++++ b/eval_ssd.py
+@@ -1,3 +1,8 @@
++#!/usr/bin/env python3.6
++
++''' AIMET QuantSim script on MobileNetV2-SSD Lite '''
++''' Currently we apply QuantSim to the batch-norm-folded model '''
++
+ import torch
+ from vision.ssd.vgg_ssd import create_vgg_ssd, create_vgg_ssd_predictor
+ from vision.ssd.mobilenetv1_ssd import create_mobilenetv1_ssd, create_mobilenetv1_ssd_predictor
+@@ -141,7 +146,7 @@ if __name__ == '__main__':
+     elif args.net == 'sq-ssd-lite':
+         net = create_squeezenet_ssd_lite(len(class_names), is_test=True)
+     elif args.net == 'mb2-ssd-lite':
+-        net = create_mobilenetv2_ssd_lite(len(class_names), width_mult=args.mb2_width_mult, is_test=True)
++        net = torch.load(args.trained_model)
+     elif args.net == 'mb3-large-ssd-lite':
+         net = create_mobilenetv3_large_ssd_lite(len(class_names), is_test=True)
+     elif args.net == 'mb3-small-ssd-lite':
+@@ -150,10 +155,20 @@
+         logging.fatal("The net type is wrong. It should be one of vgg16-ssd, mb1-ssd and mb1-ssd-lite.")
+         parser.print_help(sys.stderr)
+         sys.exit(1)
++    net.eval()
++    # from IPython import embed; embed()
++    args.input_shape = (1, 3, 300, 300)
++    args.quant_scheme = "tf_enhanced"
++    args.config_file = None
++    args.default_param_bw = 8
++    args.default_output_bw = 8
+ 
+     timer.start("Load Model")
+-    net.load(args.trained_model)
+     net = net.to(DEVICE)
++
++    from ssd_utils import model_eval, get_simulations
++    sim = get_simulations(net, args)
++
+     print(f'It took {timer.end("Load Model")} seconds to load the model.')
+     if args.net == 'vgg16-ssd':
+         predictor = create_vgg_ssd_predictor(net, nms_method=args.nms_method, device=DEVICE)
+@@ -164,12 +179,15 @@
+     elif args.net == 'sq-ssd-lite':
+         predictor = create_squeezenet_ssd_lite_predictor(net,nms_method=args.nms_method, device=DEVICE)
+     elif args.net == 'mb2-ssd-lite' or args.net == "mb3-large-ssd-lite" or args.net == "mb3-small-ssd-lite":
+-        predictor = create_mobilenetv2_ssd_lite_predictor(net, nms_method=args.nms_method, device=DEVICE)
++        predictor = create_mobilenetv2_ssd_lite_predictor(sim.model, nms_method=args.nms_method, device=DEVICE)
+     else:
+         logging.fatal("The net type is wrong. 
It should be one of vgg16-ssd, mb1-ssd and mb1-ssd-lite.")
+         parser.print_help(sys.stderr)
+         sys.exit(1)
+ 
++    eval_func = model_eval(args, predictor, dataset)
++    sim.compute_encodings(eval_func, (sim.model, 3000, True))
++
+     results = []
+     for i in range(len(dataset)):
+         print("process image", i)
+diff --git a/vision/ssd/ssd.py b/vision/ssd/ssd.py
+index 962b9a2..d5e6676 100644
+--- a/vision/ssd/ssd.py
++++ b/vision/ssd/ssd.py
+@@ -24,7 +24,24 @@ class SSD(nn.Module):
+         self.classification_headers = classification_headers
+         self.regression_headers = regression_headers
+         self.is_test = is_test
+-        self.config = config
++        #self.config = config
++
++        self.image_size = 300
++        self.image_mean = np.array([127, 127, 127])  # RGB layout
++        self.image_std = 128.0
++        self.iou_threshold = 0.45
++        self.center_variance = 0.1
++        self.size_variance = 0.2
++
++        self.specs = [box_utils.SSDSpec(19, 16, box_utils.SSDBoxSizes(60, 105), [2, 3]),
++                      box_utils.SSDSpec(10, 32, box_utils.SSDBoxSizes(105, 150), [2, 3]),
++                      box_utils.SSDSpec(5, 64, box_utils.SSDBoxSizes(150, 195), [2, 3]),
++                      box_utils.SSDSpec(3, 100, box_utils.SSDBoxSizes(195, 240), [2, 3]),
++                      box_utils.SSDSpec(2, 150, box_utils.SSDBoxSizes(240, 285), [2, 3]),
++                      box_utils.SSDSpec(1, 300, box_utils.SSDBoxSizes(285, 330), [2, 3])]
++
++
++        self.gen_priors = box_utils.generate_ssd_priors(self.specs, self.image_size)
+ 
+         # register layers in source_layer_indexes by adding them to a module list
+         self.source_layer_add_ons = nn.ModuleList([t[1] for t in source_layer_indexes
+@@ -34,8 +51,9 @@
+         else:
+             self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
+         if is_test:
+-            self.config = config
+-            self.priors = config.priors.to(self.device)
++            #self.config = config
++            #self.priors = config.priors.to(self.device)
++            self.priors = self.gen_priors.to(self.device)
+ 
+     def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
+         confidences = []
+@@ -90,7 +108,7 @@
+         if self.is_test:
+             confidences = F.softmax(confidences, dim=2)
+             boxes = box_utils.convert_locations_to_boxes(
+-                locations, self.priors, self.config.center_variance, self.config.size_variance
++                locations.cpu(), self.priors.cpu(), self.center_variance, self.size_variance
+             )
+             boxes = box_utils.center_form_to_corner_form(boxes)
+             return confidences, boxes
+@@ -109,7 +127,9 @@
+         return confidence, location
+ 
+     def init_from_base_net(self, model):
+-        self.base_net.load_state_dict(torch.load(model, map_location=lambda storage, loc: storage), strict=True)
++        state_dict = torch.load(model, map_location=lambda storage, loc: storage)
++        state_dict = {k[9:]: v for k, v in state_dict.items() if k.startswith('features')}
++        self.base_net.load_state_dict(state_dict, strict=True)
+         self.source_layer_add_ons.apply(_xavier_init_)
+         self.extras.apply(_xavier_init_)
+         self.classification_headers.apply(_xavier_init_)