From 65e7c90a770f574e7f776f424ab4d746eea4a834 Mon Sep 17 00:00:00 2001 From: Lysandre Debut Date: Tue, 25 Feb 2020 13:48:24 -0500 Subject: [PATCH] Adding usage examples for common tasks (#2850) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Usage: Sequence Classification & Question Answering * Pipeline example * Language modeling * TensorFlow code for Sequence classification * Custom TF/PT toggler in docs * QA + LM for TensorFlow * Finish Usage for both PyTorch and TensorFlow * Addressing Julien's comments * More assertive * cleanup * Favicon - added favicon option in conf.py along with the favicon image - udpated 🤗 logo. slightly smaller and should appear more consistent across editing programs (no more tongue on the outside of the mouth) Co-authored-by: joshchagani --- docs/source/_static/css/huggingface.css | 22 + docs/source/_static/js/custom.js | 69 +++ docs/source/_static/js/huggingface_logo.svg | 48 +- docs/source/conf.py | 8 +- docs/source/favicon.ico | Bin 0 -> 47890 bytes docs/source/index.rst | 1 + docs/source/usage.rst | 597 ++++++++++++++++++++ 7 files changed, 697 insertions(+), 48 deletions(-) create mode 100644 docs/source/favicon.ico create mode 100644 docs/source/usage.rst diff --git a/docs/source/_static/css/huggingface.css b/docs/source/_static/css/huggingface.css index 4adf8f753..808f8005f 100644 --- a/docs/source/_static/css/huggingface.css +++ b/docs/source/_static/css/huggingface.css @@ -1,3 +1,25 @@ +/* Our DOM objects */ + +.framework-selector { + display: flex; + flex-direction: row; + justify-content: flex-end; +} + +.framework-selector > button { + background-color: white; + color: #6670FF; + border: 1px solid #6670FF; + padding: 5px; +} + +.framework-selector > button.selected{ + background-color: #6670FF; + color: white; + border: 1px solid #6670FF; + padding: 5px; +} + /* The literal code blocks */ .rst-content tt.literal, .rst-content tt.literal, .rst-content code.literal { color: 
#6670FF; diff --git a/docs/source/_static/js/custom.js b/docs/source/_static/js/custom.js index 04cdfc1de..ac9388531 100644 --- a/docs/source/_static/js/custom.js +++ b/docs/source/_static/js/custom.js @@ -68,6 +68,74 @@ function addHfMenu() { document.body.insertAdjacentHTML('afterbegin', div); } +function platformToggle() { + const codeBlocks = Array.from(document.getElementsByClassName("highlight")); + const pytorchIdentifier = "## PYTORCH CODE"; + const tensorflowIdentifier = "## TENSORFLOW CODE"; + const pytorchSpanIdentifier = `${pytorchIdentifier}`; + const tensorflowSpanIdentifier = `${tensorflowIdentifier}`; + + const getFrameworkSpans = filteredCodeBlock => { + const spans = filteredCodeBlock.element.innerHTML; + const pytorchSpanPosition = spans.indexOf(pytorchSpanIdentifier); + const tensorflowSpanPosition = spans.indexOf(tensorflowSpanIdentifier); + + let pytorchSpans; + let tensorflowSpans; + + if(pytorchSpanPosition < tensorflowSpanPosition){ + pytorchSpans = spans.slice(pytorchSpanPosition + pytorchSpanIdentifier.length + 1, tensorflowSpanPosition); + tensorflowSpans = spans.slice(tensorflowSpanPosition + tensorflowSpanIdentifier.length + 1, spans.length); + }else{ + tensorflowSpans = spans.slice(tensorflowSpanPosition + tensorflowSpanIdentifier.length + 1, pytorchSpanPosition); + pytorchSpans = spans.slice(pytorchSpanPosition + pytorchSpanIdentifier.length + 1, spans.length); + } + + return { + ...filteredCodeBlock, + pytorchSample: pytorchSpans , + tensorflowSample: tensorflowSpans + } + }; + + const createFrameworkButtons = sample => { + const pytorchButton = document.createElement("button"); + pytorchButton.innerText = "PyTorch"; + + const tensorflowButton = document.createElement("button"); + tensorflowButton.innerText = "TensorFlow"; + + const selectorDiv = document.createElement("div"); + selectorDiv.classList.add("framework-selector"); + selectorDiv.appendChild(pytorchButton); + selectorDiv.appendChild(tensorflowButton); + 
sample.element.parentElement.prepend(selectorDiv); + + // Init on PyTorch + sample.element.innerHTML = sample.pytorchSample; + pytorchButton.classList.add("selected"); + tensorflowButton.classList.remove("selected"); + + pytorchButton.addEventListener("click", () => { + sample.element.innerHTML = sample.pytorchSample; + pytorchButton.classList.add("selected"); + tensorflowButton.classList.remove("selected"); + }); + tensorflowButton.addEventListener("click", () => { + sample.element.innerHTML = sample.tensorflowSample; + tensorflowButton.classList.add("selected"); + pytorchButton.classList.remove("selected"); + }); + }; + + codeBlocks + .map(element => {return {element: element.firstChild, innerText: element.innerText}}) + .filter(codeBlock => codeBlock.innerText.includes(pytorchIdentifier) && codeBlock.innerText.includes(tensorflowIdentifier)) + .map(getFrameworkSpans) + .forEach(createFrameworkButtons); +} + + /*! * github-buttons v2.2.10 * (c) 2019 なつき @@ -85,6 +153,7 @@ function onLoad() { addGithubButton(); parseGithubButtons(); addHfMenu(); + platformToggle(); } window.addEventListener("load", onLoad); diff --git a/docs/source/_static/js/huggingface_logo.svg b/docs/source/_static/js/huggingface_logo.svg index 84974866c..79a9e5d8a 100644 --- a/docs/source/_static/js/huggingface_logo.svg +++ b/docs/source/_static/js/huggingface_logo.svg @@ -1,47 +1 @@ - - - icon - Created with Sketch. 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file + \ No newline at end of file diff --git a/docs/source/conf.py b/docs/source/conf.py index 18d52e9f2..763b3ac70 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -20,7 +20,7 @@ sys.path.insert(0, os.path.abspath('../../src')) # -- Project information ----------------------------------------------------- project = u'transformers' -copyright = u'2019, huggingface' +copyright = u'2020, huggingface' author = u'huggingface' # The short X.Y version @@ -105,6 +105,12 @@ html_static_path = ['_static'] # # html_sidebars = {} +# This must be the name of an image file (path relative to the configuration +# directory) that is the favicon of the docs. Modern browsers use this as +# the icon for tabs, windows and bookmarks. It should be a Windows-style +# icon file (.ico). +html_favicon = 'favicon.ico' + # -- Options for HTMLHelp output --------------------------------------------- diff --git a/docs/source/favicon.ico b/docs/source/favicon.ico new file mode 100644 index 0000000000000000000000000000000000000000..424101de717bf411d1db8456030c97d184d4c005 GIT binary patch literal 47890 zcmeFZ1z1(h+Bd%GZjg{xL=3t+MFBxY2_*#qu|SXzNht-zqCv1g1qB5qq*J9!QIS+S zHod7m-^^B?x|GTdD`ksAV!&+#N{{49;1{1>x8^9lGf6yI+DS;if!e>YV)_kjn7YqjW`&U08 zg&_mGF%&f3j0u(!1`j?7xqDQ=p6@aLs15R zF{A;O7R<0qz)S_UWq|!zs~`>7mb(c6#?tRtY5`z^?NJ>Ee23c5!Dl^42du7|gUL~q z1F#VU3gJJ7ao0h^zG0yMA>dPf7>Y=$Lw5dRAGMLMh<172I83~Uf97v#xbItny* z8vr$v6F_sXHBhm11REPyVS(7H0Z@e4v5Vjo>Wh9^1K_g2cir(K{ngEbEzdguZ_6^^ ztDgtL&)NZRD<1H+0l))`cs&4}Sm6JyHwN=&6r9}-`=fWfEqJg!r3h>;=mCfpZ_64G zdGr=+ebNMYpbfa7-qjiam{`O+uJ;Gi6AFfJ!73ZX$J@FF6r8<)WM~rLc@Op1xCE5! 
zP6M%Pj{t8cv<1}liDrPq_qWOX(LcqSGUYRP!3A}IFz`X12nR3JJ;Kb}yh^x7yoI0- zl@Ei*z(MMtd>IV)@AQ2K29ORpkli45K12)khxZ$xy*9#k3ElY5pr;iLuRr|&c1ZSC zH@;2@>W2&BWI_G{%SOm^7nC{kE2zeQ{UrIPjz3g@(sTk2b#lSdD0Iu z7gm84$c0**0J7!x<_|V15dT+bL;O5e222i!bzs2~4XmSqx?+YpLvuA+6Ao?hYR{58MFZ{#Eked zqBRR_kMzUZ|LmwN(;b*|ixClf+0!yq@rd_h zlI1l}F9q=--)Dh5kt|3CL>KX9gJX%{XOK6NiMR8cE(G4<0e=G?@VtX3^iW?wL^|Tux1=ELH!^*kU>4K(?C}gBY2^FFfOlw`RR*@=Qr-({^Cb^EpNcM zfZ`s~9qO|I`X15)vL{y)0D@KHKvP!_?2?lJy7D?eS5^_|%4vaJ3MxQXN)$*1+yXq% zHmLo^hHt)s{1DMXen0``Xq^FgFg2w9BX4vMW+yJXzK8mU82O>i5MNXX;|X37-5tpV zV7J(Ipsl6`l#ZSN(!N)Lta}hp+jj`^7YFJ_hX7v_T-&04-}|o9L4J*aIxLukwx1b0 z{wMwm*eE+VzQ~}y|KR&WEJJJ7?RQFm+}R)?^zsAXff%_zzyfjc!?pCzxEvto8UWYW z3xDulj|J#T;1A`0G74=!Gvf6p{PxJdp9c1Y|wK!M1y)K%jc^PZ&fV)`Fce&w$F| zqhM3n;6GtN`b6u0re^^3&4U&E)aakq>iFJwoSoD7QAQ~LKg}huFMr)4(A;AHbfv|C zuA~^)n$bXzm4FfH2JS=D_M1Q-6a%}(cR)WWCG2t5Wk7Ql6^!Lta4Z0@2It+c)IZCQ zp5b~c(*){?432>xeSoI}4@FNDaR}!q60DNfA1@G5oTfd_5fb4?C zAG%+!N3zqw*eMA88oL4(!Q33oaQylQ(GkOS-3v4hQ5+)p0n+cfFCn@=KCjoWUw?X! 
zcymBG(7GyP3{s!R-u*S_f7F8U!EK|<_|Y9Oexvyit;7D3=Q{77u822^gGlcV(4WET zQa@Vb{)5x+?*Mk#{QW#w63m0~AP2^nMraEp+fO`x`3&`g+9LZQ-Ur}y39QY6#aWHt zbNurgJa){n3$Ei;;9Q32pm_z^1JNe<1+v5Uf}Yn4n)7L(ucLYFB*eP}qYcU-{h7nR zt^w0yd*@a`SNLZrPY%QZ<0log3tEd4<`~Ee#TB$(WQJplu-1lH#bBFf$lj~V9bjQW z>0fjDeG4!@#s%l<*w1rd@d z1kbOj{C8@>@Mm@~+9LtR2Gqf1zZzV_i-ND6|I2pQ{~cZYP33P5{H=liy&6DyLbM>U zf#r`pAv}Be`}e<718~kIhJ_0jd06(tV)MQ1$K&ym@SgVHl?=80-shj_z+ahWS>1xJR|E1HcHbmrz0X5pciU zI{Url>I!TH%wf@kd0bOiOyIpPEM{PNX%JpP#Uua$kd6h+Eg8bR(tfz^bAao;Ft`WJ z9fo<}Wz_2155KZB3}uOfPlUYc?=Yi!*hT{Kdk*j`gw8dPk1Jf;?kxD8`+W%4n(45h zE6m{#%J({QxV}6B@r-@z`i=Qdf7_sp>ZP#XV_1HsgPyJD{xkP^Yr64&~Lw;hI^c6y~*Sp<~1KEs~uQ(qe{nC>M-qpuH5LgX-w=zvbH2 zWu$^@VLiA8e=!8U{Z?0yM7BT50H1e3y|1-Q!vO^OuIqQ54;Sv%>b>d=ohtFZX5N>k(g+Q$_haRFF>5 z9)=qhR=CGPwxoFWi@on(&_!q9h&Rf!Qy@8@ebvk05Qm_l43H`z$IAxyfQ!K&;QO?1 zK7z0y+z0~=%sC*t67=#HypU|D4a!TRJsu%P2x%ZaGs6BT=ZVhM(eq#FVf`IFq=Rg= z5$>Ik9B7Y8urJha{s=^lo1ce&H^2hVrDmsm-+z;j0s1S_J@P~99O&UiT4{Z%1_~=l<{N0qxBhVQ!P^Im|ypOaxuRTnF;QY%~^d3ljwFP{jZ@ z_x0H@d`V*WAsG-(WQTQm{*?cM?|+m9VMAvQ1pULYiJp-ROr_uOqg?f0=^wR6 z`)4}1XGQ1P>v~82gXBPTX<(cnoS7{z^+WtLuxtSP4EYxm)E~l(`Xbw*GY*uu zNBRFBc@wZfe(U(rJJjZ%{2a=M+M#iQa@Q!|jqXw1AN}C{&(G+LfDDcyq<3^afv}*l zh_ImXc^@H%mmdh72|KcsUMlhD{fIYxvR(MSG^bcb|`Vjvn12xDR|M7}hS zVh}GZBoKdf`6TQQ+amtxtb!8qA;chv8}UN8*Y$^VNbobr59uA9+o1T5`Y}TJ*RieZ zc>PKk+fW8H4iR2d(0L!~i+mH=57`Oj_SbceVi%GD(I$e}_+9|;1*v{@32O7(Sp$sy z=ixor33ZOf00X?D7>sBj`=N6J5~y3G6NDF?K~X~8Y=rL+R`ed}7LC*2VMjSBgb|%T zp|(g)WK)zQLimt<5I;2Tk#5j4lAC}5>IcO^be6^@z^0{>UjJyb#|X&);bxyhs=8G!RWh1Mxt1 zUH4NoR*{}i`*j)6T!`j(!f$OYa(xblXU?aVU?n;SLTymoBIq36lfZ)f8PPyFJY;L6 z`*oi{zJPeGk8eW0AGYPM$HQ}{H6ZW?o)17-xZi)11=$as`5+shE5eKHh~z< zjbTI=)uXXUk9;5Me-HGnZ@<+Of8d8C;2b=0?+YAXP!Fix`Zz#meTWCbfczKf8rcNt z8rcN3Bh0JFpWgwnEgRmlF&4uVAIncOehtG9bc+Rx8ItpdjxQ-uP;jvY%0@A`a;#w(L4gX55#ntqbLKP72p}E3=m1H{&o(8=s=oij1v4F`Y0M}$hL?l(i1ft6Ud*x$NU9| z|F_@zz_WAcwNS?pGdhz->mFoB6hqhPqwD%Tik0Zx4xLfz%1AwWzh zkDK%Efd)YKLf<5;WBscuI)9aQ43 
zMZm9aX#Csr*dKWzSvcTWMEXbjpYK1Tb-X`c;rV%ZJDl5)57Iz?`B5)F(*5xs#Tu67h?~IT zXWZ-0a364~4dO>*=HJDSY`m%D3lOXrCE)zi8T5C5Bg_Fv4tNeOl-C7)6P+9XwT*wo zzx$8453JVl{(J=sGk#qw(D&ikMPmo8`Ti9T^d8N#Kb}qgWgjT-Z)N<6AHjd%9E1Gl z0Gy-0|BQdez5Wc>5y!@1BlH~eS%d;J-{f$qBk>v#O1q$c%|s*2Ic+st@5wrM*fT9KbkwM zQD9$ML@R&Fb-X`c;X7hD2AUHGzR8UCKS&3EqK$M9&*U|=cL5z3*U_AWzF$y1WC4Bd z`*+ko`xNY>Z0P`Wp}eTQj-ovL-bNj4Djoc_%usJAcY(ecKzWe`2o9{Rr2UNf=VvJ6 z#jnsxXfK22aP*DBpYTK4d<~0)Z!1ttC(KLmJ0At7bHBy{WkLQf=Nd?m^V>Wu4d-k_ z!2f#smp+R0#tPRH$aieuJpcV?{xja6pJBV5Xm5$W=|tlm(fE}O2y4x>Dxk4vFJaDA zGd72D@zd||L;pc@gQ^)^SHL-1Q+E&ar&qrpGjP5|`7^X;ESrF+;QAQ;{g-F4uniHw z%@tKo!}SyFi}FUlv;op79FJ%o5_<805ZC`zTtx5r8gTGCsgFRgdg8a(k-pgAnilC# z73Ocg{noDktn1%B!}b~|zr7E}Tx35cxOPUd>`#7#d>ry3;Qrlv)aJ(m+Y#Ese*aXz z-Uhx$eov0ppHP2AqY%yN@;v;z`FD8#Tn}xqyqML!1hGT?qxHo4UjCOn*LnXJt|;c9 z{14Kb?LYiJ^v@Xox(3QC2*>htav!7x*B@x@k8(=C8Vmm|nUIZM8>??~f->>*iKf_(p{x<|Ot9)qwyhuD$7zCu3#ZLNRw-$VKPR*(=8%vIk&6y2b2 zBK@Eo5hI-2c;K3Z;7|X790)fmXnl|3B=UEDIM-I9DESThpT33sJ<7j+0|`PIRzeWv zyZztt0Ei!&--SiUfZF?YP^jlf(hJSVbGj#u6FGzn2imk}TO>+R)!Eu4|GDwFA55k4yLHlQN zSWnph!`6her0;D|UO@`_-wn8?gd~v8(RoTNy!)qrKl1M(`p;THUb|qfr3mIx0Tw=F zjll2fpq`aWzxgpUj3MaXmN3A=0p-{Z^CRfIH5{(98)sn$^uxg%^2#!l3dsIvDE{ZY z2ym%Y`~7#g|4rp@4g9Tvzcui;2L9H--x~N^ z1AlAaZw>t4(ZHpBhPrgrTyjYxl=zyNw4(MuPo;Grizz6wb zW_uj=?!|0__mmi-u;cJQzxmdQ3tll8;#A`G8X`<8$v^JVOKm}UxGPu3?9tXZ=tVSM zOp)}!)EwvU;kmqE_578{=KGAA%+3#~iggPq1*wUHsJBs@;7wTftGD+s=#WjPhX;hW>@>EW>m)^7Xsyh&DC^zCt*>WVLx@=0k*DmbGsuVAFqByxt*a`ZelGf zk|{&%Rz2=iBAh&}0^)1uAI~sWY@(E04I$mTI6|-MRK^p0hUMv|3(+pyi8X9nLfRq& zpJL?q@?a{P9t^KAKNV08brG`NBCg!9m6R+h z^nOPwum`7Hp?=~H&CPJgHg5Bm3f$Q1<@n$s7G7=nEBOv-Q|*_W%P#R{EhXzjJ2c#J z-a&=+G?|YT=E11lr=Jp96%OisNB^)XGPC0N=C|=-MsZfB3lp${6*s92oA&Z11zpmm z4;3#`3by;CC0`^GyeYU(r6WK_rT*k+${Y{j=>kr}rMleGQc3(t2YN>)2fK$jB}eh# z5}n`&9od_M@n16q&G;%X`n2RG+2?HprpdQtS=za!&GZ=ud$Op8iMu3gvVRPQ(49yG zZ7-w`a=$N)Ol2=!bKzE%e}4UeLLQ?ZcZDkJmphO>^#EV&8}l4D=D^V zS@s!tuQ}lx6MOu;&GKzBO42VMd(8#vOXY(@Vom&pZdcAmvjrYVH1)|k?uBEoc*R62 
zx^J|CRAQeIHB0?uy&nC5)^s0Pa4d6gsBB<@<=&Dh3W4z@#lDT=YkS#ts#Rpiy}y2G zqPX+oYFiRt?T1s3o1A2KlZ>V96So&ne7=&+8p<)hqcG@Pc)s$6o)k73_UvxPz!tww z^%$Uyb0j4SAJxp@2>;UN+%F}=6B)(B9gk;Q)to;SV5ekuWO3()&weo>FRg7QL=*5F zX%}jo%*I?JRYwfE^^UO8nmTS0x$1&IZE~AYedC-^Ed}&{Q@Ttg<6P>G_Ka?1G+q^PBlLHdabAn#0>Q zr}&9`1xd^AX__tJ7-Ouer7qA-5N$q&^AFDBr<`;$igDZ}YDsjhxk+5LeWR787hRtf z)t;Gq+%^TP9`8y|F1b|a^3_}t)~3s1a7`m77MR_?``m0}24#@UdqG;>k5BJZc$~#= ztmxT9IeGh0+)2k*g~X|(iQ>Tc(#%RQMYMK&eWSIW;h1jjU; zIXkj3UGN1x>jgJ4n^=;YTgF_{cX(qMwiv|3ne;VLPU$d4Vn2m&(;9HJDJ&VT)QaQR zoT{dnZtFM^896B@dv44&=vbhTJLqz^vVLhnwCiG^0mt0jq0GQ}F?(aFv!3dFW5K<% z_wg*jWkXwT)zKPu5!*J|oNSin@K;qU)xez#df!lfsZx9ugu1LKxeWTo56E0iAsX~` z+xj-jQpzCC$At1k3VTI?LH3S(d9^)D8+QhHvUCUVcUthS)n9I5!^o`B&8mfdD&_Ag z9d!NpRU++y%=8p4opBOxCDd?IW^F`WBJ!3-QT=4y=mC~9(xbyVF$3+Z4kj9Wl4fe< z-eQjC9hJfd7A!a8Rf5Y(_inx!AG07mf7eJ>KCZ4ts>@0&J{y!r%~YJ|Ci zYpRCFAGr*cXL6P*k~x;U4hHn(7dGiCr?eRj_kASF3w)MW=XZsrc&;)!H_-7mhl9zc z+HgH(is7eRk?*^>)Sk*c+!9aSw_(gl>EYDd+ULI111Cey79R6b6Y;0eaM7<*s80>P zF~&(J4hGZ%Zrcq$Ou)qUbWb-Wh0QLC2p#Ne6RB>)iuVGf-JhMwrG%1IJa!RB zGUNsK2&`@(nlcuYuZf*L@~ zBC^ThnmO4AU;Wa-BSP0B3)mQ`1#qg7E>Z^Zy3aVS3UFMV&S=t#&=}jK-*(`G_odfz z)Wj~m&At6S4$oSmy|35ZE{&k1Ca%wyVrkq>gqgM0^8`El_U}FS%Gg^CZlnZOOV)!8!$|Cw%pU2y1KL#0R(wciNRchsri)|aDdOIqM zU-R*v+fEs1IbhvTFYFkM30QjH>N~}+tWdDg(~7-ifK#PZ9V%coZ{ z2Bny`bGHqQ(xk48%wjjc=%Bv1hBKz;tvaC2p{*85>=Be|_nbZOJU*-coiES(y;E}e zfm}tT0=-j_VVq(Z8+^uawI_?{?$L&U9Ri+7{-1j8yvU{%cAl(tRS{Y|8q;`jgk3%9 zBIEK&N*SNngU>iOh#cV6oR?=0!?KKPF>+T+hMSa=k_A=zinXOHx-?QLEFGb| zGr*LeRJHkJ#mEN#T{M_D5byq4jX7rXxyZ4(GdQ-S3q3Vm6f({kCISgdUs@z`@ zt8c63Bu?{j5Do9+q!L$4;_fzZ8Mmf&PCnAjToHw*bYmq>3V+wc;m>%gl(Oc1{gguG z3tx$aDesMsm^6CBdrxzRQe@Xy+>&O}xTZj*64gt6q4c&T03A$NU=&%Ee?&Qy9B-gJK0pv7}8q;`S3`22DRnc}1nq-&$;M zYzo6ZiM@0DbclKM#wQrsr;Fw_*3Tc-;hv<5i|yS)d375lmSOx;9hOR`>-n{J%=fZq zt=|Zn$B7KzX=?3r)Am)>w9!3(qcKXsjucZr^|r)+a}ht)L>~TBb)6cq?F;7_pR$m~ z;_4kDX7wEY^jepm));B}8($ZU7EI-23;v9wpg+n~Wq#4*H9M8KX6cg)tb8^XEtK=b zd0d!7h6G7v9|(_($33yh9ym-o+mwD_N>Di`_pIpGwaKgymld^xNk&hcSZbql#HZPh 
z9uPgImlfp`qQSm}o#a49kGVqlmYxt9mxKOSbY800&{J6`-)F>zKX=E8?oYYIn7>J7 z&rr=tBDdrYj<^&ePn>7PP5l!d?i+hD*1Q+S{E26c-X4FsBnb^`1XxcmFg6B?w)U-P`zmVoKAqc~%$Z|O zKm5?7Y<14c9-kg-QcogZ+Bj^}MEB&%V#l(f$WHTJW#cJyEdmUME?g45S8FSSdCW+3 zu}wP8r*GW;+{YT{;s9i>PA33l`Bw4V-1Gw1or2DMQf? z#bXiTph>dSz*=Tv{9@g)IStw?(N1d`q#EzL#B`Q!>0ApklISQ58>#rLG_o;mD)9w* zQu#BZrdojurFj~gC~?v_`j*?FjGZ?&6we;rs}G}W+d!bk1us_-y*yv;L4Sc0O^eb~ z<2S{Vt{#4#^h{N?=IJ{zoyFr3*UU*idD_wSZjDTrkTKr4{PAA z6O~Pjil%{kzziZ2S(^D(mqG#ae81dZLRgY$3m;5P?YY9fF z8$J4li#ML&S3Hrp8O~*nDp#aZ!HalbQoaXYh6G6C4kTDLZ}u?sA85vpH&dP2@#GEp zYEaLz4hQ>DCyz~0#@@5j*sO~2sR_f0=>EbsL3ynl2BH!>CE~ziAr16Yti&07>=hRQ ziO{v86RRqzeM3ydn{TU%KY>0RwUe(~r%Y5VfDVKX`ipP{xY|n9^O??GpKoR3mJ5j} z-}Lo5nPprk$F-5D*4QKQLh_+xotGKSyw6dZ7MF4gU>zv~dXf!06J+5)^Op`^6W6$? z%qQ!g+K*Lw!@57Geu^kzyXkXRid^j}7e9&5a%~ur19@6OrzitRaO4Rp*S8)fHlpkk zn5ty9xYlaZ5uzX36VF`0(r-Y6<~`Eqs`_Te;ki#}t4kIxD6XmBtdA<_tc&dDk_P-6 zEOJSy^+!vRL^-f0HBG~(v<1d(clY`(ALPp=Rmm)l>BDHf&HWr4sc_F2E7eA%!vo{7 zpUzY|C4;E;!?EQo|1I=tF_&cTEhXbQ4_C!SN%*I1DJ5+U;S3`>ew3lU$n8XR;fWfq zeUwRSZ34QPsS z`1AVa3>vVG`(Fu z^ioYXOz?rR#-Md>(s9+AHA^G^ZSNcml^Z~1b6w1-agj2)Yl>Qo@7b#ckV#*NK z=J#Jj4)l1NAA4Zy&9yh=ss;b8*D7Ckp46p{1H>~+l7Uo9-13zDr7m=>p2mf3O^-_I z``CxEqV5v=B8b@D5LIm8e#5H}^Y~yAZxK#T!D`1FiSTp<<>)(Q=_evoKfmX(BPv@B z@4%X9s?MgA+0H(R&3=+cHTd3Ds!F!IqkewnMHiwKMYJ<;N!g-$T;1Z>Z6_++gNKQD z#o#NzJjL#w&meDQkNObf(INF(O3c0)w_suRyMr}qS~D$Wxxl+`E$3* zjXT>X>qKe-O!_s4?Kmp~?G*(|49ln1xb#L|-l86RKl<>Fx~>c90au>zB}EOUE{>OW z3cewy5~gwK!Omk7Q?^e%C%6p9t26akN3YOKsj5hcW3Ja9ohtL#(NO%Uz%W&cX<(mC z!%a=E;jsPE_r=Ix3Ww@SvErOvb02ws64_9e($;lhQ0?WtvmT|6mBQyP^L)6tCrp~J ztnJe={G&T*$E`1O@tij=Z@uX)KlOt8K-ux(o$_gG`34H&Um5XS59G{v&&}8+c@SX) zZyQRbSa-Hh4LE)9bSrE#3bhX1UhZlsrLRh}y~WqN#-=O^^W^Bm?k23nxfiKdwM#_` zh7v`b&Qo5uif49xK9FPrLxz%YZCOd#MB$QF^b+3d@)=^qad8#Az65xcj@!(ix$zh` z`9XdaT%<{kLzORy$d>yVR8f5PrDa*^hIA#o%W94mABOm#Hg!-99N7MMQZP zU45T$lxjdCzBJ{~;q6qPM@lSmJ*+p|s0)absys2H0>aI=%@tWYr;nBFXt*7C*5$<* 
z-!0w5E{^oQ82+_)t9k3ctxVluL!-ntMfBb%K6;Rq)0fkGR`Ww&+%Ui4V6v>PW33k%vF=WeF$ee?sW05RvI`_LuQ&^iP^#^8 zIZTS#dCn$9jjsRB?W+$CEG*V8`8_`zh>43&aZ%eD+_E#5oTQb5g?M^}@hO)9asOvr z*hwYZfqe?AqIxYO=@~}z}y+mf}I`Ho&zZrs|6A@CE02*0?8}aTOavK$%ai@ zvXgw}aM|@HsuIVEyY)yT`h7Q_WaOIKSMM+vq3f5^D~!(`7cktG_~P2R)66f_UY@7# zsy}*}B=25>h7G5ETItmU`!^Ob#}|FsNfbD+vs2q$C_}661kjf!f`lt4JO}ON2dA^{ zahnH9zYu>w96+vmYw&B7Cw)0CZ*R`Q9oXiyq;=^&uHtK2+!Kp|JY#;0iIb{F9=0A`(uWzTu1QUHKHJsry73 zg(}{vS1q;?>2usXSByW_Ia+c?nEarupXh0-x58ux9V%Bkg|FF@Ss2EQdnGEyNfN2w zw3v^bS~6YXBiS$VvFB0w1NVS7DVS)rc`T7SUvN#wk%Xo-6BCx}mvI(E9dKUWcI#LW z$qu268F^ztj8n7uL7|>H?_Oax(g%@i^JVmbII^~~>2!%Ul4o=KhphaD+Jj3*-BOaN zY?L?s?gq|7)Hsm#M? zl`SqTw74u4GlZSXFvHm=476P$;qv^NpDu~JWZu`R-(*d!n6dQO{2Uv7JM=N<>TcIr5a;>L>`7$!yXEDoJc>~7khmsNY+ zD{DvWu2RYJ?NruIbI(6FZL!3pu5o6Dv41sL9e#It!H8?+o`}V?oGEc%Aq!EETDHF- z?zL%5aFqWk4g1xBO*#8dr`K>FbGh-LPW6cVl(tuIby(#rZ;qUW>$JcHE@|q)xP^T$ zPL?bbd-wUK9N{jx$gk|OTg23MHPd&7>GXVakB=mERl4#?Yp+(Vg^=#p&D|JjVuSd)_4 z)`0stYa0E9FKCIHnUm85sx~`hlEN><4>O(CZksP7Elzkz#!tc>*1W>WtUP~br9w>Q zgriw5rh+Se?y1Pb4pM6YQqRfBD_*#Exxp}|9RaG?eeN+4W?Xj~gcU`pT$LS=w{Ko0zpP3h#(3W1rc-gt$x>TC%zD)~bvl#3s zlli!Avszt6*)1M_!85$~!e1y_*<3Ru&i0~qTd>A^ntKUr1c}+LE{`fbb9MLf$e-)l z)E8y-QYa}W#+Q*Ni)idfxk^2_`H7;EZ?!VPp);fDy0QVn`r4h``D_0?#{Dat0e zN|(Wn%Lk?$vY=NC>L{_5VsAZ0rSE;AGx`io->P-P+1D$_IBmW7HG7?`f<2VQn9NzW z6A!9Iar34m*1d~LPno-S-?HLqrlE2h-(Jwju#>?b^we!imaSXu*WJQ7uEyyX`4#&y zFNFdd5tuH63L>)oBG@6D~evH zuN0)!@h8i%e>5;>PrZ;oYZ=MGh^_9;UD_0J&tP@pJ~ig?qD7F!Crnw=EMA0hZ@PE% z9&M)XJppOUxLEd3Pd|HeX*c67l9wO%M3EQUov_BWc|M`9&8B?Is<+?Dc=`0gBNuFM z61EmRS3LQtjm1^s`pk>8b1`_j%12ojD42=Gl4*8l=-ep4zhcg|_4F4DwzL+x#NmV2 zId*-^^6ifX^r>q;ZfH$n1iKpQHQq)ZITf8Y{&8?L zj#qn6v|{@4TM_SDZ}qVB-iyBZ;p!e1%)#a7&Q2EK%8p2mnm5KTvBFQ6?MA*TQ}S0= zIa_@EqDL{6~xrRZHhp!f0=U$tjyPb%Xat7@8f4XL$Z8YVxcu>1v`%@=;RyC6C^ zL;j&v_(BRvkP=lp{?kfDdT9Z_V&O+inVr=tE9WbOOB$8k5<~P~jd6(swXXtq%~^ul?fWWEAr0c}}b?lfK@TN0Bd6-xW6K znnzP%XmZr#lr&5(=VbJycWjBL-8xWaQdg7|96PvW;o<%8`^=AJuXZ1*sp6rC+*?{L 
z@4KPR|0bDRb7rFFyyhyU##1Hchp~orJdrKJ`ivh>vm5#zA!@$k6z)5K^CzNMsmdaW zKG<#>(8IX3M`)P(V{Of;ZG?$f*^ zpxl$}gm+}qk+Sw98azTXy1PcT@v59>aY4Hb!-YJLB^P-|nT@(#Oz$tL7b`d0Ro@-R zec(!V{ItAAkuA5^%TFr~<>mn^mSv33E7t^~Jz7lji7GHACqIan4NO@wJrqmj;5O+g zmDig+^JypM>3!<+YBiND6Zx)Z%C>D2uxENlGt1zfx>)-15ckY9wvIkjrf%0+s#4kW z9up?EVfAnx-0g)G3ac1BCfT^dfz9EHQqMsL?=N2|mNt-C^_%2x+3qgsZ@U~4H^@*t zbw{4f?8w0fme)AWS-(0ful%J|tFLwTs&;|ZW}NdZ_Ha`2t3eKmbn#QXR8 zo7>;(oL0)xKmKmh0YMp-^l1xUDTT-@iD&Ric(uZueua&RX_NH!TNNzvZ>?*I*Is$O z+3ht>vJmF!f8lu7+ao5XoXa~n@GrzVH#`lpy%lo%#;n#tlmBI*>K>#6)K6oKc|pQ+&Hp7!eyj?z!8LBAbLlg2de+_KDpyA0Be;AhGc=3W@sL4V**@ zFHL5{k0)YMj}Atw9kL~fc0ab=F-a{sVzEb-x%eJY&4qH7grap&c%1>G@K=p z&CZ)uA?nyRwoWGFl5IN z&#E$TZ}}Xib>Ox)ot-^9(O&lKJ!|cG3?~*iV)R=KF2T>eYpXt4dK_q8sqp1FmbO{H`!u+e@l{h!mNe-hAHMzu zRu~j+TO1ECur8YD*jTwHc8uhMJ2OW1u=P8WIRBofZ_jCILHph8Jhh9iFUSDXLY*pI9FAD#0agExR_1MnzVTFkYhh@(xV(b=Ah`U<*O*9U- zpU8E5b5Z4X1o^AM@?L?crBfO78Z!~Pfp2hamzJKCX3S-}y$e+yloOTrUheRzG-eFf z-Po(GTPRFXI;fyz;}_>tC{cRKo};QWyP~Umh**Ah;GR#(^^NzIgTnhRKi}O@bS52> zzd5h>VxFmTvR;)Y#+HOXvn{XmT^`ZiMH-_XHos`8@!UD?9EaW;ecQ#9xvi!RR{5N( zL=TFqn?_tQRXd}be{EBFRL73#j&aY!)91vxHr!srsj!tx@=T{`ZY;YW;TI50eyfQz zZV!Ijr(nU+d$(NeeOK?UtPGYYd$CkCnZ0z!^WE5o$!)5=e&oT+lGK!rZn5(pC$X`S z(@m9jzH^QCtX&;_X z!dpfW!!U$(79bjtcXf)uq;i=IcXf^4_hors)~Bo{!$C7#Eq|J4$t=0XdixRHv9Q z6Ouy8&+IvEm6NLGJoAkA5A0>*32^dHr_#@SFdgFF$ubrtlDK#NoY{n8j-sOaIr35U zq{%hws6N&Clbj^j?la`L%P2|*JI_`=#XMMd%a3sgQDZrWlM}GZ1i5daxLVD+r{A$ zX!^F|dWJZ0RI2M_T zX+FxUumC|K+j$?4F9F15sMo~Y^0B$?J=<$-s<||mbK}gXB+gMkCjXwP0bO<>|dmd zG2J61U-8`WNx)H;kHu1?7`B#|)7e2a;`Ij_S%)<+^_T1dZgqZ^hY1NW+ReTnsEJZb z$Gt?Fw(gzreo9qu&`F%SV|$wy%QfL;ZpUKH_^);$Dux9tYHO!H7dBUyX^S(y&(G#? 
zdRcRp%87`Ft|Olxys6n0XF=2}*F<|P zt@z4S>w`Q;3Q)dCih_jEo0HG}ipofsjoux2DmU4{jdi&=!@y_440k?AqJy*#zZ?*F zydlsYyQ&|<| z&)xgPdKfdoh*?V8earsRtgoLK98ny(S+iJ@s`FXC$3a_D$J}7?q$2-y{-15bwAUH6h2e?x=rmCkI6kHq=)F2 zE|}i-klv+4s(v(MSDmtySZZM91?d4#yzO4)#nN=ee$G*~a$TvbAt(5}J@W@IlSD^Z zdtGLnu{nHE8YUAWT^f5#OO(_u;36U_JkM}`@u6V5F)MX61vIW=8{hY^HTEl)#fyZ?GJDJ0~VMU9Eqz zS0PA0R`NotvPZahvU;8*A6EHo*j`oeLZ^=;#hTl%EH=s>r@A9Bu*BzM2|W47jWP^| zIA+=Q)p6RAZC>~~zWJ+PE{}Z+J<&C_rgTT^CP!+wvF*FBe7YKAbdFa%lvHe!{l}hd zonOZ53O!FwF!;~&ZLjoMvZ-m~eJOUBY&%Zg$^NRLYphmTq*Ddq zWbPUFogQ7C&e}=ES9IDo-%j7(qVvO0Y>=J}u&vd+6uJL~scxyLZna8BRLrbOo`Pwc z2Bx{daBAeUSON)~Ld&eeO|k$A-szES?!l}Hdo>B!Fr%vz-D<+CZ9tR;%svR|dt*8g zwW0ijW6M7L3tD#sT;(Lr zcP_W$7ORQ2-*ZJ^>C$*-cDx>APCt8a#1|WtOFt^W(*X%9=s|t95^I zWYfu}v&t&b^jbbP!?U7dskh)d7<>bUuo^7UkHZNYaW)Kg%66B_w7qGMZn9DXdju@S!w5gF! zp;@ENJoHC;DsaWDacknW4j{^)mfcJ2*z)+E3#L+x+S3!v7+3UtXxWYIwOUyz0ggMuivuMcO~r( z_DKqDNQeH_Fbazc^2*e8y?&V@W(J()uHG3mk>e3w;SWgbh8nU}{oGg7aWjiUVPpRC zawj|AvDalS4m+MZIrzRQD9bF9==dEeZo_ikM}}o3H7T7BEBB4u?kmL6oq@wkx%`@M zDbeXHk}yp1_=eD!%PSnj@>kWBI%15kj&5ra$aBXttNMIp?d7dmctRPFDU{Yw)2OsK z9}KFuxp0?Uexy~J;kiT>R_XhU@m&@N{VYzeWk<5>y^QAFC;cg}PdT4vG|)B6$o4ip zgPZ0d>3fw&AHvrq%ON{C9KokNajZV2!R(-<(^~0+1v`~Y&43r5osSnEz8Nb??x z%W8a@$Z19>u0!Z6wvdNd-B^3OO-FveMfa!J{1XzqJDtw8Q%W%ouY}C%RwNv&t!#Yd z!$G~7S&M4hHoqV}t47|Ff!;i^43aP3^_x*$d~(E3In(i3193^-P@Ipb=I#>}QLem$ zo6;zztLnZadMt#VdgWucMlV3=ZvJ-8jh01xcB4$Wd5=yUIAJj*{`sAvwDZ0eDWL_* z9Qnfqf+ds}KARSH^w@gBBd_h1v1(`4kH=_^>Frj#5-fFm7qNWYl`RW(@FWg zZghh9p-f#U(`EX#!*zCtA1GeGdqRl0e?Ep9vvaaFr_vb9@N*(>?_@bMrGU&hE!=5%}uhd3t`;=yL|LF)8mkQ$4!wty=9mzh1lNl0X zCs$p=WTiN;txB>fa_&cWttc6v->2aQXy#W80(18~YPPR=cBQdd+tU0XqjGbebT#p7 z?kSykq2d#BO_s8@Y#Dot3?( zTj*PJ(eBB>)m!>biB5O?$%xC07pQIRI%Y&8Z_PZnB<_vrs$j*4)cStF*>Y!~CT7QCUrvE{hW{ogMN0xM1o_yKJR%=t}}Po|det&$Y(hxqtD9 zMBihTWjr@~(|ASh#a-^E_jA8|9msX7`tXFgW&@AbnDM?=S)yzG2?mdvw+MRU7jZ8z zYjv*9nGTN_{VFOJX3xgWYYQ*c?7rv4c|$jwbDk}dfBy?1U*4gLYsZSEYfs!UE{G^t zppnOXsW_)QAp60iA;XmG^AbotXZFl%v+^)=4G^`PbPvxp6WZ 
ziHICc-c)z%Vw0~2V~|4Gh(v9iQS{P@>zP$9elI9!7jtk4kKr^x1Fr#a;oh=8^@Z)N)?sC`VOk<5gmV>escV202O}A@Kr*@*9VAB0~ zbBW?k@zM^5n+q3Ws*N#KexlA<6~W3IEbJfYq~1N$_mb?b{ec4l@|!c;2fJR=Ox|2^ zuBTzwa}uTEe7bObp4`69q$Jis_x0+u3e)&~4ln-a(Tx<0tTn|E(w|&oF0W=vZ5J^D z{&Bs}7`pfxk7kuVpD-YLY414n zC5+NtmHupt@AgA`n|B3PRaN2}?-VSi(khz)5IHUYntN_6qTruX?iA1-Xo~=-R^_JL zDlNDa1mwQG=Y;~`C{osyDl)nqU{LaANWUlc_$$lE;@Kg)Su3#t<75}(&1|?ZK2>W^ zQreT3GiLB2Ht;-=lYfSfx3dfGDNveJ?+kO{bt)KF89W(sN$vk&O#cgU0Jd#A^F;cg z9x2j}bhDjBcG8W?M+vPqk91X^pyu9sWVCk#hTjaI?jWDo3EY=4fC4~=dEfGyOdDR_ z##0vpwFTNqec#Tl(foU3-e4Y;k|Ib@@(zP^CFwpSYKGL@%yVAYA$FV{xRZs7cIaWu zIP+^3Ok-Yx9c7*piof1xb!;YUl0WO}y>TtP*|ZDH>KIiLE?`LX=>9ojfO@4AL6t&3 zaqrhVJ1nNX<|F;K14L6li;!OU_&vcD;w?ee5B@gl2Q$r>wb=s9Q|s|A=wuK4mGx`0 z`55241oPRn9V8~uoqaYBJ}KWme?z;b0Ig!Xd;sS4U4nErzI*4Mha+$e40tEVsX9uC zI1WVIQKar>G=x$#W4dt>n|xqCS`nM*+~te-sv52eJGcx?CwD414l$3bbut1=IIo+5 zC^h+Z{-1MJFKXG5qAVGx(vhRPAq|OZ|`{^ zqK?zK{3Rppp47!xkr;@gc_-m4n@?xwB#Ci(c{jwm7b&jrkm)1$@Gt&W)9Ms*jHmWc z7)X3e$h#jwP)2-s!Jm2c-QlBOT^KfSnvP3!TLEepzY_73fjQwhgFWFq7~h;m_;-Y$ z6>!@~Pk8s?k*MM8%P_9t6?1#T!(o2YEQbl6OXMi&8 zoZ-B-5%!DEGwrRVumzIbLLWL@Yt#?6)%-K&ugz~NuiYBy`v3VCmzBDe2zQi4YUX|0 z1VD9M^7Y)pm2R!ARUNoxtiYY9z))8zNQ_I*mVjW6NuK*&gd&K(!!fjN9N zGXhV2=-tSg^zIOgGP7F$%?ta&gZe6r--tO?at;N45e?r<^=f;-_^ap43coPa8>?OW z&gOcrAaHo_OQiq91$|)!_)}=4z%cE*|Aj}w&oK5+p7$4DYV%D9VaL%z5e_Rc#{SRZ z>zUt3K1Uji!<{8kU}tg1<}h`4;&K7Z-@mM^%$wk|s%9yRn)W`O;!5&6b?VglM7dXP zG-m>AU@K}aH`58cu+y~!GU@NV#3Ivb)=!iu6f*k}CYuuSgF}A_E8u(%{PeV-GLWIA z8JYGs=kb=YeIK?x743{sJLzA&$Z^crZB!b0B_upIKZS{2`7A%5sQ?__d40OUj)>-eX> z&)!(&wYJoH!UBNGEF%0FwfP3&r*dnhiL>a6igwJ6l=wn~WrD|c=kGuZ zIL0(=!yGifb`gcdE75HwUxx>I0Hb)W-|zqY)_8(X(LRCLbn;mpgPwky2iIjmVp_4->0ncqb> z&Gg}DsU;WEhR#_XhD8FoQ8uQPQKdN@z0t`_L>-fNV@cS@t#FgOUdu!Iz676}@Yv$}vBf{}o)knJ~AgfHCj(e3K8tJ&Z>hn=KZtw{$InaENIh z2}Uq0JHTu8_hQ7Hf3^AKM49ko324m{-QktG=4HrS`Fr>lz}r8tJkAQ#@`2j- z$&C3m`?V*sYUgE@_grz;1I|oe>nzNRpZDAufgc|~8D5VNI7ntyF&cdt&wA9}@LMn1 
z7RyPhsLbV%Y$v99E1Eg4lJ(DRwDi&p*mv^=E3-S&otCA8K7GH@s>cZxwuW1FDeBn@q`MtJd3|iKjX*%m%*rVw$R9hUcoBgNw zRhYD5LDZII`b!xGyi^AwFRYV}%u(i-x>71C6S?j~hu>r}zm;#MpRK!fTe&!dv(V12 z_3+sk+N0&J|JN#z7F&T;PiZApmS#Q|?|>(NFMH6;Z_@&x%KUF=rjT%)G4TEyak^ju zXzn!C3pFL#d08jaJRZHY1MINn42@P`Zchf-$C=P+ro*k5oqG~Hhg^SeS%2JRGyRtbKDhcYZXG(0Oj?=gESnx7#|lGD$Ih{s)M|5nWMj2R~b zAqu0_SZxm9guMQ?JKX@(G|=u|(s_LTF0A-HUt{=l3;nNBZyNp>-u#1RQx#C+_^c@4 zRXqLM@oySDIeZtM*AFqz`bn7oho~IJb|*2>TV;NWkcrM-&iG67Urs3iJI~>OPCnDM z11!@m$JuT#z12Dotx9SU*6ZvQl@)_UZ7@I1=)3Nl^TVIKXH|Icv4QYAi+dW*lhUn+<*A1Y z0opyYF!SnXnffhkL$l?__za#-LPxHW^+P&tpVLx?tahq)#^?g@v0U;dgnkGe&0D^?Us4rWG|dG0TgE0w(S>_ ze`N7+7`k?27`hxhxVb5%Z?X#57N<&69e?6XreB3wu0ziBmZvbbADZ?5Wc8^1H9z}j z>AZ60H{C=8KrGh+s||@X!F?t`=dKIkyq7cn`xO*I($H2d`wWD@Ct}tm|B^@v=3hfT zUnQ(B%x~-ehH%5H>C8Kj(8MfS_g|P%!OL|>$2bfSX^Eg`*#$n$O6M$lJ4LO37XAf= zP%YznuN5E>*Dh1d($@c4Z3T!{Kz4XZeVqDxEP<1UdcwZzR)mw+FAsB$A^;TxrxO^a zoM7qltjqU=xsTr(`j>%&W_LE)AI{vkQL7fGdF?Wedo6SVv)5saW;QQ{u#3191{3LP zmvW~HXzOZO*&32o%gt>wZL+JE;6J`j=JSEsR}?DKdEqIaq3b+&kYD!na;Z6hzgisn2b@Av2#f!@W)g z2}je=C85o9tod8cqPdmvU{RT3$NaM;#6@+p4h|>uEH_&ptpS{ZM{b=Lj(!bIc=N(A z`v{*J<9Yn1Us1WOzEjvx=7}Eq_uPY_?;$(G(8c@191izriSe^~*o7YUq7_i4ImNwe zKUO2mi~7O5?FaV)l;xS#!(aPB|IT_WOn;l{7yG`MVN7kg&0Kq4h9?F;>_Z5tN0+4( zyHCXm_*hXytDXxmKdLQ!fsk)^=)L^XlOd1;ARI|GnP8JxiW<#lwQ@6{wsrNF>%W^K z3<_~-(P^F{!ZUw|84^MvR;!h^4h{q2f)X3nB*5^}=+R9>;plf3hZ8p~46}CkhuLW2 zD1-IFdkRfTUFwUL;o=)ZDL7(DMFAFtu1=lQiz?a`uazvF(F z=%#@9U7vujte-u_$LnT5fjl1zps1hg^RtL!SahC1)r`=n&nNX>AuzNqgsZP(SbAPr zs;?2@YJ>pVuQ4F%3gM}|&1e4OAo+%0Mhn1LZiSs}5ZO+p?LCA9#39m5M;Ui9($o27 z$7`uVfVua`q-ACKUO&MAW7HIlrrGO~)Uwrqc{s%k7afCZ4~N0^hr-CAZQ)+7$?^VWLg%R8MQY5=K zT#f9Z56wzlojWq9=LeEiA(*%0%2a2aM5hK($du)6Tm_M4txiD+w-tc7W$+#j6zMtt zU9Qa1F06Cj7Wj z6lo<79>)u)IR`fVCytiw6~yI48S~gf4@8uGqPdjakBO9cBl6WCt{pEUp!zH&BDJ)R zy%d6KCV$)KY=cLGr4W-u>M+1DWVKK0tK0Iuc-A^lbIZv77KJR1X%qsJ1>(~gkkn~e z@o0Z3h}#Y3?V1da^G@ln+VYL>8k33aS~L%NK|18G^6NzcD7f}weed{eS!8)wUTz5F zk_(T`ed7+MO&>@nTH7>_RaL824vT7fB!C%*UWFUNaUEzMmwP$lw3maO#GdJMrE 
z&iR+oKz(M?p2|n_zZKe3`Dv&14^jR~_+eF*FZdlytxuJ2HomXEc8%^I)99hk8V&&% zGf<0g-@HV@Wxg)&P0@}r(6SXc?dZf=XgZNS3rE>0f?omhfXCgt(CCZ@g_JxiF)ML* zVuGn1n(^LBOpX!t2b2v3TwG!sz;_bZJ}m%ERyAE&d!iM9nXW}Lk+lj*l{?Lm#Dz@b zvfVPgSXFW9Qrr~hUv1OuFSaNbcAA34p_lb&MgZ7ft}NB@m(qs%dU-nb0u;nTSZshg z%z?fHg@SysXc^;s*z&+0?s;DX(xm$bMyO9uq0+fbp2iQLrWpfyC7`MUDmn)a`~=en zDPC5=$P6&~GCJ`TjBdx+FWC^l*@=0yVm*zHTYQnorfH@_6sDftmoooe+Z@Z#u_S@z zm1hSl0GeHVB#|sC?2y`;kUx#cj=mcEW@g=qXBj6C-}S8(k2*=40sAbT7)-x$*$|{d zXM~zH{WD?R?EaFhz0%s*u-0mLLLbd6W>(@5fc~dS6-C3dRVb$30=mo^b;Zm>zk+QRCY{TB*eTE#FQK#OTrOiovJ6Q(W!gwrx zfyD=@KqDLqXhCH4s86Z@Xm)9-1g0}EJFuia&-!}_OHXo&#ZaGiy4*M0M*BmWXm+N@ z?spc(K4=8yE91MvFE37`bk<+L#Ws{pPS)d+8PD>x232sbGw6q&PH72`i(Pr(^B&?X zf^YKTj3%W{C$O9kBLs>;US8KIN}d3KD+=H^+CK%Hv*ZAaT;{6HBV$1K`@7&n#!NNO zqNWC^n)6D3?^8eod!~QU`2of`0Fo2494%JqoNXf=eOPc|`{S0_(q1pfq(fhcJAEBx zTcK;KFT*)LZx&=~hZ%YOC2hoI-Fc+npg_|ooV7H~KQHgvr1qz4NIXkG#pW6jpYc}; z4hQ6$G7H;`%<}|}In6&e}|8X}Xtn zR#ukrEThqaA2AU<^#{}QtIb#IqM3TAB_pF0cR^M}$N$=PwWq5$Qw5v-5VgPD)FkBj z7|bfO_R8oeE7H0`tc-GK$m?i z>Vh3-EK-@-Hkq&5Mt?xomb|uR&#kjF8_Ci?OW2{mYBHT>sqN3SboBf;(I)%IuQSEr zS|ehkFQlW?^#qamVZvsZUyDzzAEzxTFSX6J&9v%}Z`j8)W8|^gmvz_X-@`CzXl>n# zhPMrQI!^%XB@1%T0nhbIIpbd>4r4y=!1LR9#xbXWy2mzDhnhnInbex)*RH~(PBo31 zZFTuIx|Q%W*W%iM1`#A=SAF^0h`t&msXQWI{TccMQb3&PY*OHq5kN|Qr!$Ui@%;Tf zlf1re_Nn-9c5Xg0E7qnTV`)Sf(vN~z|576XUtf<%w^imZTj-N9xfY~OKDB=KtUlke zd+F*V>2)-op!_nhoS}2!jU)k}Rv8C*RJom)P+$%WtcDREkdDlKXyYAZ=f(va*$UWL zJ4UC9TS7Gxix4gIIQ-BOa85X-TsJi7duT*NmVFeVl6^ZMwG9*>^KV6n9is1x%FXi8O|blG6b1BE3~J!bjiTfUfUQI}<-eU8Xk#<|y`PX|EmD3D!e{NX8|7I4><%7msekBwLjk~wA#Bia zL!$(v&)kHt@ZNMB9gqSxaxX%_!y1OE_(aTGk*|k%Ey!O?=X#$9mZM26kw^>;Xk=W{ zB9q;A=&9dKJN&?e_bCvq3$$C31R$FE>O9k|O)Vp(9#av={*d|eG;*UlNc5Srx6%9h zXW~RA^&rkXN3&$%Mk2Ly5sjF)`9)y@^A~OB&PYuf(c~{$LsDPoM)$V+FlBGphO)v+ zPvr|0gW7-TRgH43fA&3iy5X3jp#Z3+hVlxacTouE{Sr)75U2i?jXi>SvjO8`zsyMI z(Ds(V4i#CemQ*Vv8vQd2$%#Qox$6oR}u_mEqn13n$%@@B;WYU`S z^_Lm*>+mab^hz5JJ@xV{tb~N>RwCB@rwNh%_h2b)LOJUzOVa$+d^~*Tf?p#Ke#-%5 
zHHL-)fOW%H68-s&lT@D`yp>Z>{$KNi=@iEZnr$I6K81T}or8V7zYx zGElAg-Y8D?%q%3xBEm7hOampa;3H#w4AD`P6;|?_G1OG=IWMVJ!`If;X}D(A$=hEt zzKmKloXZN#`Y~EySf7^J2+UV2v253;>ElXLnIF2$6SU^%%!EVFW`ua7y2Ff0%lg!}F^=Juc=r0YF4C`UV0$k^@K4;Me~Y9sMmTJ}!%# zL^Y3XtmhNly$d0N>2rYViC6;?j+=oTj}Ym6 za~N3$*~WY|ADdAfT3K2zYP~!HCCX`qh+Bq6&2G__emtKcMf-_wXLq7+M4I6~7kH=X z-)L<|md50o(vJDfHxIYcLpI)*zET8LRz~e_tS)q=*1roLI!K?!HWam^HiMdL@wVlo zA$hDcHxknmDfpjvSzhj_96l@z1qVgz9fJ=Fb~JN5`Iya zlq%CqS$uz`4+&LgW|4_nn`SDn#iNY}o#imV&T=}*4sHaN z>54nY)!mDg%F3{wnUkvNO;1@SlkjW3;!F#KEwgc( z1jb!}piZ6h&&o-`v8PnUek-Vq`6;xu2)(X{jCLIBuUK4pn?0+KTXb*UC!psYjPW;Y zM%W<0qNUNG%%Xhq-btf3{|#;VLSBsQxjg;y>?ZpiBLFZF*6`w~*~@Xi9s=qozS~sy zG5Ko(oBm zGKG)x$Eje)`Mm;v8`ZB-KGs=l!==tCLyksywDXzuaoikdSW2{VZMot~Yt1zssr_p_ zeJ*9Vvd=t_rAWA1Zxw*Pibj2J{d{i!L_tzkXl>)F*Vflzt#%!WmzrHBj zauS;O$)A#cDg8aA%HyNCj1d4ly@s%4^lkmcmj6Lw-UM=B%t-+nBjS(JF&mMVTQSv- zgTzLiR93z_sx;mFzMJXo)OzrdW^3U}HPWgDJV2yT9<_NkGbA!EiPS?$N~yZ05IbDv zkEO>F{`!s5yuM6*az8*YMwrm`Q3z*Cn&79!ZZ3R8X(TP z*~e?iBX4(TO|j!HqoN`GPOPH47|uo~AF&ze81Ro6p1(j z0ox%($Wf9yW$0AN0hq*b^AL$NrNm7Nh(t~JwdAF?s$bikF_Y$?Q*@0O*k?Mb{A!F# zTOIQ{9v37S2{Xrdm73bNJ$Hg%(PqdN)~>rlLEOqC+yZzGiM; zI0NEoNs+jlDiOw}4|VdYy>;Bh?YZR93*+0zYV1Nw4V;!XKXWud@t2A{Fp z>4*LFp{Bm{t!gbZ_hot~-Fl&|0qQ@5vg#$F?QuA)kiUB$^_5p*A^8aN_4dzzS9okT z>GR=Ad7XtpWJtJOYEU%XF2mSd_#n2t+8SRaULhKMX%ZKbxCf$IrZGwPg`l`;AFR#l$F zX7f)kv;s|Vt$>268G@;#9Q)t&E<|Zmx3MwD-)Wu5028gl_?*W-5pN31IF%X^GCoeD zC-#-AHXZj*1hPJG*b!G^l9^0j)(7^#4n)V}wLrB%9F`(NX$8WmP=M4XtX@ZRpZ-^{ z>_;fZ&*PQgMV^qBhbVIaIMEvN({8h9Q&u0dmnRPMU=vg2GWI>}>f5nko_#3D$9|W5 z*N=TS_D+VoF9P5WA5)7_FjZsbH_CyVD}qnvx=m*h3PN z^^dn%yDGOW|F<1#pST(g;b<8tV7r_eQ+p}ICKIfaTB&RbwZ0BAzuI}C%x}I~AINz9 z(18zpmO=yeuQZtn3)5*q=`+xA=xkIZ#dE>Gr;Z7|~+kROJ~b0Pe+duN5#YhI7Jv~Px?1CQ$~769`2dcqy1uN44i`5+gL z!FX1(5Nsff;iisL1rjSZmc}V9t`6~tKMm!*z2QrPn z06`NEBQ?wy z1BR%=VI<>jh6aZb2KGoJK@f4lKX-s_LNba}rFbUUv%K?uMXDOyX+p=5-cPpn%&SZ7 zbL*iAs!8us*VOI63Pt?^GFmgoUDcLdc5OfC|43f;Ooq0{`nY{tn&&FCfH=@n^z24Agqf@neP(%?uYzGdf_VvoW>6$ZZ{*PwAmEEEi`u->@*d^&S^ 
zHDI<9LZiHWcRE0#Q6p&yxY^}6II)UlU^>U4ddZ83vMirQ?=4@SheM~N$`H-^I4sdB zNfpeE%ASm8nXqrxhosi`v?eVs=_apVY`ZWB4KIV1=OWYtY1GiIfD4H#(xyE1oD@F2 z=fv;GTGd>&+4I3uM`~l|#Z&_A2T^Iel*Z>0kq5FCQkb95Ua=BMjp;VVpHP=fg7>fv zO=rK$C**Zbk$DoUDh{-WT5%$l6=B}{(Ef1-r7SY(gk~sg{ua zlt!`bw#T85dj`ZoR2q$HK}6}a7hUStuht{P<;S99z#C;V<{83cUzMiEJg>i@Y(F|O zNV(-eBPvkK()4fv%)d9CsgY13#WZ>L?S&AjP%feh%E?|So%Mw~w6v|r2VZmj9U^l4tX89wBTeiUZf z@nEA8rB(oJ&GKsaJMVm^n(hVvLzrct6|i+X6({?N2m#B95*6cQHRIcL>oD=F43URe z5#@N;PKz2Vk!Z`>bUNOPYgaSsxJtphZP!chT8lD*No|k)Cq7d?r*59s(-|5aY$

5 z>M9536)R9H05dArGG;GzAXl%~=+Q%Z#1w0$K&Xf8KEQJwVu{WI|JCCu^ zib2~Yy?GGl3QT?dR14@NXT2suW^`NRk8BUY4)`Vxw|!9p)@Nu90^r(`i1D=<-<3ri z?I~6UIWG8i_M+kM!pqRWqKG>lO9Py!Mwd(p%m{IN$QDF_l8RhQM6TP1G~d9A|C zTz?j0Wrqb8Ro4TS<2---RMZ5^98-ZQzOlW|SN9YCWFCK9kifh4uvFo()RQ6~OU zw7Iy~XW>5ll=ub3p4L~=tRFFHGzBZ8Y3?t;++PpFEkg*nqIM$f6~`(@6{LFz+!EjH zK_QlZuUDWbmZzZZJMjAcEaIpDs6O~a_9FN9qTiz+84hHmf(`WGk8KE_I~misxySR* zeJ%o80U1GaYml&(sg~7rh~}8cs1*dpi(6+~DWrqas8v0Rs6mLC|$*JlMs{88H-?$`BZx!*L!y}S4$b0^CWtso6NV&-IY>%5|ylDYU<{E@3JWKMh55%!6K+EdZLRF?TiA^1#9dV(L4dcN848H^= zGarVX1%0J0wrN~#1*H37j(}UsIDj(FHpaCiJju_G^&W$5d*P>IfHA_yYNON(0LKR# z2yjvZQ0xEXp@p_DxV4YC*&99jgs$Y(EvGZzHA7JDw$nXMo&G)i3XOv2L~TLN+y z;2ZE0Q3pUUav{t`$h0C{3W}TZSK`BU>n^U*EH4tkzH{nM0Z??XX<;MC!ySs|Q3m4H z1z1`s(_H{q|Ao-wVekhE!hEcndEk5w_t~2LGJMp(Hy^e72;l%4-p_s=hBkF<%k(WV zezg$uJp%LJ@|F;;_`CX&>+d)601bW*ShZ1LN8tI~+(3XE8^E4#gmB%%VJ47~YP(0q zlX@vCmNuryN+EGLK|==6aITl02Qh(jFw{EZMoCMavHS!guGFNn|Jx=v;<)ZVg=pNq z>~`!{gr7oWKSb)9iXven#?0T9i!McOrL2y`I_ThW89_@{gbDdn~Lw$PgtTFpI7qtKOWVNO#%0Jj3c9C7%u2kd=Eb;6By^C+a|OT^+iY*O?pF*Pp>16G@o9ISLM3hoQDV4y|X} zUL4gzWO^R-K-6>L6^9xF2mqCxd7W4Yk1#~o4r8Yr2x8tI3|cGzj5UVGsqq-NWXxUd zjR%4ry1i1l+UMi0L4b`~vz!Le7(MkFUG#F{m~;V+JI+kPeh_pw`Dzx~=uyJvCcD;| zr)990Jm(<@JOf}bV(6ZH$JL?xAYx#mu9KOhSKbH#B_^I+vCYv!Xs% z+k==Wl);q>a`9ql8W!|05RrFEflz*+` zm+{(rt%AFE6PI{84Lz;3D+t6;KGfndKUV&ml=SwO+RSb}lOXaK>~j#>-Gj9%<7X#Z z)oXk4l4<+_l%I!zIHRDpuT_xsyrl)EW89O24ng;2{oz>vCs3wyu$uE>IlSJB9{`3z z_SD2fBn3Dt2jY$<8e0lrCQ7sti52Alag=nK=YAiE5t)vEqLV9PxmqIE@fm4cDB%f0 z9uKw*Tjs~Kvf zlsO&ah*p>QYGsu&Cy^WBu9f1)X-P;Ec@#b^1}i&S;n52o%B}ev+Xs{N(vUJ$D`6hQ z^LdotggTR96^N`LaEb3qkQTQ9Q>Jyzb7qNrzabDm6j^J`WO>f8Um48>J}F>7xEH^y zzw+|3EFF$1ohYIEjew^5JFnmles24YL35j7emA={6tLhTDl~-QQpWG@Sy>8GYH^FF zW{_kbx*oxqu_Em+%FKFT6?MXqSPb&`sO?cJ01!=ql5(I2Q+wV8HA!>g*+88j+cD)& zf`n#?!iI=IHTo*771#Q^)lxy=^Lo%4&`Ed*=~}84>l072d)x0C{}f_yQI0`;wge=M zCmttspix|U+;UkhRq!+6B!nvh=HFTAuYSKrIyjIk>Kg;b#G6hO2U!16~GysUX8at>hy_zA04gn(JuAQv1B z()hFR4Xj_w@PILt$0>`@F=I_4i?Ept;?xZ)NvHCF4>t~FM_;vq7xAwJ5myp=QvlXY 
z!IV_Df_$dKt9m+<4&#svE$=DUXQQxIqb&QO(MYE)G)jER{93ld@T&J^V3g%om&Nk^ zc6fc%j2smJ)s(js=HPx~k2|`@g2;~^hXOb*Hrfc7Hjdl#qvyVn_H=Bx>E|pws2;lD zsVh#S#5`%ay8-oKX_7`l zaOaprR|cA-anlgOcFZuTEMbH9rgxK1%F=J^sDsk87gB^&gW{&eBk50NGa#P5p-);h z^=+_xpe1pLiw(P(T67fN1oJyg_ArnOe%0K{08JdeUK^=i0NljO@76{bcfI)jA)NP8 z>Imbdg2Z7%oxk@R>23#(U;U+cpSKF)-!PLw{HXo?h)D`#i4PCvn8^4)!>4&EOop(y zQiaJLon%eWMpo8z`<*E5*{?FbCjf0>CTr8CinScIeiKX~sxNvy0%>&$2ERw7pVUSi zBLHv?sA4`?8=>L#ef}7e{y_-81dUXif!2T$YxfR@1y}?-FgBtf00~XBxi-t0e+^|NRi;OwM_dw26hAl)ORBpex%P$2aUY&t{KqN(2rQ|u&;)u_T@wb7eyHJSx(EMt$ zX8mOH-}>i-aIaUUV(EkMINIbMusUOaWrAyYprpX%UI6=k5W<(9K!xFhSH?=aLN>r=^eALK^wS&%)TBykX5ACYG%8>7SY}-p!rou zo7X9Z5l-n2lg>>kUZ;S}?>M=?3CkfR-{~ADYIg8@1iU(x=w=~@#tqjJBs5g!Uwq#X z9`R|k|H^~}VGoh+hXBCTxCiX@aexqb8xP9|6@D@J`e!lr+uouOuxJpXAeaNO&0A1$ zA`Kx>T0IzKTx+KB1M#u}Ybh?WY1mcc8>bd=E;l3(h(?tjSB!z<$mB4! zUhC-5;@L^kOPB^dy>ZDZ?-PqBQ6*mwVOhI0-k!1l#F_o!)?jz`-3`;bXCONGc-AV& z{EO}r!b3j<&zzr{%iqBKuda{Wuv@EEfDXY`^$Et^@A@)sh3DhMK(M4Zi^gS8^B^vG zfav^Ix?6zfV-lc5pvEXCYGTg(OEtz;d?sp(rp;*thY)|N*GU{)6L$<{jJc`gUG`t$ z;)h$8A=DLQXA;<7di32I?99#RK_J{mOJD-Xh?@U?AzbzTW|&{k;sfiozqQEiS%koA zc>fk7mBIo13g3DOWBTjSI7+tMY)i9>5u#?|%EJ*RdN4>@iJcWoBJu1$KSwoDf;pWn zlMGg41$ge`Ocw&S)v>*2UL3-z+K=EKcM!+~Iz#0s4%a=C9sPfwYW8OZ{u<2x*6gPC z*J?Ke1OUAjK0$b7!wB^ckM9fN`@b2&?eF9b6>Z5cP@gIJC=4`D`>4*mxdcqw0TA8F zDM9twcjCPauh!oV^G|oM>vT#g1m@FkGffD{J(-Dr6cb-<@8|8^BqR?s>(tBZZ--%X zz~4o$3*qA5Bj299n2nx)k%v!208qK{kShR;iV(PfA2TQ)3Yb9H{J{`@@OmWREfv$X zT}X&l0Z3J_j1dM1hne6nknwG!A2;=D$1?>K0-D5rirqy*QJ_!b#@SDX6VTmJ7&Bvf z54BNCXYbq5M4ho@8J0abgv;K z1#kS55bpRVOd3EeC0kxI^eN6SEp6Kni>IUPU-ajsvJUxJc| zW)fumuowZLPe5PMbP)u0h5g}*(|DvE{p6D8!vrF5}pV1|94TcBuYaFyo^j3cqAgV&jf zP8$};5gEg~7ymeQb`zFNf5ZiB%#O5M(~qK-vOMs`m9}TUkX5&@WypK)(YH-pfY|UV znExGRX>4v1AOsLa;jM(^G@}`B`$PygzZnhp`N70!cNEky?Sb(mjXErq5? 
z4k7STa&(OVHg~f?y|#Zggj?UvzJQM-=|Hj?uw6caM%9U0UwlxGM=cxr7|HZ25cusw z8fYa%%S2O-sCGsMCT$w3<(ba>1lt%1jO$|%(?JO9AdWtdJ`-9o`87Rb+UmcUM^<3v zRr~+bwoJ&{%&e)cID8|X!Hc^(JrUPE2mSQN95Njh(A6>~(vXOfDqf;yH&4nvK{tyRkLHwQyqqIUq%@!Pqmqh;Bv1($c8ha52o_^!r_kJt} zhYeoq!FKqd;D~+&d&_kEjB>Zv#Ft(o5I1fIB@(n0hDjrFNoYr5c#n#ns0k^7F1w0+ z_&u_PJeh$RVHlz8%IXp8$bI%3>(=1j=UJL8PVK$d=3$rvo_f&Q{Y$Z`&Vq3-!DsOk z1o~%r2fkW?Oi0@ot_pPs=eG|O(h6v;gHc66dxhAhtYOv4#vlKY)}Po1q}Qa*G`r^e zF@i%a`@uQ&Y}I3=htbTvm?WTb+2$cP@vMDT2|tktge(4v}E|=Wk|_xH3`fxRlmo&K<96 z2F8_j5SH2^@RbKn9HtsBoXlp!M_5~p1zq(pn(&xLU1KaeM7jWC2bM&Ijr zJ-m_M&DoG1F=GPNtdl$ldO9(K!!LQB^t%o@w z*#4C~TSUC75jaf#Uwt;#-RD{t0-!0vsC5D2Wk?0DeIU-Y(zFchi&$6tfOpY+=DB^~ ziMZlFT3cMpX!;o9O2=y4Orx8&B1ycMe8@cdG$Sm2AY=R&RhU0*6Hi(FzD$Hy!u&rd zi&+0YqM`m_$~{5=N(sMDz*7;RoC$ygy6uw)g7;KDZg32ZX3gR{>6WWFfeM)v!WsSX zI;#iQ)mN1W)&KFe|Eh6E@5jWNzzzAT&CY!>A9s2OxPcK;A!<2=lz}|_jnauqMj;?G zNtyi0T%&DGEa2F#%4*PUpP9H27*k{w(ebB>SMghYJSOTWzO$FM%^30Dn5a{CLtQd$ zKe&Ra(8pKy?XS3|x&F(qur}sj6aEn9$3xjXOc*-|f+tYypAoKXzW4?y4&K7{$PaOn z&wsPDj!A0r9c;gT>2-_v`pGz;1_Cd_@xl;{CE0_4vPY%!|FIC z)u~~6PtRJ)P;Kq=DhKHdu5D)xZXmPYf$={vkxVBn04y9KfEOeDGC^wqo3RbxhKW62 zM-cp1#c6o(=8Do%QEB>~DWJ*keDGy&4&j^^cXq4ovzy)&!jJz5!Oa+ZD*QSffsq=f z*B7$jQk;5E=LoxJG1GCscGr;cU4R(E-@^Qu&J%;S2!N~#2!ac_{$0Xz33zeZ1S1C# z24AbJC)mzVK`Q}EHd=Qx)iO})+s0LotuTN438_t;(;yo^Qu#e}!QSs81nj4oD6r`q z{R{j>2dGT%ae1qs#uy)CxcqqT28b(sghUPRBm5bRkLR^L&{hGERS7}xV6K0Iz}&`| zkB?+gTL0Bwup7%@+s9Z}@HxEDH{#L7;T8#vY@_KpDa~rV&dsFE7=PY5m4zVZy^J7*zWGRcr7 z>crm~6LRjlimC4@FMjA@y2HU+L%0P&u<^YJ0<2fNV_FOiYWl=Ja28^KS&&7SSDbk( z9?dl9MU!dWsPR8acpHrWMXN9wFsT9{t1yD#{#?JD@EpRT?0MUNoeEP3=sPed9FA

SVnHKj}4ePGtpa{G9`}5sGF&)(7xUzpq|VdZ*`q?i>&YTU3d$C|WCZ3q zgZa2jG$Geqi|h2EFhpOgC_&1FR}l^nG#i!^R#t=83kYUz&=Sk8d$^XN)i(9v2+wpl zVN%7ggI@zuHkR>E{9>*&2@c`>j=lRsc7C2U_IF^hRqblgZ--3E3_`UJ>~#J{gJrtT z!1WC>{DS+?7Y~N<8OP5%58*sSged_WTj9Nge}(apmQw?g|C9rQU?GJ*f$+10#}Eby zQvme*vE5h&n_!ijD%QgOA6J};$DnNm#tEiVB@k&c!%p^V4QsEx&%iVpWY_*0Un1*; z2M5`%;(|!LteX zp0a{#3vyN5@x2w{FuW510TH^yRzYEKwVN{$hA1FL5EAfATv7}V%|WToMvHi8gWg|P z(haR6oLl{c@m)pdJaBux!ylm;FNbib?9>B&N29_&5T4SO(q(+{eU#>CsU6zD@12!r z5%(A=eqGlUfBJtk+qD*Az@q9wTl)>mIfVXY6@2@}9LMf5cwE(p2`{6c-nA4rgRl1! z-kT!0+k)GiOisH1$m#_#&FA_Ngy$33!Y6Q9mgS;#|AK;b)_vlu_vT-H@>L<~ zvTFnCe{dg~@8ocJxv=*aX(4^TzyHMc$NBTQ%+W5{>=3`>$aT$QIw_}Ts;6=oK1|D1 zZTAOxn-NSi5bqmdAYC zX}|V#Hkfbb4&_O(YSR}?`E~w6`Y(SgE~_AhD@h!OtY%ktzJA%#E<8=(oXdeq!R<-= zw+3%$wU z-uaRXhF{*d>~ETpH>HKrc=dD-$!{fNn;4)*n>yCLJI1fF4vAp;4(u0@17_KOZ zDn+09%ghnQ9xhn$oXy7GnC*j0-#PaGDOTIM>^Aww`Bj)P>fh;Mv^o0Ft;y%VUY>WrfO?O#Uxti=;lfG>IKl z<|yX8%sR&-M52EFhft=^Qy;c}Icrw8-1dwFW59i3fxANUVqJFFJ0>nLvzor3IWW9- zdBeSj;QI)94h3yK6R{v(Z($+d!*yym%(>qAH42Bg9NRbf0Mo=1U#w#Dm&BL6IBPLu zBh!LFf5#fDWizJmiwnK;zK~?mzo0EB!C<-h$_Ev3ZTnjl694{xJ7=R8v!jTsbl~bK zljJvi6aMr4Ph-a9V@^3rN)cU}Mduzl2{30iO|azG+uXu(XlMTWb17|WE-;_{dd?!| zVY~M>>yrLEwmV$e4l&K$bi;X$pQY8|DmVWZf@?2w*Zpw5Ctw=F@aL(9Z^gma_x_ll zP;KU25wlr}C(p5HQ+n2^Y`6RN-KSs9|6fyd=GUWJY@2rO*=YVk^k$6N?KZZ@D&Ajb zGsK;^E;(yqj>qeRLXsj?i4F0J=>?m=9J&QO%B$_IdQt8=algmB@k{0NUoh+cxwFBv zhC$}BO|PbUI{S3{#|s%Y%-ol`s@`_a;c|=9huUlZwKE-9zWdMDgI}vf8#!DQm;^f< jSd;`vWInjV@rGg9=CkTyyRYSfZuj(b^>bP0l+XkKpBP0x literal 0 HcmV?d00001 diff --git a/docs/source/index.rst b/docs/source/index.rst index 215e6cba6..5c593eacf 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -61,6 +61,7 @@ The library currently contains PyTorch and Tensorflow implementations, pre-train quickstart glossary pretrained_models + usage model_sharing examples notebooks diff --git a/docs/source/usage.rst b/docs/source/usage.rst new file mode 100644 index 000000000..8fb7a4472 --- /dev/null +++ b/docs/source/usage.rst @@ 
-0,0 +1,597 @@ +Usage +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +This page shows the most frequent use-cases when using the library. The models available allow for many different +configurations and a great versatility in use-cases. The most simple ones are presented here, showcasing usage +for tasks such as question answering, sequence classification, named entity recognition and others. + +These examples leverage auto-models, which are classes that will instantiate a model according to a given checkpoint, +automatically selecting the correct model architecture. Please check the :class:`~transformers.AutoModel` documentation +for more information. +Feel free to modify the code to be more specific and adapt it to your specific use-case. + +In order for a model to perform well on a task, it must be loaded from a checkpoint corresponding to that task. These +checkpoints are usually pre-trained on a large corpus of data and fine-tuned on a specific task. This means the +following: + +- Not all models were fine-tuned on all tasks. If you want to fine-tune a model on a specific task, you can leverage + one of the `run_$TASK.py` script in the + `examples `_ directory. +- Fine-tuned models were fine-tuned on a specific dataset. This dataset may or may not overlap with your use-case + and domain. As mentioned previously, you may leverage the + `examples `_ scripts to fine-tune your model, or you + may create your own training script. + +In order to do an inference on a task, several mechanisms are made available by the library: + +- Pipelines: very easy-to-use abstractions, which require as little as two lines of code. +- Using a model directly with a tokenizer (PyTorch/TensorFlow): the full inference using the model. Less abstraction, + but much more powerful. + +Both approaches are showcased here. + +.. note:: + + All tasks presented here leverage pre-trained checkpoints that were fine-tuned on specific tasks. 
Loading a + checkpoint that was not fine-tuned on a specific task would load only the base transformer layers and not the + additional head that is used for the task, initializing the weights of that head randomly. + + This would produce random output. + +Sequence Classification +-------------------------- + +Sequence classification is the task of classifying sequences according to a given number of classes. An example +of sequence classification is the GLUE dataset, which is entirely based on that task. If you would like to fine-tune +a model on a GLUE sequence classification task, you may leverage the +`run_glue.py `_ or +`run_tf_glue.py `_ scripts. + +Here is an example using the pipelines to do sentiment analysis: identifying if a sequence is positive or negative. +It leverages a fine-tuned model on sst2, which is a GLUE task. + +:: + + from transformers import pipeline + + nlp = pipeline("sentiment-analysis") + + print(nlp("I hate you")) + print(nlp("I love you")) + +This returns a label ("POSITIVE" or "NEGATIVE") alongside a score, as follows: + +:: + + [{'label': 'NEGATIVE', 'score': 0.9991129}] + [{'label': 'POSITIVE', 'score': 0.99986565}] + + +Here is an example of doing a sequence classification using a model to determine if two sequences are paraphrases +of each other. The process is the following: + +- Instantiate a tokenizer and a model from the checkpoint name. The model is identified as a BERT model and loads it + with the weights stored in the checkpoint.
+- Build a sequence from the two sentences, with the correct model-specific separators token type ids + and attention masks (:func:`~transformers.PreTrainedTokenizer.encode` and + :func:`~transformers.PreTrainedTokenizer.encode_plus` take care of this) +- Pass this sequence through the model so that it is classified in one of the two available classes: 0 + (not a paraphrase) and 1 (is a paraphrase) +- Compute the softmax of the result to get probabilities over the classes +- Print the results + +:: + + ## PYTORCH CODE + from transformers import AutoTokenizer, AutoModelForSequenceClassification + import torch + + tokenizer = AutoTokenizer.from_pretrained("bert-base-cased-finetuned-mrpc") + model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased-finetuned-mrpc") + + classes = ["not paraphrase", "is paraphrase"] + + sequence_0 = "The company HuggingFace is based in New York City" + sequence_1 = "Apples are especially bad for your health" + sequence_2 = "HuggingFace's headquarters are situated in Manhattan" + + paraphrase = tokenizer.encode_plus(sequence_0, sequence_2, return_tensors="pt") + not_paraphrase = tokenizer.encode_plus(sequence_0, sequence_1, return_tensors="pt") + + paraphrase_classification_logits = model(**paraphrase)[0] + not_paraphrase_classification_logits = model(**not_paraphrase)[0] + + paraphrase_results = torch.softmax(paraphrase_classification_logits, dim=1).tolist()[0] + not_paraphrase_results = torch.softmax(not_paraphrase_classification_logits, dim=1).tolist()[0] + + print("Should be paraphrase") + for i in range(len(classes)): + print(f"{classes[i]}: {round(paraphrase_results[i] * 100)}%") + + print("\nShould not be paraphrase") + for i in range(len(classes)): + print(f"{classes[i]}: {round(not_paraphrase_results[i] * 100)}%") + ## TENSORFLOW CODE + from transformers import AutoTokenizer, TFAutoModelForSequenceClassification + import tensorflow as tf + + tokenizer = 
AutoTokenizer.from_pretrained("bert-base-cased-finetuned-mrpc") + model = TFAutoModelForSequenceClassification.from_pretrained("bert-base-cased-finetuned-mrpc") + + classes = ["not paraphrase", "is paraphrase"] + + sequence_0 = "The company HuggingFace is based in New York City" + sequence_1 = "Apples are especially bad for your health" + sequence_2 = "HuggingFace's headquarters are situated in Manhattan" + + paraphrase = tokenizer.encode_plus(sequence_0, sequence_2, return_tensors="tf") + not_paraphrase = tokenizer.encode_plus(sequence_0, sequence_1, return_tensors="tf") + + paraphrase_classification_logits = model(paraphrase)[0] + not_paraphrase_classification_logits = model(not_paraphrase)[0] + + paraphrase_results = tf.nn.softmax(paraphrase_classification_logits, axis=1).numpy()[0] + not_paraphrase_results = tf.nn.softmax(not_paraphrase_classification_logits, axis=1).numpy()[0] + + print("Should be paraphrase") + for i in range(len(classes)): + print(f"{classes[i]}: {round(paraphrase_results[i] * 100)}%") + + print("\nShould not be paraphrase") + for i in range(len(classes)): + print(f"{classes[i]}: {round(not_paraphrase_results[i] * 100)}%") + +This outputs the following results: + +:: + + Should be paraphrase + not paraphrase: 10% + is paraphrase: 90% + + Should not be paraphrase + not paraphrase: 94% + is paraphrase: 6% + +Extractive Question Answering +---------------------------------------------------- + +Extractive Question Answering is the task of extracting an answer from a text given a question. An example of a +question answering dataset is the SQuAD dataset, which is entirely based on that task. If you would like to fine-tune +a model on a SQuAD task, you may leverage the `run_squad.py`. + +Here is an example using the pipelines to do question answering: extracting an answer from a text given a question. +It leverages a fine-tuned model on SQuAD.
+ +:: + + from transformers import pipeline + + nlp = pipeline("question-answering") + + context = r""" + Extractive Question Answering is the task of extracting an answer from a text given a question. An example of a + question answering dataset is the SQuAD dataset, which is entirely based on that task. If you would like to fine-tune + a model on a SQuAD task, you may leverage the `run_squad.py`. + """ + + print(nlp(question="What is extractive question answering?", context=context)) + print(nlp(question="What is a good example of a question answering dataset?", context=context)) + +This returns an answer extracted from the text, a confidence score, alongside "start" and "end" values which +are the positions of the extracted answer in the text. + +:: + + {'score': 0.622232091629833, 'start': 34, 'end': 96, 'answer': 'the task of extracting an answer from a text given a question.'} + {'score': 0.5115299158662765, 'start': 147, 'end': 161, 'answer': 'SQuAD dataset,'} + + +Here is an example of question answering using a model and a tokenizer. The process is the following: + +- Instantiate a tokenizer and a model from the checkpoint name. The model is identified as a BERT model and loads it + with the weights stored in the checkpoint. +- Define a text and a few questions. +- Iterate over the questions and build a sequence from the text and the current question, with the correct + model-specific separators token type ids and attention masks +- Pass this sequence through the model. This outputs a range of scores across the entire sequence tokens (question and + text), for both the start and end positions. +- Compute the softmax of the result to get probabilities over the tokens +- Fetch the tokens from the identified start and stop values, convert those tokens to a string. 
+- Print the results + +:: + + ## PYTORCH CODE + from transformers import AutoTokenizer, AutoModelForQuestionAnswering + import torch + + tokenizer = AutoTokenizer.from_pretrained("bert-large-uncased-whole-word-masking-finetuned-squad") + model = AutoModelForQuestionAnswering.from_pretrained("bert-large-uncased-whole-word-masking-finetuned-squad") + + text = r""" + 🤗 Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides general-purpose + architectures (BERT, GPT-2, RoBERTa, XLM, DistilBert, XLNet…) for Natural Language Understanding (NLU) and Natural + Language Generation (NLG) with over 32+ pretrained models in 100+ languages and deep interoperability between + TensorFlow 2.0 and PyTorch. + """ + + questions = [ + "How many pretrained models are available in Transformers?", + "What does Transformers provide?", + "Transformers provides interoperability between which frameworks?", + ] + + for question in questions: + inputs = tokenizer.encode_plus(question, text, add_special_tokens=True, return_tensors="pt") + input_ids = inputs["input_ids"].tolist()[0] + + text_tokens = tokenizer.convert_ids_to_tokens(input_ids) + answer_start_scores, answer_end_scores = model(**inputs) + + answer_start = torch.argmax( + answer_start_scores + ) # Get the most likely beginning of answer with the argmax of the score + answer_end = torch.argmax(answer_end_scores) + 1 # Get the most likely end of answer with the argmax of the score + + answer = tokenizer.convert_tokens_to_string(tokenizer.convert_ids_to_tokens(input_ids[answer_start:answer_end])) + + print(f"Question: {question}") + print(f"Answer: {answer}\n") + ## TENSORFLOW CODE + from transformers import AutoTokenizer, TFAutoModelForQuestionAnswering + import tensorflow as tf + + tokenizer = AutoTokenizer.from_pretrained("bert-large-uncased-whole-word-masking-finetuned-squad") + model = TFAutoModelForQuestionAnswering.from_pretrained("bert-large-uncased-whole-word-masking-finetuned-squad") + + 
text = r""" + 🤗 Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides general-purpose + architectures (BERT, GPT-2, RoBERTa, XLM, DistilBert, XLNet…) for Natural Language Understanding (NLU) and Natural + Language Generation (NLG) with over 32+ pretrained models in 100+ languages and deep interoperability between + TensorFlow 2.0 and PyTorch. + """ + + questions = [ + "How many pretrained models are available in Transformers?", + "What does Transformers provide?", + "Transformers provides interoperability between which frameworks?", + ] + + for question in questions: + inputs = tokenizer.encode_plus(question, text, add_special_tokens=True, return_tensors="tf") + input_ids = inputs["input_ids"].numpy()[0] + + text_tokens = tokenizer.convert_ids_to_tokens(input_ids) + answer_start_scores, answer_end_scores = model(inputs) + + answer_start = tf.argmax( + answer_start_scores, axis=1 + ).numpy()[0] # Get the most likely beginning of answer with the argmax of the score + answer_end = ( + tf.argmax(answer_end_scores, axis=1) + 1 + ).numpy()[0] # Get the most likely end of answer with the argmax of the score + answer = tokenizer.convert_tokens_to_string(tokenizer.convert_ids_to_tokens(input_ids[answer_start:answer_end])) + + print(f"Question: {question}") + print(f"Answer: {answer}\n") + +This outputs the questions followed by the predicted answers: + +:: + + Question: How many pretrained models are available in Transformers? + Answer: over 32 + + + Question: What does Transformers provide? + Answer: general - purpose architectures + + Question: Transformers provides interoperability between which frameworks? + Answer: tensorflow 2 . 0 and pytorch + + + +Language Modeling +---------------------------------------------------- + +Language modeling is the task of fitting a model to a corpus, which can be domain specific. All popular transformer +based models are trained using a variant of language modeling, e.g. 
BERT with masked language modeling, GPT-2 with +causal language modeling. + +Language modeling can be useful outside of pre-training as well, for example to shift the model distribution to be +domain-specific: using a language model trained over a very large corpus, and then fine-tuning it to a news dataset +or on scientific papers e.g. `LysandreJik/arxiv-nlp `__. + +Masked Language Modeling +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Masked language modeling is the task of masking tokens in a sequence with a masking token, and prompting the model to +fill that mask with an appropriate token. This allows the model to attend to both the right context (tokens on the +right of the mask) and the left context (tokens on the left of the mask). Such a training creates a strong basis +for downstream tasks requiring bi-directional context such as SQuAD (question answering, +see `Lewis, Lui, Goyal et al. `__, part 4.2). + +Here is an example of using pipelines to replace a mask from a sequence: + +:: + + from transformers import pipeline + + nlp = pipeline("fill-mask") + print(nlp(f"HuggingFace is creating a {nlp.tokenizer.mask_token} that the community uses to solve NLP tasks.")) + +This outputs the sequences with the mask filled, the confidence score as well as the token id in the tokenizer +vocabulary: + +:: + + [ + {'sequence': ' HuggingFace is creating a tool that the community uses to solve NLP tasks.', 'score': 0.15627853572368622, 'token': 3944}, + {'sequence': ' HuggingFace is creating a framework that the community uses to solve NLP tasks.', 'score': 0.11690319329500198, 'token': 7208}, + {'sequence': ' HuggingFace is creating a library that the community uses to solve NLP tasks.', 'score': 0.058063216507434845, 'token': 5560}, + {'sequence': ' HuggingFace is creating a database that the community uses to solve NLP tasks.', 'score': 0.04211743175983429, 'token': 8503}, + {'sequence': ' HuggingFace is creating a prototype that the community uses to 
solve NLP tasks.', 'score': 0.024718601256608963, 'token': 17715} + ] + +Here is an example doing masked language modeling using a model and a tokenizer. The process is the following: + +- Instantiate a tokenizer and a model from the checkpoint name. The model is identified as a DistilBERT model and + loads it with the weights stored in the checkpoint. +- Define a sequence with a masked token, placing the :obj:`tokenizer.mask_token` instead of a word. +- Encode that sequence into IDs and find the position of the masked token in that list of IDs. +- Retrieve the predictions at the index of the mask token: this tensor has the same size as the vocabulary, and the + values are the scores attributed to each token. The model gives a higher score to tokens it deems probable in that + context. +- Retrieve the top 5 tokens using the PyTorch :obj:`topk` or TensorFlow :obj:`top_k` methods. +- Replace the mask token by the tokens and print the results + +:: + + ## PYTORCH CODE + from transformers import AutoModelWithLMHead, AutoTokenizer + import torch + + tokenizer = AutoTokenizer.from_pretrained("distilbert-base-cased") + model = AutoModelWithLMHead.from_pretrained("distilbert-base-cased") + + sequence = f"Distilled models are smaller than the models they mimic. Using them instead of the large versions would help {tokenizer.mask_token} our carbon footprint."
+ + input = tokenizer.encode(sequence, return_tensors="pt") + mask_token_index = torch.where(input == tokenizer.mask_token_id)[1] + + token_logits = model(input)[0] + mask_token_logits = token_logits[0, mask_token_index, :] + + top_5_tokens = torch.topk(mask_token_logits, 5, dim=1).indices[0].tolist() + + for token in top_5_tokens: + print(sequence.replace(tokenizer.mask_token, tokenizer.decode([token]))) + ## TENSORFLOW CODE + from transformers import TFAutoModelWithLMHead, AutoTokenizer + import tensorflow as tf + + tokenizer = AutoTokenizer.from_pretrained("distilbert-base-cased") + model = TFAutoModelWithLMHead.from_pretrained("distilbert-base-cased") + + sequence = f"Distilled models are smaller than the models they mimic. Using them instead of the large versions would help {tokenizer.mask_token} our carbon footprint." + + input = tokenizer.encode(sequence, return_tensors="tf") + mask_token_index = tf.where(input == tokenizer.mask_token_id)[0, 1] + + token_logits = model(input)[0] + mask_token_logits = token_logits[0, mask_token_index, :] + + top_5_tokens = tf.math.top_k(mask_token_logits, 5).indices.numpy() + + for token in top_5_tokens: + print(sequence.replace(tokenizer.mask_token, tokenizer.decode([token]))) + +This prints five sequences, with the top 5 tokens predicted by the model: + +:: + + Distilled models are smaller than the models they mimic. Using them instead of the large versions would help reduce our carbon footprint. + Distilled models are smaller than the models they mimic. Using them instead of the large versions would help increase our carbon footprint. + Distilled models are smaller than the models they mimic. Using them instead of the large versions would help decrease our carbon footprint. + Distilled models are smaller than the models they mimic. Using them instead of the large versions would help offset our carbon footprint. + Distilled models are smaller than the models they mimic. 
Using them instead of the large versions would help improve our carbon footprint. + + +Causal Language Modeling +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Causal language modeling is the task of predicting the token following a sequence of tokens. In this situation, the +model only attends to the left context (tokens on the left of the mask). Such a training is particularly interesting +for generation tasks. + +There is currently no pipeline to do causal language modeling/generation. + +Here is an example using the tokenizer and model, leveraging the :func:`~transformers.PreTrainedModel.generate` method +to generate the tokens following the initial sequence in PyTorch, and creating a simple loop in TensorFlow. + +:: + + ## PYTORCH CODE + from transformers import AutoModelWithLMHead, AutoTokenizer + + tokenizer = AutoTokenizer.from_pretrained("gpt2") + model = AutoModelWithLMHead.from_pretrained("gpt2") + + sequence = f"Hugging Face is based in DUMBO, New York City, and is" + + input = tokenizer.encode(sequence, return_tensors="pt") + generated = model.generate(input, max_length=50) + + resulting_string = tokenizer.decode(generated.tolist()[0]) + print(resulting_string) + ## TENSORFLOW CODE + from transformers import TFAutoModelWithLMHead, AutoTokenizer + import tensorflow as tf + + tokenizer = AutoTokenizer.from_pretrained("gpt2") + model = TFAutoModelWithLMHead.from_pretrained("gpt2") + + sequence = f"Hugging Face is based in DUMBO, New York City, and is" + generated = tokenizer.encode(sequence) + + for i in range(50): + predictions = model(tf.constant([generated]))[0] + token = tf.argmax(predictions[0], axis=1)[-1].numpy() + generated += [token] + + resulting_string = tokenizer.decode(generated) + print(resulting_string) + + +This outputs a (hopefully) coherent string from the original sequence, as the +:func:`~transformers.PreTrainedModel.generate` samples from a top_p/top_k distribution: + +:: + + Hugging Face is based in DUMBO, New York City, and
is a live-action TV series based on the novel by John + Carpenter, and its producers, David Kustlin and Steve Pichar. The film is directed by! + + +Named Entity Recognition +---------------------------------------------------- + +Named Entity Recognition (NER) is the task of classifying tokens according to a class, for example identifying a +token as a person, an organisation or a location. +An example of a named entity recognition dataset is the CoNLL-2003 dataset, which is entirely based on that task. +If you would like to fine-tune a model on an NER task, you may leverage the `ner/run_ner.py` (PyTorch), +`ner/run_pl_ner.py` (leveraging pytorch-lightning) or the `ner/run_tf_ner.py` (TensorFlow) scripts. + +Here is an example using the pipelines to do named entity recognition, trying to identify tokens as belonging to one +of 9 classes: + +- O, Outside of a named entity +- B-MISC, Beginning of a miscellaneous entity right after another miscellaneous entity +- I-MISC, Miscellaneous entity +- B-PER, Beginning of a person's name right after another person's name +- I-PER, Person's name +- B-ORG, Beginning of an organisation right after another organisation +- I-ORG, Organisation +- B-LOC, Beginning of a location right after another location +- I-LOC, Location + +It leverages a fine-tuned model on CoNLL-2003, fine-tuned by `@stefan-it `__ from +`dbmdz `__. + +:: + + from transformers import pipeline + + nlp = pipeline("ner") + + sequence = "Hugging Face Inc. is a company based in New York City. Its headquarters are in DUMBO, therefore very" \ + "close to the Manhattan Bridge which is visible from the window." + + print(nlp(sequence)) + +This outputs a list of all words that have been identified as an entity from the 9 classes defined above.
Here is the +expected results: + +:: + + [ + {'word': 'Hu', 'score': 0.9995632767677307, 'entity': 'I-ORG'}, + {'word': '##gging', 'score': 0.9915938973426819, 'entity': 'I-ORG'}, + {'word': 'Face', 'score': 0.9982671737670898, 'entity': 'I-ORG'}, + {'word': 'Inc', 'score': 0.9994403719902039, 'entity': 'I-ORG'}, + {'word': 'New', 'score': 0.9994346499443054, 'entity': 'I-LOC'}, + {'word': 'York', 'score': 0.9993270635604858, 'entity': 'I-LOC'}, + {'word': 'City', 'score': 0.9993864893913269, 'entity': 'I-LOC'}, + {'word': 'D', 'score': 0.9825621843338013, 'entity': 'I-LOC'}, + {'word': '##UM', 'score': 0.936983048915863, 'entity': 'I-LOC'}, + {'word': '##BO', 'score': 0.8987102508544922, 'entity': 'I-LOC'}, + {'word': 'Manhattan', 'score': 0.9758241176605225, 'entity': 'I-LOC'}, + {'word': 'Bridge', 'score': 0.990249514579773, 'entity': 'I-LOC'} + ] + +Note how the words "Hugging Face" have been identified as an organisation, and "New York City", "DUMBO" and +"Manhattan Bridge" have been identified as locations. + +Here is an example doing named entity recognition using a model and a tokenizer. The process is the following: + +- Instantiate a tokenizer and a model from the checkpoint name. The model is identified as a BERT model and + loads it with the weights stored in the checkpoint. +- Define the label list with which the model was trained on. +- Define a sequence with known entities, such as "Hugging Face" as an organisation and "New York City" as a location. +- Split words into tokens so that they can be mapped to the predictions. We use a small hack by firstly completely + encoding and decoding the sequence, so that we're left with a string that contains the special tokens. +- Encode that sequence into IDs (special tokens are added automatically). +- Retrieve the predictions by passing the input to the model and getting the first output. This results in a + distribution over the 9 possible classes for each token. 
We take the argmax to retrieve the most likely class + for each token. +- Zip together each token with its prediction and print it. + +:: + + ## PYTORCH CODE + from transformers import AutoModelForTokenClassification, AutoTokenizer + import torch + + model = AutoModelForTokenClassification.from_pretrained("dbmdz/bert-large-cased-finetuned-conll03-english") + tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") + + label_list = [ + "O", # Outside of a named entity + "B-MISC", # Beginning of a miscellaneous entity right after another miscellaneous entity + "I-MISC", # Miscellaneous entity + "B-PER", # Beginning of a person's name right after another person's name + "I-PER", # Person's name + "B-ORG", # Beginning of an organisation right after another organisation + "I-ORG", # Organisation + "B-LOC", # Beginning of a location right after another location + "I-LOC" # Location + ] + + sequence = "Hugging Face Inc. is a company based in New York City. Its headquarters are in DUMBO, therefore very" \ + "close to the Manhattan Bridge." 
+ + # Bit of a hack to get the tokens with the special tokens + tokens = tokenizer.tokenize(tokenizer.decode(tokenizer.encode(sequence))) + inputs = tokenizer.encode(sequence, return_tensors="pt") + + outputs = model(inputs)[0] + predictions = torch.argmax(outputs, dim=2) + + print([(token, label_list[prediction]) for token, prediction in zip(tokens, predictions[0].tolist())]) + ## TENSORFLOW CODE + from transformers import TFAutoModelForTokenClassification, AutoTokenizer + import tensorflow as tf + + model = TFAutoModelForTokenClassification.from_pretrained("dbmdz/bert-large-cased-finetuned-conll03-english") + tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") + + label_list = [ + "O", # Outside of a named entity + "B-MISC", # Beginning of a miscellaneous entity right after another miscellaneous entity + "I-MISC", # Miscellaneous entity + "B-PER", # Beginning of a person's name right after another person's name + "I-PER", # Person's name + "B-ORG", # Beginning of an organisation right after another organisation + "I-ORG", # Organisation + "B-LOC", # Beginning of a location right after another location + "I-LOC" # Location + ] + + sequence = "Hugging Face Inc. is a company based in New York City. Its headquarters are in DUMBO, therefore very" \ + "close to the Manhattan Bridge." + + # Bit of a hack to get the tokens with the special tokens + tokens = tokenizer.tokenize(tokenizer.decode(tokenizer.encode(sequence))) + inputs = tokenizer.encode(sequence, return_tensors="tf") + + outputs = model(inputs)[0] + predictions = tf.argmax(outputs, axis=2) + + print([(token, label_list[prediction]) for token, prediction in zip(tokens, predictions[0].numpy())]) + +This outputs a list of each token mapped to their prediction. Differently from the pipeline, here every token has +a prediction as we didn't remove the "0" class which means that no particular entity was found on that token. 
The +following array should be the output: + +:: + + [('[CLS]', 'O'), ('Hu', 'I-ORG'), ('##gging', 'I-ORG'), ('Face', 'I-ORG'), ('Inc', 'I-ORG'), ('.', 'O'), ('is', 'O'), ('a', 'O'), ('company', 'O'), ('based', 'O'), ('in', 'O'), ('New', 'I-LOC'), ('York', 'I-LOC'), ('City', 'I-LOC'), ('.', 'O'), ('Its', 'O'), ('headquarters', 'O'), ('are', 'O'), ('in', 'O'), ('D', 'I-LOC'), ('##UM', 'I-LOC'), ('##BO', 'I-LOC'), (',', 'O'), ('therefore', 'O'), ('very', 'O'), ('##c', 'O'), ('##lose', 'O'), ('to', 'O'), ('the', 'O'), ('Manhattan', 'I-LOC'), ('Bridge', 'I-LOC'), ('.', 'O'), ('[SEP]', 'O')]