broom/0000755000177700017770000000000013204551155012723 5ustar herbrandtherbrandtbroom/inst/0000755000177700017770000000000013204542661013702 5ustar herbrandtherbrandtbroom/inst/extdata/0000755000177700017770000000000013204276216015334 5ustar herbrandtherbrandtbroom/inst/extdata/8schools.stan0000644000177700017770000000055713204276216017774 0ustar herbrandtherbrandtdata { int J; // number of schools real y[J]; // estimated treatment effects real sigma[J]; // s.e. of effect estimates } parameters { real mu; real tau; real eta[J]; } transformed parameters { real theta[J]; for (j in 1:J) theta[j] <- mu + tau * eta[j]; } model { eta ~ normal(0, 1); y ~ normal(theta, sigma); } broom/inst/extdata/rstan_example.rda0000644000177700017770000225132313204276216020676 0ustar herbrandtherbrandtXUMHwlQQPIA)IQD@TDBLRLE(!(mU=}|^wg?3k6ۥL nG-\mIHɹ9sruptś~Cnm>\-ܗ|P.!s\FaIjabY4@-l~ gB ɿ0/b7RK2,dEa,1;8-e~+f֥ڀb8")}dYdzn #JP.ۏs_LWgve!oLBi7A EqZ V7?[dlyTbc Ծ3[ʽʳPUSTaNATD*V o(rbi{-н&K@j#\M:WfM6J )Hݴ`An*s6vȔ6xM*ٌ2ԧAν}p%RQܙmiyӍŌJի@ۍ $`3[R5W{x%] t 6AWfcvDr~ٷ؞MŅzeഹu'1ٔr :WBN$UѬWJ4 #b4! 
$P$%HSL~ TwHCͮ'Akq-[{7d.k^ O@ŇF -Ȉ m kL j" (ًFf_HK~6PH؃3<2ޒCi-~0hMY:}"MVC챏@6FP :3[\Ȣ7(f;^`7uSH^ېiU{ŰBOlv/FjO F(!=p0gF wo-EX00Vz-z'MKO:!a{h`7ƨ] WiFժRh@s:BQPK!w9L/-1eId$1u'J9w^@p!|9QP9$ }l`ƘCT֯YaLr'3vS:OH畔Pom~$6a{ |$IȆ8jhi+N.D Nġ)g7 `|PIﮎ&EQەMLG YȶbK |o$#FesZQ:Ʀ޶Aa :eAͧ;j,ǡZթVUjS}ڨzh} V8_[W\5Uvuv>01sj ڍе4;-A$hXUNzpR$sb4(Džz*kP!_['$m$۸W~hX_joJ}lM,R 4U/+_*qA_x\)%h_>8^$ .yu^|.K8P+m%g P8x^%$<}]kkQ.9D D~eD7xZnz*6_yn%Jl@S?(~lKPs@$P=n@TdUj},F,3z*48팚ڐ^pds9™sI*PsdGh#* P93b"[Hdue'Aۨ'!LheJ7> JN5Y/ă|˷iyQ}/W8,2W'8}+zHpTѬ=O ~m(*7?PuŲc`tv<5WOj-]m+' fse?DIUw?6]ҠA+Vhc놲0&)}Ҵz D-..M\ʈ유d@aCxu/4kF6B$<>7"$ Wu|Wzu7jO .ر]>nPi-1F)\7@oRאBlzF?p@# ̃ >p>Mث:΃';9Θm u">w/*"ѸÅp4@4*˨_6DmvW6'P?Ym\GhLwAF"b>aj82/&+a$?WhPT#iMI7)~SM_?39&`@[p JMutpF$k?~B=ܟ^ٻUbYvT6 WHE"jh1qY;cu9-& ѤT]AީuˡMOӢm [^k#j8эZ>Xt\ ']bj!$ D~M8LaJJ-/X#W.ANKA=$imXxRc=7`Uc,v'{螭lE{TLItOߢp ˌ/OX6ay`RCAE2ҚV&HݷD#N=Ce ([a2M&V%d˜@3/g{,Ch䠵C$I;-T p, èꍂWXr.5haa~h t˖Ϡ2%}؜H625!j(mbҨD[ TNWC D$N[Q{.#;`c@i;v>_UɬJdpvZ`>p.G@B@/ٽ9:&w ]Gcdn|:NihsSMIt((#צ$9Q:;KPϻ:ZT"ImpdݾmMcY ~&z8a7潨Acխ۶P²``*O34S|Q^x@eޫSyOڻl9'XeP[)9#n6"k}MpD29pI)*SEk fg3=y5~Ad]I&UxEjnTz9*#վ]%{6ԮnĪaO&14S)ϡu MW~jbžDď$?''}D=<^/&yOi˅84O0[𸔦*׌Q֯IEsOAMIq)4|q)"rX[Āf` [P?x:sQםW”@3|ĝZ{q~3Dt|yFJB\}Awprv47ٳ%?q?mmu`S,R܋C n9NP>pzv b@#wTV9]Eb^T*z->i6TJJU&y?w]uH ."{W( a*qP+&hvܟV jbyFَ×,hG64ON()e,:#o3CviނV84AD5Sv!c+hր=IJ\>$ɔ1a I=JfWUwb,eIBQyQT?8e/g۴Af[}j-;l|:Vzj}~2ءp#3FȪtڲ}wnARNd"$]PY7[`0GZ=E3*1""Y(A·?ŒA`+"q;SNyY&TИQfp=vJ N`[׿*l=vLGVA)<ƙ57էva"Al'4RfV*$ԽN}GܳvwhZcNvyUaBx7$r:r6{ͣڠ<Pΐ#Tr[M'he<} } eeG46 = ƙ7z 6#*RQJJ~иcڑ}s\.P|(݀*D^U僎j#/k5}M:jL<9e tXx]Hʀj"ֆI܅FS mI"_^3J~➍`CCs$z3/6:NVT]:&thcD -ʸUz> W/5WȒtR'>P,tZ+:j*_N*E+nYCz\D)Lܦ寋Z%>oXFzT|/s}S˺[g4 ]ZVGR=2B1q֟$["04n/32D@+'ԀpnI.W]Q ٻ`PXguh%$)6knf0ϹMh7`͏$F/)˘t< 6Ns/$Oc_ރis@goʵEYɍ?~F~=m)`*ZB￀*jY3eZ[? 
=a{>*M~ǻv0`LS]u|,'OeO 0YN?Pu ,xZޑ^:m/ %A%2dkÔ(7Uކ+ـvS5$qħ9 Fw9XCφ(|A;CJàH-Wi'j{a_N1{vVFe1r\өt +?*>p ?uI$W!gfDvOn)M'FyOUUgjV~ub.!lb~y;O?WlQiHzc+>W> ``DӞYJ%z?AwqLO/=fAAރ[6^ItEx\s:nρy'AvgahUFjo22^:ˠٱ - ԮXiz?@|PUBuL*-?hx{"F0L;ېڎ7ҳG_pd;َ̛&n%Ɠ?@UJR 2CΆ2)3',&j Y&ո֔5]@1yZG@[e؞JPBuaky0P0+}<2=v}Ov$]/;^H! 3'/rnj:Hڎ=CEL"sPigfpZ}h!rP0/H:廾:M{4C C6x hk>kYeBV;6U<ŐlnJY#(lX 9;3 <ٵӯT㕇@d#ull7OzK7@I2MPSSawjܙr;b`BƃP~m CPLyʔ&^zN7%ɣklT~[J% ^qMCLv9u %13C$#ڞB^MS2_fw\.&ՕoiH!# PeU<`w\M 9݂5eCu㑛W)$P:ǧQϥfAֱ5$W2xLW$̀b6H!O(ӥ$xZ"nɽj =Sl&M8+UUB:{ CQ}-_9"&:@,}ll2Gh c{^O"qVwմ>3l59u3wCvψda(h-k Po7[]7!]#ľn/*cJ:SSqR s\y nbF%[C@r=␅1-/WpȗvL2).,}bwER*ݰd:h|esj;A)txC^E# H]1_P^6 ~8*F+[6ߢiL?Ro m{&w.՞u5+ْ ٯ<;sM=I8/ŕm iRe4m%Y{|I U8 DNׁ]4&$P\~zM?qLH| _8 i% _WfԉA'˦CનhJ`Boh7n!NMa:w/ 2'SnWoŭOPBy1 w')uqDNlinzicS&}\T@w=~"X$[/}xgÂ?oJLж+*5AYWgE!{}{t!g^ B(,4l\} xQ45ysFD↧X &n$y6|[+\u\L"W7_O؉>P&~"[1z=W*NWM)doܱi$=ӺHg'FPQ%){? clPpR܀=?'S%%*BYOG@ `.!Ϟb,=־\Ӌ8(K{o$kɕT?Ï_6jr+7M򗶳>"δtccM#JDv.a^ 7LBz=jǵZ;n׫67_tIzTptERHGSr6ml+R dt8HukG^@B:HqQP}x<!mwNI_L6g77T{LܩU/lLlJž5PqZ[Fafa*I+#GїU%k ys8 hG9Ǘ10Y-|jŮU};# KWzɮq)eb|t3 !./>dWwq'ս~Z 9R+!G Iq}11`ϥe wVurQmĹCP_(PLmI.ʹB:ܦ!LYx2~0d3SWU)r0toQ^׉,EhqkC+WTZ C( }RI>4 A2]6ɁгL}fw LPR; (nNi$Sn_L ϑy;=1U{iHV;G@ښH]eT_S0憯{ ]HηIk1э2E_y;f >cDs"pp3_QiƘy)Ad<2v=t#UI"SN?JzqԎ/Ҭ O`#f!٧'Im V ZVؔPFft4nV{=fm?>Ut2hfkguW8ѕMBfQPB[B̚pwF]bkUi'D4Gak y3&NgMw-!kp-3_tuؽ9 &8MB9~SQ`7|aGnpŵo'8@\/-_Xe,)o$슡ә\8K7 k߸~ GcsێBƉbnU^G=.A@akzAmPgPpsSlVUha (] d6DUQƐ%l{شڛ=J RpeKn+=N2ˉJ r4e|ٗFpy*`AWdg_0?`\5J3 {ͻQ0h|KDzҗ}ҼE C}nB{NkA.q WrQ1]l1q5׺o@\O"w~PEU}1S F}[Ky.B$.> MC}:{7@ڳ{;MҾ|bBD nqQJf^tGEvd\^[=P _6 PGgA ޒE+%Nu@uÆvñV"tf'*VM1Fn$_PXh >+(j]~=mMB5Wv,m^N3+ߝD )M۽vW\ڃʆL| ~J U4>dH>gMdieHSP"cB&{:(g8G o{-@>d&KЬk-!Ҝ/d֑ #!T{TIüCu]li((zV˼-Ni)T0Uz$G1_ d9ٺ"njL[&bَʊCiK;0i"3+ ͭ]) YfW>s? 
ݪrd?ֻ~|MGF5AC2o[$*%lZՐ.rjMVHHufMQHݧR^F]۬v^4s_2-X.;sM϶߻OfݸO>$|H]܇aB63)[:7@YLDlbwRжN/dj36Ifl^j#P8sD<Mex>` R˄G: | kdJ7Ɣ5Jb/@1> Jo`[t |yI1_v~^+#.'5X3jjN1Ci;6,e7*lzw *&}UhwGAEEҕ!Pq:?IZhSrt qhYVje e>f)][IK/whڤCPͳ.I(wK&dab wPjQg`4vk[2b[ɊTXG.P o|ƹC\hpXxnue+^U= Cܔ#t-o׳M:9 XznR%8LQBUͨTD*<Ęp_ȚGw*3Pqsɵ P<G(+~sc~i!Kȵ#n<b2GU W0}hMl'bҎ7{9X LdqP+tgcQ\q;zԇ0A݌*t2! il <{cvZUk@ .H l6;;qCL@J7#~zXJViĤX&ﴟ͛dὟw&T^r> ymAAش.̫͎۝y&+Ci .ֿQpEfBW7SBBF(n+Ua% +M&F19CBPڌ(gK B X.!{JL2 +~q[[cPIuc[a(š| Tɳ>3ռo4rI!sOKDR^>Ƚi 97Pɳ,ZISt{<\i{:3Ϩ PʻPӠⷆ2]zȘ4Bad <#Xo7{Bg~+]ךBŐaޫih=2PeU.ܭR'.HCi۲ijAF |re8*94Wm F%b r_1k6UBD>#5W ;Md _-cq*9"&!M5[Q>&:4|ΓDԛoW^g .T|‘C[dα}y'q5r7 dz+-cm|+%ݎAѬG}2K5|kG='9\Zz8+˲ #!:-|gK[: [+n NQ^EU}*m8[:lml=q.v8g/[7w[; 8w$?edutupX5ytЌ|fhP Z_ s)t%?÷}`Cz`Y3 8PCV@^bpd(0oS[5ISY}U`]e`SMck9C )Lz1iy&^]6?=\ Y\>_/W)0X^BMi,`HoJ0|7^VXlWg*WccaHB] 傏G- Tga!P/tVrml*f7G4Vﲂ~;|YyطseGV *wۢεgb+8s_O{1W7mr@3Ƣ _la_A͏A6^P} QMJ«oT?Z@9zM_|ZEj cC=x2qM@-|evh#{pC0t$8  F:u= #]wy'g&@Sp|h}?h\dԍ#ut*#_&3>SDչh{+_AB;!%Beٖȵ̼$~$Y?O~y4ww-.OO_.7epI~5VOS\Mv"|'??i˹\n-O17?O݁?-?q?\n]XnvBt~:@8rpOR$Ibpptss9H[laev2Ѳc7l=,(٤I/U~1b h#%222efefefb?'Y({)AýRxPۥʭyemߕMV ezn4eLݮ[C7}~WAJ(^ǫ} --NB+yXMYPO}vF ǝf ػ$P䈅ض"yVB15vZrn .2sA9&9W&(9X@8ɼQi&(Nz? \B&vNY(CaRiZ(¥EJh''w)eݘ|Jk{̻Չ)w)Avc$'BR\TZs8mJH2'!nfK Y;tNkFnMs:^0۽BS km,ш~cghRu>)TiBO A1Cc:IcYuBF>Fzuo&G/9ބ>cRܽc\0>FnFqEb1Fzy-.2$m@zPՃ==< KW85q4r5!9Nj鄳Ak*>:r }% =T. |WC.uN(B'H[u ;فFd ꮮjSsBv ivѳѩ ?s6\FyQUlh1(3+E:YFh'h iz"ﴇʇ2W@Ѷ-LEsG wn/\M1Ha X/Q+ [W |=RȮʪ*)9Fl+ৄVW̷@s=2jCPH_8]$HNl;< DJu>?ne`+ڔ ʞڢF\=QIP}tlh)xڧ~qQY[RwN#/λ"~HY@V{ewt]l V *ZQ*dߡI$yUT.~ȜOZ 㬽+ڈ;9ftR0!>Sz8y ?p RwwʙBcS‹tXx2(f_uxJ\?=Xضxy+w+N6AQRӭR1gKT */B1lbPX A) -.8Ԇ}ȵ .SƤRnrD1Ю $9*]h /QJFʵ`EEP<)~(C={s+,|NA:#WEB~f L oNV)LoMۙl!oP4t:|2a~r/J+n vpc+LB$Ҕ7PmA^LUb~'#;֡3E r}c?\Tŕ g} ? 
Bî`l@v ȑQ Ѻ<w?\%}uAgPeYF*bZ}HVλ`t1lFCW˻a^W`bfy23QΨc`;{!Le\݈ 5ҬUcm>%#[-ds|:wy0*^ŨF[_>bEe~B[2N2|ye<ʽ"ow3BŲ5S=/TV˝ZAw>݅rF^}u3vU߁: v7>d]é %eha 1JqVEhzg6g4ƽsewGf 21m+8?fGz.;BzC;8bV#j>k  M˪w@<ܐsej} oq2@w ތOю|bu+;o| 5Zw zw͹|H ni 40Fk͆HixX~TJM;x,PqFeqOdV VN*ˣvFchAGȪrz9ӯG~_Q6}(h SB:T$h[]`-5VA%ի j3Yr}gPjoĆ^mbۡ*(7t%ik˾u-b e (l0oZ؜|st3UFF5oOښhy\>l2l.NVFAz;ּ7FTiA En')#~Aą34ڠdD6h>YjA3{Pko>ޣ fobAYG036d}*=d >h aU᧝&j-n#sX9Զ1h.U`Z8JAm+IIn@ t4 }̨o5Jk@n ۣ80ddh:( t;{V#LhS ϓ8'D }vGBP1c##UvY,Knz^Ԝy-ػl1 7NWP_. gjL.NѠ|gu*Jib;tiPUu`QtfPaBڬQ99@tlj641l"hIFѹ#̾?vJs2 hӣN!O'`l1K!)}RJP;7>Gۓ D|6tZRjʒ=3k.e ˄׹^F&/5*Wn` ;=m5BlGR$6lnz`\S }qGw{] վJ$j$Yv_ԛdOaYB$r=A_f[n cTnk5G_,}{xA+#;xǍo^M0Vbak8P 졛'rzV{"j F:GAa'ەl]P7޷;hsAGyNK0@5.:*5<:IrߪZr-ख़Gt(ۈ?aed=IF7ȊN)*> pG>c_@%cvЧuB8pGu$`d^vFѤ ;T7^ '" nw-A.?w]o4h t@.䋆VW?@2i7w4;2$ 0Ϫ+5\V! |̜σVn,2dDqe$hD"?doy%:ʱY˩TfGUdU;܉_fp=w3R-TJ6a֓vL/g8Wd֠*`dEhx:dwsQPC\0s 'g;(A~}RCQQ=22i4\p*u` wԬhwR4\-^AIdlQ`eR]O(?*J h25g+>:7Ps5;{L .kf\RW$5CkK6%gc÷xL VOYutXpygFKѰlIvT2TUcx+JR0O}fr, Ul=PF@?Las0 xVy?/ ?y]=2K'f `dJG0ub 06D*ſȖُퟁkP.pVEʧdtS.&حYr0VM쨗죹\'JuI7z%ԛAg$MAka}[n\a{Kl'6v.Pc=F¥t{ewin5 XCˍ|ڷ@h񼫨z@ N𾌙|~ty0 n{EFO~T7 =_qGHBMV w+[ւ{ZvID焈>kLcƞ܀+OR9Ѡosu&FgRP7'PK֎q\ȞSAa^m^è׶pչ:ؒe*CEjoDu;t̉iO(sO=.:7%P?t, 4S'ID-j :R!Jr%5 UU(΄(ʵ' ^qE=G5Wܳ>C]62zQB1>1G%z/snt>usBky}Tw}zvm"h|V#h ʷُ}Z^3ϨhWD]$G!}m/C$ݹSz3ᦄu@ *em:"i4tTvRA5MǶmڢ,ugCX=L{OZN?:V:uT}jUqr&UZ0$?5%uVF#QIW9`*EC&fjˠӽ78J٣ dyЌZe!4z)-ਫ਼Ԭ#BǏx,n$ &C~k4QjՃY^ Gh*%{4_<{)\·ZS(:Qz{M^h4Y¸r{pb6頻(PIaBP~^7xIsn%D6+N3~fJ>yr%1("["6mqv 4Y!%dGd2ơ1j'+UI^h=`Ndu,KtZQڵ[h2jNZ,_Ez j6qZ\Mĸ| Y;J6ya> DwxXQ[Ei]_$@ᆘPZ)Th0L(J ?`r]S"{-j e(%}Cߨ]hF{mގ A_Kʍv ͦݨS[mô>n;bya xWVd}sM;RyC͡۵ D<#TcAw2>hjo$plA )fL@ܝi_^rg nRyj9azatqKMbnڋ@x$5ӛEc:ܩ !I;@\Y_6*_{tAUQ邚$cWEJjT/W*Q8[TA|]R4FQ񠝓}v[̎!Q0P 2 {Su[@wڮΛ`hv 10ruL3VrN+B :3;Q7 `;x=#o) :_LzM.}qկ 5sU)WATIvTw"ju:JO0}gF0۔*hx+]ty+4ӿ F7 +"@aC?r索C&F!U'H%oSiR8=ym9f\íceFjrakBxN0xu$dF(_E),r`yVkb;~ø=Ӌ@0BJ#JLY<'6"}eq*ݠ}"nnR=q8uREII|M݉O/>\rR !jJx!̧cJfE03'"@SP1 %J4'M_1pN:b[=☯^x :_jݹF,ޕ{"}[Cj=[oF) z+Rǚ%"jV#^4>iJWLjJN;{抗+(K5CMk(GDOv^}b 
Q_g]!)%=[!idY@Rnz!hG4PʿYhkǡ~;[QqpMTE%ԝO_㒉b`#[lww+E Og- | v3/C]b/װgqp*:?JZͺ#ޡDX40-bm8uւ|]ܓ݇[q1x'U]D*j}fhQEvˍw#* EAV>ڻysFh@R* SJ0ܚ&>M[ecneHE8G/zUδ&sv-f{eVΟ.V3n% L۱`nv),%N}+bb-\۾6(˭wPڗ0T1(7 *q3P+"e TCy'nPt7Tt2QW/: ;L͹0A*(N`sG ʱi 5A! Vd:sh 8r$"% ޯ}z'Ah{6P&WYpuң-Dw~x~F(~n($'èĠбӬ@1{(X)g"U&GmNJkȠM#r4P@V;IMtHYd6C"Սӯˁ$-W=ф"fTL+9eUj~;n 6}SluPٝ>ƌ(V ww ѹ'jWh-u2I;(26 Qj+ߡ|ܴ4PfGNP@ű{dgם˪y(Í%caa#a( _Ok8Asi+/LkY:&@96?R@*5=QN#yd!Zw͍*NPƼp,-bEw6_avLQݮh?4E:x&rUuC }6 {Yyԧ ~v6;8 q[I dsDJ]dɉTM ǛP yۡcEfCSMM 1<'(bõPڽY[6;J]g= eCNWѣX(z|䤵xHqG `ÈA"$@UR܆\bcݾP7JEH&%'O9ͨ ގΏ(B)  U+UBI4PwB] w(ryMz(q=YwpI>$ ʷglM Y%""ş [}%[ C3#3Ɋ,]! v¼ :*jwڨs^ Fݍ?@y2fJL`H+VբK%]W0+e;!]ے`j{g4 lң1Psu&? ~і#dRI qߪj z[.UHw,!qkI 9 ks~7P"`CNӜu=?3ywҶp@k.=g shp&2.HrMWFmD*B^QSzӮX6*<, lfWyb;7;,M홼SP~jsУxzFV !:={7/g̓PiG lA{=%xà3T4}:8fI4HѸ:^-T8CևUL>|B0]/Cqn)P9*-8~v%=lD=xhz ư6&#BE6Pdzĭ[)-ﳪBz]yP׫y(ĦHHV6Yoاaz[@c P4V(3\> \HHQӐ@ o&*W4i|axi?PNx[>Q|F>[V@[ *d/tM5@9LzȲ2$[#utڮ|cpzY*`odcbdI_C#Y]ו@=s&ՖEĆv:H:>Uxz~4QR9.[A}cC~lmF{ψ < q Bk$CҒdrUC]F-` ZK{AT q]Z +6Zq^S02[ ήzLa:4ږ EjeƂ^JN~YW7|`, X '&Jo ̬dm LNC!3O[bqJ0s>kH+u|v'e(Ig7 6(5}x!dS,8Lvc (@E#Rh!sBڛ`!C(]~A6*fO/b ~tC@({|A(nYw^ڢAHɪ׭QGэM(m7BIuP#䩺Pqlw(D~5D BayrM^:sM/iEgoAW?K }9$Jfm 3byOU`&pڷaaGbfe&W4BA]}Unӑd̙ѕiH_ڴrj27g/ B Cнԇ'IP.6Ĝ7Uhd뿷 0^ A^lV?;22ȶ3Bޖg8:yCJLn[> O#V ^Q q)Wm8Tg9(aztH-[_ՀY[% )~3j>T0xfYsώjU#IMsݺ!Mj +Β_F_ƔA%r.wy3qZ;bl! eV0v7[!O26(4<\YٕUD0&R^eͯP}=d{Ij&-%הxC~-ʏ%)<庠k}1]I[̳vq_.Bcu.ӓ@$P*b riU1?J(ބ>?!.$+@Ji>iզ&P<4MOvfEDe@L,Ľ8i* KvVA #Czm1~.1~2]S *( (p5Pv`(5!Pi5(B}0; Bjw>?FPt5q⎶ALģCOS9Zk~H$AUZOk bNTcjnq]"!ܐ{VMN>sk҇l5Yk=f2޺^ 5)D5PQ=^xyL%돤bԑ<bFPH/ȀÊ c+/8%N=žhAEC@ՁnAG;ڱoD0q$c ZYFN9 ĭxվQPKa+p{X΍昈ȠwmP/|޾HZtkc Tus?cս0CDʼa ]Mn 9,|oBc(}*dЯݗ@ h R8`L{^OGyEW)NIn;0ca_c0'zDoeQfmQs,"Eb$*dșs` n;&? 
:r@+kTX֌aFϯT>-SS`46kE ]oA+prhkz;Icuw8 䦇5W0'0 9cje|BQgކօbo !Kcʭ59+;(xԅydL3~:z/=ay.M>w-ʶ߆,eҍ[Ì{/`t6!]k :\#pIӓֻ;UC:r2D*G8dlr5 kVܵfOץPf m2A%sաX dt6T+$o 4i6>T _\_OSmۖYaezuH [\"U]}(6l5T"^O3l ckPuõPYC' @IhJJdZ+@ w?G xlJA^G1(ȂLLoA N^V(l&$HC[XC\7I ofPF33 azwNT|{Fa!}j{!۾C58Ty'^YOquװg%<KY߸Xġqq.~XT>كܰ3T`6g{J_) łߏqnʔABz!Ɉ su3V9/& } N&H [@fNd& ^5 Uy?*Bb9bnA)*q?R$ݝaPhj/ Pë3M+\ raU"y>٭k #ȎB1B'UB [riYS&?֟N|mk= s#s٠ P睹.c "v$Bah뎈&PzuǡwBRL@05݁;(_*^vջ2k*gg|i%T'N܆"_%ݒsʑ$$"DJ^b9} Rt/c~|Bogjz]< Ss!)RP ya}dDz*Ͼ -i|8~j(s‡D C|og@1.i&]{t:T8Bkv jMd n RD޻*gP@=IJ#bjt&T edl|54@&(7는1ؼyس1) ;$ vŽȂ1@9i΍%G|i@M5ʫPJgau[>pCS (rW n%݀Olkn)Ǐm*ηRw 's}(޼O2ΛBӝIg_l/{Va \l/I]|ސS.E7~EesQ!4|`B jiqHJ -<"n0"{r *J1q?WZ_A L.Q8\g>ԛCA/0l=s3$5fơLvk 6 &R~v٧Ie1&[ 8Cp>Fc}m74^04Yat:Ek؎-0k0^fzM0V hjxUla -mg؊qN$?08c(j8"W$qQ՝zཀྵ@۽O <4YW4M' Qcϓأq_}qLmR}yUpx/' =2 剗ϚCHާtѽʡ'l;9Ƶ>e(6›*H$m;W,qNڿ cL$g5fY9_pcwҴx6$b &F3OM󁮘&KI݋QRs7+ƩHKO2{7`\{.,oǸU?ǹ\9 y_<@!q}S0@s-1r{Π>-+*uN:1 =1OUNI19 .)Fg"TI-BYnaB͟{9Xs c|*ƙg3m;ጱڬyyNc}驛g3Flk'ƂQַlԎ?]NM-(vO ۊqLcOEc>dpI^[|If%eG.>!>I{e}-Nx:y|$dI6۰5 ௎Ae缜wqF܇ę 0wOL{m<_tƤ~' {Lx\H^U0EI[.Cta@eClGOLKⷬZwlN Oi #(eehS ˬie\Qzn 3[jJk :dUߜz]FAw _k$-" gd{ eR݃1)0c"W&Pګ_O!}Ӌvۯ T1BU=\j 'u2.Ot8^%z/JN\MИAϞAʷxP #20$'ƞ._5ZKct,^nKʎH $8#|剥sCk,Au* )>݂= ".['tcAړAM[D ÁwmR׀9#EI,0iFt J"TׁtQ`ix1ߧQsxo4u@;CP/zD|FJ;w]DZ`(w{^R>px&0P|\ qd+G= I ?҇V hv$ ="7=/qMZ9z悏 2?+:&g!>|vUQf[WZ'%WqA@9U1W!~g" -% %`:_- jŏN^  83nqeA#`R+9vD9+P} ֯$z]^t0Ƀ9 Zm j]MD%@i݆8 ="ƗǭID%^U Zϱ_  }LKւ;C©/f`ս`(}Ht0sGPW ZBl'KٯpN6FA޶S'c_ZӷP+h)6,C}F y!T'djzk3k'=Sʭ E.@/uguTq|i8耚/.f5յA#hYГs¤PZ__I@7;{}cgA7N;ϭҁ5=Oc>MW5H@ܾ*٠юf/ @B?Fh/wNrsK1.v%ׯ;_qy?ݹϣ\?=7ϷߙEM?HOO4.wo}ߝs8.vFpα9s 엇Oϣ7?vN¹ܼ|ɲ7WϯT.o{ҊnbaJok$CFŵ__Y\2я]џm)7.I~ݰO[j?m٣tl|{ILnOz? 
W ::sI|ېF}go-~Alca;N98{h;xۺQ\3)(=l7ݹZ:|iqgW-=wqvvKnpɒ]V~18Al"wo1:yj~ ߊA|x:nO_}\t$3S4[nڇӖNJY'n7a~ o9㣲tT)I-[XswbY/1NE{]X*NWץVE37JvJUlUT,~ 'kebٺ}+I+ 5D=+·uwu]KGmv4Xpuq6Ig;_X[/|]rqaeESW[|ettͲ;tus`4A?íwKPSs˷]vmO+G[%K=gkY)/=*-m=/uyHX nq`q֞nnEoOXXwZt֎ߞsy4q!/XW7/|XZbZ߾~Rb|[Ѣ E$7G=qK_,,dVO\i/OSHiȈԷG))%#o'/[{K7i_f]t,RmkgZ$\l~[N>8kKG|stuŗ#~r)]\/h"wK|~~g'gWĒsRIoꀷ?m!,6X^=\ԏp ;×;B~o)'ıXU~ oedY],_=Dg|;eklc{T{@R1ɇ>!IbY.bZ.KB}Kn+j-}ٟNF [n|G(,qY|ʃ2v\|c}WY9,qe)UM[7 Ph{fdk s01`1Շ" ZOX'/Q۱en: cۺ9X;55x:zXlK+7i;^Į+-g5aTžr1AK#47v?4xX$xf{tq+/0qoۀh|PS ?YTo]PI[??b)G[l[oO>Z8:8g.\U7K+ON=<ݜE=n΋Anao=ҸMp_3d"32\cgh)Nc#PKU]z.ں/t?5},;֎n?GFɝeQ+ _ro:?~,W-qKuޖZѧ]SZR~v[\) 5ߍS|?-[YэNRHmqxqqEznVeqB8bD18>ϝR~?Z /9|Ǐ" /II'ϟ;CqL?͒%/ZhO塉l-=~V<,VoQՅʀKmX:8ZR#eakR+WZ-Z?WŇŝ:(.W-::uSddDעfc,݉͝N/dC[*U]koQR?9 i|/e _!ܗ+3x5Hgl~z4WpnUwߪaqG, NZ^]\t7y Y7!Kk+nquqgja_AeC%-U󟫳jْXDo?19'$-,㧓Nxy/Ij7_4P|߷[~lm{0?ۿ$.2~hߕ?fyqQ=~gi"t?:E$[GwuyV6~ˢrfk;?' ].S+Z,p +WxžV'G`X:o+DDHaQh&ز)oRZ˫FIJsp$JʿQRUM-<|W-/Ϳ ZU7tfxA|?jendhヤC߾*I$(K~R&:\rLjyelQӎB9s;{.U~ۧ9yf pKq /#I-Fy|$E=!?oaYtooƤ+{P5? 
yKOlAm[}{ceD~D>_Y?iL{:59"jRB_!:3o8N{TOvDg0%O+b/-@pD(%A.Yٔ9N_B!N$L[$9=?_,/ In%pl/\,[0r[~-gQoZ>Nj2K7*Էξ7!YVϿQDpҢҤ$ Eh-R0t,[Ų,[(PpwET4;K;w~ν<9v߀yKGz=*G9eg  GL0EoXW[4_W rb ;K۔1Rf_ viuޙ]ʾ/KkEW}3^0ׄ3a~]c's%ǽŗu9D2>M-Zt]0z(zL #xq5^tTmb_Z?%چYA/#x郿zX:5pZ&N=6o.#A'{Ϳ~Ќ/G ÿ߈\6o8Z*-PH߽ _]Kۆf+j^'3hՀ_Ep-rrD U|EefY(!n3"1qCUjC:q6RzO缶oxfҴ~?cO3n+UU*E>!It'&njfё,Ju덊"3ZB [^N/]`c?1{^RefvvnP'@s4A]BB긿7Dd-J S}Vx`l8٩0$<}#>b)ӟ{oNcJLfX &zZymt5qʘ{":(뀾-t=ly/,kҥx#F('`{ZFsZx f/*#F HJ`;^wkWjPſ ރ Cu܌'։MWu31 | =7Io|f|KQF'U"hF~F =狏nnz9~y:bDʈ''OϺO~1FOF_Q-{q&qaeQ݁;Qgu o~ӅK1b3#>;όd/3޽6tE |txv!|02ݻ_c矯{/c<WJ_r׏S==-Y=zhwZâpjgfȍ Z{/cԧbËb* ꧦM s ˞4o~r̆e=# :SqF5!SD]E?={3˵67VT\SN.,^(IcX6DYA m})#<{wO'Nyz;/w?7LގԉSFz3O>w,t ٠7 w?J}jC_sb;53 FǐI;cQu!|g ox&3|嗚̐ژ/e_o{[@GM8Yl|v_AS$g!%H]S[JVGqZ|> 9ʥC'FB+ciS s!dNk*JY&Skw}RpUluTYm1Rqu  +!!Ժ`*>h2n;mQ*KS*J3?XGjîG6MI Xy{OXwDXx5MH<<4k(Y6zS*C<N@oW+}nίץz#=wNMA߇c՟Lj|77^< L*0)I: /I@$geñƸbX :c[uR%SmH1[GTD̍ӓI6 =9sbZim `4ՕMyV *Hs赠2ʔ' ) CV^9zŐ!1 Ƨƨ6 +\@w-Dj5|˷/OiEW$ .ޘPqo\W^jI$e0Tڹ׭6֕ (1션HZV 68t4<|roL)Xމ 9%E~A?S&K "PE8Ob([74.mlQJ34P`gp<dy4P f3!tkP sr+~aa+E@?J5[>p#VG^(IMyzdNt@T3kz ly K\ހtiZ EV[ ~C^@ T]05o)``o9h(:L8 bI44Q㢔}2{׎Rh}:djTY)+'$6Ze:x#r# eԂsIp%M/u%g*ϡy\,VԜ^RvPl׸޼Qh\ +S<6|XCz\ahmɝW3D?=$7M [\taOnAMn7v'^k( 04:4JRjar" =dF*[n4N+2¯*mbΠHC0jgZ摞䮧2 :> Q8[YPTG}l$9K>SN l ZB;gst. 
+-^GBE'Olgg`q4F `݌B%lʉX@$p9mV8wס4):)ⲭ[o C8Ms/ ~)qO?RNru;jx'::FEYװvkWi+e J\^F KM'CN2.y3,iDnN΃1 i|Κ`4?̓0nj0QmX쨲8ֺ!y9a *<}&)IZKrWPhZb3; ֺÛ!dqUk:!MjzRu(?GJ?gq9/mQRPGpI="O$DZ}>Ibk&#U53;'!mF^A7 5};`Zlc ;DZM6i?9C:mpH`Dխ1W5C*Kv΄d'@RQ ScӒm_s{GJ?-u'HfƸM:]3N8r6z9Q 3 8(Ogp:<ZW+44gW8;ezo+Z/6e\pC~GC?񛮡hZoVPC_=ǻn=V{s,buY8TO~.wa* zT,Ϥ5^J6h4Ȁͫk)4}Kp@w)fz5c>U`|8cijQQ Af%@#q e9(Mƾb ^OOUL~:on0y:xv~}ٸfX<1?nF8+XȬ`]fi<Ŕp5& BI=BL ]9F:5$&}3GSҦUG5L#H'D.Q)$љ,gzH%O*Cc_FO{{'M@_a8X]aq ~x??<Ӕ0q/ Jz Gxh%mwgb,r_r_88C$B3<1!"ّxΞ5yh$jQ+0 >'q*.HEN$mwVَ}$'Y 1Wh[&({TFH>-$['l7r BcKBFqg%ApkTB"ҝ 2BM[EwF9}@=Ъ$^rȄ r\?NSڀ\ZI!2zBrtGAM%w=!#CPZ aC6)IDQ5Ru$zֹ1_E(|,JzbʽD, V Dyh%dwwk*4$Qhʙ#*R `,Xq+*az:&K(GQoa1ܦf7%݃uFN81F "w{G" K) r(~u*u5ހ}?PXDyc?*BEU@AoKŸ8,ѫ>0`uSHYMufʱ> %B S<\xJ%CCX?W>Lvb>[A>_)E#D)=lddJ5. u:?7YB^%a(|;lPvXC©8F7dYq*L!{io ί_ p>]%bG=GWN_NWn݋AZػ8(44MCNTY ]ql¯i61M^If^=%N{{ G|ꡆ[E2ZC}}Iy_oݯڳg4nMP~OhDs.d 6@LCd|V}Kdr xQ'd/.QU4^ȭīhO8A`KRqcrz'xi+ioi׬鲫ƽ,FLFĠWS UyBsfx/?}WFˑMI2h[7U񬖑/Ji'+On1fmLGr&pEqWb@|++t,`hn)$ɏ;C̕ qG/9){j%9B[Ɣ"Li")?ZH ,}g&gRRCO?U}hÖpJ7u[s2%+2}0ǂqHeCKz?] *rM\Tma6 w Dem'̮[y6_܈m*=;o_P?X!VA»~m1^RJ |s3p(TOohe f܈O3 2bl? p 0O=3bh0RQ41fu.?(?'dEC{rr"v2$іqwN%{8uVU9M|JU`Dj(Khv? Ď}6+9-hi3'q+\两LB<M6"IFV5J_tZYa~/,&l%*%3߫k7}Љ fH? 3;5n2unBǦ~@%A ҬaQ뙶͹{0˽$I"EKUӚKiÝʄ!e b=jq -Or>|Y)v=Jrt@A0C۳CG윕 ³iG#`h +ZPs"n V(p Xy0Ud@ |q1 1@6CyCn: 'bo)]Rw\E;P-${=Oyz&N+N(fnRHFb ,-U;D7w`gL_#@a#}/N ŭFQW2KѕsAKH:B+brCVtrV`2 U9QeNr2VGҞ&,R7=m 9QZQdR+yX(3|@z]Ii={7P%dF`&$ qAHDșHH!1<^#D~|Q "<xt="KbSs}*3`0 /\KD+o!LD_C-z0?=5tAwr ctXڳ]~~޽J:=~Cpzh:7̺^,"!\Q ׋Wo?7{M}n <>*wO"Jd- ~'Gqj@ZV՞XICc04 WPZ .֘B=BW}1ȈS5n W}Jl AE!>ťX꠷xXoij|JlBBb1_`Z;|"ĴEZ?D"nB&Ѻ*SJ DDILTV#**aV90H[XKT9R"jL$\WGTL7?/JQ Og34%xGurZ͚oQ6<&)TȎ7%\ 8]%00؟YV-NCV2g5b iu-*>}%(gh/. 
uJV!-s4/ 2e#s7"?"hOă{a;C[A 71<_S4[3 ROeJ7w></+^*Y^`{oU*ED&X 7M&~b/eH2}:N⡗r&,4Nqz}K'Obӑ ٷuS4p;[,w[&SYc[vM+V`jʣպ ˭R3i;V0}P⹒[SIM|XU\ƷD[C{SɑQA]Xu"(q():k~1}W]Fidxj[ť@nŀNʧȃW=h|)EP]6t1CZ|]xc'z8։{m7k!igGV9#*16]C('2/r$m`oM8W&|M$'(Ҩ䵽֘pH8b`G@a}"ܧoWnZ53R NNxYN**chǿ^t5?lLIۻ@ ЅUkxV ae>=:oL0 F5|j$ؙÞoߪ]2].ri_p.)ht''0h;3"geB[F?8DBKe26q Zq3h[JEpBLzyt@ET v#8҄`g1<+Q'9.Nd3*rWJԿ|T9e[4jx4Z mZ (J1Sy+$Q6J=e1Sy+\{lmd4guN|*K|H- _`O5R u;NYujWƹ)-STTl&QvY}b$%3׊Ɖ<.ĤST͹a ⢅޹Tw'[ X'*ohL% 8-^YlX Qzc 0,ԕͫ,f*-Z^5B쀱_{3Wȏ?T|{Pl_p#3>21S"D={-V2jkA,`^p5Kج\T~1/_qVY B> ƭ>1S(Rj|gfy>nt|+>3O]R'̰*lQE!O#9\W:hFob\GqXz y\@ 'PN֔&#`ۼH= n)DfP$059]t5r}ԺS݇L9r`$E带t]bFnHhhk|D 6p؆~p72={"[{(=M1ZT?j) nX@Pml8w6uL(-PlGYnO(:1WSTɡOx@^ƽk,\v/q>. .|kT3 r;Wajgk^?1/fB&k: 0MP8Sߞ^.2%0)SHTdx8vq^q7\-sc_ÔW<-tF=FlC,s( qSJ],6$sw>'v,P mCu򪺠5>ZSUo%ٹK#);|kj\sTS:XtwHuy}\ 19ō%!x%5MxG㱏},l-) ~8KL_/| `Np z߮ⷓm/~#oˏ=/hnWΙeqW#Rhlݭ8SqzxAw3g4Y|^{#&tb&`MplaeKrxSAG춲1CM>i/i@ ;<] mD|^Ó7tV~xUED1 _csaڐPE~ֳ\scZ|??)_/@-ox.?ڧ2|>'iGU|OF*˿V>X+og}E}R E5Q9|P-P$Tag29TQ5Trз *.ӻZyjyɷj.O/#Vg/UwThdz>q{}ߥ_[~.fwy!j ?YknקN;O9;:q_00WާByܩ،4h~/i~Sa1gpޱMrҊ|{&$-MIi_L!KqjS2kcߣn1k{V~cuXUn7{ln#`W./ 1'¬hN#8ì!a /vS- 1EŽE (ebdwȀԓY :2̞wEWgh]\8N8}Ur{G#pKPu:5(2!)2H\2k;V]mW3OCڙw0{j'aȎXD]Yze\2xc>[AΨ4kB70Or:}G}%cM?^x2]nL A=_d4ț QPk0վ6NXgEYS4lr-`)~F)D8f+ކ:hB;Z`,ؔCcy34 ز hvوXd_ #_#f+xLJHlognmA 3rHu0XДM34 lzD%Emb?oF, ]&t9$/h4.=e1?0n'9JUem2M[R:>YC[\='9`2uFB=GlfnS. 
Zy)++Ws0RcV(cSS| |i/NƁ`oO Β] Gș @>l+{i#EZ%,CWn4%$ǯWğrW@#kI<ƭ8N+ab>(BL`SE$% E '^F;Phr+WOy Ų{SnW~)G{,^tCR ՓfP6Lsht!zxvSds;H$P2e2/cmևe۲"nQzn+ N29ZE@o- ֢\Z >V^vԍ[,d-9+T`ag'Gg'^)cs{8%Ek1{mV&.JۤTg/ȳȽ-UJwX,GY u*Wz ^AE)p?NOy^%ѕN2]mKϼ̃/x-~iũiuk1J{$ DW1QiGo+LǯRIe H .I ^W*O8ea~e(q?1_8!Qde?aQӗk.#x hL'v::MCٺ2CXd`V/25`]P F41TK^!F4[:G|MϼogV@lKe< }"ğrGGν樈`M LG|[BUuא=Dfgꝿ p97hȱ745SxIܑTf)XDdgBWcuG6ɍҴDG3ɲ34~=X_hsrPtGq#1(Q4gMC=Vاiݕa-2"ǂjt 0En^]LN54ohן+9gPp^5Kgp Fʾ`ƣAF_O  \0Έ\e^ e(78ZT RY7ήA}ʔ)DEvH3~Ğ{>)JPwZ5֓9K|ރz0) u,J8O;$f4Ьj =<ͪ&6yu^栠s'Tiz,ksE Y0f=ͦrk;.t?W^r^Ye, MrBc6ofbLUޟ-Im3p9*he4ڞTiǽK-ėzΝu*M#sP\\'tɡ?i!q 1tSPuN8`TFj~LĞrN~Lp?v֪K=)*&vJ;+p(m_?>^QRJSzqR3Sv B9jmz!-&!Nb9 |eJ9.uv"Rr=M: (촵7;1ʎw4پ '2hW .Dk`:dLl Yy%6oX}blnd8[e =e^!Gq!^|LǒE)MÖ#1()YPx'1:3 _uŒkU~ $Kz{:^=Y,+'tiA}g`+"Pəm$"zBM+PjƣfpIVМ Աt _ބb Tq l|썠u=3vJ(C~!#]f=+ v>%zF2m(Rܯ}xZ'*_].Rk^_9uHU^6-0XH̚9`.ۭW錛/42 zIƩOs:B U)HŸT?my.sb/%z;0b_;NF!]J?e6kdA#gKWEhnhɾhz(&kO3M/8.I5mŒ3Y)XVM9gX@2-rDkzK-/ۺ[/oTr?r5%bc'^vۦȚ5 $|Ɣ`@ϏsoL f AZoC4_žy\Jbu6DO}wgrC,]\:8 [zл穣H?cecG9Ĥ+Q 2Ι<ޔE\\iڔRr0N=*RrzL>C{I dhkQ?6:P17ˎb\C D@8,\] Ҕ+*E8]dK`vU2|tj&<ۤ1d o^; ?WyTg/#aSҮ:L0&0v;z処xzH1$ŞK=nezBze)Fx;[-iڔfc.psACL]vFd2qE8vetI<Sqz6z2*Ŷ[i:j o2H"f|^JϭtC3!د:bWFA`8S0)T:)3T tJ,r(RqqS?RM2([jJywgUxzIcZNbC5&3U#ITQ6P)y9Toc5eOa2)uDْVS T2b~%gCp7-Mw.HX?Ց?tG9ZKW_\ " BsMmYHW[sBIMq­gNu)ܚL!4!cPLm1^l[F)<"-I]P;A73a뤖MqSOPۗlqQ VFX\or\sucr`R]:STwtO֘܎os״P{>L:k죓՚v4C7JD +  ]s;3Zs}w8gI v8pp@zw8g<.Rcg|A>I1i:LIn-?=$1晲 akʎ~AyPE.P|*Tمe8ϐ 7柃QǨ;Sq9Mrm  ĞSF%Yoi5Hy`g jI-7d}6ͦԟ85MVϥ2N}BKv1'`"υ46abFpXN RuWe4 N\yl 3X`FD`Wn9;3ؐ] !ɉthx},R,!Th nd1nh vQ';4D?0-9k+pc8nj qxI3uq 㪪3JLc)ٱGVgl9d!9l_dqvn? O7sT,ŗش{--!}@1!0pHf!e/gT"`%} 0]8CxqK覌JJԦ93cb6Zഐ]tP@w-9i0| \U7vhkםZH iؼs&`ő &yN@;q{ϰG R_:i,sDˏgH/?Et\q&}N:(O3۞&:d:ґ0:pf|Aڧ} X2!6&MM$Ʉ, $B*!4e"]~DG$B*8¤MOIw)&)"92 ÔX ֑LL%=\7!SnBtbBDHnx*KB!~X!zpI`~;!KdB>{iؐ{SiD/!dyLH BI1uB<&$Rr;lrqbBB-Y&! !%9OIߤ#qL[㈐*.u6Û8<םI$PRd$P$ neKðbgc=QF*=ʗ%Xt㇉:Jz! 
ʯ랿xn,S9/y:rꠣjrN]ĩmEjN}SۋϟX<s"?η=ICeI3˟9\g* gdh :{*@CDI}tFXe Cg|tmiHsyo_1si /nTynP7xQ)|eFP 0^?bU#4 }t%(KztGja,HD˼ eVkx>ƴUŜ!EjZK̀z(D tt'ngFrS JՇ%OD,OW/Vy,)Pf>PY{7ߎN}H(5#V;utxTuqCM]mn'lBǚ쨲dsgٵbXssz/;vcHDi\XۍWs ܍L |35ـcfJz)@wDϐ9NNLM))\p7t= 0U5R]~E2gW~_򃑖ݏr7OI?{ 5^B :ú(5wb|}edܕL! xquHuף4,X3?X8ҐqMqpcꐪ?Rq)St va*")u8N'RL1NuH5|Nm/0cD/"P/M~F{ͮa86{ #~TqE?G;*&ۯN;vxW65n5GEZ]C~9Y<'\<+d'rh1+{TIMգLW/^D!N-F|㾣<8c/<\`$TvxU❎bq P*r4kJ۫hҮcilZ`/$;|iS 3nwiZđ#RWJzDFƑbg~H3^w|J),8~6O7%(hQ/D?@w$S!U.Io>F)LXKSe?U,c(HAN?O:& 6 ^FЎ*7U:aMYQrcpiu2FѴAM(ܓY߶zb8!9`F}ݗ,?LM2ŒFܱqQ7ԟ9d)s^璘i39<3g Sjvtw٨  HvN,PX'ؒ-Lr(a.T\]ŞZEധdOl)Bl8]@lh⦵0j 8z4W6Ň]ԢjQ/d*>-jCn0 x`Q( /up2+ONgDnHny[c17 f~g;͵Ќ=j)ERhpVrwMyV)8^r|^ϥKUb/-u3jrHLX~'JK~b]Pl>E-޸SSbub\ў6ُm}۟+ WtWOW튞3x:]QT(]R+\Ѡ=x?W4KtE{]d骮BkpE?wΕԟJ]O 8OSh-xzK awi#rF/jK>*m;b 0ָm+ Lng?I&(IY+rڋCzѐdOG!Қ6OI.]+;7睥yE3?D㨧 O Qwi`6~*sAVBz*ޣn{կw#$utO$i,fOB:FY[Yby(&$Ͻx[{jOFǬ8,lr>I啇ԍ&7b-]۩|#ܛ|;~;N;~;x/BR8{._|l+6k'(_E iy紪6=c 05rAVRzڏ\g_V9үn$6Hk[x/Y'n}+EBi]dҨ|L?9EϼrՑuZ컡"C4T0_ j{>ޠcq|xK"*?s?F3W| ʼn|'mЋOfJD&QbAq>ejJAů)rEtw *|7BVԭު7NR@ZPi"-Ғf5fT~Tq_ZF~56ͯ$wSPJa9Xy7?/l ,\4nT>Hw}Ľ8|=#4 hѥ&8M;5tk蛩Ko!rm9(҆ֆD11ai}[iԸӸ96 w9\-Du[TNAzA#34{GJAAFAFqYQw" "vDe>)dAށMާ5~qN>Mc{̿5xÛ41܊:F<Io {a)-5B3ޢTPJao -s͘@Րٍ|vѳW- N.-j2d`MN4ioɛo$}Vިa}zMCWnǓ?x"fUvxLWOxVp蠤yI_Ldz2z^j&!bBneB(: I:BNτTObB,QOՌTEX_Z\ǻ0)nB(nBeBVn#Bn\sש~)LH(BI$ wDŽz=^j~ϗr:jnFPҨ _s2H2 |+r'꩚ ^jz!,rLPNuBV![㘐4v꩚XS5q(dyLd&M寉s$΄i} %|_ 里*麗8j"2(iuA]uddHn3.Vh{6cՈ/I$PRI$t$w8@pρOt{޹ƹ7 QB@΀Jit)oOszo䞞[t/Sν|grOY鳑\"NMpj!zz)z7O-FIއ{" Bx gpЬ3=AT-0 {3`0#jψ .}DbZ+ɉDvRf0?$pȘ_Oس63f@ N3 cby M!.)/@xZ seL1DEyJ;ݖ45c(c\wO?ASBE&tg?A,Sퟎq?6G}Kjµ*H{N=UU{ͧށOW2oi^^TsU5%GEctU?ƽ:ʽzaտ.UUa.mwyn]Uj+ l/|Mֻu9(m]NuS{*'$J`D|ڽW3*D)4'^Z]#,om%|?|qw1w !5}ľm-\umۨLllCBm)}h!EKNۨ+2jFt$:~_Јˎ;nfqqmۨRyVprsDiNa"9G]աbuJ1_U!Ҡ#z@e(?#_OiLLYqn3#Yaи aR5% IB>L6Cp~#ʹ7,|iS /mYv geLk7_m=aZ-Q$h)eGx7=Jbws#pQ?s=, s/p՞ hKH=c $>/R {kBUwq/m-m$16oic&;qFbi&6\(]ѫU64 d/Z>wTyP'2뿚({6Tm, -E(\<A-Zp-B>o?pW :SY :0Tϳ&{T47hճ"*NV??a]S]u?/&Qa!NйNn ?k?<ܪ_( KIōK3E,ze^;/el1YN6}gbkaPdqn7NJ,NkQdN7FEspY"^jE![+(]lqhSī!S56N\I|97S 
(P9QZt.>"|^Nc=8 s1'j!;?ē޸I_wdQJ2QB_||^x|havZ8ՄFΒɟ7o…Wp2Bs\zof81? WiOKk6-9~#w4ym IUe )Tvg2 /uSұ‘ , '^tgP| ~PQ!gH τUe]3EâJuzBW %xsG ek3 1m;* o9H(xkSr!lʭ<Y (VTn$R9kCպZ+OB߈_/O /:W8lYA_C[Wr#~'h^$-c|ݺ&9+Gvmo ɷOl\o בZtGg1-$VSha8UH qiϑ}\!dgrKMjo{)g`y-[nbڦW_ʉ5ȴ*W*ZL'N>"\{]eƅ#MJ} ESz%18CX=N4l~Ù$F'[J@dbZSNrE{ʥUh e A8Qhh-_,7n҄e(oHMI03 GʦV*$O-G8ۇKG4f" U6԰El.9X$cdcDoPllػnoaNoDOx-Dr(DKWfC6VE!qPpCB/v2QX/ C45D7Z A8iJPJ"ilp5x4v_nxEo_ز.q@,I_2pj*zC #ooT*B}PiizD@ $PG{AMPZ ). 7T PPRPrOv\چ^^ @) z9vߺoW ;d բc^-j֑Gm^&^_":[|Bp-zzh^\`_K+xG<[ wXh H*'TV3kADOH[0a^m寠 C7?7/cE٬e\E3J^8|qB%uDVfy:gʽBݽ(YU;< BdTnV+hŽRQʓzV.u$$g] yI^ɒDK1"w=r]M^M:4w _a/Mymcj|ȯ3 !Z *mt7t[z"&]{YН˚v\:& ~/^e_BW<m/tËFR6;P `7k*+.~hgR03B ; WB\ !'w\PG2ѵ\DՂZPTYo"BfGJ|Z.:xC '5\@uE/ uBP?xC)"@&xCU uCMP!/ eA,R@ U N ^qky"k&A׈֠E TxF=zBrYõH^dEH-;Fd/H0gka`g6 ` KDaڌ\PX臢h0- %/ka o[I=GMG 5lt{iẴ"K["Ҫ{+laqwĐ%4A54]q'=t$]Z\Nr`悅# A^¯+otbYB!}>T4_ūU#Yve|v0a/^Γ C.=]ɤ_@ڣ~H. ^W*Isux%>X1M!K3?7=||p9v44<ЫEqCW*VKڨ~1-98i7F6֭` ĤlIYѨ]Eg ;=\dXu!趂n+X wzY4e}V C. e126P}/&j9O0 uD%קJʧ5Qq~*9-p- g HZD- 3ݵ^h :'j{7ā,ĢCA)gF@ľ ǰ —#N㌏3Xq.QP/%/s7^{a֜dog4Oi}%|IUwJ)q%ܕ薤8X$Ֆ+5zxqcX?ȴJb}8j]T]_$ruru|\QK.'^0:4EU_$ʍ;줲TcascXc@ale^ )"^[HlGaş~T,iy?  
[[٢ڥo'%=*e3KθQ1GQqY!`kQd3Vg[@El뉃yÙaK7{yHakKR䓜G K88vJRqI>R)Qj2}w ǀiʆ+yfM${]*WZV~/\/eW70}#HgɑTX ?-.:zM݈h[Œ"eUUazn-wdj%{;mBv.,k-%9TX7[-sX/gN)uʘc~ԃgg){A398qOn k D@8Z Nk'|?:lJ"zlq-}yXz̿-*g*?g(Ty4Uq1ij#8~E }ZqGu,V 1,`ddd65+#\W {gW cnW2WWCک@ c?KW~{Fܟךyi6D*pct?LW ':e .7u0ȿN0x9CU:Dt}\F6FrC#_/H=}R;='q1-]GfH#e r<cV38G|&a>z:16[{r,/A|9ԣÇ|;T"jhbZ*z 6б9NQ>:D|._pG5G]+3Sz_U;4HcoV_Ϳ_[V1__^S!(zrUEݯoagMӱ h,|>H\Y\n:X\;sQ{]ڻN`o#& }i2uC{M/́z7w7_G2A*>;sPxt8gq\39 ۚ5@AE!//JLy:96x7;:hk/NFSڅ"Sk K$R)K-rCo um@cgT_8" uƃθ6U8­ !\P7??y U_`']e<8mNT: iIr,(<)L;gOfp]bTw`R'"8)Fd$.jLEOĊW%DAQ[czɈ4*wxw1Ga.ͥ"9IV9Vݎ) %z+LQN=3+AV84:)W*[km\\h5}Y-ŗˊ˒%l i%f_Gqtu"= :FJEhkA v:NX(L$R ^`-"v+9S)8 cZ6[Dam=Tע(1-'c:O>V" [svQEպE_ĺ1Nѽ_ƃ:q2~ x[oWq.Edӳձ4L} _Y˴~fLtˆ vK腭(BzE~&ljFlFP^?\-^<~fR@ve0a"Ϳ}H -9L ӟEJnaqV A5T-e48.29(8XJvdl9eUNNf1ٽ6پr,Πb):m,-!}@ƏgLSq!gLx\`>ZBV.Xn}<]̧p*O ''XVVShe[nfZBdLIqۜ{y#n8Y +XHPzX& L6nY jhLÌuaG$] iWIH/} HvEdE9/& W >hsLFɇ K7[JݏEǂK*AND?)W*IcKQ/& \|҉+^0 aO4v79zfFT"~NҺ/K)+SFW2g>*wjJ8IlfVtӤ}3̱; to ࿡ef_3v|,}y%"c75 jK ęr^Hi[3thOp+msH{3նq?6_՗[dlOݘ^C=|޵yQ_'&;)Y?z x~1 1;Z3C.M1^,~RŸ ?M9( -(/zfO2[ [3}7bfBlތ3wj3]~-/сD_)DVCKŌIr!'\^yl3Ws ˇqS~Ύˇ/7[2*JZPjАq;~ >Le:B}/&r2%𪼺E%އǍ$ òSOxH%^%+Ѣ`yTRo[S6q,+i$Ej(Ri;qr+¬h(Y(>\e D9㔣5H."FycHE(;.X)dLt|ArϬ!?!N*/T4UMhn`Se\3Fc)Eqq0bhhJh OW!֥4n{%EYetS*JW*D{aįyYv1iA:ĺQ҇%Lҙtu?xk%xg@sե !.ݏAVV J)Y#ra5ET-*opXOAע q|JCͣ8<屌0>Y&# ]M"FAIxI$%gJj^(' ]kEV}<ݓY"w>ꁧXN3&|A7əKoy; r4N#Xui+[4}C"űk8,CbήbnĻ>/bf u\]7iևM|Ĉhj&x>m~nJW=镵ZORu2zun,,cmeFfrm޳0uY*1jc 9uՍIzmP5~OPk=p&;NFyqxe |9;[6~_2qZQtf3+PeG#OFVecT]k -y<oÃzaDa| B' rj.ntJ Q5g4X@Sh "m 7xdo0߮<ՁW(JΙY<,BrMʭ\k|ٖdF*% dpӭj7bc 9j c68/:2\!k@5~]7zftv:4He %0X5-Jϵ:.JѫM/2Kve:s&ϜoC#m Fh*}YrR_M2(>S^=I^ZueE_> EߏS,vr }.Ax*hvmV46mI|RD{[kq KWeV9#*vC~E5馨ݸYNK_= y"M?K鉦Ӧ'Ӛ`\=jXfW Z َ*, Hm۝R[ TnϧM[D!'SFHEMM6gMˆĒiP( 2>WKҫy(s 2c xʍ" ?3jBx I(=$q~.>C6fb. 
+B"kNMC?||ȳۦ3Iԁ]; Ar 7~ԶVi7FQ8;]JLAc3LrQLeu]3H7MZ#v~ܘ,CP``15t$_DjlweTҝ|TȮLۣt>qzt\×bx$9r3S~NzHP]l ^8)Ex ]mߔhzc x/'922zDΘd XOlZ,s@Ni+kWw,r\1N.w0qhwd'l5VGUe0KleWq7z0zKYg>F@Kp˔Ҁ?qɲF.'zfF'Süi$]na-W)O+loFj>9S΋^F4ok4ʮQ*7} iD(gYSkHC#$\Uy/#W>:k__T5Vj[Y}OKmK50ViƓh[-%DjWYnh5dOՕM"vޤ}=oO[}idOiIG;əa p@rf)QVRZ0p T.OCftOfe\L^qQf~ޢ@ ZDVxjǎ.Gx+p dy:A;NVp;InaipTwdf59!^!0X(q?c z[r2q-}hn"5RӮnC2nic*\1V.Il-+,]iC>.F)j5Ŧ 5D؞Dc]@$ vtj ߜkNR/I6$kŀ:%Fr@,ӥ%@"jΒtǕJb'$_GEIR ^o)Ư;i7-tMC /w|%Y"s+v˕O]3wVXm*T;5 QDg\#QJQr`\~wCC矯|i6ᴟ䥾|HKQ/u" f| W5b,sx7B;x9Vw~ҟ]ҘW`)J'ugn$9*_ )ڵ ⺊KZji+0O@S؃z3#8qb8")W^\uY\e̸LJoΡv RvHK44A?pFyhh/Yj` 0{o "Ԅ&IO$%Kr {m[|vcî]obصakoyrB eܤЇ[zh܈\hK0$G}5{W`/@K1ƛR_IbJds7Bi"2yks[ jwU?qs85,aH:m%8x+`α1)#$:OZSQȚ9bhoR7 hyo6CўN4"`;F}̝Fh- Q (,]LEqf5TTOqj"do76_lNQ3~\.ܛϬcw(ј,&Gҹ{r=K?)J)RfBR5,&r,E{eY]ZojREi7T[+TtU_Ѥ*0w;W<2Aζ Wh5h`h_[$\C7J 6u}5u 8E*DYa_NS ԛpR|(x9;jȭæ9pΎ%!*'NFV}$TNqPi`jQb^#խTOe: QWsahPSŠB^NfʏIZ7hwz,?SQ}~-7AEE䡏8rwl٢{_t!n%d\5{ƍ\+m/bX"`__)T݅)Z1rݕfoaVzQЇԝi"V_вFW3Ol]u']X&F-Uu?P8/*dٟ*K*4,jyYB]i>^4ߞ{{.u?ݷX1k\{U<4f5ݣ}5Y5[E;sy>sƊe-r {"O66OS4HEnKçT-OecVVYNâr V/{_WVzZg%ǥy6ff)0\gW'uē:QgIΥǹVcC 3)`ҫ){e hL H `KIQV9XKO-n|% h0t6G>@d!#YyDXɎ*SQN{uJݒcB' !+oɠRUY9&ЫG#0YEe_@jYȐ)I-8&χ#‡5yH&Ld142BrI2l)#f4S'"5bck2?]uűUYi77n71uKɎpQ yL'ITE7@:4r/ʓӞ#1Af@)MGCd ,`-chbjձFQі';&揾n(LO'xP1n9\]"ʔMurJ]hPl9[AB*S?O#w?cRm~\xV@N~c8،k%5tu|̒@w8fO0`߆Ofq]jnEu4[H+XlkI,(}" %XҙOac΅Q90ܗXlJI'% 7q͸n:>Cu[e̙U8k.Q0XJ^ FJCUzGiҠ[iTog@ -1n@t?X:R(NU;U֪fsHRpH`BL"Q6p`$y0 lnXa;6<ۻOvԘf>)__NMkޥHl;z?ýb!Ym9<_@"/@cx3Ә[㔯im3e`!ߏYEIo&ݻߑ̏ӻ!M/[aUIZֳ ^:Z6ά,Fw/v/!=TU*P=]?GcqYMԲ-^ j1;)3弅=LL4 ji&>G~g=wm$.z/XL9[/gAB3SP"xpXa3Ɏc0`ru݆^CPpշٶE^q,>Q NSvA(N=4﷡Iz"o:}iʼPqF8q,[)uNwyi頝REݎnIG${:@oR7{>9Uw1e6'.;m}ZLh<[ʝ^MshAݺG#Hʇ2]jѹ*@2&2lOiz:c-{hJ bRK Jcgp[@DɮHv@C`\趔z3ViRt C)qN0t^QI}bb$QOFsxL4N%w~(28)4IO_?WJhKBs$o?4=NHv7 R8L`þ/Cxÿ#3eK6|LP<#7/sdQFS7@{\XF2Y@뢝DRCn^y&&](`mĖLPcdaSiFiE0Q>HŬ Hf =wilW*qW([G=> YX+^=j9QA dZ9CtXtoEk8(ɏaM pds{&B&>k$(=ڄ QfqlV^b?@8|BFn< h x™Xۭ8JM/ѻz jPf3">ܭ5fsX ?>If'Ri(4%ҳ'CFjzᜆ dHDe(ݔy7[JGUkY54Z<{ 
Zיjaόx>s]C1;ПG_ғVܣ,vyɣ+cF/VeOT-ݠ!W!~@XZT؜?A\cg+$s>bw.H4뜵+I-rg#鰠#{^5eGNZq+jv 5##5B5MJH%Zz{cJU8%h\ߘK"~{}ⷣS*~oMoC[3ω'~[zR~'~Oĥ,'HmƠuka'9,SZluB+wUתKS c5#ng1)2 -@;rN =`Bw?h P9yx}VbdUdzvBsKxӕ2^+z<.?۫gt?._:ޫ^坺o>i.?߻^u۸Y/uߗ=Q$d3.D0.2 V7Bw4ċMM Wj?֟g[t ΃q58 XN| qϷdل@Ljvk|%~e ?D@Q@nMf!2[YWtqjnII.Ҍ# p^&wYb3";"S7-lP~*MbQx Il@9Ye{z:j:[DQYL7';[lYa-,Ρ;zy=ym rnWE35hU^n~'W>?J!nï~&P4 7wl"~%/ۯy -}Gd{aI } w#4 _>[%Y$:[^GݯIlZm(2P*>9Y$|3WQdk.g"i. -/1k֘[ߋ.YmYٿ z54򑀰32:Wm/տEA@ܫɿ?Jc/tJ'oqe.Y8or%q 4)T?('=)޿EUu8xc(! P=3JefT`L1XVe2, -PRq̻H^]}Μ/|dg^{^{"kR2Qd 7Dxb#!F2}ŮZ{;7&C_1›{wFh-ɋ7I G2:,Dx޼9GeXq]VmfҋylPMz4tԺV$%D7s9V';S;EN*#݃T(#z@T[V'Q#ҐwH q<* sx_ˆPYw,h4?oyiΩPUө1:)^u7Kpm.# :#dH.$z gޙvYnYo D|t4>Pɨ] ;δ z/]7]u:W9ҏze9}jD ,|8 ۭÐgMеq7$!>a+s/a@3]QYͨಽ|,=qׇ>#PDC%w}ٙ/oF6~ Dx{^ݽBZ쁥~ ӦBLa+57znTr5BP@[<* Ok u2LW\SRu!B.r],5{_C vޗw$0t+x=DBYD` _އb).Fkc GQ4Ƃjo%tj )1աZ{lwix[b?pyi#pE'_ȫ?>2M[d| 5kw'SMwσtI0>fl(@^K7 }\=G6)GUA}3NrXh=Ai)n.cI $10A;^Lp'Rp- &qi#A R[[_.7tuH[E >M;DK(: ^~=k,|4zRlj /_ý\7g wh}̻[bCl?WC.vvWKzvSG)ޫh[CroMm/y*;kQ~ '}W Ƹ"E]vEm",qY"QguF~hoqNknNsvO<24&`edAaYxyZe6|u$]) o."-7= ]sw>Ksw\` >/`|1B:H| xh(>DwG':SJH|'{yA"yƊ+n1+@ ?c^4}L7id!L yz_eQJvaBGd^̎VdG7:`GioJLvl!Uݡx̙qR.idXv+&jøPF MZ"iD] c8a$\n"z$u@᜘չ\۸V׬P1+CB# mŻ"_ .J(j`xPk:.D~1uήt2DaSU<^S^=UcQS|>'k2@AA2@,@2Ḱm9D{$V .W_85Ĩ(6%7?(Ϝ r=s=U׎y.,t7oJU¸"sv@Rv܍1YU5ʕh璏IJSql4g!wyi IZlhѴ;GbxDy-r#SCul; OmNva5) iiEuj;XqN \%\υ[![$9W L=bwrnqQrKfKqZkT)Qj6m=Ͳۜ3qDxkVh *sE~neø-0g{E95|6 1ڀS"~ȗ7ߵ }c_}]!{7ӷW$r{&I<>8GZ.A[Im7Ad-zGzv;>׋ސzZWw55Jf-9Պ 5zS Ы:`gIQoSٱtaW~ d7%pJφRV{Ng[~ Ao> &sɶ\xP~|(‡ siKG5$b sI1f+ ler_f.ـ+'U,R7-Ri:*<+ Is0v׿t]U?6|vFBqbMGNoLL@_2;ys'EWKkk#;INn>lḛ1! 
yA0͇ń#WuQp5=쵄-:O,*sv)\mbJחLtI͑KⲊ83;Id =&rVBmށ[6g* t}2̛Qk`72mS)yƦ)sܴ{BjʕbeT# C5b\{|!u4 xɫʁ/6q<'y]K R_.I}q}k#$  9w-E=d|T3+{L R>Hޏr*0s5]&UQڎE}G"yh\R!?ΔeiEttJ= ޲Ϝ}&#qŽ@1A4tO+ЛZDwCj^mikmOQomhےksL|.8y{EF}4,H._(ʛZy7׹\\7QgaE |8W=5rs -QpPY\w-̇_^yG-A|(Kȵ'O,B%3˟r04MxJ hn^Zk AvQ%;а%۹ww@#_2&kQ5˯&KBuE[t~/I'F}8;p﯃OEɠ(# 9GE%D>v>r>2UquPiνmsH޾q,HSM:QSDP]gݵnܾya#⩛9 :uu V`du>R/ڜ|xnM֬0Xh'a]x`-3.?=|4gi8H:b5 :H ]lƵ㟼ԥsۜ,'ӶߊEf6^mN:@ܮmCVM+f^Q[k-[p̩`R[q$>(#^joDl.6{M`KRyF#JDﳀ}Ida) lďbi^$Q_^?Pzb&weh2[\}u7dN9RF0@@QwdF%;yfcl:A[]{qL_ZM`cs)T53(1`URo. %:gasE?”]A,w$LFrV1'#O7>CL=1̄:_!3,)i(imOÞbqb=c =QCGwhinm% Yl&(ydOP| K10Ct ?<H_ZQcԐf6iv8`c3oco#qD;\`Q]  O_]]] 99q1e=wYv;v*-2ۓͱɖSsj-:=3[-h!4gcasޟї`Q;ե2*;cֵv [бe:*ԙҺƨ *qʽUfB=QVHWB"-~TTPSjD[htgP*-j%NDK_ UB펮BMq6PE{P8 *kDZէ]=O|էI8>ӧ I}mb_Xw>-]3˔gYM&Xyign$\cݍpM 4:E:C ]0n":Dwruju[B9U)KPk*|xGv<}uMi*T:| I04)xC yrIbS!}1sWnMkh0&RW]Gܓ2G)vӕRs~+Q1u&nRV哢|j ^ʣGhd`ɘll : t- \$}[DhVZD5tM֬6Uu)JSukn@oHh6\T o23]y/VuoeO>jbF$;LR56>Oˁ2h/6fe}<͏tt6{.ˮ`_t(]@{A:eG1fY5Gץ˶/Dʍ>lUA=ۊUo4B/www7z/C1Y0ܠ벿𣨼Fil:)65樺 ] |wTm%6:w!ReV-V i%Z Hsv.(!Œ~Q@+=>PJV I`QnSٶZp\*B"Q$nֳ=Idūw\oCloд7M>i n{e}#:} snqfHKN)KkPOց{;֘3a$90j-َ6k'|[kf H"]ToPH= %(Gx4yBLdH[H 0$594<։fЗ:RS5yFrx]2hLNOclDI{zrJeƅ"s=т|ț\`nTT{E]7WК$jًU?q`=( |ٝ@g{—>@-Sۙ]3<>;iWQŏ)E<AF7#&{<_t nq$u7f=32pC3/BFq32;>_E#0_q+:ⵐOXJD,zvfxsjj|ЕL \^ؿft~^qut*C֗JOв$]U- P3Hk[96a)uQHK4N(C>!'" *j+UJgiCW6ehM@/y#愇qfsD9/ӽwd#RP4(dE$ip֬l'ؖ?R5ڑve:2%qcz=e= ٕVNȕvFL ԹiNiI!#-4ƴQfQ,cf, }Ҧ 6 5JixЪ0RZ^nOvXV蘸! 
jD9 FSvc$v@XI}ӬFHjN߼ȧ_.(Ry;t˾ig6Q{,~3nТ=\d4tޞл /&BȲoDfPj0@:c{[<юؐak;Z "WvpALjнJubS;lxD {[ʉ>lg~4 ְrʇp>'iSP"K %+E%*Yp}&zO4[_9"wXtƭq f3 6F4{tm զ@:'g“Q*Mj3 Έ_CEmIguLQuJ4fAg;:C2!'UӱF}pRHZ^Fv'E|F"7Xk?o?q5%H22p2 mFee:GX;`;9@$H%CDI2/ͼL bt3׭b[42\kښ/ żσ/Q41q_l%_Ҹ+} *ZkD{_l^>M>>m2Md㟓-;O1[K;i/6yV *@WYÖ%|ې0yh*B[hGlKbƆ.]lAhr5b/jJWZi:5s:߼xbu:~\Ư_jqz] #nQ76|s_/66~j[|+Uhj)ucK#hi&~?I5~]ȯKw67-=Znז-$N7`(Ʒ$~t~ǏbU3D)!#]F{o5~k+92jѮ@<RO~i\Cfk^:@IX<Sdgc\2^F$B^˔ZL` b#'iqJԁt -1D]Oq3O14yn&1U޿xǜӅ>!A5MsvS4#o a^'@?P22Zjk6*a{D5%rt84QJH.ɒ~1?Gz+6'EO]7 9ӈw(ea4DM?X9a.gʪ LŮyh'M?O=m$hC3jD1pU2!am:F>F艚L2ٙ1_cuGRfL䉨).J5AfիDIVBΉVw,khn0AVnJH'Og;$g):k[LJ X2KjKa<&Sv ăo>L=w{JBPդo.8):0 '1FNKՀS4jx>`xS$h! 8i>=/MΗOmģsHyGKY-CyF u:u .?pD{/'QSڬI0j^lJPg=7>|FM}ѯ1fBTCjsNDS2h0/k }<~kBCg~:j:m#o :FD*4.l]Yl6ni#dcu?NO׃EK559tR 5 -utLlΩ6NJ|!6cg 罒vتnQ Юhh Kw09vpQ㵴R4 aSy5FfV3M[_kBX|DBS+RbuG E}`98T',SY@s>HݍFlb+pרriJq5*ay<īz X"AOj}!V뛱>LчE'OOeՇOSo,Mr -GsT&9IFE+Fr|B ָX;ߙ[i}nNS+dAR6c)>?6RS_=EF6'ș (FQ;URdJMq9p̓$7d'6V!^Ѽݬ{x'^??CJ? G5 Bcퟬ^ -By xrP s`gb?>UUzkUp.+?+,dz_;N\A[br=i(zoFw*)<NgN<(>/`Ij5|Bº>I fnEbv oe„/ӱ8kLŞ_-rƕYh&f?V4ӑfW&e#f[IZ>Ey𼒧0-o/zow8քCUh0ȣKG(c6krWj#d.Ѯ@%"mqHKH_JZ=^SnO# YզeM}:WKR;Z-W#F}KCZ͍!:/UFzuA_x{uC(*U13 I:+nVKX.XZ ?.8 a͘*ajKxښP_PV{hj,x<=ǠgM'I^۷]K؟!˷󣋒{GQj? \ %7(͌ihH PjO _{G߷KH2qS~^':0ƢYG<a/'cYiVH|cr$vitBzhQ^3倥? _]/s.D!x?L- ^oc.x y!]b'Vlr[[ۮ>ۏhg%Yk'Ll=}tE$5gLkٙ5GuMΘ.gmxR&>Iv!Fc]y4NZ~/}:0i>[8}1 ce g3]>{T3̳}1J}쳛a6նD>)fMt8YǸ+6_N9i2gwQqzg8==pvPʏ}6$7+-,o'ïnm|_}63]>;3]>dmy|AV>7p}vq><Ϋslj} I(1.E(:^|zd?hz{5A=1zvzts[Yo}-> s ap]#RIxi27Rp*̬+(S#WG &@RWP%5߱nu Yq`@*,@U< U‰Ux*ƳnVij߲r.b$gvWxVZD:Z?E/7J V`+!d&*<o7\JѐȢ6RЯR m"|I9&g.8* =:rQҠNxqXy,<z >0̜@I#D1ԧ MD.)M jJٻrɮK@c6@}hȨ){ ; ߳81}|#ޚUjM݉ < "mG {V,%o3xgBx9g(mM%,LlHm5{gz_BG8o/i%It/]%6k 8܃g@VD.FU:_)V]Dî?楷=@s2"Os! 
VE;50޶^owEF^nS4e.65$"fOZFZ Yۀ> Gz&#`<<9[ "˜3S348'Ed:qHA+r#S#dV ösHI1WYbcaI)z;nDa(O~?{{^;zL~/}5{~m{wN߯G}-p&|:f9c:R1f{Q1"Kx07珞1taLߝ{x珓DfC>zW|n5vOV;l9jF;$~݈{gFK3cv T9 "0 wkZ#OE81wnM:x>I9W%X UMDʇBHmTPvbρ *4(VuvxjGR>(7w`0>$I7Ʒs ` ig,[̳t8R?6}U?}eǨpȂGW~#}}@: c Ů2߷WDP0OȼZAP2XhyL2-47a( 5b8"mBxEF 1|Ers2@~x `A,άAuһ`I_A p3j$Bأ1;* 7%>L(`y#sl]#ض5[.|@A/|I\B-{>*j+$#~@F &ݕ%(jpvZb)*L}Pv^Z)# [t dk/9 sփ-2 vr'* >ezKɟulB2LMc_?'Ɣ BMVA~#`A 򢿣+Ք$YNnj5g[i1zodHke^y^RznՂݒr膟Mo3gŵ}z;2]e>fĞU\zxĚ,bvfP^U熀r.ҥaŧ:7|n`MmZrό^4z!-ܦvc8r*,֗plo[h~1g܅agt18OL)U*jqY\A6# "W2TX b򰺻5[Ps\1|yoyr찍'5fv:#5 Lq&G.uE\!M,nvndhf?'lƖDZ^]VӅEk:t 4k!k_M\ᗴ0H!mRܸUսyT?j:g_kڼdTc-^`& Zo 򠦿˃S[5yL4Ln.0ʃL|V E Nc Axxfm2L M4T֥'M , &nE?oithĮEM$<~P'\S:8 p3od1AF6;l i}Y֒u6k6ScTB=|"0VnǺl ]=I̷_m[63g>nK[}?#jFʼy#K3&ß9mt@?kJK&#>L7CS?_1gɟ=e= .?b:G1u=gtDKܐ(iȹGeν9sq^g1eu;K٨q{KP^@P1siF\ZUǽwͥe5.ݻF.ul`.]A_{5 ]JAKVǧwwrAl7}߄آ&E}1t(R+jqY8;:9Ҟ8v S,Brڙ?Ҿq$w~<|&:y|L0\*eW5SXLo!BK 2=KD>`W >X!k124Dߪ ooke|n /~*{[R⍟qE<]`Lb05q)/p]00f}G6<|~;ɷ-)3$;Qi ?i (\lصd/3UGbV\qedu&Aռ n`| @ңhW&GzkJL f&A"҈5,ki bc:`qw #o&a HO;rMv%paS+ W[Q!cp+ XW/bWьiFkЏ,g .*/Y% UV47A6z!GܰB;z6Mv/d=,LaN'梉Hw}?O@(wn>dSH# θ^A5gGQ-q/=cUD/zdC>?%[Θgݏ q¸`$7;gEoR3>A g<)t7p~^gנOHjl2N>|Җ~QG<*e/gݴy7dsH|gF iyGg-+i+%Ai?0r~N Zfcwa9{a96Q2Ըr\*_&W|(7Z a1|.A]%ɖYSW/%:&#Sv cjqۚ=sd|p.ŏWŒ1" Y ΫhyS)":3bjZx^%B+ (&, TEPaP$BkxLT">@}m MȮJQ9sec%9N(} xk=UK92[s;C1=9 8<YJfG</iAJ y|Fq1æ0>8_/Rs½q8?א@|p%8f3'OO7!9]t]1y  c Z'`9eOK`13F=^Ӻo%o5fHk2̚q ]}?4?z`.#e-Ja9Y]ѣ! 
b6Uuț"żΩ1:)^^@s!}EPLտd_w~ۥCzRUpr}SBn`E}һW tXD7@_bbX;Ok^aYVE4'i1V/d I .*x|tᆭc˫4m]:jy0h[6xU8xkctGY s+-jwu#0/'`:0#8f[6i-9qe3%N8զ1"+2)3H􍪃yϽ+-c6wC_I߳_1}|y.D]!#]/@CHO?#}=:YmU;Y&w:(yf_X/ixuݪ>WaNk~/AT'EXfv4@e#5csa{^Q1`(E8x^wI;jak6!BZ틁_C,߾՟wpOmy $o\2ja&9l]3gP7J?olp7 5FeMѳ\g"P]k]*v; L|>csv3kDPuk6!a6U gY^NR9{ -cc'FZ5i}dz'*N."Y: WqՕG%E跮O=3__{5rZWvSWOΩÛY)NHx V|a}E__r_3}M)Jïyd} >.E_?of|:Q|cHU5Ų-|(>VxFaնdB䥳A0B2lS]}LJ,`#OٿpVUg &Ë(L`ڈFs#cI,{)n~BV zqmg׼ZzSPoɻg]!3 4=@ 77͇hnq  4{@C;x=v[,@oPsڿ㌆^ׅՐS\>Nܿ1˺mE菙߶ǵ6)cwI-?ԸnDͿ"`K> >Ž>,ďNe qOHOI2'|Z˿e.bKtq{~Ot\DZ)?f+5ӎBŽe}:q-;.Z Z|^꣸i|>$F!ͱY.YC/Ӿ z:nUV[l%}?ߣZW>TqTF|H |H4cwsNk#?dO= Kv)҇38#>RH>b:e *5})#O>gGËH@ԣ1,}ح3133|I>F##[5# <7֡ ;_.ϫKym"OOc̫[񦮦#{|+#nSG,4z֨ &T>k cmN@GL Gf[[i*u##Lk#S\d۟g}$}]h>a'IIOG#jQIa}D />BŲHEm}$T*$+)$%B{̳WkGRdz>"۴GkBG,T>䣏 WtpIP^'6.NϾ3R:8=OLuqʩ;cA\zf^ 撙?F_gx18XK^gpҴk 7ExzG9{3W;H!ގ:4.);"(7)5dZ--I_k?SRhxb;&ODށ%0G F㧿 ޻(B7kopsZF@ϑ7ǦHz QoQ5>mַ:`" 'ʼnHD}$\vNrwyh(7!HT}S׽#=#G4KfȊ9< ^ ^nHX!ŝHo#S{`-n9ډc׵VXh:\O{Z#4X0Xd[ܬy&yFlևH/f \чqTt-$u:bwoXxf|)?g, u㌡a{DpPy1O#d=0wiNDtk ҞRc~hS3 ڔll3]LГ5΃}@;[_I51: m~t`s);%^41#6Fn%uF{ Qt5;0RQ%Veܴߝ <9'cNcqQ9G9AU=0̰LZLI4@sL-lfjTKz}yy`rX)Qg0zd7 hαgZu 3mpt==4gmtQ/Xr'eûz8Lz>๾|e߇±%@S<5Q0^KHm0"'сN܆ǍޗI&ڞ_dLuG_Y@i1jͧ0N[i`,e\́T (sX{~ORaN4rZv#N8U]})o f2٬7?)o"z-8%g=Zиx8U_qΫ8>|~g_|f}k>u2?Y|vUWuj,Zst _W}泛^zHӧjrCF9q>%}=?Y|A6F,DCufn;?OJ^=I~$ ~M79sǫko$K ̏YKq\yɂ9#ח93(9CuhlM=WurF'rÏ S9 #!&US4x}U@yŤ~U&s4C 8ߏhzpn8oI8ߢ3fR꾚09d{`8R\Uy۩S1[C^sˡ/}[KNX:;~{3k ΤY 3~G?ߕSq>68~es4;poa7Gc/0t"ͪ=O<$ab22IBGHYGG\ų06ǸQˢUk)q9ޔ e! 
3CN㤘/s، ̨`+-1d]ԲfS +2bw'kuvodf}F_da2׈Ɉhj ѝDp&6UJz:whIWO1®SY?6IZ6LQ=0hÀWOLI3赔ޏڎ0t"q!٢IΌ" xt$$p$2J _&K.:VWűX"-0LN/Q|l~ӽvN92gNKIѹ31 ZAX5ԤD]3䔦I<&᫃ Qo>G_@'z#[OzDMyi|B,?EiNO65#/29,0%pG#G j.O1<~_g(D'z_qwr#q ꬲ@a}FȐ2Fח 2Z7 K%s%lr9@0"!G2LPBv"AȐid䊙\z]@b=!WQA`G`B`yu3P$ wF<ːOFc` 9 ϛ r3djDF΢T,R#O((FB*OO-gXi^,KlWzϵ6MxkiqʵE@,2b =ӴuQ{L{eY^{aZL; 0-S^6Rmz^# BW;O2 /^[IuINoi< \~u"ѧZQ{^{V+Sh^h^蟧z>"Bz\(D/SȝY1 Oo ɣ:D맍i~mM)cƝl`EMR=\GIM[2򳴗iƖdʛFV2:@1\ה'3xB'W>Ӡ/"_򿰜do2ޮ)af;GO2/0'x3]Q(©bÓs۩z3z)㼐B)m%$dL1kرDMJy*J3"j@嫑D < HaHIN0^Ǒ9;*K"V cTŒ*3JbbD,,}YZ1t]Fd2 \ txOw2+7w'[jM /n~R[hWD3 2%  C d@eo, bd0-L=>'LrU  egx +c7K~3f,ϖ}gDM~>;q^ ΩꐟƱlzDWQQʒT4Lobۆ{luſ3?wy;0h k罏{w{-|߹+Ƶ*m!R};]~\ckv}yOmdb= u~867Ǎc-OciL64 #|,k> 4~7Z=!~L1cu-^?9)B!}\oV5qjX]n~ޙQόV>#3x uyKΫ|KG)8,S&#UDRR>[쎉&coUAVI.G*aHNJɪ 4)I0Ȉ# }.;}TJHMԘӸ[['G޹rȜkgMx s8OC25:Ƞ>Ur@R;QHo`n&=9:|Y'w?/4w5G u:bȥSWj´o>W c⾑ھ}d yxr/Ѝ'Ϭ"Ih M#x[3taԠ}s>ݹMԐwU BꬢDőCFҜj?cg&N.ăVp4;9|>4JeMx`$ԧ0а9p\SJ h<R2ˇ+EZ\ah88u0rhՒOi&>\yk b$^R)xЉC:m_%ZwT:v#C'>7Ewb9Iíwܧoe;t^)1iMmI#"F)MҸd'+z*)f-3S] 9 X.QRWױ=j9OjUsm-5 >j(qG19;\31UC]|}&sypǃY=VS/=LVsFת5YP[=@L`bG\p,7/*NBޅOK' G:YY:$b)x׈{JzN:%y|XӀr;ݪlu@tBg+?"ieZP]=qY~C(EL(~4W1CLn0/90~d0rh3K﬐|7L*$9u^,7M=t/Dr,2C-n^^ZV!na]@澎LqneT!-JbJRtCg[g^# YD5d3H4?主|n9 Sp$%Zs6>Ky17zq2ۮ'p"y;"1bk|k>ZUx@hsNB7/ߪ ~TE|FܾG5 )h^3Qۏ|wK_) Es)L=K>k9nO,'6gos,6nhXP35t"h]<<8 jmMCnK&MHm1dݹqCLx sg)ܰk|ql]gOC.V韛HL( lr;;)QGZ'M٫e9gq64{nn}+[jW!_3PvCoo7wZo'ZӋ~۫%ֈWpgR۟v[h|Ʈ[  qO:7& OL1=;Fo̍uUVy!#햝BmF;;?՜vDܞ>ά꥙+$v;6غݗ+IEԶ6pH sV";VC]e ܞ>W!)l6G]fOQɞݜJC-0s6 |;C_|`:' w)S"oQ7B0++5T^Țu@-\q 9D ҩcW+A 1emwe7&AɃ5)= xE:RO UG*bR*1W:%BlCrjRyi=!ϦifxVjz)@OU]zִy頚D--Z*5lt{USyΏ.hwrAn>TR'ԧbl&7e]j:u-5`MHU,A82c+E4H}52 bCJr"[Rd^g.E楁j&OQ)0D'm` 􋷏x*:,W@Δڥa4҆H[mM`M tLu1Ii@C}naE @JS kӌmwxxF^0&?,5 3및@Sԇ쎣߭YO/dj93>^m /[-{NԠ'\@==7ӹ==}ӎۃ ,4Ħ?=8_ .oUI+BσQ^xFO.*t_4wYl1mnŗbsr6?" 
Y9{E8-&;e藗Ɂ{9&=$ۢDu+yQ>@ig 2ȵ3.o>YG;o9U6|jXA?2tqAt) u g 2 tXtmj#7h¸95ЋڅltoƓpJ?FS2ka{f|avXx6 ܅7DCc) r*L&\m~/Ƒ\<yҟ!"k-@Eo hjܑNtaw{ZI {Mȹ}wB CZگM"H~r4O%1i<^(E.t?ՀP'Ǜ?12dZx4|î_q6XpYsw˵D?$MGMbxr6H֓0mm`PixZ[OLg;ĭHL U= _/ėA0^O//DM,c/1".:#@r􋱪"`< 5v$ 6$;ق<ʖ$3Gb5 }$ϑѝ5~QF &Ӹ;ȇƙ8t{ ܩ"Ny'(}x15󣸿gS_nHE_]'Ew뻢f/N?:d6x6Sa0!$9p31ĮH 3cX h?6sUM3Vr*SZ%F%B(jZ9Lδ'/G0-yQx:IM@LX@`IF0U*W :֎Cϵ*bgulM4xZі[A`Џhg lU#p8:jK`wޗ= <7]{}k orohW0t]lEB`݌L*9\UQ!_U+4 gi 0C[ 𶤹5xh/v(ѻthRY]GfhەuUq̙ 1΍6.M$C8FP^$ݎ %|\Ϲk'KnsP @8hto:beϊ9 A|ķp6rSQl?D9?j]!=m 8vAxm4:mjܒ0n ]tkj5i6a3]cYF*)s[59OIH8c;/==M.[T<& SUKe&o.eb:uq&)3eS[2\WGB-nׇ8Z[Ա8?^7.3i]FK(nJw.lIt䦇W~q,"-W> R^'D&<:IŧKuv(H̉5t{乗4RE==M׋{:ثg2 :=x`΄K$uN%OO/s]=BT[ )]Jױ[97 Ž&{E[R65jA`BG ؂#Y1-@F<8#Y$<IJ>,11sIkOqNL_OߊT T{Tzlz@2Z\$"1:6Gg`;NM9YNi8KKE^9,f+A{b4YV=g#Yq}@irO'9͙c :>KL='6صIM gry8OpKva>|Gc]bBB@2ቋaB(hV/7QT(cv78b 2o^gvķ8K+\|' jgV翧YZ~ЄN} md<>pLG |+p,UܱTE"a:8YBcYIh*@l rw>*cZpGnZ]~w6$T+FRz\<si}hzGn>k mvu+xr%2ݩ&^a {Z2͞iRd@Nȝ8Q㏷/zwn>ȳftbnމNLubZPH֋71:Pr%/ՍO|cpa.R [ȋXʡk+K撏.2݈l;ÝOBB@j/'5YT&By7 Πt^Ξeϫ*Af<ҍWV+KGC_ձيw:.6Ea7f]dΙMq`hT̢Ic#(\_ g g^%a=&ʉ֎!DC5Ϯr791fF@~ua2ʹ:aӑP>._ר_9P(7AF#:ʋkL̑l@J2l,[\Qa8ol^> a̻<a~0ǃy%>v49@Ӯ̊Юl%5Hg!z!An>аI4%Pѡil:5]kQu¬WGG((.d6M՛^)9/  W8&żtW_]"NVȀ߄kMFP1FP& Ao@]e$hWZT:H>Ge$HiK$f5ҖV'xeu/ Qo"9ֶ̯!"uxTa"DzF5"׈DĄkDoHkD 9ODZzHzn@%6(p\~Ӂ׎<ڥ^(wR~蟆~iY`=CH'kDo7шuiiJ#`ӈy|$N$&"H F"ћT/#;HUv,?_ogćLz) KF'KэM:sE6\x5C9<l9\R"键Ӭ:#͔j#,eTG{K{-R?ͶrNkmf5J0Pvlq1R8]'uh'_azeAѺ3BOߴe5<9GeV'ŁJWNwb`flY&0U}0v0w3epfia%|GDl:4.RJԹlzyWSoVY[tT|MjS'l)ԛey&9:@Qldø4ԄOf[ڰSc+w,&7"RCkt GIV'Rf~/mZwFH3o1 (CX-'ou>Df'Zq.@9pA>[՞[J֚`e_+;vY.2lL7vzHWܣ g%Hߚ펁&:ٸȉ>RoEl.S?IM `YiBq &?3ng@"BF[A݆̲͜t()RCt(-x— KğnAE9#M!Ɇ^6h,*/P+0^s!DSZ$:FS'h"F}H5kg뾺Í=1L'ԜjO{!$nX JEvsM].<=g&r,':hs6AP3-gd_s~0m-gO7GAj;;u7f4Í7N5ÍfyÍ_JF+)K9$~H9'ssɐYH1Wʰ͌~0Fc

Fٯk;y`D޹=ڏ3Lfh̦=sUrMPyU;lԸT#R6Ȏ0'er݄CE#@O[wlg. r體sJ~` i&Z ` v68{iDZ g0j{ig0ylߢO(Mh zj-fRW!&^ɾ$58PPէ‰\Y\y(b/DaΞu{uPłCj|[`fDyg?Xf9T#> &/bxEd}KH_%c$\mz^:8鼜bx5{ `0_tH5__'XQmW/,$/?51t(w9EͪG|<'#Պy~-uf1.wki-]uC4 HAJWҽbo%Z#p w@ }دFb e[1HFًjmH~( 6rpF~ NAdC?0~Xgfp2 3b.Lv?-g` 1g!gبK[z&fC/nqp* _46)>ڽfTй=lY}؄r9:$ Bם#s䡋3-_$^$i)0e#7_tb. 0Pt9ʘH0G =C>,Z?TR7O1K hj5{łoMA@_Snb-1>OaeHR8ؒxNr<%O܄M}imEPrA KAߡ.Wh0G'lȿjDUUE(]Z -j=b4q0Ür)M3 "&ʺM9q(Rm~6*! FR-Ai{nj.Xp}PO$-..Eb*21Ao`p]̛ots7|sO-4BdshsmSݤKD\981Q@4)1н:6Ŕ_ է-B}yPɠO3O=E kӸ:p|mZBukP/k ˷+k]۟w='ߣoZv\HapwXmn7w?pRrkHe7&`܉֎4j {޿ъD(CD#9nUoqͮfl0uW$R*A+Ѿ>,`7QN1(]k?c -Z >yH9Ab-LO97ăARGn#7d(-:F2׬c0gA*ɟBa\)U͈'[ ʼnEt%&:apP\ tN qzJ<^fCXE'ٓrZ /͓0ޗBrupk)/!#7F =_q|30NNo2/(  * &0,$6_wiSj;4)$9lXix:s@R!mO ɀ3FA"9U8>ؽĬ}s4TMn:;*>Q,QiM%6D$ޔL ? l2v+j m0#E;8OƲbő%{&tb;h(+n9Jxs"8\|7c,o8)zOm!A7i-<ȟ: Qn A~) WJOGhܳ_oʰCc77IaxҤ憣dk&JPN?Do}%lgw0R&2WQǃ|(]?Ͽśb!꺨Шڵ;Tut sCeP VCiwxԧރQ<: j^:r5kT:v*ێ mieW!^uw8uK҆_?**N@cL+eE\[ ap #+@U}!sDH7pG%51{\n.!pKp S6wfi _dtr_?L[1 yt˴'vÆ֎<CO&sae"*k);t@jr85a pn;X4}:@=ԩ_/~r :Ф8GW(ȭ R괌fPTQCL"|PIHƣƲj/IJ*ssfp)5Ƙ/Eמ ȍ S?Y&yLId(YR,pcڝk8usAv@%eՀPtLrz0) ɜә !RH tmK>WDPPU)[D 3jAOv-Ɖ);v7S;Ǝ.M52eGA@Ő: VVrȟWڭ-tu=Ok'+ɟɟW3?^]Fd_?[FO/џc.'OOF՟S?zO?gԀȟ 6A7c4 [n1Gq5#2 \dMqЋ2هP( z96$Uڧ|Z{M-CWo;I6 :>OB7O( HP#AD2j\ݼQDM~=|g;*IPUi=YI.x]oKx ,x  I^>V*]Zw]E %kW7\M>要>._xI15*.;^aʇ4˼3o]}Y@,Sl?ȖD!#:%^M> [>dߗW,| S{k]uMk>MH?a| ,nnݫV]N QdFqg.nZb )  Nk7)[Fp^svgMlE=,$]ҵȣuˣKHGKʣ7I Oѥ_.+|h/wrkˣn`|M?|#^]Gv,-_ &-s|Z\m܇ ݵLR8|6E-M eQ_E=y,SiuIA4`IQ:tp9ˌ&Cg)3NU3 Mp{R$p v],\:4;a!P}`I#JSQGj$@-)dz%A1JAJi?FB;kGp9(pE.\_SttkV<r6+l :ħt4/~̧By:xN9?Iau"aEՇ^kDPC`g[14h;θ[5O"le3![˥O׉AܲnYvW{9 =qJJ7~6H$tڒǻUc;y}QyJs=ŢAv]Ɉ ۜT4C~yМu7R+HҍfEu:CMYT4 }KGw[a<(̰u+U,'ϕQ8U%]]m;3(ש峟8zaLXy/ 5mކ7;\;+h>% X"}>H,= WG- RA_%}H7$:KCTTE7vrպߑϣyLԫ{-Cgr\%)(x p6y&7RSY-LoZJZ5/o[GCGKBVḨrʩYT,r_$}c ,}>uem USXXBx{-Skt#24F{h7jSgu멯f}NY`d4FbtW2EM_YEqU1n;:Pݛ{z>b@XGd^MN_׻+݃Y"ŏȟ޼̈4h01B1o=vV6Fx=k`y_DVKtY.r4Ep9ǜ>W|htW#֔J,cs é|X+~Nlu=IDz0uJ)Q,[?kxj2Ԛm+5ga69-v@Zt i\%ƛ{M߸=4*㤘UUKңi~T(7o7defezhU` 
?qrmbD!M5y=:F#8RqfdڜJq8hp@x:)|2#F6FK2?T7ŏ@*5dW+X48V5K12?yTxsq.W$#-!ۃ?Cg,nT,[_nS2mjFMO[2%(akOfqqQY-albb8N={*ZS랊jHFCo0LH KkI%VLޠͤyie;)!M=mI6cl; ?OZ5M[Vdyw .`Ok\I?{Y/I !mc&2)Xך%VGTefqw<=b kwGǹ4v<8onE>G9D/KkspvŔO< xl&hcpPm .}Q; pgq!t$ 7;m -wBw- nq"qM%L08[ZzȽV4R,_b ?{naf j.Jhę>cEdXih&zNaLnǭ0,U6*[!lrfPQ+q[VD/y(f%)5l=EcKHҔ5A* CׂhX'}Ӡv瓬qC7#Q+U /BF۰m,mZ<~`|}y^GZo_H2y~e00qa?r ~2Oޑr\ @HpiζF(\EGЕW>xχUtRZC<0h0t!_W!є}пjN}/irL{ ¥h9n2Q4!/Ajq⓯iFW%%)*~>BQcz! jŲІ̀M|wUr9mGxOKw|" C1mn%cu'Z'%+~J6R<#V\Hk4?eZ\6SUUB6ҳ @#LEպ?Cg?+_yWԁhWyƳE/m!QQF]_x w*aiK~ާOY-&b=ɖI-uXk>"ˀ91UvC6Ǹ v^%d_A% p/AK] ):,^ H< &JmNlzp75 *DȫeM+,L6\XΦ3urbwpr؏T4+Y<.[QmA}xZìka'[Ζ,tsmRb=X+wϚŗbsRqi2/ "cmg2 㵩h8ɴ+ꐶ?bWk!^,nġ|0K.;7֨y]u/_z~ loBN?:0C'ȋE[9K^Bk5$D 4Jl{Xk0ZBԨ>Qy(L0m chUX1gbdIcD"F[ZJC p@ mѿ)Ya݊ƙA>Ox> 0!=K.C۩/jR9EP\;v I1[QC?h&*vxs۰)Vuh+TC IX^( q2dڭ)rgA ȓݏ(+Lv)=~G7yI<@^C~T/K׵W_{ g3U2߈8pA|W$_y#4kBhu嶯|&_ 4@Ϲ|K,_ ʗ-o5K=s>W/bҜ"/W?$%Wܹ(!̾!_Qo b!y } ?d:e!~ȓjC\B C<'~*9٧ď Z,ϴB߽Ç%QS4qLr# PC2kn`n2;|Ȥ/=j'~!5|H7שq^FDnBpT/٬yą/~ͱ{N&==0;a>aK3`ܖWq 0L<K` .aE/^Yk@2hC4A qV[աOМ:O߿1~/2^" *ifGDyw o 4tΙ˳4TxWD,8K8Wr*:K{80友f>L͍]~|5-Px s 0j )B.Klš/A}K  g^(<Έ^?[z#-ױ,@mz!|#z}son;K5~9@y8ݸa;KO< 擫}bM>&Z805 >Ja^(1p_ - | B5a @s& ?fa N ^:(T/総; > 7Ӯe/_~#/gߗ@zۋ{)K\z|~nب˙7y>6jJzsN&7`S *ùCf^E&أ"?򡏊GFcyvdw0##+V [-t{wߕ>_ja ZB|& |Kk\C\nZOދ0*?`^Kmtz]c u ]o>t/\n$~I<9\1h\}肸⣾~Gy~&X#~_ɠ>N+ +T2QAA7fx^4|;dqtgr1#cƎ09K{>=Jsp>l/QY0u@O0jTQ3\ (cIz$( ^xG. 'ڦ1zAWeuwWQDLB"1b.2sNUL}>>uNթSNS`f|Y(~R.<`@Ge0 `hp@ @שTHx[7dV{x~  Pbǩ^]Wv-~'@>y"8x` Nak2ğ0 s ŽTEuס];GSw^aǛϛf!&6bq4zAėڣ ړy e&~ @.M^H I1w]?} M!T<&. `׮o~Z u`աKp+$!brd}__##'0c0/w |Bſz#OV+ :s=Վfҟ0!bȭd44vWҰ-<-).Z/35Fފ#dqAmA ugB] 98oV=jմظ_= \|%4,;u 4'R$Pj(KeIZW}J≹4!*l+̓RU$4I`ԱI0I`Iנz= , Tю&hUΙ6_sq+ #b'O{O9)O; ;Sxx<,\Y߯pKW-|/L?y3̪FwF.k1'I J?M2{`Dxwl`߱9HcD_v06Aac61 Rcd˄hF{1,yy?j~>u.-eDڱѯ*AQ2h[㗶EAի)< w#[Gfc)^0#:r?hc)0Kψ"l˜?b6>˫Kek|q%>Spr3|QkYЮl~n1z} gyvm"e\gQ_bqp?ugZ||Z;?rۗO\BD9@ r$qM?^n/~J*s_eg ,|ͣ`C3+/s 3Ǹ ;dʔn !-@1jn & #/s-j1]8f/|{6"s:< LtŏFe\:!ȼ ( (=1]dW] n0Q޾sB\-Ő}v18|Kj/ɫm⻉S˅oe(9S/_f. 
E )NT1ݨQe !e޼:jZ:9#umP_ne//f$U.`x(m$ 3vG$;ix)m4Fū#Cwnj[l[^ngx}'E>2/[u<'㶟H Ln^)IdoIXC7]usbh㍅)*'9.n+*D>QwkX *$[BK"K[wZz<$k}i]m;"W_6Z#+gO&KqnՓUErB:ap)?QZn}66o5ݬ]grjMNRz$n!A*„BI]#; w<^ZKu,[뺹I2!`gpqݔlҤEJ Ϟ)eo{-Y>X]X !g[}4A_?2űeac (RrϒgZzQ?ͣF& aT$sQv!pC4Qۼ;Po5\uح\+pJUlS_$3*ABF [<uݯ]gsDj"y'0<rRR ,@g 7-| &DـD*ͩ5!\,e8Ծr^3P3l?p+ZXSmtJ}nҪ5>ɵYWL"~V ,P'QP#$~^1gA2)ᣧ\&y11] ɕR\zT-}KxB2N .3}<\/ᢷɶL_ DAϗ{=*3yϔ¬)…*HT`bPZTß]"L A~&*Qo}-BUS+~Ĭr [^-(]<ݲ 5C>&l/^HGU=@Z(ɮ%LQJO]s;Fi-X.[D !=@C?ˀ6Ba^kR Ї͡x jGF4=قrӤy)P>="@RUFO V^"_1u e8id J,e.Y/+Ex(џls;/yF__ꑯ%-9"KV}\)\ۄ:S繞OC[ن..} u~PP_Ԭr~4d+"N*#qaDZ(7-!MZYit+ek|/AJR15\o=pZ \,O?1q%;Đ1^`(!e)ݒ5p.<*+ak Whn-5C< 2A4{"=Ɓ2Ne bO&}_yHVSae<4SMN%wڔ$7[z  v95ғs} urIX)zJVkON$g*kSvNv\ʓ6݁DZh ʄU1$p"ky)[rPzjU).Y{/{cwQ{C}9nЙYVֺm-?U կDg1!@WFd-upPh Cc@G4XW0 ]ȩϮ2x MirAe>AlΣڛS Zv]$#nҥ ȴP]3a*_\`y' CSzN?aGvK1(Br7H7_A|ۯ?D6_o"o{U}^/mW7+i)¾_Ͷ2kWeU*.ٿ _ ڿD#e{<,ޞ7@6c/kYf{RA={9{Lkݿ,k_gdf΢=_ #_nwQ`Vn[Y\Khay -đ\t+5٩, ۫qKf0WpִĔ aRQ3r 4?֋RR. |hxhL܋K&`L2g`Qz[3Jdh&iAɪ{ҽf"# Kлyn7 ]zhH UoH+Oޒc!h،.?yqhE+Ewx̻Vi [_}vEu.Z1γd .Ɠz#z z=˄_/Ge ץecmP @i'QO- 9 XM9{RfceЦjC^,i3m3_4ۑS4zS$;Ay{jʨpP2O?XIjLZĨ0u ץevYQm:vO&Mp/w;STi`%SydH[e3v{*%¸ϷZpQw_dYfbf/ɚ_܂Jx)2Ϧ<~)jsEڠS<0Jkg]<YSVSQ}Y<3΍` In~^QX|0oI׳`j6Bt./ ق5*G.ZL0o ’#$2$ž*ڪGX2=j<<8;% cC7XTs!101q#$*0q $0%t'öiBZyP>Y7{2S̛6~VD;cBI mHHnD~bD­60xf(Kzoy~MK+mN-2(W,>dy/қ,o&vv$[X軞2 []wby;Yyj ݪYR3<ږyA7hFm^z4@G$-0v7.~OeyXqܸhgNx|xzh#$HNx:bkb;:4 4^rW[r/_eI*?Xkw 5{,3|oiShPvRl)SLMepTlzuOgs'K*Øxe망Ot ]߉Iт1 z\jX2^Ae~CDV('-u:hCD2cvz-F 0ni:dcÁ60kL`)ꃖ)- /?H.q`:cT?.[S;Ĕv Nc.F\ZXKͳB%=\,PW+eXH'yC9Y13:=C:Sd?* j}Y-%#,EEx}VۡR0p"]hSYw4!4!_v~MȟX ^hќ Th=+4v_DP[ h1hX޿-役29mVjyю9g^&p~t ;w[򪾳|a<)UNJԹb]gP95QǷNŅ:\2+@E$FE$bVZx`|@ 4{Y^+q* *3vD%B{Ш:Rˤ7s*nb ˢ,ayvYP}?[ÇsmeCEDp5eŰl Fplsғb-ބKEc2h)W~Rc%+8]ځ _}BW} x|5sLәS_n0^w/kc~xڪvn)>wwM"9䯧+-=K:aCnT|WY |C"a`ok2u&mF7V0c ߩBڙ%'|S# F-feQƟW74_=5O9h3URSrz |2bN4~fj|/px6q ώ;U?@_c0#]GT)Uq%SOHtc OC) ~ܯػ[C9y7Mnɻ)_wUcn&cʻmh;nhʻVy~ɻmcGn[ydDP/ʻBwɻɻGG0T]}yZ?,gv$ ۑw+:w1GSy#o;]/)Z_ Kmwk@U3a{1SI}JG7W(ZH .$SUӃP5 2WBQHd[1Q?&,]61Z  >x0(\6t|ľ+)*+%VV 
9JY7zr=6=෢&$+Mozo]YȯqmIhotSB޼5{-ؔ{:I# 5o|a o8ƒӸZlƒ؍踃e#1?o5zA5+NetHS}V2C4P4q jT=IЫ]g\?Db})L@Ԯ9ba/~8;Sln<' K2rLZBD?azŽQxjDP[_&d? 'FRs"D~w}f DlLPL} =>NARNӷa-gbn~K7;C v|b>kytHꙛgFz~p\Dc3#b,]3BSc[`J7)&!&d7D'qfEkJZZY&Z3Z[*=?Z~Ch kʡZXtoLØϣEO4HG#ԥwaz&(tR;Uc~Y5=f֟߀q'1Ő/MǀA6UN[&/`6tO& D03lqzƮ a i& cMx{zШKYey9ަ[3_5[O,f2ޘ[ =0 5s%CRzgz8_J$:nTފ L_> b n$̫ Q:3J50G߰9f==1IY]WXH(4 /%{LŖ4SyѲ+!a`nzyh-!^~R\ "VoDa&A㩿 b?=cbOB`l BdpHI?!HH]&!u>L-ĺ0e҂Ahy`Zl&-5BMZ_DKGhbcuZ޾h3BZZ_2ؤ3w -|j0${ +Y]6$,T~rg0ePYL姱*.JJA VyI",שNZFeE|I%fa.gqkS3vŐUsg8[im]44w*fM8.CubxHh;MǙ0q]f 㢡>~ âؓ*dD.j =p.ޣ=1쨽XUAMc~9=yg~߅4@e&fCIb?I1k|3(hĞ#e4xyi0/,Mt4wPDbw:/pձ1 hw (M[pc`Z {Gjk+-"ynIR ?&-U uZahn2Sԯ3iǡh; 2ne8"yK- bEZ>`*x1ä3*8>xG-ޔ`ZR ZVӒce.<4iy}i9ߤes uZ-+S yԽ*8>m@-gJˉZ ;LZ6MLcguZ'ZM AI f3Up|ܟDKrT0ii~h=Oݤe=_vF˼&Cߤ3 Ryq7w֣nij1uɘ@aE%&n n!^HK#}cޤFР-o3,^J^-xۈ|ϟl8c~?+Cp^z+<ҏIVF.7IHa$l~& Y_Z?bDiæ= #7ۃ/o<('",F$o 2 ~&-IXoeb?eLFZR]ϼ- #z0>_#Jǝb$̾La;Ll7umo̺l'PovQgcz0t }4M~,@"n %1^(PqZ? 0 h߀b]H6;|e_&$> L# =Ve` s7ִ`0S6У7y̼}:fadկz>ɼO`q~yW&h)o1&}LZ0SVz'`Z)@Kϔ,$Zr$nIX~_eh'|Zzs[ Zlڹ :g4*u'&&x+Lvy&3&&;|!5s˰XN_g;|.cfc@"Mx49Э.Xh9 I.bR>t:-go!Z0ZVr+"o2Y Z^l:D5٤eFl7\':-73Zn{ fc2@ f귚[hy`ZJn@Lde,"9 @˓ˈp,ɤ%Z&L<5ѠQbXhy-)Ƥ -xAϾդeJ $n2i2ouZ$qb!>n6iL^-h))>I65[hTo1izhy HoR^iD}i6 I&-$ dśp+0_D gǕI%+z?3Sy[$SQL6=eM .s#(I~E p q6"Ġ@F,=g#|_/S7dכKo4c^}/Ȥn jN GzdVxOV/),vzыi!od"(c֍5 J20Y^;}ӎ)s;8FZX_@gOL=-eO쩐F7c~#]Mu EV "9+tvL osA$7M?(1sw/31Ya-T}uQ[Z;!nX$c q35B|f E.úppg]O L0gmb<=ȫ#GЉl =퉻]Y51 }^˙ x2X_oAu6+y+*0+y5` ڦD+b hI#m]C{-7ǡ mpM&--6t//o_3\ſ܇hF[Q r)O.FlaK1f8fjfbl3;g)fg.F=q5ZzOklgX^ `)skl@C;3veZ5V>4m=ˠ}M1E!qhoЊ8{C&<mRT^ˡ& vS:ОDh:1h 9!o:Ў3h_^o'uC m^0C{րNKȿτP^m m:dBzaB[.Oȡ}0hU= h)oZ"Sqh!%&94o'LhǮYDŽ%"4 !mPkLho"-&0m'^c@ʡ mkm$B*A^h Z>B[c 4,fbf'0y3avN3&4D((e( "%J<{?f6*WrZk%Jb%Y%5 LĠ-Ch~bfChs8ߛЪ9m %m|Lh\ œ9 ZVu2ТFf Q ZC)Q8ڗQO1h_cGmڄVȡM@hϚ^О 6U\ŇrhqhgynVқWTiT8V/Us)8?#4,f1h%XkB Mh9rmf4لV~%ClBKТ :ijO#1fvbv}9Xyvof'1vc?&>a7Mma/]m%wjAx(>&rv}Z h MEhaZZH 1fo1w13/fgfg0 WY1EG #}{.xw& .d hWZ mVRk3u~ f@Dh=/qh/#?6 A#shKMh39mB{C3}n6N Ӏ.[3=pk g@̡@h,^:Ԟlg~my^p c} 
ݺ;XYww{`!7]fxl.OtZ؝}[-Ogsy 8w7ΜTi3fɡsM5CFJ Dʿ-îHkP8D[~EW#eևцd/LTfMEV|R͞Sաj}мエQ]_+ǫuL:I8Fgq1a'0Ǘ3]jd{V~%S-^㠏-|z}Vŗ?Gj_RYBeg(HY"Avr#'VnHL K'c@R)XR7m|oRt弨lpApHe *ygo6h#ӽWAyu M[I\FѨEՏICPc,%u~k|ROA_(3XǼS0=*y/N@"#/Z哼--GQ[SLX綺-g t<3XG-0ȨH $ܮrNp h4+T71%});Ee;HVQ{/i=Z U=W$ZS -.[5gIQ;͇s̃A'gU4a<& %xzbjw@Ui`\O`+H`'90p>CzFKbNډ _ 5}A0fnA|E87EOȏe*zR6Y V= B3`nmgJQ([Ųk;`U&U& oΙX c:.rf 6glȟX'q<~AzLl b[τVR%?&Ⱥ8 ,/h˽ڃeGEho S|<#fJbaX9ӛ/Dcdzoaې]DdR֭R%)eNvXQUh`%esGP55=s7HU8: dڸ>%B}Lm2M]&8<0vڤ&բk&l9잋&&:i9&ŠƦOWu##9bv>>aCiq?ҕ`տ _Y X壦_Jn d/ބk v`cS"@.^BLYWV0~9;Vz{gw9t hX*.bH|Sw j?"-A߀|ޠL7=TLh6=/]CZu@> A>&2qp? yտgY['*XT" 5LprWz =Ę@姽wwxq=?GOFʨ]R*@dXzf¯?hu2K}뀹 48_>D˂Znᴼs(-l2fg-/hol2ed?3ZstxSk7NwM' <=+gX6|[e!.60|rU,}=e]N~Yz)7[].\`bke~t'_` ppyg';^T#`2̓'axʞ!|ԋitv!"5 7&_ {UŶ/̯6@[-Y0/Rz`fqp3nL~E(&jDWfDe>~=O@1KP.(6 9ow*){y37l'Jε:}3wzWM3hg@lx;]3\階ZڿAHc`WbWh:w.~΃ǥ+Vԯpj 0[Y.2 ~Z. ht5? ޖ}6ZajbI]0//Y|h^X;bC,;7-kgƋ϶ix̀Q@0*ejue g($: NaL}k}:5nk kyDDR?Rv gYg󱦳s-a~PWk#n_K}̔$yaBYb)iW*2oz |!::2sƤoDLٌ?n E,+`-"J.ڪ`@,ގ\2('9[ꔬd|H@wv!7Ҡ}ni͠}{9=Dw )kĪ׿@RN?T.Bqs{Aт;( k%v. z B?J-nՑ ԑ,ttG`g;bjWjs(>|6-o5c{QPp#dֶ"Wd]nu$l\_׉+X@[$4BzY,K5 EW^1!䗰MXwuF9i&hy񜩢:ph Jr̨L B2=i['{m"^leVz!dBc?Fʘ\0>ߊtXkv*=Z{Iz}pr񁷉-*/Cꗫqaa%E?Ծ·7[P:)d1N[t[t[txl}`liLpNP* AuȂW?W\KlMlzRouߦ?6Yq:Zˠ)>a0YEB0x$|jE?p< c |o+( ُo>)HS(ݢB>h'Лp4 t_g8`x?(mu':EΪ F0F~J޲h0׉'q+#/ϋFM״@{aREpoGC*;d4PJW'KoO|Ɖ7d6=PC |+~zQM; B /|}_aBMÄh@1SmDbDKrRZb{+GX?a an| ٷ"[e[N8f r0=_"NI[ OFx^=̤:Ks]*lCg5R6j_ɿٝL4'aűJ6%X&KΟsefP΃5/loFE!k92[M`sJn8?V))i"ty:#?K9XIQ֜*MlB.NCؽwJj]Jsxz$ά:u&Fqϴ"rFq#怑ZbI7/_I X vA!qñ¹}aPl=aG|lм'Vh?p(L?x(W1HB\e"_, [9gZ9tz7A =H}:P_PS?=F:y7l3rhX` {-fd )P4G ClXz/S)5%S_Աc{V+5Ik7ʣp݇m`A <̨ ? GL9uEz#ZȖ1O[]$ De^Ye#W3r1-i-8KUĂYkC F<*N''d'RſCT(_v367ZP^A>ҟ2ب?NP`%&c>]A;3׼JT[`F ơ>^.Ļ}悎!(.wf\ܘ׉@ӰzZcћW fqfEfw+َ a*/Q77šnIdQ١W} zIXb\?w& / :qbS*q7j |=^띒GFZ{.~AWh?˄ 3`uM=-V|˳ylJCfW8\f6ϭv^ƮWDYَQ6QQ Oi(zo:pZuxL$6if@Uϩ/ZiE&e_蒫lk_ Z;h~gޯ__Y=0zm.e4۹!hEf${6(w˘]U5F(.@4 <@ ژ@rZZG[etamb 9}rE>fm}Fd5 fofgk[ڱ;zut5JÆ }TM!W3ZpE_"]XkЪS>J5-@ @j+R'KvF}[-? 
_n02^vט /\nYɽs@g%^wȜ ?mc3XWT$*Dʭ>痨aBb֡ڐV`Bb7PšJtY rd髁rBy,ZG+YcԸ1E_Q4bMyq5FgaReW%g=gaΤnωe01L_ Vlշ| G?l'q<9Bl h&a7'&7!RCe%~LyX|c6zRs+r `LVjhgoQ]f)Ie4ؐ$M%3D %` Ӣ:ŽBfDV݅\:d-o Xss+MC#Ft0vbr#MμUƅ\:/&{T/$60B3#U#6b]ۅeS)|@%g6&TMkqJVi*N ˔˄>vOV$}j]F/l?cO0!O%_6VEQ&p;'l+Ж'yhs\IU{1}@+Pωw!Av+e܅?d=%AC&Jâ7D Mm6:k3mQBmc+82CB\F3,zc>4Qrƍ.v`q5;d>\܃~CVdg3b_K 0sPsJ=_mZe KN&`  *vM[[5ǛlՇ0\\DnfyrY{ANh*)0ʞG^/8;o9 Uqj9Y )x<WwJI˵Y@ # vb&r6j9Co9*62~m"%+Bf\ECG@@YlOg;anAƵm'L9W*w$;!;DJiz'Tr)*e$g$%ndR8cT)0a"c*fe@dBc [i+i }?K0@K{EQڐE-/qX̂v3?&w4Ӻ5͸ lb%j4xM:q0gQr|YE+bkJY* iH#%-xοbNN_Taāx~R#{BeP"BY1{wn&jZ,$jڭQ8DW6-i="m_D>aY8 =ٙ¤#mO9O$,nYOf Ƀ3Ytu5<g ˲͘1]4C*Oû:xM> ~(x ~g+x&wXOmX, `d`MN5:/Ur77_nOKa|Ym |ڊ/Rk[UgC '#~0"Dcf 3`}k=լZŁ{=)&xXvE,c@RVXH)kVSxLGyl| jXOނNΰQLAK&/aN1*1U[Y$F] cN\Fu̴b9u*K \|( ?#t ̂Nޑ Yټ< k9@Yv|8bda {< f>𖰿\(x Zmǿ 7DH:[14{mpur^!q&^!lTV*Seڄض|NbzGs"|\D!N& o!iXYi>9raf %o|}#,돾ٮ d`=WL%>+qaM61T7kO29wGx*Ae]84'! <8/΂D#ccK <)G*F4k (_ x& ~_e:RHIw-؊ocƆ;9X->$7ܼۨy}Ȝ9EԀ5eb{aM uD UFu_B$+bӫvH){0PjS3|WrC^osѦHm6PjА VaM%Hj#;[~~nq~0! 
mAQT%5iQ!o%ݔ v@.S68xV6p% 7"W8D";Ja@N% 'sjI&+bNlG**B#7v6J;²5:&ȮV-J@I#3:iR2"4LYH858&r`ޡJ@hkŽbݲ` 픕 Cۉ#0UTi}"(]H7ȝ Ka2kQ${ÿ[Ca,eAN~z7k7 ׬гѶv("/ ~9V$ Te4!:+$tVykz">rlIm0F-z+UVNE+!.N-^Mf{@~အ09s?T%Ŝ2N%m?_@_O@ܶmbjf[ N^&yAgqȸ:!ϒ|w_R*d𤋮[\ GG"SejS`$k##*G(d w3g%&'Epۢ6aV^P/ν 59:G"CroX+NDp=[^H l՟DU}ITIK ßs<3=7bd/~tG(3^3<)Ai [x.9 | #a}4>ه}f۹hXXStQ2*wr0fHB.n~3cH_fWVaj7kQIQGz/r&+Q1_U~ߏ#c~!,_/N?>atC0 s:)꺥+NhˉPBo cxtEGv-esjrAQGoS^3ף `3Sv*w2 o Q1(d Əſ.ڶ8 V JPLNVJRDS8*;6F=(O=6ڭ,dQ w%qI._!ؽa3V | @ٙ,MeI~ >#xڠnV$*;4LȿRZ6f.C3`d]ٺܯ̞$DzuI0߇.1-deg=fM#y0ﶺ8u7sBz`"]>AY0~ !FzCMNO ˉ<(S3N M鎌j*|oUIk^?ɳ 2C<{Bޅ>w¿lww}+{ w8ÿ a^{mog[}?ÿz:x=?4nHOa{Z=3Gؑh|OyIk@FN9z}그r 3 ĥ M\ѧ ~2y 8bQ'A"{IJb\_;ڳCj YxY0\D#~ HGfkh*"{-5[ѰDJM-rDj #/ 02Y߽5;v(`g6}ofцv7 `=oc׳M.u"",dul-ՕIAm M]PEpWs  r 9T] Ut (>-ÓYoIƃbNrb`U!|$n:< /mKbcz7af텨fpc+ V!Y5baW@L<^ƽdĊ6JyvelkCfA@q{~u䢉GJ!HvCQd`$-:",@'0oZL6#4f%k\+kҒ] Y` Eu/%;4-v*#0kdRMD ףF)sF%jܖ,CyfIhY]8 `)U9Pr[x06[?[ Zk!zં[~P.oCLN* I(!̵x':: 0 !f:1m9i+7"11NT;+ Nd<8*"EWynyH@?Yl"}Er{ʍU' N1MVdwGд6bj2]Y}ȩE8H@-;KS^BL vrêGݹ$^<'߶gIiTEʋk}?aghMamDQ01H*C h 4<]X6 M(4!/`3>΃K`'q,2t.u)XLh-wFT@Nfl1~/K(.ބڰ&fx[?Ÿ1̟0ugD&aHW0f 310w:n-% t31;Xn~sFщdᣴX2V!%G<[V&%7xњ0f =_S=~5WM,-~J&HDv@町m-uQrČ0zědW֊>8nڋv`#)GzI|_IxhL*+ȬtV@ioZHr[Anqȅ~Ztxff5b[Fk r0.vѵgrAg21½[ hsFI8Sm(E{Rh*bM^w Y՟ 3̪敓/x\@{=•m?p;rNa18Uz@jB:5 Y_9Ex̵N>/;Uap9 f/ϭ3dm:#iQ@}<ڍqbAT>tRcȶJ[<{nV"[oPw'n2tVi4z>Vn,D1 fµ&6ώ+ _M(yk4M{#{Ga۶m+ yoJvV!#EpٵEY+I)EA6?O2a1 :tXN2n%ֆz |r 걌3?hAh .?X|oɨb B-4?M|4S$d`9 KKi#Xc!>:` Ķf?Epj8FV|9ݕ*_mD>{&| wC0\'9>x #R_d//g>G[1'1};Y1(Yܣ8w\ﮉuL6#eU]哬 ,V. 
hYz4Yh#s,s$-@":eЍ3)_KAח/@DWO6qdb%!)E1r9tes杻AɷI˰;jyZhX Yxi=nK(n׾" nڵk" te'ĐPEs/_fs;1 )Ye}Kmm@2$vƹR|KggI6QkhXkLY}_"W77Ս-gKd`r+xզjc56)Mü=N^ yIڄx/&΢/bcT&tTZ^4JXii+c.t۵n, O?--ԢUpr" )[0.yOd}!v NGiM~|4O^S%OvٵsJ9߻M ~zXc z,Q$3+P}6j';%-'o$@ex=(ڙYXwly x;*CzWv]\*lCf=#.!FZEQ:3SIc|/>0'ײݸ{ʣ҆ 31oqE ">(}fv.C YX.@$]'.b`o}\Jox\aG9 XAZ@m];Po^4*lH~Du"JKB1B~{,#e'2՘<þ7"j/- |L#dP>:xBx?YV,`T1/[uѹȀ~;Eh}s<=K>Kڑgۓgӳ$O^h;IN[hsIU]{eo$55!qmYvVXI|5thrSWdJ#BZn-Rj1/vB-nGA-Uڢn:$j z儲׃5ARdJ.O)`)!m)Ѕ WҲg P*ux?KQ9OVCiGR5dqI՜kk_VR[w OaC=xڕsPl=@[Ȱ탅sO84AG%vDԱB^0*;,GBYO%t6r nnU+,Kʩ㲪9w˶۪>xBlI-i9=/){O;%h/t_Ԯ9%`}:\& ΒvRR6i8,֭HA?!Wlۃ?|hH܎~z^-4@YLBj̵UCPJ[;*~d _0$ ݂a4i㿳=|gԑvYewT-f@$9G HH*3vJl++-r7kڕO FHdG Ij)xeRmb#M'0-r#Vѿ!Ga?%\r *}g"j&eal+܊ |C^ϙ2D4@sH^MȷP6WhO3:c,Z1Rt{q͊"rU9 U?$pҷ%Aخ*.zp)mŠczi^ɽrs8~JM#$ }yݶV%ȫiM,`r֓B w9e̟W0~!\ v?]BHrUnEvd(ӭ&.Z1c39C1bk >[I[V0*kHSWpv);Qxs SKscv>@)$qXԙq3 kx]mW8rm$A|~+ ;FmJt g/. ?V+A E߽^LE?h ԩn* N:F- eԃq(fA=cu1 " 6kR+;/#.y@w#~Kd+Gv%Ӌv<ƶ )?/^&~ì$sg,`jIT|^fhwW5-}6$m$*E:umQP+,OWajNdp(cæ*XJGJ~ÓE:}Jd'5@/bXR$-sˆJs9r4r9+X4X0̨C3IXSm+dl2 }u\UwK wr%GKC{Nډo7?$U/T"̎htWʖv74O6aJx-%eaDɆ&*><^\_\^ӓ}-t?-ȉOA5&S_ũ! ^Leh2-x\Ih_ޥ0;la#3~F>ȼDde ʥƣlrjt>Y!(J/Fas_SRFf(E>~3d&L|`#>Yp9ܲ `%(Chna7AgEE>_‡ج(80~:7~GsXI8,,KqzVt~U\t?(%)E %d9ʐߴ _q`v]ΉFu󋸈{RCK,'ϒEߥ8 PN\dbaƙ0Ezu$J\@g&%s_dQE+a0m[p`U8(JV`^`Vy Wg{A=*Q`!{_=AKMa,:ßShq)fTJ^>IL_z5>qS 1 SxYM߀ZT Gƒ6RSB aRP0=`8xv3w|rVx<g7< :{7ԃ @Br U}EAՄ阑fHիWČ2oA2!tIK?,:L"F25k,1ָm1m+,S`'m>Av{kO3WHlvQQ;QM J5*m%wr[rbnwےs9?< gbKϰßcRڽDLDY3:yk=gnjҪ?0̇i?&M&Q$$o9uL"<- h d֦삙 _P'X? 
fe~4~T@tP~x+# k!F ȾC1wAm'?y+jgiaE%aLpR{*П҈`,Bx/Oq1ESJϰL#D-a{؆|O)Zxd%+46([0$\|~R2Paz[ߞy*m:V%{0NK 7?<<.wxniO5k["N*\Z@lΦ5h or}@Hr' 3 qb Eĵ3 ]bDW)A\w +kWR5[b{~&d$q3zl&kb"b{qu.i!Zh*\ C"hFYd8+ȠDe=$ /Y˝SkmŧV* t=&+̿T$tEzDWo~nSER;?1FU0, 6-k*XU*UZiwcFTy.]R9@]w-/I*w}V7 'QRםDQOح$xY4wr\ 0:X~8(ÓoXᕼX[y :!ЙbrEO&ي-HNF鎎'ElhlaM6G(tR,.|?lx}M:XߐwŨA[Ex'7ggS!yE`B+~1m!K۳џânWՂcw){X׹EbIUM!aID6l+mx_KptLEw⊡Run elPEvkaT / MVg:pw 69WݮXD`-S+%1v2KeL>0p (k [!MM}/|>Qg!ɭխ܎PO?NZgai<f7:' ZQv6xϺz}$vN痄֐ǀɓڋàb$BΕtߩƃh!NǞF$ڈbopӣ}5䭱lK^hIމSJOh 6<F= ޑ)E);SvrM̖YV>Dd 3`V4G 0^%8 >c@qw GZhH㢳T4jW[!}*OL*f jh6:=M,wŖ0g.{)2(,ū3]_ `De[p䚵bv{܊Wɾq5^<; Qz71D79 QIlgdIU6~x\"p $yeٵ\C3@g:uPpl+';[`'_"ÀrtOg]a!d0ma0Bp*,B$MGn!rBs ! hu~sPmG=*,A,Q~qGh-oƗV3t%[Y~SG(ݰ?p&u]]>cCW~S8~K`$\ deoC! zT&:ot6뱃?ձ ˺`{ ي,w\!T6ҞgiH+~,n؂-O:USu4,m)Cf$ /,ˤwd[2:5.dd=YNeADz&c%lFftx$,T1x@ rVdlmd ,{ $TVyd>,HYӂI#݂Jڐmo*]nn}pwAqtKIB 5 ҵm(|Qvzn6 +K`4u-n0 3=c!Y|wVzk;\m42@,nAtF'O*mNB>EnXjM{mexV&.<,Z<%f{ٶژN GM0ڍ<2 3.+?xK~οGm5~3P avtHFƽo&[#3*zhB]h,hVrtBTBW5Nv~`ASd'Z,w NqrhT)B2&[UIHxwHX Ɲ3 f{窓L.r9},/'^t#EC6b@Jn]hh[Et5ϹGbLwa{6%?9-'ǘKMҸOK7 B7{RTmm5’[q*kۻ0W.MۓHgqg{Efz=T5/%G[EG mNs4į[^q E~UzH(pYK ߹*+łD){ǐJt~$ڣۓKC'ֶ’i->`mr=Q$+J#y)b5X8S r8%sV'sE-:nDQmH96~8a!FJ2DK,\֭'Iv%_ݮ<%&%3@^VyE={-ؙzD8HɎOF+#hxɢh3ŜA&ç,Y͘_FH)umif-lI [‡AOenvZdHl[?؉t`!8l^[&%g\U$;{Nd(B~hd"_-(#J& L:u@QovuvWc2FY~RJZob,j)'Ey4_ !NrWPA[爷Жa@x#„7ϊ$L_ar~j9Wffl g`Y$' $A'(vJɹˍ 1!vb`%d.C* Pv̹]m 'T: ǽv  bv4m+:/f ډo|YAA3NaQq.ΝU=Drۃ(}a<5 6_J\ Km[>l)"Sai4ֿcEcC\'>ֱ4}Ȋ2}yw]R =M75Bj> ^ }yv|]\ᘄ-ӿ%X]1XatC0z*Ckoˁ^Sh^ʋwoê _?JXrx Y/r2&Vc<}?9YL?>珻`XJQg笷#Sb2=&"/79dX.so_#IesUR_:ʿNgSv60᭠Jd-.{UdA6}k1bЩ0tcMG6F&4ۼ÷:?ʋ@=;KߋbA"UIr^p+GL9%A{=awk}+@?/ΐV";q(iE>/=gYo`LG0]AcF/ƚk)wx(D"-8tBuVgh΁,9&yKdFFd[Y(eX 6cf^@KXۑ@RWWKz,&o¯;2PXtȞq:SLo+OB5%t Z}&td'1,TX0i&17DI?ɨhQV:iDYeVU&]54YB1@~Ask5:WaVkگeEo0עT 3=4܋}Cw+(R|Nϑ:(gi,G!;~ѡJ!jH~<3UX1FaDƉj|@P{kի/FRr0]tE0\Rvm다C#ޅ$SH?<IaM[{khs&:Q4̺4V{ s[%6ߑ\i[n)xHߞ36Fz~.}hylMi;q<8Ǯ# B7)E]~0t(93׍ gÇ%v6AYm+w+%XvI6-Jk|Ø񍢚H7v2eסDEd&9+3VMovQCq(mv6c@%:4P5cS6֣;GPd-z1')WcȮfxH5g&m(ߎNΈIM9auI8P=2YvUpk@r^CYSIt,˩9+-A.lCqr}j 
)Iv5-sQOU!NI2Jks-2?<$#5$ܝH&hܵ՝^syNN7 z!=vN9zz5q,A">f?0ڈBtf?j'i oい$ tAGK=ۭεAgvw rJT^h{h Sb<̓InRoE%7Crը=V֣;FlU %+G 6[[w:Er."'d߁щL78YFm20\%u*p"_V&qj=׷V Tl'wF;=Qh3Rd]g+Tq:^IߎIZQXOWƜi{Ӿs]tY8xVeڗdzdFQ楑IoF/Ic$JŒ\Ub hd?Dž hX@tɮ](Zrc|Sc8ɯ񘸮UWɯuOtV:}}ձ%?kP/ǐ# f*:̯M SJHzNm;;j0I=ү YTF'U/O;h6m6rH,r&tDm۱mxVy?:WU֧%gٜxʶ\j #}1tPTۭ3dGn |xc77:aY9 wAT'KšMt !gÂ\f]d8cL dW,3>!0<|i8O/dI<ZY=gEL@8SiZpA5/5Fܹ9 ?Ӑ2_e@dw&'@WE[HegӽR*]=P->Y~Ӫ3&ePxڙuɘ0l:,Mʼ5<̋vQ2^5Wg_RKsőI?Y]Z-b,ξ$/?],1Xe%u]כD>y"mNVtwZEk}8:䱒k; a7kt'|C/nQ-흂zg=bzd1w8͆19<߅ c[q-=7erܩIw(Z?s=]ƆZqBo2˄'ql3d1S5|~CݿG#좚e۔ SxɐxZf$w!yc 42 _x0=tѐdTnXDeO~B?+|V=fqylc6$sI{H_͝I=,;ovoQ gA_A T{!͛j/V4sTqY=ܬ|-,d>wmVw+сR_Vq1^ r05=l|r2-?A8o/<=4O¿bt* l$J6Ws3d^3#o 2&Hnjy<`y[uG5gOPBz[ZE҂B  daiVWݕdIQ7f61 Rot%{7,J$^vHjR 3*f[]MI)cE)ֿk`Pn)|}S4/=q *- tx2f퐬Q ?q\)f\~*#3 2U2i_.|݈v# x073W=ű` NwݯM$ 0 ;ĠH1Au/] ?΁`Zoar=?S-Y~ե׋n@{1{уbݜX^6H<{G[*= ]RJ'2HӊTCs P5#pgTL Џ#'9$#X%ӂ?{AɣBTg L _'kƋJ|/TxHd xV$@jNj:v-H𤢛gGs> r{uy#*wҢOڀ6߶k3:JMu7Sw(KpCߔ|3gh/Ō l)Y{~T퇰Rtm'{aB'F/@I18@=X?.f\;3}ZE$Q٦?*6A2pt$NJAogu$tC_t{1(wRUhm`'ܯmmK s/#ܯS;ږ%4BY!e =}GQBĀVG?޺Qb[G?J`CJ?+%= s&/?.dLbF\Fxрs5Q%wHk+U1Qu`/Ё?!=䑎_tuKN6w)җ$3E,w5i gnGV07m?ΎsrXV@E̪AӑfFsxZBP&ov{JD+㒳短1;qqK3؋q{?`fm΀lW:e g&"r CLנ)\oQέvv Zp+kipbFVy䯊yq<},Θ'~>j]zcl]:c^V@ ci-%(>[$`Cm69dl)squB`> $i®zfք=hƩ8Z.9NjOzlԜqnIq^v%¸21ɶ; @m;hiCg!036Ĭ8Ȃƈ\sJ xU?MI#j bTQY$ J˖@UP AȸJPI08;Ȍ{ BXD ,p^uJOy{šܛzI5y]Je[8r2ϐX)9rXoH 4w řek_VSEҴ:*kʅ7uM _g~Uj]*R~wIJ ]4h1:^ہQiZL4GscwNZ`*>Rp; d]0"TL«9{`f>,~lz{3]㨏S/7S 1^g߆}j㍶Tk!\ {OkjL7Gdfܽ¥c5КV#.@>[fԢ튦Ly, 2~+J߈&fP:;cHb-EOa>Y^/cvOP1 )?MN+zOqм'[*1P F8'[,jiŇZ>GRP d*r *1?GWNfI-=ǝ6 u;2v%;6t &W_p+q|HA(z7Z3_W_쀵sk;w[;wB@l\?u ssҒ;wC>L~#ig=W}]–INcu#Lj -g/Y3OOcg{"[T:/s C?:G8k<۵D:{5c6L[)?'q4~ %q{..0~O~ * (1LGL}6o,sdQ\T<{_p7"3͂y(QH`3Qx13 Rs^4T7f^G%׸ !?*0K֠ M HxxWO98NOY bXӰFwPo@("7O: 七*Hz?&˜9&kX8՗32-:^q 2Q9`Z-`q!'գjoq,<` ^DXƣ8f'#hKV֯j~ {(ef!b\9b?#Ш?;k\D<7E eh*'Q}Oѫ7(W߉򳘚"2Z&QŨ%=ȶg7sz 6c1YlS>ܰ"=A9;DLHq )n,%?O̵9vcӰ+TxZcWn;!o yT2ovY[zbgHK_3%OnΒXr=c.9p-]zy{,i[Z#߆t {ΐNk0&:A~sz{"Ch٦V*p<a3@}x04_(JY+;o#ɱt:PV1KU&\|i( V82|3!ṇ-ypLgy/ RC q#ò 
"2Fb90kEYc{ X|r}-BA'T'ljg|vākLQzOhg}[ȑd>1d.  D 8֖{A)NBQ]4]4Z%PrB-P,}1]*x ,9f!(pմӌqTKx1k+t%$nkN;|f24 XX TShr iS+$“x?w$'sNҊ5YRg52T;j.|PccOc^DE %6J|qX>mn3b䡿ar{AoZX7f&,*&%^D"WU;B7x Տ? 1E?^h+<&KG41s߄1sN8o3oo,_#3p+^ƺ_[<_7L<h)KUY?3]yޘ߿?n7o$}[fq/vf?}.v(Q|km ̛/G{7$uGU?2X UU%]O<-ӛҜ#l~ÎFoӓHLP?\x벭n$rcTI?|8񇿩2#*R0W9eƁZykm)pqZ`BݠF%]xZc/"oTivbW1p遐/ 9~*s?|G.+>Щ ۬}}}oނFzl~{A#N\A?y&ڼcqd}9aIG00y 7;s Z. C6\ZT)>IqɏZb8ɲ5l*p%p{6}]oFc`C1C[#oH aGk1@쭒+Mm3~ ь܄Y݌y/U7(4hp" (ۄx <6~7+Fvmh=T ߏm*pȠkc̓fϑ YҢFc=)6\'|H|Ƈ Wf`WŇ+k">pg nƏ Zpݪ;d`-)`a0'k[)>=?>)xBgm1)L\Ꭱ+0(F\f FӜ@/r,Ff:0\i<"y..~9$\ S"\Z{/xp8>?!ֿD Hc xI|Lofajy >.x2EP|]0+eYV.K|;vNM >5 p.?<$>\oƇ[4@Wp}3>r4ýşW~n>=pC3>܃߲P{|Cp'D4 ]0cýFipPhr4 ¨p,P 8(\6,;(ܨgcymGyW =qOvze)e/y Z)8(g-*gqpiEK_K#pmVȦuªU'=-!\Ri+6>jvŨYޱA̮5fD6*Vvx" Jي])" DlV6E[C[x#ާ !R#RC_)Zob_!f ?ȅ (>h}jkVWݛ)C FI(6"\p'Kd@[q5N!9),~]8emZl|}'=YLEn V{ލ!xQ?z+Vv8 {Ž+;^V!(mTY qEPYuOZÙ-z ffz|];Bݴ[ M U2YΣ'{x37z Ɛ {2~[=2`+1ޚs BYB,pZ*mϛ-͘O YtA&b^j5N̈SVzozŠY`}[D<1 6銼|o6M vnf01P?jݴWY%eGd\M"r1ӧ̆azhTaz-Cu3}8o`LB??~X6fᦗz9G0~X- &AMU9^h_S 칞~qB/&}rJňTUWUʠ gooS( @EޥVWOȰW da.=>)4u>c {)ZuWqHg~pS0Ka`.ѣR O_/#3+WؿciDQacz4°y }(W2ʀ9k=N}}+{0@Q&Vd)h8cm2ú_Kgtc^xFl{gxF3z|_fg^Klкr]AxFkH8ƉB'ʍ!1Qg3JT#ּRq<\*h2_\ʣg>9]HONd7o3Z,9 prFg-RjeoY(Q0#k<R mgH)kخx>X^^iCi;8яTǐF\;yᖐFC@.u񌎇3ߟ T}4Rye# PЊ64=RQ%7aE/ O .!^04YtE$<֐E᱋&u\bdugx 3~W> }f>?TVw)ê7C52;2?R(Hۼ}AT譨nώ YVi#Ԇ$~Ш?8_q㬞yqNO|Gq?uyJ ^8|82M]xq2>F3yNBy?>N' u<[_15k-h^\N>}%>C+Eq*s\V4Z #Rq Y+Xv%>N+@qR׉ kU@Z y8w31(>|@q~'tTe G&ŊAqNz/;Ho8 VT$-Tc3쮅,zDw!QZc~'@o#?'͠$((I==)BE-ׇb#WwLf[QR,>pcSj/Ql#t\fhLHbj+7_#xyMƇzC[Cl~_ե[ft)>?3YC:{$Erۿ˭n4͢{BjŤsKɎ) a<.1M@ץ{Q/#[oP|l@NC-@r0=ӄ0ʷGtg};io(OcU1XUqQa&ӿ[ C4oMh"^D2/Ͻ ei1oND7"OO[֏HAwH!o))~ K~xkS3IAչ/ثb;^ʏ(k-xZB uQ{ߠcc\joq;! 
Oib{vЂ'~_&8ů)O\]BJ~lN]QxoOWLq<}iQ|kƠ8?+ėAx⯏΂'^u!6sG8x^d3)%_PϚꇧYa~6MaS_1: >uw:OaQ0>u0":G}mGm G Dਟ, ]Mwu+}G1xQ4*9£^ǣ+& ~^e飄=fWBwsCm$$M£> )Q50P\\ 35 uxS%:DnmsVG@5: i񗧩' GNk˪w_2U;βYV2Y:t4t4#)xrVfKe^Bk&1`ueA,^֋f/KK~%_7B=!A)#/+[:[}t $Hz=!8/ /9Tߣgd}3)=K*U"Kmg|HEҗtlB$.V?<=>!>besxjiկiQc-ӯ,E_xt#9r$ ~e2ؾHˌ#Fs3n9nB߽!G39.2־@B_3~D"x_Z!W4JA!gC x+@ZunYBh"=/7>p#?|>p >7$| >0?L>Bj|`0>D^PˎA8>|de~ln0>āc1ׂ"~|/Y>{i+>U|W}_|㼺w?-ӖBHvN{#oӄœ4DŽӄj~>p x =˝o8^(yLMG1J>GM|; Vy,V|y$">'6 Vy{4|9n˗&>p4~m| }k4WӥM||i?5x=e|)`WM7Y4W/q6;V{;HW|wzI(-yωC cIUl}'ky+z% ـ/f8!a"|NJG*zT SCCJޡysh*?vb?u: ]6"ׅGAG?(Gg7 Ż>v3.m<+]^wWa cbrn8;Ƴ6:=u;!a^0]hu7LCV30Q`Fu w.< lopwf~ݚ9 : +aݭE VS=b#˓>dzUO |Dķ3y8Mv=ķ?oq0!F'ʍY͜Low?\TF|`vY-~Vo/M| x-Z} hRu 1i:2(3+s5ϲZ0>׳ D~uSPp+BS[D 'eTü"MlB9EڲM +3^1|=MnEF|ϧ0cp|+W Ƿ4E3U)) @I}tہL|7^vo+EVJmRRxJU],dԟ'SaZ+{E\*@V7sn TNT@4#[͚l[]&Ķ*M|<ķ? oD=6Le<ǷC`lg[@:Ubv׃dј"̓fv&7\F5GӶ}}vy`r"|cg,ƃ[D`AHཚф0MUp'$߳ Ngӣv7((㗳wL֯;Є|>zU>3(D>o7I-' |~۽M7}M];S-t $HMuW&~o86/ o%_bGڌ{ell¿O̖Bqh*lt`&4t??M`{½Oh /fkUşiݎFbq43M̑~ 0W/>Ya4>7fȝBHS|n1a^% G`|#$Sm> |-}>!W>%96==ckp|mˠG Le` 6Z´d6T96[1 7~*p_Snt3̀ݲʨ \q4D[Tx {RݵKktc?EXrR-^0E,u۰Wo,K݈N Ʒ#Zş(9N˰7G k0Cuw0{\~?}wF(Fћ;u٩+J :N55z bDƶ0&Xi0"1H/! 
NGjWțEdv긡'aCZ^QYZϸ* ,}p~MR5.T7>tǪJ[oVW$#HIg i2M v'SIm$qEƎ,M>U T$V"ΐqmd9JoF<7fe4% ,xnHOӞP'7@#_1ˉX;7g6..zRf.~o_g ۂ&Ŋ+a\Q7P3r&8Ӥ 8 vV|IiگSN'Ŋ=5c|]Ϯ r!BV֬7>5ކ[`ߏ_h&BVߓP;$  ;ުjdPg,YsmKcBϵ\ {UNC69Tґ/'"i!mkw/~ɏ6 qf'Mڠ ;Q=Tu|SZBU@{{/5bjM_ސwIא3;iJDŠPQ5)at~qճnŠ߂lҨ>MUUS<Ϟ3JYD|o\\%;OޯW dʇɛ|NyhBE:wCoc&)AE'x,|zN?en14ʌbI?IYRu3&h_6B";Gt #y&M"t7h3*`ߑ>kȾ.ؓ Cxx;e>p RxsM#PTM+>F/=anyM4?\<£j_߮TqziUWnCs?ۇot+ @`/xoUAqH!_S3>ۍj,IuvqtO5kO1q0n--(- #f3E\=߳6c3-ǵT|x'F=_cb-稿I}R5ߋ;jYf,8.JoX A?0:;yTݸ<7լs.=[%Įb|xws:Ӗ'm%m8~!ap: g?!H-d2:^I'G+*{|p*RSww,5.z__6{~ 许Q|A.| E484'bG6q^='тi:Vzs~}-cm>z$,GM|,a<_@I Pw/ HpZV؏Z\\+oUT|2?Ē쮜iqH~ENk<{iOZ3F U;lYW7qq̆싕O͙dvliwCx$l׌Hњc?4LO BAПR*WG))_ {0ܣDB4 6@ ~EWNrbyr%K?Uh"眮Yx Cs\y_ , .z#^%p KX Dт➄{ŽӰjn{ ~_?ĽMxh/ޫP\){wS IAAt O?/ 25 뮈oo^ٵf/]d>c\LZ f^Qr7߿c|sĒgo~Nhɝ,Ϡg ⷛWNh/=獟Y)*TiN` vw9R[2#Pe']pwS}anyx:'|M+{|`}2؀dl}XX\7e!g YYz<a%f "f VN^px_0%;DGpo~G6 o }'Xk}蛯Wms^m/}[[^o9VX۱Am֭[IoOLZqZB&D;"}!2}{k[[A 6Λ·-!2}kBfoE7Eo{Fo_ED޹[Eɛxto·qӷn޸{QG.I7m#oз ҷYw8}o}-Noz ۽Z]n2zuSWU2+M*1ozRmgY[l/L܋m$\d^-yΗmA i[R%'~LvE;ܸlkπP*FN7:SQ߷ͻ&sњ^d A;oռzDe AgF'^9}XRcl/kuflR6:vJHDgD"A EC?^VsS᪉V2luY*39*p91p4Hhwp$ۓ['iWL:A ?n_O!%Efqf wǫ;M^7e.#\YXS;PzvtqS5=M=كkwGM=b?w]h >ϼ}ރGHE/AƜ~Bc[ϋ{y+:xog᫦%UH%DwfED;vSݙ5)`ҩPP,K_p,ی.h O9w6n)#@wP'U'5XKB/@cVa]<?R|4`.>Y!XL=~l%G;-A*|$z"BB<#;E`%`Vh1ت#e -Jy bfȸD>QSG0֣ȟ<'\oKm@%rqTEj3wFګU;jq7:v\x-[˩*j!&d(z Kx%x[8MS3h`$:(ȖS S.ɫk`U؀}+~]L.a\:mWQʠe Zڵ&6kU-ݹ]+<;J*\/ }R`AXC> +}oɑ}-@8*Jm@䰼}ge= ෦69&N:w}Rr']RSRk &HP|qPfyĴ(_N# R 8TVJNC:{.c@Wftǫ"FRә$W+;5)1N6Ю+Yt 37إ" 6)oFbF=g7UfvF)&.D. 
j.0wezU[ uv`{J^ߌYn(%W+ f;y&k(^Rǽv\L}ۙ;;mcns0rd {twδG(,th@t5VDæh& l6M9Ԅn܄N[Mo~jC-:1(prMc'`:)}5+5Z>ZtVF#j"QL~ a@JJ$9ЎmOaһB?,򌇫6LTvrj)jW7RYjL|bS r 8CՐoaevo嬉R*RX Fh(RA+IN&O+Gp\o1)P<3M,Ew;˽ɝq\"zLk"yd켊F5N/q"?_FJ"JF2B v~鯄y$ሽZ8laeʀYLXI>)E˯_+II33ܯ5 bL՚-tLT"9]R~eul7bGg+I\؎`g\6#$f=R5$UsSR-SLE]OT1SZCIxUul|˩|4h١1Pg"sp):lݣMluK>gkO{g{egzY`oO{^?,O_8W6Bٚ]&Rp&e˂VcY-M g{.k%U(*iX*#e?3\d _"e:f|Ar|42^GC|ldrڈr;xifC@ <,ncwG _x(;yF#9oFʑPrƂx(3;ۑ~'FP_]Dy96ŝ(v7R`~e( QJ/#Wɦn ߂an-e >yY-77!4/lXh,BQaJ0r=w1Ƨn܍}:8BmJ[:rgwyˆ3A~/ow//w`PG~7R " 5&{S쨎v dۓvY]tg(loUxfa >$'#gu hU qE:QzЛl9÷ZBVn|Њv(gJIrq},<@UG,'%<=H]=C)!P@I΁0"@Ap`w9O/UjžZ0.&lcMXۑR /s !dœQL%o"Y0>nqPGmTS֪fJۋ iuK+1V5_wʞ)5yׯ`Nr-No|di)(9 P(=ӱT50IʇT7uTz` L1TwY|)gxHu{'4c:q(9SY!QT3GEIԔE&AVfKAⱫ^6Hj_L `$EQ5%QW\jfBZ1LԵHoŮ>p&Σq0rh!ܲ䶉?A,yiI]j(Ui(y-=jrJ%刂.O_XoǯBsMp0.xw lC[;d&Ys+Ë~#Q2ovˣ]^-=܎AajFY&GO+:R0S'n@.Ը\pܛunFDꮓգwBQ6/ )@H4HBbD$jôEZ& Uďjp336J)dBOyyHUAh-,Z+7o"ZjHÌV9Z 6#Tbʙ"9Jqs;6"BVq†#G;_'`=Z}εx|0VEiѹ?lQ63UЙoK;2w(ڣҩ P"e;SW? 67/a|f@(A(ND'(\ h*'~QݵSWܜVSw'IOX[ ˸B%J.,أ}h$ߝ :k e}% bC";mf#ț1N@7[s|AQy@ֶbn=4#t8ƩbabQ}Sf SCŃ 5:]4\k xQV(!a[7md|/v?j Wd,5:MJ*  ;"K\\}Hz}vSc4%P`[ NR`4asA bޛX  #S`!Һ"VX̔wS5"ffClyw 崨u> SW|}qꮑE`mc-[qO[ NPyh<`=yDU%= '؅OpzF)}㽯_;%=1$_q]dZD2}d4iPVS`e}t6a,69Pъ oI `]}dXU`lY`8U9fn#dIo"HA/2p n}x:e* :;S\Ru.קۡ7Q2+uP/s>S(m75C}}39 >OTEeGrʺ#@Y ;oJpAvZ^VRrE52:I0] h ]]v&t[9tN#ҸnCazP̦K`O@ݾιT,VzXpqe&R PEJ$f8DlBVd:~#1Tz yM]{d0HX0p=~b?2TS,wy05dJq5Jc^s]E;WQ"Yp1՛i!ڛوrU* 6UqTV,ޘw1UFKq,ZaF7Wb{;g&"/\Yw߮C`/bc_|;Wa'tN$R [Y?d}Y?/L_ٱ5k7(ҘL _!"y',k!1v]3Os:a#Ϡۍ[}YVeXNxf[D8{r)67WAB`ѣK aUDGڸcڪ$^{/g{<_HfuA%iEh/gBzỌ}7|&h\D.h{0Mo! 
WQQiP¢]*SrU?mRNu)ʯ0 {G5ǿR7ҁ%= v"Pyb[M6 US1(`x *~Iy%S-KDDml4ksv=ԂoLput"J I36~3ډ3zOS`m!u;_EzDF-8B {#YKi;=t4t)ymG.D R<D@xRM@[|o,2JNFHұ 3hتw"p.Tt|oBACƥ j%VO &9;TD/S֚_X~XWܑe_ X.a:[j r'wWJս6=2tU`u*QZ!7=N"kB'>lw Q-9ވ7J?)xiG۝saV^lDRv=i*$)Dըީ gWpI]?= Z<2Ve6I'Oدɖ l) O|̖]TJ`Km^ ڊF)Ҕik-*z\J=2mXGΓR*0Z,}Njabi-16h,̫=0V*Oyز5Ծ ]3F; [x:YhJ2{eZ80c*JzƥwXgMF{)X pSw' m7/[wc1xbou 7)7 +SS] "H˦ Řڶ <聓P:B!-Gz{ ǷQR*=:ބ8):nv \6 ۏGzN[UqCh%Q\ZEq8){ISjxN;g#k-Y˕i}9畠@}c?qf N=)g!U`"eU)fk˜/0F%"SalXM Qog*e 5 6Y&U.gUv(qN B~iNHqi.QۤXȄ0T)> q_+'6Cy3`,I!yRiXs J8x}"L4{Y4hеBAqfGBnwE>o3^>kO'jV>IqHHm'6rFmE+eќ)DI|X-52f.Q|]>2OY SQHS^vo@Lhn!CId_( s?8ŜGU00K4 [Ckmh;Cb*w*tMW1`*ٯ^ӌnB0u j"  c)Pgf>]B#K9sRp}gSֳ(_j̩3Bl*rNY Lqwppϑ{*\ /w9d%)G`[IuLJ}.'<}A#;]UɁ3gGۚHRM'{zIh>y8ðf~D[7_0u3YAgK,4ȄNJ8':z}?G1ak%CtuvG#&AcJ1F3Y;45*QL MM?Y"YG"X.|[zUɭ)k2dJ~D*eV@fY#V\vU l_F'Hs1Ď(ei[Ƭҧ|CWȉQԔ:Uo_L*~ZA!]QRէ  0ģ@A@퇡Tαq,P2g5U3<8ƙEAx+Lu y;SG[Ѿ5N@xHbL7ޏ_ ee΅t`OlU_+\+}A`{'-h9T(RdR+1eE!\]-JU{y|ZRAtE=:A~Svپ2v7PXT PrZٷVqսkup?J6Q!ȴ/y# *y l-or<,YjP@V vt?N&w#kDDZGyR| D ڱOw&s|EpQЮYx.C::^=˪VBlנ8hw=Qt,GtZa*j/,~+^4?+zӴlꮄ绤WҔY8 mԔ *0jEоWuR~K m_9ǿŐL??{zUo--9 *=c/SܻP@͸Ij`4^)"D>Uԙo!+<)ңG 9`fz&!OClD̥Kx 0_ T; ًbRVCg&b>V})@XJnkF0:hb9݁' ]Jd2v/J9bZݴЛ:c!3\&:iwo`{4 Cď[* !؏P_HPmrCt$@0 4r{9!G`ۤP2ˣ.?] 9x a)3ڟM;@%*@bCj@4G##\Vhd{(s[A&@G! V*exg9eNKdMqA혌ӐY]=7],(>jeS9f4 h/|OSv`A&PLd3[݋ =\7Qwg쥖7Dpd}` K@8Nch6z&CجOz|h3 *Z;c Յ,aD=m, +A{у1@ljJzc,yXyam“J'>}.`{~j/ .UXW\(<> f*WSZig" }*O9ѽvV'0<;d8rup;v.+DE{e>k嬀E6QE@l~ڭ_pUYUYxfsbj yU{@Bx][>qD TlKG;oB̚)%{p'iAdm+hiX>~aKY'C>>ؙv,6Tp֧tϮ7#_LER|bl` АFA"@83PA*2-h 0\}]et'JOb+_0sAowN6ڈM$JUW`dVzsS ]TMiDתz+*b?,I I}r\M9$2AH@i l|<`yv t3wR3bkDδ~YFDe-W+L=A xrc?TGu0;v ړ8/RU;kT0a! '&c R13ûVJ%J+y͐?@:{\lߌ-nhط5jU7%T`)%ߠ\ѣW Ma\l?6&3:JAhMޜ yr&B&[99j*Y4]Q OH(Oҵmd5k$>Z ,*-P3]2w%ZZGͅ_OzTq=?Ţ+sn8ADCUrC[. "H(]mg9=ɢqe˜_wyz+19 Dv0?!+ױF˱``ؕFI=w)HqU+M((-BcZ"a;ZߎRǔrǡۉ=Tz{o+P:aV)y{em"-]5TWO.Q+H7$վA{"Z]P!e6K2N tWº;mW[z?Et~ w>{0'.d-a.K 1í-qK؆ 0 ! 
Nih,0<˓頨Qv/A<=^|V@U}I95@ȇ>:{s2 jގ Jѣ;+yg .(D PI ]nW#DEn;K֦);uuQ 2XG@\Iijb\1Em*]Lw&Dy=ԡ@i ݈ۤ?dށ^vLָ}WwHw`JS}介뾜'@rU wGPP!/yg]AM[&Zq)U>"uh@~Kl?.Yr* __E o弾Y_ӔgEgĺ+oJtX}(D_ ~?!I*:dp2m?4W\8 NfT1d#1`\Kn/S7+',M=W-()\33So -ngLF𬀍y>홅rgѨO3穾e}֙G Hv#]hV'EGJcG;tzq LJ@).-3"5850__xUnɐ Q WRdr9&jaa^%t1qV7|Rxz `Z5pX_75T8Y.=YT-6@VJJyP5\f֕Qh9Њ[׊t(>2{ݳwlUuU UȐ1 }޻h2G@"z*Qi 3Z9 &ow~y,cq)5 :%7l:G`. _,_iEDHOxzR:ȋMyr,-w\Fk ;L@LT=ΎvdN3ϰb9w” >c':yH .U ?*G@/j 4TbXy @>]\Cd%,DyH:lgev%+[V{ub7]grRڃ+dQSxOِ&,~pAzr 6TTˤ(dl3UFquxw39T5JqužƣE.df ?uQSsh{[03LcvB?P\kY+ɂE.qo(FO k"iՉCT_W]Xd$sJwѪpB/U1R )rt~>EStx2.ǟ7 ]h9̞*k $zT 0-`,.&V޶G_\Jbg|}mnAmGXG.5nak}KYiU"h%;a]򟿣TV t)u#+K+a+sg^-R#وi跹rޮԆtBE$^T; t+<+7Bz@W\!pT[RRvki%֨ŇN p-HBTi>U,SdEř .YQbx{T/ӽDUuTTQDZU;:mt9d5yF/g%pݦ tH%>"ڃ>@n̓R p=5aG}P Bxl4,(~ wfŴ91E UQSG 5@p_]q넟nZF#DžpEopLۈp݋x[`N l8[6S{TIfp,>)X[a]ll= 3T ,QB2r4˙gܸdIVϥMGhgts<ۊՅbaRb;aw!˓ֱR:td7͢Dusw M$p")j b]9~CΗFhKl6?@y<nU8LjX &Tgv@{m8#Rvzc,8Ct\(EP@UDRd]gtE.VJ6xqI0([IHt}6sLi9n\E:NҴ" 1@l]6oUQI6u߲8m:˾VD3ʃDz{)Kv%:9T06)[_c72Ϥ]6kB\5{ޔ{Ry1 ~. 
v[^?am>Tb紱eZENcP}_D)Ǭ We¾(]5BDK!DՖUl DWq>8f&ݭ&"`&\17KZ`PԊ6it .erd y܇K,4>FX,TIYEg]߸]ғL3B> =$4c)|_KSrIatTbAfsb,K)`Z(cE֒`^)Gݫjq!p /Xϣ0`ק"$B,K\DHl_=bώCt3c!DQ<*Pg pf=0^'J Eӣ`Ac2 n׭áB[e(-WY=̃J<=rW (TR&i=^k3FĎseHe>ǩAAY>hfɩ*)_zW|&>,Y*@/H)U2 "30TQeKq"g>^lK+}G7@:ˋf,1vD8nG[kxeWhpG+Ql+מP$T16ӟ6O=I]Z;hH#dwጦ^hňfԡ!'&4c6ghFFP*J(!]\3E8II2=L3)In~fo䆚u6za8 Y] $ r,Ih/Y K)ld*'%SgAJ N&5~Lɵ`7qwߣFx"%#DK-(  BU $\dB"=½Lsl:ڈ/np|2Cj DVř1' nv #fwS5ͩAF@aczv$ԍxw3 O/?O0ϧOC/yt"D:A%o*$'YK}ܣUߌeh{`[OKR'm?>s]u-칕D۪O3fgL#xƜu8f )7HR7(tL$kZ!.9LZ'EyR`T_s)1O0P P: ңzڣǙă&)JGj8O4FgL+˘kN[aew 9\M6 @598jZ+h T:'\\$D(+0 A3 z',~1^ټvz##]ӂ8]hF)2~%2ѲGxFitXa,}mMvetd ᪾1>6 -nd.7 ܛk2ف8[y9ڛ³ gK Wr?ee Z#~F:6+u6$w|+oCXVrd ;hL%?v(+ m5EPn%9Urze8 φNqKy`<@Z5w"^ >Þe d^.e7.xHAc L;Z,&8Sޔ}ltj D+b}Ń51Qali !Wa.؞sO(gld¶$R,b6nen[qēhSlJ7Vt'(bIҧf_Ć+l6UNE @P 6-> z6n *EsJvCβʅQ~z0_݈%ac$YcgRrj4G_MB)y 7}LחE vӛE` ơ19Q4ۓ 8^g8} ˫3Au񑀋z<1 5ǭ%1%^}!|*Nd=BcBzgW}mRP]Ff:Eo_l?ʼnAٽr16+Js ZZD1Ud=L~k]o\>Y;'e * +UV, spDj.~*n|CG`!CjD:RzU;^m+( t{ldK?f9mu{?cx.S.!;=ʜ'h=EPH"Ɏ\)"G[`.a 3DLj跱QqWy33VEegcB`{1|+ƼiNQBY_dfx cѮ7{_N&mu#%`_2ha?$ 賄{D Y7Ω V]uB @~yFC>(vQz`C5Q$>˖R}6vf|$ߖx~A֛CRmk6A҂iX6*zm1e=S+ VESk_KԌ4z iH3ZqOt;A,qDjk,';Ad׻qk^N^L%,P&FCٳozr0)N`%J`\i cV!K1qeC:lN[3/e򵕸k_Cz:&T1,A{@$&%hWе2@EXqYVl{dhhBqP þ1R~8ՠ.J*[-u{Z-ҙMҋE 5|kh<+$h'BQ*5򞉶sO/j~F' u9LQsj*Jͭ/Mg7 oh= F ZħǴkb!0(  v=Ij *O )F@d &VeЮQFHf`Irb\ӽְw5'Yƌ#@m@3]8t?I10 I{5/e%RhMȝ!$^B /*&CƆ>"Gwm0Hvм .ٽ2anV5HiVlli:N~X| n.HcGI:ty_𡚎KDD,kRsNcطjAMߚXѧFחm\dhWOf!.79o&- ꔎځX-S)Ph >f@H&·|&MHOBL o0fdC~cc~pNo$dVM͊3gÀDm#Rf^g^j9R\!tv3@2+aF CzJr{зc{W8^}wOƋR3RҬԌ" ¸R驎PEYꄮ. CB/bX}0 X<{)MzV›&Mr"MnJ(2hS\Z䙺XHN''RN&xEfg 0MpሾW}xO!b>(`)u~p! 
ȃ M,ozNEEuar/z^rEuHĦ[{)FeF3jLkC;~_:J1; C:xߖx2`U :䨎{m$8YXHUW< mJ|\ K7;1;G)iyT!բգ,'y賐89 Q1LcV4m.#+f1KKhTlk͜v縠$(ĐxbL5`rLIxj0Oc∪q'Nmh.X+DX Ü1lˏN')o pN /i.򒾔=" ␰/)\ 0張\gy౦ 0LϨc̱-v)^ nS4l_*<<łUY =UCX?\oK:|pG #|T vHъ fЙHo` ]탣DN8 "B<[Mp LUցxrvj)#HPWYNVWΜ&[N FR< y.MCF-2WULKE-')gHSD@fJLtG}CN#gE[#y|#d􏒿 )X0(l|'U!4%94i .=j2-`:iދ\^kKvx2E #X%\sд-A*>&]'Kd U] ^¡j1J<9~cm"ЉE#@I"m 2 ŽAz2vRrұPll"tƂR8r) RATXd-_s`\,̲M Kdy|\&~8vR@8"}1)[9^$J ٨0v¸b9v% 6-$,vI-$#f 921}Ҁ %" e7*WaBC>Kws8v-G-{7J pw9J|=z dj :s3}IJ19MsU A[EیysPwGB 8q78p+=!fs5:dd<~wҌ?7K~ׄ7, *Gn<)F? e)nuV `Bȩ:ABrh_OCwv ͞s25EW&_ihMnwiE:L&B<:rYC]Yj?q{"hIz!*O5S $pHW*z3 ΨxUﳐEܥr\@ahW9? zp ˠh7\[v =u+2ntt;MDhߨ%j ᏀJVGjoJ=ڜ_+ؿQx{Oa@@7'yPó4Q"žV*>7BoP1N",u szXOJ'+)zn1F|JdT sSVV4 eDECJzcT}Hp0Aokz~,n&.e1IV = Mt%47֨qS ?ʘ`W[CRz3"?>y8"m]TQoJX*ؓjN;Kj)ꂤ uF7PL=5Z7USb tL}__0MlM\&~hN`3'o]xۜ؟[9L B\V˂zk?:GbM-aT|6k)>11 3W/6%-&XeD $+ ^uv}GưJ&m>:_n,x%+ 'iV(16m-rqCa 3)}Mⴎ++%:9e|DdzENQ|I(#Y0tph6wWI$QOBƸ^j w^jߋ>=.}Hi9}X Jĩqd_rpвDsZid٣ QN74<3?7`3 g1ɼ@26>Ъ+NQSSr/4mRsΖeK#NblRr;R݈!v½2niu+zb4bM,Q9§j$2h wnsбHJ =ND`{E,cؘNQ('/02NK~J/o*" %GqgB`%T`)˞%* t%~&oMODg:XAW"4yA1n{D:mDpKJ;U%DY (٩fPg]:\(עgNCLV}QL8KBgw/bQ!">C$䗼oȃ>_Ai E9 g'hgM#|dُ4zHMsC'[D? 
_/D=B4NGv7H'ơCx[x}W&߂8z'(.{E057bGcĕCOWBQ[جALzkuD|y®;+y ޭo_65Rz϶1`ہ(`pw n1ZR= 1u(AcY[k wf M  H*m^`_)و=g`b |w ɯmQR RVhM[ƛa߯Yw""7bGq˨P_/n\3+kK%VX7FuDh,a%v@:E+,{\w@;Ķ<{b:z~g/q˾WÅqMY ~?Ǽ,|i#t'0LERV~pw}XیntHv@Qhޙߒw1d>=>-'ɾqˌT#[c,osWP#w"iʇ%xL :.΃|]Q ״Ǘvo8ՀlY}oKz7 M,;>FP\PEyIeV8Tpw3s ?#z|J漦Xu06:o DC@GG# b}"\BM>2RLJ.X[H}z|F%݆}0=N^ #vxLc>+:ߗ!p|y27KwwW7χ@k>_6lBG+-sq0|>#| &>߃'|>Q)#χig|NA|8i>u~>_T>_$|+>_?v A|\s󝈀8 1g#I%| |5|w}|-1|>c|" A>7I0d>?]aIQނϷ]*8c;mniS1QZ&|>O0`nD\pW'I^C6@ .h{{{={ '*UމqL0 ji@'[!<2F(3`r;#wR A oR ѣ+dw9g<5pjzb GV"f8TeT?VZ^d)|M{CA4~JI1&<㉻Ťx*|R t݁I1'u䜕9&*4)ȵ?Z&ļ "mM sBCR҃g˜灬KCf" ݈GgJ_{ 96'fB8 TbzGDd=fH*^dl:"SVyꅕJ8D#6_&}]z=yh @֣]Y tB`AN< xROPeyNGJy!*>z2T=5mQ?zm9+*&Ѹ|Lm{:{#m݃hm f߆~ۃBMFۦ oC˃1$_Rc-hGY{ ~[/vIHmm SDnܶ~ ]zSo#iV6T7Nx[Fvli -Fc[oe`*\F'tyЌwV5׳|Ylb吼  2^!C^Rjt(:}*!tP𠕸m3^mk(3`X|¼:%o5Y 9卖}]x݌ à@DɚՔv}SN @ '#pk-0 'ZN9ڲ"IC KM&NPL@CվapC(K×|⛱P#ڍE c gZ͊onb"n3NKfoEz;ʓ޾9!Yi90|xs#65 { UtptPx\jmo\ o'D1O2Uۈ-oPRiQ ӣ"h>XXG;b^@_y1n4ޟ,E =턨gPvI ZvU{(QѾ¡JsbT_NysA7mQ3]FhLLj1K0f7%Y8~ :`[E m2;-O8-QB+A= @" >K,I ywyԐzb\sQ;JnF 5ţG"ɝ.tFzJf!=\kAL9l<3]J!1c挈2f*YeڟtF95jQЃ;i[~*sq&*|dnɭxKvwG=#=i)pTJ9e]1#M]pSm',?sb2;`?$oa1{jn.4Gk,F tMۃ%.9@L1}.6u6`aB:'11x'k"`iT71X ּ a~xYphIb\/; L[cN}{Q،C7{ۋ71 SzTM&%i SRLͮn ?%nTNljQgq8.7ϕ}&OϧI˽tʼxkXb,}W8{"$Y Oۘ3ݵF&Pϝ*cp2L\hPc /1(dj&î1kAHhm߅XSσI 3>Jk33++#܆/KO@m6o\Ftͽ2ȅ:wn1|pfgm%&_R3Ɣ?x mYf~A2w7&>NzR;MǍ-n4=}=uB _mK ťHq/I!ʅ:-"DZf LQvl0>\N@@Σ WtἈp!y㪟Iԁgi6k~""3}-(bȏ!s4O_*?Ƙv4 ڽrn-z6Rq=ڏBwW=Hdl?-K2 @+ѪZָ͹ Aݵh^CD_"/@ uɎ!Pmie э4 ~ Ӹ]K E7_~!p,-~ۜ?rTC\VD*A0 Qi!YN3?Z;yFOp|?2D.6=<-_zn)l|eKEی8W\i*D>u;. 
xyYc}B'C <W 7UnrCdnjSM=n]6t):d/`]꟰ ^o/6텕g^}>dkZ^XY]K7,\QqWmC8rۈ>;(#Wm]ۉh-[I Bnz|Wgxx^{0FA'_:P®^ ye$f7vM_8`]7py9 s pJRc{pv~ps?/\tq?_x_j͸j_X-x _r=\\ y\|fK -'v%5X~w(~=ŗYrHR։0XnDO_ jHke/6uYQ."GWE z|{Ľ؂iea*jgįR#Crfj>̈́* eOvkʑ*XsZqkKv#Xϲ6/:7&tcu^TαyE@KUD !g &Hߋ)AUߴ^h*elt@/c^7t-SgiȨ3ޥ7c ӺP6[,h/pU9?{ܿHbspbv|R;\;'o0ݯEޖR+:S_q3z?;hRTc@m_5͂\tsJp,2+npKW׎_| 7C8҇_7{\YM0{;l`rbvhv7Z=>LM"}~7^MH9Xy ;ZV> zβ|Y4OEoe(?7a04kk.kpI_½/|eώ7\*H^k7I 8*tEe#kZ#4!{_@\ԨڏP5\ϼ&-4h/YRU:$CBրKi+\@*tIA DW2w(Ԅ9kX]$F1WqMgc/p} 9+oc|; \U80,3TjXXYiYifV`V<=NQiieNI[+( {ι2@~}.{{5ia|){(N'J ً):2e"}-鲢3.X;g;>jB v[ٰ͆1<)ȩ~VqHt8 uW!q؃:<ānE-`:dyD֜hycxEm7*ZI bv0Đ(%EE%dp` p0+t)%gˀ ]+Y@€ x $0bınR! j.jEweEŢǻZ٫5,B#)xqvQxל+wG]3n9;wy"9h˟j? (zˑ;'; `ֲ_9* C`"KŬ/nht!|3? ' RB>|5oDA;96PE Nb{2z2CBY m*ҫ`{a)ϥШQ*K,aNwSsaIsmQ0DY ,X`i`;*?WTSSt|Tf4lh?ٚm|uH$`ٽ 3IH'n-'*b" a|:IJ|3\X^ _l Dc1|QQwW62Fqj DE5f>nBͿ_p -01QLpXH¼*\ j [d'N&%'Ru1*<Ї&A?}Qݐ5!bSd@ٔnH\)%AKI@R :~W.>~~)\+/p=8xab댟7Q:VXw|Co,7L0H6!O>Y>JiT@&Jv*JD4~J;?ʾMikV%Li}*v`$CWױ%-{|/x|m(}p>T.l,iY7YW|XUUh+۰3"Z"Wc F+[l˽@Xlo+ GP'bU[8GK ǚ#rrt2 m4T'wt t4 t.Qނ({KGNJ?nQiY[\m.oȌ KDCRtfB>l gq_>x}{%"+ft%Vnے]ڎi#u.@Glu #e%& ތb!_QCΓH^yZrq1ڵrs<\_Udxgl^Ru34'{bp5L3nR=>a/u)taD?B@3.z{pJ 6D3ຯ &\7]?&XwؐW/(Z/#ļ @S u snv&ŃMJ`}wR5oB{ ~[pq[r"R !%(Vx ˭OJ bj69lY ˚(~_u( HF:m<~!oSMP;n`s_MNѥHAj +:SLM/O9뫬If{Zn[;i x܉VWu|e bvA}_a@@t.饖 !ֹ_ Nq]L*kDvV5#~+ȺC=՝&0A wTNdbl f Hx/ǓSZܳffiC*L+)rµ^-@)?XQ70LPhf~ =G(Pڱt)ưzE_VZxi9NL:m0.u <&Xľ[4Ls؞66M5- iSku'/#L̢) >A s0ݤsG0rWD=L V jer ӯ<<낕"4N$RC8m@yq^^JItHg7@ך(-\`㌳ALwjCi):u 2{&x<!-v;˵1gi]܍~aPBV!ϺH: 'C,w(ؔ Wcن&`2ˁ>"^؎zMm+~;5'!O?ם9\r&j8|j,@o,&ۗe*STTA/ r MCJy]EWUՕa|{+__5r~і)0IۥZZ/2q YQ w02sټBCԆ^Pr %i mƆ>؉ 9|wݼsG~x`xAUd1DtaI[υeO.&܈)7PrS*qPl2.8ff+dNÁ˭Rf{pqǞ )Dq![[V"q, tUN|ɠV'.ʝ'O[!qz$.ɕ0,.]VRW( %AҘeiྋ.3i@]׀?EWKt7,Òy؄4IJ(cOG?tSq05&?WhI4*6ڶNZFbi,` 4^bz)́Nbt+QC.#3r,B#[ֵϚUShjR [Lmk<A,ټ 7?`RwXR 4l]dz1cҶn LU>\CC+aғzx@pV`'pLdXN'C6 yvi)B'T!frTY利!ݨJ2]a9 V޹{fps[a21ɾ{^bZ<;+wI+WEK CbAr沁k|=+af&×k=NIEədR$k8Ǵ9_G#Cq8|!hypڲ2VUeuMȲOt-޼@+?7ns,b[(1  VuNr@(Jyaȡ1_&39עSs{?8"x-4≯SdCY|>}UxL@~;?j:+6 UOvt\$V٦^ZmoGM 
JF_=ևeu? >  !Α;/GgpW 2(h$6V=1^G$8wH:CbEW C;|S)ڳc rS "Rq/= 3-RΎy'<Ά!EQg VW<Pc?KS#&4?V?s +4?[gfgcG2!7%3^a3 yY22Ҹa9F``뛷<̣͜ykg&NXܻ b|N`R/Az Ke:(`BGQjR?[U䯘d5 N@&wAᑣP3a3@12Y֪XZ^%VZXxrmmE: |!`\z[btb?rlQn6<biZ!lCm]5?S&BiMhv~ؽHLW+͘ipMS%B<Wpm4rP4ܒ-븇Fq$|&cTcD@95{ mmuj$br&yLDT'i@.6?>I+`؋1vѓ4gL`+U9܃*l#fҦ1j$i2L̏0djZ1Miы@kM .5҆6L7--o^#{>4qbEh[rѻםGk#5Oєg=9HzTK, #m T W!'= ύn%/f+% |m_ r/6c?۪3²PuؘlNm9a;y!Vpc?PQ mxFU1], Љ& h""f6ި LՂ%i'`9 sO5È.Y5S2;V1f*,p%U`OS:݃JzlUTpO1O9t5WBF;0$@$)(saP']Ζyܶ{ȁ:ֵr1`RɫCQl:AŸ1DoZ"eb1qs_=6԰4\{ߵCKzyU\u!X hm9p11^~x \r\yl! =d5r&Ut%pd1ju1|cgywl_;u.#@qz(#u|zȅzTo^; d>BjMU~m ~(Tܨc>>W{l92 MWD7Eق.30A-Ui|[rN$n7fԆLk\8*^,ja^۪@*J e\9# Kw?mORд95P#n݆vBۏwBs4s8 rH/!V?@eruqڎYfM<<> a(*)NU*p[[îTBo57<@)X =XnW"j4 t]mG7 2@:^v `O.@(3rI*`+ôu2 i7b?? `eR"0qڶ1^lV9_e<`[5R@yH$CH]cC!յVٳZr;C?PW)MǃZW/0W`ߑsX?`k\AadMC|aOf"1Oއq9ijV]|4ЁI"mл!]s%T'J/e{^'~pS@x?9/x2EpsIӴC B]ouFS{X̺p?Wf- @'v <;:!H=yVDyNif!0v]X*lpnƐx?8>`M&=hNtH}bVQ{_'u?^$]2& 1"$jTYxh/¹77DFw6mı)$jmuAx1 [RXk8vەI)<v! k]&_Nj/Vk]d4T !vlukUJMxOê&swlV~e+/G<>A)0JJ&{4d/l)Tn/5^˿zDڌKwQ?{MT^^ʆ%~Tر$\@Kb{;L K >Rћdnӿڲo/ſ^j."GwK=v>wxF, d3*<^K$K5W[7R^4\itU;TTsJ}Eq}fj; N5-uy;^{K/mk@ݥ'wǯMv'?y/=VN~_5as?y; 5{OC2w*o[A~P/ss? zg!U)tkswE{NS~ S!HA(sI<ԤhǼ dF鎗FmG<#,o4;_.ʛH=>˓Y"{W /+ &@o15!ČiC>X*K{ _0x^ǫHN[o4%fr.5**vDpQC!.?uD?h+qe/{* #AFɪ|vX.t}p}H00"p_b(Jn?~7߮"ޕo`O&B`.uO!x|M?u0bt>j?נֆChl=Xi}heiBY(>gG KN(Ow?@ڒ.L 4]ٖ>#<.i?,u?^i^O R$ӌԮL!aY1 ۖ*7s?@5sylH~&1W 1p:Q),-TǞӶIRPqB0l&ߕ-Ʊٌےb<4-d.R? |QχTc]۱Jq}GOz)q|}|>>)>>plA $[Q˝63cD\eRCR>_eܜCڃof&u)Ꭿɜx%F{u=ˋf!p1*7mœfgn!h<Ϩb 4(rˈ066~|| [p%#-/ l%S4m`+rH[+`\vnL쳋1X͸'vD]\)Nª;n[nwذc 73{geeX3kJ~0ٿ!ўATdc}? 
0loFM͂Ғ TgZQD:>@ Xe(vŐ:{~ }c3.X2oee=0BY1~' @Q`B*_Ib٠PJ%-9_e0Q *j }#E=29ū}ƫS=62񼹓t"s]}8Ş MB^Cl􅇶x Grm@w"sv)Q8bc4t`;WDˊf:l7šawЁ_aU9mG&8-;]LdϨw"rRe:S1*v)2ޕi D][p g7 >A{dTz ࿯lo ow~>]|s]?:ߟ /l`ص>pSu4w/֌blO?U¤(\TPA_&t)$E;CCy|J*\d712QǿM':b7_wG{& c6tqACwj+VSٵߩ%3dgZwj)E$HSYީߩY'ʖwjOR}ݝڏܩu]s]B8ݩvsΌS^XNmVGwj҂ZZºwj>wW` !kBݝZwjoWuh%% \vewjTnu0PjQw] V; *NN[v&lߔoo'K[%??o}~A?~| n Gm](AXAr+,nұ &}I?ʜt̬ (PCA9jB)8:`Vl0p WB-C>s |t xDXu1ЁЎvz-'9:@h2J@Hs{p-#nXN㜷\AZLM3#B|xT3*Ru A})DDZ2B>dd&c).@]bR-#pb\1 K.pH'[oL[eƴ] aLiK3(;ީeԅmFZܩ i{{|N]H[ީ i6G8F=_Ljs?(rè%|5]6?]cxIɁ| ~YƯ5Phxc5QIy>ih4GkwQpT|%K#ҽL\(";ܣ}PZ0"{}*7&JiW@d&JOo)X4cmJb[Jb[Jc7VUwg|gb}o(UՁߎχgWe߲쑱9xqJ%b ٪.bhJ[5&`="uLA|K(|ڕ |ko >@X+#h@Ai F@iNNi1M3KiP`JC; H4ƨOi@M['VΈb2__ŵ( 4_ѹպ0L!nޖKY6< `x|hѵZx[>|hA޶QꝑoKWۅ~(-4u+V=mD{mvo|G|ApfcJp[[Q!)7+u u~'ӁBm{B) /M/isK7Jn؞QqJn"%+)gX!B!u"7Pg!Uzn먄OGC*{RyvT^1 :]Htȹoxp| )1ɑqJ+}%p["ϣQ|KZ }?Q7kQ$3ouYCV5Gm]PAЇRy7޹k%Ht/ZYe4HrMܤ=bPZB>PzZc6ljhxȳ{Q<KJz,?ǜk uĘ}GP8Cc$G+@9?1?GP~% u;S0%-,=9+MSh.M!.-)p[7nTaVLG+4ңwS^/Ч OZw]o^N}'>ۓ~OwC=l\;7NߥO :"%W_ɟBC̊ǹ>qr8ѓ bZC?Njm7[؏'DŽ V@0I}ӋS_|Ut~K:3U6^i 8yv7Z}@\28i<c"YFcct CW0{S}aiϨ0Ѐ!wKig*wN; qYS ce9c*X\mױ+ Kf>thønaaUNwU:yNq\Km[5)n+EwW9lbCgy0s8#ݸqUaN: ;o D~ :'HU!{C ^v mBV;e4Ş$Tզc%&[M*bTz􌌹GDg94r,$^ɻ%,0ȏv<'I/@9gj`FÂ>yv(7oo̪oY7;7k=l DŽ"Pxt|NFv>4}!p桿<,hX1;<7[Y~"XW1T7) ᅲNp/yEG N)jDEb-̜])9p> vXԠWnC)2i?k"YpO4{UiF%_w|:qZgO*>{ !8N#j4{й9uiw¡¶'Ns3s31sΞf.Ξfu@{P }^:CGlO;G)i3GY~g;=͗9›[0,>{ E_&Ͼ>(AJga%nTb$1##g1A>* V'ь@Jk ~ʯo~ s80!u3gi߽vi`$1ƥwX&w0afa+{n*Qz() sUmb9䋯k_lZr)ߓ`>g ~Okw~j_ulFֲhh]0b7 '24`2n,JaHa[Ll'Y2o o-u[SkXQX?Ԓ,ʜƣv۟[8/?~p7w7V Pޫ5-Ʒg3OLŃv]6Vt>>IITt7Aj06[@`,0^&\4\0b0\9:!WeZ=*)Iʊ";}d[e6;ۼS|#гS Hp'FhtXYF̤\ɶOo# aXy3]nj8,&5yS_aނKMkfk5~b66Sȏ>#8XyFӰZy)w_ѳ/K\'vN(|"<oŲt/J.&eI>$uBXkȜq:Ӊ:Dž0x@%|rI XIuC~ZcZ#cFٱqn:."yAdĔd`# E\9KYLMZ;>BbIݽPo54N-˲Ld#:u$#S0bxw6/@sJgB>+,07FW#A‹Wo `A"&5BS[&[V-rPW n24֍Bbc) ڬ[/X_ f2N]hV_X3;}&RHFJ =9N^syUij3a /[rԾRτle^sؤo Ti#5uj/^ȦPm6IU SKqB,%g'@l2"~a}?AwMR@'`yF B".Fk1kȘ'T.AdFRɉF-x<@3}->*QM y(`-g*9=tBZ귩|dά'ufb??h[lhx8,x۴NWOڙG#g8Dyr&_u] H]}DY@ j'( Ԓt7'rrGBJ 
%AEJJ:-GS/G:tC;Vxmŭ=~3т7`L+{ ֊ iKhbSPpӭ k,@+YXjj Z2;gqݲh?6ywtwgE`K}@*a Eiy^/*h9h뀶2;iYt\`@jgl_bk`:@iX#RnFm!P13q)*~-Bw >=kS=(*1{J͔Ujo+Wnk\`+Okϊ_ |Zu5ƛ95k3d??`i3C"j^`)#CjC Dy J/WwH*H5ɤu?3Tՙ?b<x?Oՙ?󧰞rZc͟~̟[#֝?~4c4^W99 }O~O^Sٜ,35@*Rw\yR#!5R!rTHPSLsr|hVW)il: AzzIF'nٍx&ߧ3IϠ@"|R#?ƾ< c9!{+/<~AwT?w兘y /xvq='tj*I (Dt1<5.f{&G5ء͞<eؤrcŞ1Mw&2 Eӎc.Lbw LMmh~ƇĒ9ǀvmeTSz8yJ'-Yivq=TG |ӴQ:KL{Ia^j1sjz`wh?ed(H~͋7BCR] ?m]K ˸@ez bf‹0?["n%S?.X]CǓ{z8ub 8TY䶽SJ|sE_TۅK A}GoozRO^Qmj}xwgä. p==xxAn$'kԈÛn۞I=b{h tdZ.ۃQ诽tk` aya !PŽ뚝+nS }X]u/K:)}}jy#8V"؂㆕-Gn[g73g\2b(-M6i[vyx7mE-.os_QsOtmO! RbEp 6ZuR`GXNN$UxeH ,oHЅR`ۙDg]zDp 2@M|0fY[@eLm'%1N *]l *ˀ_*TX8X$ԃd)]_L]'GBE\Gu{)x if$x#5@-oܕ =7i` RE}Fo{UO7TZ0Hѫ&xxU7rE!]o[BGNj=ǿ!zŰ8Lؔx?P4a;.v'vFUNthS{ƟcpJS< OT:/yףEa # #A6,*ZzVQ܋ۥ)1BA*9˭DݚD?oCnkԗ+a{ A!=mvH~#40q[WT>) Dx7:zE݊1y!Sdm78!2x(߿[ZqBԆ6%煜!x.À.'ZD2lXq>{@\cCoG#DgXiI!NˢEaqCjsqCqS_lw`t6- Y[dzvX TlaRTj~4YR9!ĔPTf HQJ*|YdN R?u۳C{䷳(;#\#碢ѥH=ȳAbHT*<IKA:3:Ng٠uj@<-'G3}/#[t/ؑ}U{6/vp^>{ ɷ́fhe*:-q?{s_̿INL ƣŠo2ݤ^ض& S^]H= *;AƜ 0?6%?0r (*τG7!>Ce6;֍Q Cs1>9c?Y@ l$K_e}&k&I9?yph}X3NLC6!ulZ;3~UTeAf #G3a-As׏-PvxBg#I_>V}VWsa_}f, G:'yV ~C^F&& bKRo(8]zYܓ˭P(&ŊeE&2v ] %@!iXXp4+g= UPt`qXjʊ^OGvX>z[ SY}=i;f /PSsvDJ,Rn8饋r+?Tsib?d/< V?>o. 
k6p_b";qA64>GC6@iɽ=rsf쯂r:'btǗ 81w(!!ϙDrE¸]/=AR-2$Kм++7SkWi Ṍc:9_@l zsɕw$Dyr6,joLNbqy++= ^kv60AⅹC$B/$_pO6 " 7~ C4"Ћ&],ych_~z8Idy pV;~^RXОo+"..5Xq Kuq|Yy2|?1LQuf(Fz EaKu磉%'o3l[1VDQ; Q@ѵr6&Kif9*ay03^P[,'׌d<~4[ش1}W- - TZ7}Y"~ @] X^g/Lfh_cA'1 pL,eR/#'V&\ SU{VCd#^t0݇\^!eK tpiJs);3)vQp!JՖ]W7:l0S-eEd:#)ta3PZJWR/Dr`rR<\q2vɅXˠ5 ycI3G8̗fM*J1 Vb%v&c h(蹏|Olg  u`c{n2wW|{]ԝ6U ՔdNUu1ץů˱}:DI,<7vE?^}\A^g`^urD7e!EN"nhIYfԚX8LדiTUȅ-u~L<|ɥZg>|@,?9KPTv PI;p/ +K(wl >ӵ4 !7-LxwɝsHXC$lNt(4}aZ`g\IF$00$@>$` R>WyP ua] q8P:bnWKrp mzoS6;ă~Fd^MDj1KL00g)N(\)gU7cXaCY^ͫ쾇bJ狿S RfG>^qle,F^Qlr:X5`$Yu<Xx@+׾l .K,6.@sP#QlANjDG8;OHhl͆*28u]Jh8ot-˞f6 wٕn93/jQ8fh{2O۠/ȠQ qZ0[ADl2ʡW8:Ӣ̊öEQu1RO7,Wkj"՛,9]@` <+3d[NrjY*l*Zϋ{F|)+CҊ5WCq5zU+<'q&+PKܨ?ZLV @ lLb@/B<}Inv'A*%etYyZ!7qFKRxWPte*ς3 %OJih45>]AWhhmURPφZ<,(܃ 8RUdI6S^.Ga!1ʒzlN7qŦ/LOq{ l ROrj_LQԈWgj35+ I`+wݛoDu~8?6&Ѩ"2is:bLNgRA¿I b& I33:ϤLm՜4ECZ;Jq.l}FYZ'ܣoJ0/6> {L>MtRU_E>EG:T(fiOo;|/?=s}(:W>͗ݣɧ}!=u> z []4Od?O~h>$b{;;.Vdy#=$ ޒ .%# 'R yP ̨@.gFr_Dm !@(5P({zo{{k }kS9BND֝QEkyP|FwLI9z2FwLGQ,oGۛ@=TBM뭍bM/+ktT.ij4FwHRs7VkqFDYҸ^ڤ??#z)37ս`7]kSNi˝$ ^AڋN)fWL/jUӾkfߡs >?/M3ƭyy=<1l_J} ng],_Dɧ xs TYMߦ7τ` }؎XZB;}AZ %uH֮ =}/s]gԚː8vnRw< etJLܖ"buZAQfe{`Э;"5v&o7͂@Di/P_]mCH5 PMhQV2C@_fWr+a+5xe)a)n6X2ޱzp厷k"iePj.l :Ոރ B.S`J DKLp[fC2xNB;p3 ko5 r]۹-=4l /lEl,P*AN qaOY'0s:ebGX{Jak\`+(-!y*gxqSXUڨ%-+ y5g-%=>"DУ O5<Ɠrp­SҦEO2,*%7K"I#=wiDwXgB1j{IfkP:\+(fl Fo,|9'Vv`a`ɲd' o0Cag< f0$a% EzRIkwwPpt,݄=w۝F[wݝ Hpݵ6; CwZ>y&6|2@rM:zϗ3gމM]Z7ԭnSrjꞦ\PPSwjMEZ_O, + .3Ӻ(mEc'<܁y&9 T*/`Z\w|jg^lg)tڒ[.v^I = @}9 8yhS&߱`N4,|ߵ.* 峱sMH=^TʃPGrc"e*4ՉZMxה#͗ȓyW.>T%qZCxj~Mht $FMz:KMw% p!Ww4 ' '-wx#;/'Cew70qA R!pi!޽y;wkb.\v>5ߊfg5R&VtTމPw;sxKuC0f58'ճ @;n}j+w$x?\)/}CU;i헳f)>ԸX"ߎtY,n,B(!gh~^Rep\dl?RW?Tnzsٯv5Q۟8hC2zVyFb,|MN|O=o_5Fr'u} ɞzj?xH_J( P6pʻ7gZl ym!Y8 t`W:ӄ?b2t5H?w5E5lؐݧF'ctDWE)# 㫁Ƨzu~دFV/ܞ|Τ۾lH<s+9t'9sO&ͻ(Z$K}>>RG.8Jx`/8a\c{ӡlSK&Ǵ`fvVvoο4gnӾ_vN6t7}\};df${ex/2l: ҽm@-\7`m }ne1( iŁfouj R,ݡ j؆[kzkje\n3$B`TvN`) 5]}z+Ƈœ[ݎ¿P^ B U=R({BnY@ygW|9J1oG2"Tӱ^ˡ?>(O߂W*yGw2xMyݯD h-Pp=7=oVcnv6ތff8,EBPHp?`7 &ڡhj@A=YE܃@$Fyd)06;7/H(:' gRj9T( 
m|h3txPt)(ǝMN \х QX(oUH9^|#=ynV4{jK1r(PfXa3 yyg'd<:҄\ֱx`k7X7iՄ P,H"t4LXJͼ:?ZP&myW)!svi&-'AR-+7( So$)V|YF[RKAvmAʰbAG$F.ۍ$a>|ci}M˾^߼xoJA u_h+ ̅1&YP_+:gƷ1],MưOch;>'ʿ6Rv:*?-=U囊ۃ<0Pz@OJAُDp}ɠH(G84{*WtD/?љ'Qs . Ǥ+Fs3,2Y2pĚ=*OD@VQ$a>e{^Wےt6R2+L5ݎMB%2ó N2֠f"XFP-wQEQeQRM=!+V 4y p]7aI8_0!َT,%-Zόm{a~;Rm+׮l'wxQ[̧0"L/X>3!6Ha<._eS-98y^-N*^ ԮT2URouՠxfT*y4RTb-NB!s&r A>(z5zC88Z($ˢX2|!>5W^uiY3TpKǂdƀq:@!MwVq㬊;}Vbx/6Kt5IQR, _ h@**^U KcTN foGiLG>CT7V#|%cCVgQܫ*Ӓ9Z2o57_T祓,geHuF*:%_c]5hq1yX?_Ӎ3zӛHl?@M *(OmpWbxĮ OPյŽCbMɝ ~>_j@(s75Dr#M$U~=yF}y2YzSJez▛7QM |ph ВGbZ QQ)\I6}*,6cVz "N՟  jgy$dD LC,`JN)%е&C>7x B9|p5SDP%fP3R>/^jպ =}P=W}UxCC߳K71X {#&~%&8p:& =?n+.ڥv& kBa= kp(ホFxm!^: t_î4's@/kq5W`#Bq\vh 9_ò t9ˬaDKsNQk?!#l[Qd衇IR)# wifmZ2BvLډUtX;ȅj3Relj)DrrylknX؝@| H/ <o;6`LnDo{1оOz4ĒȨui\ N.v nCRNm13w%ff*AGqvFzˬ¤CS[y|^NʭpJs=f&!{oXBXm- L)[;CTܵEpW1^X( _ E}dte*3bBQQ-nqZ& 7͠HP򰖰KĆvAP@a| I;![/Sy\R36w#l>a@[JbZik1yku鶅)L+:QD/7fNZl!PBi|&T˘JXCuZHMk2!eu&R,2J#i0(Gv5g#xݬF@HV7tGP;qeM ԘMa"9:: Q+-GbT7UMT].Aeb}B2qaDŵT9W$) |W'*g MpssD~gߊC njT&<LXzNAbҝAҴ;j *Nu)Zo &%%x<(}=<~. /W4-i0{w3O t@'>΀\¬qjdb#cSI$JbL`dq 1B<츯c? mBqF sxDQK671R~4֥W ocU4B&1#pR9v1%czKm24ur!}V) Ro_{## 9.p1j_7SewLf8y?kNi$f8oBXeAaGCm-0 jh-I^KȺD_4AAU.KzZUq!K#D5ٚhi6  SJ!>{Yȇg1RqC'SxөL瑨lW "S/"c<LZz"Qa *+uͩg|+8n|=6<񽽚wWǗgSK! ~]ј* @j26mw-#C"wba9~vݜ`%_%YC)n!0%z%zXQ8=d9%bD}/Y[b2E'֎~iW2CTdW(\ ErqΈ)+q?lu'[d['T|Ò↘ N[ZF=6LEb%zUO ^Jџ?DPddJz}l'e2Zإt`~OM(; Ts5@\߮܉;g3Ӌ|}NN6{P/[Qjkvk7z5ഢcDklDqb3L86Ir[r#F{ Ei nL<Ö_j!H#VԿXJ̰h/o6*7Wg}ݼI1UIƘՓCX*X^YpSۉ ƍ c.QYh0KKKrT}lv??Nˢb9Fy@XȂS,L)2Ѹ%}Mm3@A"_aU + (X  !ohJC~5!Gг}O  wßkûz3 c'0cǒUs2-̂OUV23Ȯf1$YXUJw+a^4]q3n91z6I\A48ZW4D.Xl+= ? 
3dop 9oK_ԋaIuƉ34;r?zG5j$,{a&M/R\5-׼Qcw3&$-j7\C̽/B'2s9~a9vn;ϥBX%ΏcqpeOjK%Vzi1?<}zz}Jdƻ0 >-Hss`bvR49`K?Mdv@XI*H dN;QeMu/mAP~~:0Ɋ%Aw'=/s!UFWu悛i=qr$7Q l4Нp֦'_'\G7Qv\p7 1v[pDdTwv-l͸5$C,GBsaٯ9nƇ?m!]գ4`<Y`BOwⱵ䯩ܸJ "n"/ =9+gCR_B N%{EI ꕉComIF2Y#K3Ϙv?LYRaHtwY!'8oDʦd,*DY Xb/hr&vb<&ou>aE8O_VcIV!Z Yv+H6n^.&clCLI ̯׹0>3j{]/w xQUimc qO3έx9-ٸvEMKGFMƬ} C(#h8%oY=)c.\|뾊^;~Ox)oEEtEE /Ep1}U>EEड़y7o@y;ah?ђF@i\9xq]Oiq0~j%?=}>e%ˌX0PL$74ȑ&W;CT> ľ9ҧ/kBNQZ9D OݧP ?`=j;UX>8r]=i(=᥼'<<(dgnAKM烗,`^yqz115+z5R\QB*Z>I|yylWH x4Pԝ#mDmlUwW ߞYhC}F~3SQ(# +8RcBq%7ey BqƄ/u4eT=I'4"Ѥ.W>P@G)QN!j̻nv4Sg.=;/%RN"!t<^\P@J)STȷWe9zi1@=֣+:NϿs|:pcMAϯk鎕;x\{Ca>ܷufX<{P2Phbt|*uvNSlLCO^O؈π#p;ܾ մ+ k:ąv75[5=ZjT,QPj6Ok 7 ݴ5ȠXM؁6*5ۊ,jP[aǃ]/-Y=Wȴr+WUVTxR`Zי;`%F xz8^u,l (5}?^+*GhɡZRP!{^J`⵸,ȠXصَx%ׯ|zKh<)3\מO~Ty>oMNpD;'T봞I>3& %w5rlyJl4NR|-lZ Q,UpY|]AZ׊Aҗb,"xU{@w䍀@WFq Arƺ!MڦaSd P;m?D'l> TsC8[،HFBoDF9s kٞ-Q(mzJ񨔶؇FiyY7Hi}vWzJ`-z߮PoR]OiҢbݯA*fe5GJ-dS2 Ѹj|,05~(#"oR߬m>@1IdKV+ 䋭I;zBB{C M8KBϩ 9 ])4| z8bIFiΟBy֒*K=狅K5XŐuAް&F%Xb πll+*zF=ZϩևZFK;mZ+Djs=Ox(ʛ_^ٌ</B)[YKԦ# [zU*Y|v*!FQ1F: Y1kǪ噷Z!%>,J$վ, ϑH}'ϞuTz"6 hk4tk-+k#IMq!M+FV|(Xx@1_ON4nⳡDZH+NAl줁!#0.  9p)UJH+җRzIJP (‡!QȭL Jz8cI}A?#0ÞQi:VKFkH3zJ74kJZ)o k`bT|~FwjTtuÀjپ-TZ=W޺ЇFjD/$ W=;oS7)]'E72bF f_4wi 4@* ;@Ii| @nꮫ" @=u$kӾ iɪӚsJ = D j4߂7&6M d40sϗM 4~Z~hpi௏zXZQޤ*= aB'@* L=4~ i* =(|h wDu DPl7h fv|g4N핟RGTKhSJOiNT|i2֢xB= t=a^4PpiѧI'?.N4 |XH@ԣ\sD Oi`(wR5ВdIञDAUtupi -cS |U&D?DR{SOC=QKՒO(MoǧlǞi t m߉_V@`; ۍZo qZ : |h~lvG 8oU+&ey~o6qq~|ǽ8^~\w(n[;pq;/(\@z0\Ri"/ \֕ @9sBI9'= <WÚ# BuJUH*?Pi@TiJo%?! TT)QUN:o@o7JՒLP]%Дր7-LlpSa{6 px sK*4'= V4pB/ \GTlA8@l= lc yTh7)5c4`k4гiwT(4Pzi`l .4d ,iR@4py;~۟W*M-%jӱ>\ h]p$0}[rNoA-[ ܋*  .|V44z(e y6Wh7). 
8O{+Ҁ"@Twi UȾҀ$ i 2.Λd4p"츘O +M-)k 4N4@FWR&66&i`:h~ZEzxAl40/[O3< &iрRGPn i1o!uu灗Ĵ󘰐vدQ+u7@7@x6@xwF3`m$Hqo><HTbq^%v((}}wHU{!C=رu$I#۟'R{%S\-Y% zbaf5bՈUP9[|ɨO} [}٠la=}xTwzbO8_;~ш=DŁ YHfxBBI9=O`= vn%~CT-wbԀ_l<Nn%_ݾv֡ү (㩀`TM` 05!0V5x MtW?93 |_Fr3YzG0k \1Ru`؁_S{Tjnށנ[yMk=KL{J50&q0v!SQ~=_W`{<N[xZX}B먣u;*;:/WZG>PC8}cWFku;\Jh!8%SmoĨٯgXcMv+}G͗L(^zi`~I`4&0FWFu;pN_[[dhڴ'f6_ϳ>yxF㉀`T}A`$u5H/^µ_VoҪǻ ~>de^C/40>'0z^ FscFMX^kkx?U'|0è/7A_{Y{xXN`;3]8S9#>W^ZMc)96T[yχ\93 hϰN:+<-0czy G>hxʧ?:|e|Ӧ91`: Cx[ؙ-ܠG\B~.pz>vP/O4CuO40 wH0ZrdZG5BBp OA7v\}G pˮ?:G5o#1z/#vo!ZZ Zm0hf㡄B(Gh``DzJ!gӟޤ2)Ia/j5鶶j}A*Sw@pV[T+[q 4<ԁ YS.dW󏠺x#pjxĭƦ K/x2d[**>TQAemV9UTM3ٛK~Zq?^ תīoǫoUZȠVK`5vZ}^oZB-/PsU,|q^CZBX9#고zJZd75x}%x|+T3O}<_7PZhjGKG~W uT@KF~,TߗWUVoP Qb a|%Z3eizZ)g՜}a/=3yyjSZ9^U ̫_wZyJ*շhIZ朡U\i^ZjAj!jHz7MZjSWGbgSr_ċ39KBdXYy͢Qj?; pn9}'V0l'gxuEx0r&Sq]Hp,_.KK95 fŏ? L pOq! ˌZi+t!/Ś_t!o5]6CA8|]OƼlբwkaǽ/[ňEƬuzږBq$uD}O|_Ӝ<*"!YbnWB<:!0&H}gr1~lc tD#<2?H0 )I՘ K;4܎8KpXӰO)}4Y4Jc!xB~k ܬX9A_EEgdh[6ߕtDb b-U//)}&iO32{,mu|ZV,d?7q|6OV idm|Z{鳖f%rx\BQt2N*F߄,e9ַqZFs@EɣO8 D qrI|[4|ro8zg||>M|=C?͟o`Ϟ~R7>[._dōs$e|B4 ~=Fm22s.8$`yGfL{s:FϠܺ͋A7942 7c*A~^K~I,d0YhI/;ޤ b =$7)x7K/ަǶT] U,UWTVrn=?T]<GOK*D_^m_N'97Kp{ 8[}*Ԁ£ C\EkZ - h&麅+B%L8pE0 3_(-G;O[止8מ.#M2p^U"sNAaeȶ0|K9&;ٿ2˺i4}ݶ4%DzLؤƭlAAyjc-H?,3 +qȯ𵀯i1`Ds3b.QVcT]z.=Vt``YyL,o&OW,W-+v|SA)'"/FZp7AjG= rx0722)2 kNxo|k7 <7ОIO|j{D7Rd:IPO%8Ee[繀-9a{;s ,:9HW鏸خ̒2ldfCL3PT,{R,@pr 9w X ܥMVaWC3Ӛ%|vr@z0R_$g{Q颔M̐(QlMtсWiQPG؋#jvN3Ɋ> @pkivڜ[MeyN]ofKq=jjIbC8? 
y/YgE'إ =.4 {`  ڥ4}6yNHi?`vRPc4 KjogU8,T+|h<nUΑmH^ӭ g3\ `6HX<)bRKa/ZOrjDycX(ϧ˰<1՘OO@qXcR(}&p9r@57՘#_xϣF+gvv'hn~"^,{F;ʁ'"(\ />$V;&O5 G?<0bi|7B8;AA!pH" $0 EQ(JIa]'*>#'`D˒eEn!UUw& |/twUuUuwuwu5rc-v-ءȔƖF$]`XnҬbMҝvBPҬ=fŜʗԤʇ)oidۂ%GP>RWe,< @_K=er١O+)R=ߣKi:y ITv_,.ә 9:%S2aOv ;1*/M1F M >1b -Z[4>JahRUc)Dwc(oEsp9s\Eys `";_  ̒}Q)|O qJRH]w jǺs;}QZ'FIܿkY )zʔ|aZ$S<̢21?*}D*Ѩ|OCqw=2D>PK5 ܓE<}*s(尽aA,>qe*)*_>M L 6~uIS"W{{c9cU>XG xפб>WTXptWaWjXߟBڿb(cn[Yn$eiQ QZe* oKM:X5Vu*T rjlSC`nئK{ChY*Y*ɳC`wJXZU*ِ#ջ1QlƼ7ɢNj!yn hSyYHXCP'R_c)l/fgv.j-d36ִ|=/|=|=dI'WyqQMJ9yAWStxkNqy0OC|m (UG˯#q!(Ա WEOhj7C `,M_N>J[9"{^ |{3ٓA=X?he;E3wA B "E86 }^\$oR_A0gcg~HuBp7U,rMkfK5dibHȪWSB)$Z߷rz xx@A=(6*L9ީBNF`}U0M|.JkHkjQ/'5oЂ20)p6aA3'[AfYΘ͵7ƓI~^FǟSSg**_ (0+-W,]TblQrX0MP19 oiʒcH wEk.y)d7@vxȓ3gagid5H,4B@ǥM<.5F@iE?Go'1 lVOd f{zնCl$rxP_I'o 5g&^C~cmy/6f elZ]H VAm |,Gc-~ԲP"2w놩yWy{KZd.fj֐WO!q(et=-L zto = a֧0.R{^hP}xNc@l$-E`Aj&X lBnZp9mb  l>cgw'9bMF&eUP>i=L=q?Vc\[>ӈ}[4[Qv%l |7LD.Ӽ}1>_3#N&oOdEcdD;:8G.|mܻ0 z5jg3=cֳx^ygOƙo5{q_y{~kڌ ܧ*GG~a~a&]h_8ro~x6/&htoC Hu=2Y~o5x +j IÕx5 ˿/ AdM6_d &{Q㿬Szn5˺{aŠu*ӽ}Y|!x#I}W.X/^j0 |4-%l lzebvLnɽgk5-B%|%TqbA}Lnateag }w"y9L˓Yl7HrGD-hoe類J{p~EqHHjQ?7ݎ1%yLSMn* "l?Cv'E=6LTCV e]&*c#JpX;uv6Ӧ*F'>G ^i0a'o4S;\֕erb9 .y>2sd@"#OkG0x^;57~8wAPib>$x"7L?2d <^NЩ%qʳhv^Rñn2P0f)Ƽ~gSr{$}/ƟԢb27`N_3ymzHNr##(s΍Lz@S3w&W]&("\\%М~ئak,'KCPu(%lAKXgj#W&`i>QyMTdx֍A-F@n&{bLN4 Pgr#*+͗C-rB[dN YV92fH,;*B(lmu68;8vC}\Jˇr2$%;, 'o dY!pz,lTy&*4|M+<s9!d`< uq(Fų~$?@rS[XXF=* X8Vݏܪ*b[Ynկ2Szc[)S(&PϨ=Ư$=>Ykӝdz=ޥ,qJՖPk6odWeq]ōCNk}-'lm%r* ԺCʸXn(tF-HM̸srFNGJOSG1CWC8*wä͆jgYrU^ƿpTlTwsA7s X#a%fKR+F*QLw0Yc{N`]$纗S}V9p'e1[Fe0ˍʝ/G#);QQviCy)NՀ?! )h@;IQhyd/Q -1E$=9 %1llh9Ah9oAⲰ=%~옷4ӰcE>'ђӃVD+1"%f57[n{7{k̅ phX6~ l{YUl;+-l]y@6=МMk)xjcl(?l: 4?8 V7ȾOwϳz{>`&@`;c~O%0-I-1?[ނYpMGi4ȯ|!Qt2&ԩS,PnKU>&u:%{ SPN0ER2SMWNPU#q]jhI+O Y3vZ.]ء-@>C7oCB$W7.~SIw0c1tcL8vYf1G8iW5JxL f:c 'I8X[vzK,;w˔RY?HOnWx `~.E<`[3]>X)<:#w%h@^s* WHVmۜȢ-3~E_wD?HGY[ }*U:+B|bL#Ȉ1g<@\X{&ꉢM`,IU~%;. 
F;e(wF%B )JPbRi^ǻ?çѷΠeiwhuߔv^|>#Xzq>uzrU_qu+"1PЋmk]1L7j7ÕhVcjltt\B%eFsԫoFkldQw+dנۯ7s^ duGnjeY2J6O̹Ž2}H ,CJRaq70ufqxk0ހiuw?4nIH?[f-GR" 3BO#yrb*FK>f=:SlӢg*]k f<wj FFͅT#Z 基qd~xxwX}uyvU^nbt%JK~zw|uwG5(5(7p(TJEx\g{y fa|.S̃ fJ.B^+oMW՟џٿ^t%j%[~>x? e[?%Tů,wloThʿvO8]R]FBwik(FLm1zdh<4[-%H S#Zґ|,KNN9h@#W4KX`ãYM h]F؇K/!'Z}XGt}j۲aJI$쿶m$gE}lY ނB7<75P5X{=0vEh? ^mlߧ߄PT#g_o!"%@Ɂ%h~P1FՊ/Vv@:l/dϊxm<̉I&h-E^_&x}8v24{ =n~{}Ofrm>ooW7F|oҨŊ_؛ߗy7\_h+߇=0IS^o%9z1S{_@Y`}ۏ?صW& !V|Y6b]q!(l [ң=`0In'c9wh 9u Y0LMjI͹p4Es=3#XaLZ$--RJ`재Oz? 2`~?Y c)Peals|g JlSQ@pVv D|c Rgr+Oʰz>nm^0 dx|̈́kձ)Zo&^ˤ׿ or׼'®YƔ0dK_Qn`fa>ޏԲg.43xۏ]/.\~̽5#~|=ys㠛㊎ُ7_nx;ǡx@KJFu.=M z߻Ǒe+7.ڏw'\(ug?^׿o~WWnF&E72UʾIm5y/zO~7p_k?ȸ~4Wֱ eۡLCүb?/ {`-RхCWmejB+˻zיF2d^VjY>]Y1S6I߲k)}r#zEcoS:_yO?V*pe֢veC~0tiƗs?͟YN>gM'Ɵ. 0"J7]V&v=Fdqց+;F*WySgzV.v*.˹.bG'&ّ1`[gw]旝IƯ:*輞Kݿe{o}ʢE\<ʁ\}SnZQ0HIUtNӧѝTt4uS1O9u~OsC))Deֽ2קxemO_uO١> <]ќs;ԧjo~ mӧU~]w:^7@[WWFW|;_7-Q2s~i_h__w0}u_{|9:O g} 33 3a#wo^8.:"μ$=Kr$hc|`dnOX‹r,iͯ88+ G6db^)$f_RǚK)UT és")Ycٴj1Q9.z%{{|o 5@ 9Br!\giay,2&:ܜ1 ~ęAH { X'o_]T__r^}> 1(WIs ,熨OSk~Q9_3GՋ՞&+fzJm9 d@ tYṈ? dVVR`B:!JNl( kCjDbQ1!,v D%L^n)h8֘(w$^9dс5ǼJt;:AX垶XkL1Ԕ'B.j5%h*wH)l* ic-rY;8ֆi2 .tH77 ºPUym3'L8ڲP[f혥۝icyw[ml#OJ ǙVK0'd'uI;gLYs%㎙J1s`)cLS>g՘_2@ABϜe9o\)|ǜ PQYKo g/O9?c˟ɟaʟ(? O- O-.H經J~7$Tu< NcA\HdIYc×"*zhܥa~T\=í„o :F*C- dVim@c5(3a,mTX~%u,8[@gWuDZD@ڞ LّTƅ⸅e-(K1$dg 2rS3CL?ԜvO0֝]9-,ĘV$+z++[2r^n1%\o=H 燙 ۂp6K`?>Vb),; >)0:4Q0k({MCl7-D`ZqPLR7pNCjP kv,LM D$` }kwen-7-{Nٮʝٓ ,L1x4:BSq!eNVY[lRHBTk5>gN88v»J| Q.H֌ AW;Stsr1ޘQ3w H7Ǚ7@n)uLBMO\ <4( U"t='%Č?ve$InPh,sc8%|i05%ؕiqmzD/yWĒW+| vE|L//%-ĺ"e zaAh% )0!6d@:CTb}p&tc1nsEPPz_T-8p5';݂ 9Uz+N 9$zZMO`r.@ CAF?Ax _qwE!S'c))  2Ծgx؃s7w}%ZF8ntЌR}?M1Q΂TCFdO+?Ț Z5a`ʋF$a5l42C&T_4-;5f[J r"Ϥy?+myif0dWmNN%Zi v3=tC-9jL|ާZE xIR;YcϳLC q~@46Q`h( Ӣ+ƏLm$FЁ4M>1Lm&d;ؖP;F8*"-";m*쪧Lɋ-jWgKb2ilYqGq ݲQ<Tթz!@-aЄ5Hnxm4DEuSpTQ=_&Q*Uae#S@vrjL\ e5zudӏ1P@ı,cN4;0Ϝq(($Sbcc:XlɒL3c%vEҍJ0rǎ$7%Q;3d y׎clZf0QbJ!릆`5DRMj&̬_/q.omhKid)īimO#&ڞyStZ|vkB)N!Z|-NMCiڠNlu savH}]`ca>R:I݉~_PjFa. 
g3|0Nr,0Oqo S:>Ji3-xn_PAF2ag9MlՅ6=\)1tSFWh9u1Y껟N S,|L?, `mQ xW/q/Ϡ¾SN39vA?y^-5ڟ GJ6Tϑa QUsqc1ZpYY gN 7GhBp_nIXH=]cGl A8!4׷C瘜??g_FjNΒ|ߣo|i|=۸ɷ62G2qs%n T^7 {zCe8=~G1$RgWx=DU*e_PX_ݳ` 7@ɃB+ypo0n? /~}>~Y\/WķŤw(kH0J1bHظ39ČIm2fCt50glG͢wKa.#K֮r#%8ʹI aQ9$HցrXrf|C2 } 4GύqW޼%-Ō 6/؅8 *B6p><= pKe: ;Pve@ئjXmW(PbTQ6b-i@Z&F'Y sj/RC5?x^gKh,B'#‹,s<&Ƅp]$5 /2IbXd"LY9Hu/JǸGja7ng})pk xgR)fdI91{ZcI~ VwKh0gAGׁw3:xZ$ Nn/n75N)%?vaTT2'+>]]ҋqYіHz201zZ[st`"'D/r3 V P_U4JR@6>bO`RiM6 K Q5ILొ=N`Y!b 9[.Oo%cOKq/;ndKpOŭ1#O0q)0 40C@\AdLH? %آމrJx)`ƚ&ʹ؇2oC PK([96_C,sV03Z9IOHȉ zi)°' @4 zBGa)T܀kqug0^Zj폤p[2蘑 Ha67^uh6uhĮ\Q^-"=Qf1?%gBiG^$f"=|$bcaz>֓^OZZ`}UvnVjzRlIC5ݭgBsEITե1zH" bK 6Tڑ0 Q^zQbǗեhXp_KTXúbz-=03kb"uhajN,w+{\P_y_Y;w&~g~w)]ȽmP<޾@۝8xm!zK bKPCR4ڵ 3Zl蒏NP'%%/u7Eک<v79嶘)c{_8pX?,W0lŌ7aLqET5j9hwKrr6LlggflCOǢ j\@S=f~~iGX1C${ԍt(M @`~_H y NRwzydϖ/}i^K`<%^DL+lSΫי?\<7yW? vCoٱM65h6x3k^=C4Hv#mʟ+ӷKUp̀e /j]\z1ڥrŬst3BT$ -#٦s/&ǔB {B/,c5$Pf$FL[m? 7фKO8Z?gWd?aNφE 3ɗX&_|+h`=}ʣOE'ndn1&Pݮb4nrStjCEСFEj2gTpO^m):KpZC+YAꗔbv4LC(IԖeIBjd@ՠO+|plaHZCEa zP ܧӞo[Z#uZXrP/BL{~ۘDg;rXʖ UfZwKzUљ2_G*{Le 1=b#=B/K}-~q)Tڋ55i=D 6jc#="#=//o  *ĕ}xeg\}!eqtSdF([:@-թH'kA\ )ݾӠJ[Qצ1<^v'J&ڞ1RNONKewf~Alr-ƂĘCAbhP=TE-Rhd*fmEV d[Kel^:W̩ ɞhjr1g^,xd<+c}.V쌫EqF 4σ#kJnLŖw~ѧ{=bȒ̛9LhM}ب)KK*YROae&#v_0D*yY5B/P9 Иˠ-0~Q4#?kID%Чj%J~v藶K k|tMľql !O >sJ?G4)nr\%V^cZ'^$/1V6d!T F 3>]'%"', Q!. 9D25m@OvrM9[=&Frp{}uOxun Y/ /{˱ 97ß޽^|)^;{Tc#="/{]^o V]O[6XlmY_'f L(On)ߌzVf`*omA F;tO; > 1yOn&Kϑ `Gwmi /vg ys ( Po^ Xf:26H?9pnܴyE'%ID^FI=X/usV[a_3:k<Ҟ4 F Ȣ/Fi/l&i _B֢q1!VtW^|P'ƈЅ Ɲƺ?7{3v϶*Ȁ[=fs/ʏgpcZY-Z14# ;v5jUc5ή 3vGz6O~W-$ R=瘅J&סtNvN#i_'P|5r\*LI C_m}әxGlNS2}& hOƛKJN+r;QwLidyCp^^f9XrЎc+ؾ١b[a[Ȗ>v>H2ᅼ!֥ |X0'wGK"6u٦ӽI>vGw pE,Az[y "ef*;HO2I.sAr9Z'dWk/1RS_k8+ =Ȟd/H Ҏ }<Nw1;NKǔl޴k] ߠdp&[q'.`Ŝ/! Hn5W( ۢQe8|Tμ8bq6N\!cWP0KZASӷz[mn0UN1ktp)6fb ~;ӏ϶s?z뵍H;E_>hi{dTv}΀:V6hۓfrZhX6t. w!jׇj;? ߺU7{ H<㌭/O廭ZUlO #Zp^(5ٳ[Y7n?o bT/3v HꃮSBtmk"VЍV UZ1}@C,>_,i\<)?r,1T$B\N8\=J[H TrH!d C\rf6X + P `! Yri QooDG$x.>ӄD -uaWNqjZkS=P dDs/Ϳ K6qԃ*5cfx&%μ}\"GqE^l{(DZkt-/PZm‘}%R9=b+*aB,njŒk,њ?oQ E`Đj8\>N~? 
RleQm$6B܂&ڱL>'04ўb[Ԉŷ&:=xv죑42ļ_o1'&ZĜHW>L-h ~Vy5Hy%GߣyP M8mߠEȝνŒF2tXw~#h_y_8./O:"Mxd&;&,1R٤ f^͛`̟ >>U.!P=uh5ebɛ%*Wc;4ؼ.u[[bQyq5D^di7gYd7УOm!~5r6vFk؁6?kzq XiNtPD^:嫽<[of´eϙ3,7Ξ̙6:nP)sk~==Y3f(XmIJAmx!kEXbk^3GWux z}?}cXRNڽ!3 ~2<3Ch=\83\AN /xmc]ǘ5beۏ?nCE1 vl<9̟DF󫭒Ë~5d77.Dc;)QD<U7^"0{4#`ii1pt1wJ5y~ERO[^2lws˳Khczr$"Ubݜ%dMG޼p9;p#|c9 4:}5p [ܢƖd6 -157yޱg55oʉ'%X7b` E]v9ClT 9VGyem g(r\Y\ME@fF ;L0{ىӑ$ L;T4, ls6tx1~*;$RH[&&dk@wQ'dƺM9w ¿s` 2D?E xMع󦾌c@9i΋IǼGYҪ{(yH5gP4-U`Z_N~ΜIӪ''ͦQҪ3g,{&UN Jy^˴ꛒW?'6 HqU7aL xj<^Szk0CaVcD "koUYhםw+#0v$L"S1o٬;':1ǓRnBZD[]UljqL,GR(HU?PvYÌ1%sB0aPm>X-Qs朔$u;] ۥ33b3Y c'Iђ$rtǜT~=!*5Ad庶+l]Φ^ȗ30%*d-;@9^R`VhPdɐ3Bu Yrc ֺ]* 5[8_!}S#:ZWݒ}(V2'`iyXTd $ZDѕg'o@ǭ3z СL 62Ϻ})6<-¤;m%u'NND @F i>|;?>ȧ|W:|;ɥe\v>9S|Wt>Oɛ?a˘|¶V}"'O~HN+;/e{&9>^Xqҳ3-BOZIWJ]x,=qǵ;~1ꖕ׭?K)pj5]7= M7$Ւ{jXP*E[_6+n{jdL/ /77yҦ9#H2XcyVhSVJdT3d(٭3i/ ,-n?пPn[N{`~4+]+-~%^F _Bq/Ke2#evd3uK\f`H'dK^_nŞ 3GɆ8 6(,91~F$3ĥ>ΐL +hzdM\l `S Lۆҽp"o1gĆto| l DkǓ) ,+'mx"òʲ{Č?` FqrD?{\qe">yW9s9kt #2Ce)m?B 'e3 L^qĸBs=q'aO4ӄͷYmF`bxe9KRmByBYPqAQ|ƞ>R [66.ON/GfNbuR',g]K2/rxaz'LJr꼆oZΰY^?9o{`y]䪹˟#iY^?y߃W;?%8so䪔{v_%;~ٕ/'0*>ɇg{Ûs%hUmp`')&oF[Hj̑b3-bo G{#sr~3񍏛[47'T)H]"OXb.%"^Gɳ|cЂhCwtB=&,m%@?/.c˹Ϸ'zY][6M5zduj1,r@' R W)ek|@ F0Eg[~b pYl#A3Jچ=֘sV*D<$;8bEEKl.j poOlI2oBKɲ*d5z m|?&)4Y\dÏ%j9 g ³ oBH) ŴT[3Psk:^oӎRW7Au9!6Kp柶e&k&M.X7ӎa3MsB ='sB4='L {گsmALC(>7n/Ri-Y_6ô(?0? A@Οh<͟a=6MZj6e+ؠZD?@k)s=$xkdϼm]rݓ ܏p4zf^&I'&7[C B;i9Qt1{}NwaARU{|wc>;g~|65c0U@|w&\v9#-`Dמ(obǽ$7jM~YS4 Kob:}_gm#rjv ?{&tzU~~HKLጞh3\w#5&ȗ~*O t{^"Y" 8 +wK):^Nx*'/6 AOG[`|+ F躎z6]%x Wqjs\Q\Z]9L"Ă8iO,ک|,cyf2aim V#Y<0 hFB͑l՛R-|$/%l4htO?jZv,XcKr<|TX@ _v /_+:*u ,DZwc(ϘnMz@+jhP/ PiOJr7ƜHzc</YEڥX$xWa`y FG_0`t cŠ:,:*01E,E"2\/7)&ԘRF <:!|&'xt88aX ߅}X:-eYH}q8@.S8jX>A7*RSX1#q'{M7:o(Ir~<^ΗwHV 0+k̯/0͌B&BRQZ uz*Gvn.̽kO*1z!2^Y"H1K<,A%hhT^˒K0TXr/7ac%jkI='1ſSwӨs:}%5?!ukUn@7 `@+Q 0%V>TFvO - /t G6 tr>0q1y`3&ǂqX%U8@oO,W2? 
s rgc!w&,3)dTgbLE^2 t6?-6" 7&Z·3_F9\\vmAtW1R(Kv ?Nzԁ*XH}j;/ EHO@+s|) z 0:ɦa 8.u+$ϨZIM$S+I0b`p1_.*Pbn ;85և/qcaE.L4 -apeWU|xp3^ku$fM7%Ȕ%1s.|~ [OG,<\BxblM@h+W 0fIV _X $[lCف z#SIr΁4 p-~]3iٟ<9 &c-VD#&~9I$w#@IK69&wiܧZRK,rCw!s,ᕀX4[BI>VzӍ I O.Gʍ㙖zi;|Z3t3F8 Y a n#OkKҙiY `R'>U0tJB3'2XǷ0X %{1-qߙ{Re4%a9csZ;,|aN̶9UԂd_Kx#bٷ]M^*nD{B1#]OX9Q=psK3t.uT}W֣le*_R7cQm#(iy1%WVF DjS~MIJ;Xb<(،UIarL0#~!4ks(0SGHEwn P3`s_F^ą/ \cx'epp.iu|A]LJP'aYyq>BG1^b\G1G}me:ԍe}_+}sĈ Dp%$;8*T K)GGlRP=XP*SLMcS}Խ2uo5nM ㎧,[ϵQzlQ;$zjxs;4 [{ kyMmJ~;ˢ}HfFYP"7|69qy:O%}P!PPѬ<sN*|SS 4K6:4"FGCbX(GK /PFi&C3]r?l6i?zH#z3Gȭ.7v(KXaCKv ,x= :@ c=z [4l~_jjUaآG9FCmO"} f6 ZEX@awړhd\s#;f J#_ޘahXs# R&Ҽ1olqs :>A `C|n6pm̍f1he x^f̮pr">ԿULÚA07 :3Ycrj?fϪן4zM/2ӦWeS2N>vz&ӞZZlsC$5&Ri FIHqv V]cemZOw3טUט;g`^>/տ/{^\ͼ? zń%035X?.5 3XE3aB5D?˒̙aϨ&i/&یt_V&$SNTY_i$_X|{}y{e0"Ә|VG^`|iP0>a4+NI$>Uxz]_DջEUoTwT\.ܬKLrkAa l^ t0ƤvuWrڷ9R- `XdHI' ?dhfc:RWDR+13]iXD/7%^aƑ@B&3$xj,"y#8@0XeZ"P2 r u 1A$F*Gv{=b vH`ُ/`bTa; 9/*td _B?dSNnK 7T{loY?1_S+b8/fXA_i% ~ @$͂ DR;NRWA͂AR?A@AAE4FG}*ILP\<ߦ\Y^+Xa^C' WR*rxq"PYH y2Frd%Jk/Fi=9Jk/Gi=%Jkt} djكtKLh4Ym?ĪU/ʳ*/Eyg?hqH  x*+<$<.x5Y+@z?vb 6 b~H<= F lcG3ۋ V{EIF0Vrh2R6}Bk֝@`D#Xc-Vkɵjz(QRWv+}h8^Mbufnw4;6^g=B-D۠'a:=<@$s\"#Z:|(>E#a*JhsZ@1kuP~>Ilӧкl94$D2Kr{6&PJ˃ט@'hgbHk3De4cѠK(1r%,2v&5}\@)&gݸh44-IsR5B1}V(/. 4*G~2hO.W n'jE)št,@g0N@_m}xXLNQj5" 9 c_7hBM_D FIV5pQ=0 N凌ZpH,˺w-OO\龦ڿ ÃOu!'bW`qjL|&Z׿JjA`/{Sxw =9Bǩ6ZgFĄ_WͨvQ`ؚFxQ"p#&Lag7 L{9G`:)01DG1>ƶc,`0p`<'L8C*wKw]'(Mum(T`цBE Ն4$+E)Yl?󮈻oE+v*p)KJy]Spq)ZZR5e%̫[;q^W' b:a* SSvFK;w$+Z9e&w`˭„ p^Xߝ1=>W؂Qr,@o *NK]a)c 0^s RY,ཀzqϬL:ݐ09?3NO!и;O r}e*o 0; x7[[}sS0oӅs0{Sկ.u뤎%z@>S@z~V~^TmMcUjΕQL3%P0pWU) XʷѶrM(xOُ8KPtbXh` z7{(?ط>֦^9Ŵ7Q|VBlJ:71)U۴fyJ@`e_ˁהvbdym+mm#rMӾPM %.+zj֛U/Fۉ*X6tZhI;Tw>+*"5Gl'5ɊJo`djshNM1$C&5c?>&ч`Fx1YN[|}jtHonwԡo]Eo]Eo]F/I5JCOɍ]jUw*OI@M E E3EюA8w2u LjU-G:`@Y (م.²> $l7l3HՖiKز8l'@`*˖Nb^Ä#UH)V` 0 L%^;pEX7 c#tugպLhhh7 [2v@ECP.„#T=695e@Y,Pzx'jǐ<DLz8|Ƒ|#H6 $ЄpYmW]-ʺEőވ`@@@zn䃮Z }Z|CHHH2k1Ю:KvL! 
ar6+J_&ʍu^nLx\+w& D1Ybӗµrh 6[\tL81\-+=ɎT^}aVn nLV&}0_+w;* ɓYSuV[3+HV.__n&w1z/pI#/if?~uŒ#~GZtv_3٦׋,2BG+|3_s /J鵢0كUk\{Q[#[v6}V/r'x[6P%\Mm~ ^.\N 힉q1I哐"6&Z姲$Ť)O8Cl9jt5umO4sb$hN*o7bIЧ:Wl@ٍ-+&k#Z]ŸkE)Kܚ,t;+t G\<(tb~g.Az'+trw%(U5ʭHN_WyFFBdaBwu?ƴ7e<:V6e cj=]n<DWOè@N%[3Pz86B͕Rm wF6bba(v! fZ1 :V YLKP̥ rݥtL#-BceiMTK>XpRPA Y{K#8jN[LuS[pL Ru:ІMpaO0;PTڥc=sݩ̯NˊD+%~1a?r$r2ـŜj:Q:`?NO(%i,a I;̶J[ny ӣ'BbJbN Qy֧8tV-"sk4@4T|7!g(V) szps xF,3Ec=;9L'*L'DS@l~iSS\)c^3|5W!pb 2 bؒp81 <j-*?myOk6@O[1}Y =YjnJW](PbepW}S4žQi8LٰԗVN&TuaAhyXm 6ѷn*`V[UEo s?T-cKT[[酿dЄ!!|Q/oѵȝ7?>%s^Tsl4Q OSxsznz1O2@7R _t&I(5(vc=U$X|CN|G?|E=X(fj|{`]5|"K+bĺ",21o<@/S] /II{?O9tzY~һV(6yzB3GT1p,/@*ɞ{bMN_Mi6;v @jp<]y`&f}{/kvҭo|S\ʶ3.s&o̙2gre䃗9=;j'Н7ф⼿W ݤzU7 (Zx^B`"0 L=~<8&y¹&WhO QAx),c~08>Uhay?Fg'cẕ *.i/ioyr''4gz 9l<͆4KIo+°3 `Y=\9ɣfY;Ӈ$܊*oT@Gġ_5R'9`Xd٭+\1[s0VgIin6Ԡm;@4~0aLpޓZjSxs\-.9UYDhħē)Ո OYM͎l`>i `X{hT2ъ=d8$uڋO8?о|,ZVg7oOoRՏb69%xJjZ)ۋеz2@X;ph@%ж5M*Rf~PNضl@A.;~7OpP0Y&ZY[B(~8d"L@35&`7&AVv\>XeL٧0yK@}u>F7ZxM-xC-+ {Wn~0dX^iBn B;/?'$Pq {0WhߒK67/(30Av<~ x[tLҚ.6O5|=v귬5viO(w)a7Ibl&(Wܮ„ d!o`7߂$F%{:u?/ 餲ѬNBeti_9kN(Ւ\1_i%5!^g"h/)n{`oyOs=4rϿz_}NkOk;Nk;Ok!װkim5nQ͐v4ϿA̿8?~i'?W~e v݈uٿM_w_>OJ>c>k7@}v5߹nﳻQ>}v-]h}c}vZN{]m}v[һv]ZgnZ>[(4_.%uG-}voq[V V}/LFKvNl_KyHvPl <>;c`ٽGohmyuBnmhFZBDn3v7=͕_?:jb?|Rިkfvg¯^u{Ҭz+3z^ҸS4'o=k_p6VH[|C 6d`_ '5uݙQ>;z+ {jݛt{ k3/{Š= {{gFcMA}v)yMLnRka:"Hg7U3}|UaߦȮ>'1G׽.ok}мbUԔ4u\z>kg6}]TuDq7}t)n[MvzxyM~nzq]&d/8a nZZbM"y&7L_@>iDe4o׷Pvw3!+7q[n~*Tzon~53g!2K#=E\`-RՍ5v?=H[(_hhWݏ}k+31=it~/Ϻ>w&_| `}0W[X>xcOY{PgОX^0vS$Qz >}l- )bwm3'veN&Fl[u*L=~+-*bnN;h00ZOxB ]h'.Eǖ砒{ѮdqW`@KOs" صsd̯,SD-۶TڞW}-{Bsҵ ɩ8RwZ@(N n51bᕪ ֩SeaZMK脥i|kHݲwBb~Qc25Q2N?ϾM?Oь-QE]4{ߤ n3w#ݕ~NSY@= ɹ\+A@d*ʃؾ9&qL̄NK;rOOgSrݣ_}2:MW/ʶk8~,=x(핸iqȻ]|/rE>{\|/rE&^lz#/.gvTLgh_1ҟ+( g]_+>{G}`{@F qMoOebLƹ4* ({QW Kq;^y;凰Fb0tvBfpmPUV3WK? 
wSN Ϩ-~*#)_ު\tBUV囝ףUy^v{kC/=|NwrN9NΉgwrNs|(IZn1X$aQi=,!@@@@?M%їi̓sgwHo`,#q੡SPHc맙CqnyH7:I[.>t VAd`=,hGaKX[ y+3N}ȿbSU9,(sns8AGCoo *:34Md3WNz1Ё'ɺrR^q-מ?kʪ:x77WE6^x,FCQ\YCEQŮ]~v7Vf/ }ic\)-/7${<ÑGHݲ]ț@٨!V5ˊ$[@Ƅ2=T'i5$0_,ٛg1#p ;Łn׀q5Ei&MG+{P}ߥ|MUŞE#x=8"aSb6~' V?w^0e05%}+kp20  anZxQ9E/xƣXU y2`+H 2VL7`^mim ,y'.ruB{u}A v/*8`߬D<,~+i0"̣rdVl l+og+olZ*LCK)E_PpU:~mn @OaI@p.GNvIvx?r盿/LΒx:?v6ꅧrwF<&A:~6/~gg* ۂ\uPȾuN#Hn<-E|iͯul|>a2Lix"IPSE,jjU>3׼c`M]ꎶeK"R/';0 Ӏb{-Rr|+3v}! 878GXܧUYxm|7 l ZmS64v8~: -nױqu k?CqTxkMʵT3OH?vg3./cB 7 uʷ-p1ҋ- M z6^e=8 >w0 hu;uoc`~87Oo~=с\)VW~z# :; f`Moݔ-%{nȽG7زy>`UO]xMb3ǘ,&7Z?`~C]] sx# eYRe_ϟtT{*"<_+{P0 =c1g0WCAt=׸|?k||+|T\Y#3ɯg+'̷&Ɠ̽s_2@x-2xЍ,2TŰIZtKpY6 k{EW k[H5g`+/ okwYLUvd ܸtpWTqs_ ëM0:ЯrAD)ڮJ{3U1 ] cPp Cuݮ6W1vª~Fs]WLJv5!Fr\PwѾQ/pˠ0hŌyxA g^w'fƜ甹F$۵eC 'uXrb^_so#rVĊ "4a WVAr/c !l{.\␣1~N͙-$nSqKp`V"ݜ EhBVnsAH~$x\ɱb% q:* ߫j 6H\i#կY3-] MDSU#@PdU@O3s)< '5_ٿ4MeY^Q(*tqVf$z❦ˈѾR0ݾUTЩ|/F=Y ݵ@_ XۏrF{˄#] *whڼ`7Wt6\%EKaq1horzgVcH.!r`s_}}SRt2LTA?(W3,9C&=ǐw\?va4fl%-ʙ8Y5X)‘,yurц˒C‹b~2ݚ:I2?3ta.z΁o((@T1j2u3ۅz5>5Q c3Qzo ;<~ Cpa AYɯ~卄7Zؽ8*X{t2>GSRȃ>ZI .1YYk%8ٓ14ty3Z-̞tW)\rxQ+EL̯~F`Cѕj\ JsԍjM跾|}؇/q߆ $ډI{Tq)<H#M>Y<#;;ƕ'?PxeY* BhS+}32_oj GL_pML_A':IjDFZAcJ9rGkˠ_@ jɿz:CwQXb֟H)|RuuՂ~CA ПAN*W'[\&ǭloGFsǔQiT52(O8c(.\+U rJ&*3 p270ϤQ`FZf*UYF +9ܯjQȩ( 3\RIM02/x'5:*VKʊ0Om`A?r!EOTGTԅ+BzԀ(ŽMߛv3i[.׶ `)h5%.O/zt>gV"?T쬿b9u8 b֏ݜ-بgk 5e`I]9u|NYW _3#X͕9+Fse(j(`5+3Due1}Y{J3WnzLey$[w/(Q9||uzcpp~(w FUSpEvLJn_%܏S`MuE t6oeQ 2fG'.Z6 }޷iFX.aþdFب/5#AFwY`nxЩ -&1D2k]Ze|pB]M_Ĵϴ`/AGk_]lj ZDd)9CK5)eXWe_kM7i2~9.'35eW[0}Ke{q%ѱoN2g 0qOea\Ė~;=?Q r;U;T"Q@)mdq(õ^Dk ?I ,>vX^Sp1Xqyg-ir#~3`WRWJPʿ暲9Oy =V,բDƄ`fSe8ixx#_YOһ.Z3- ؼ`*W9]N0\uO"Av8MԋʦE;XTRya 9 $ױq.rGZCHrnuy*&b`ъ vy=سd.dA"F}#[ldV7Gm#hvⓈ?D%yqLz?J}x|`:6[X#tQG:{Ӵb=EZu4~b/Q(R1݉vGl+e)S_npIްws#hzqzg| '-")mBzs/ZbU=cR"OXomAcdlr&p#?y]i-c &ؕXܟoCƖky-kVzJk؅_"C]:[F+)!"!˕R┇E}ܮC;_=o:ϔ*<We>mpi})8GcX(  4eԃM 4Cۧ|A@qltO:MT5imaxp{.'4a~G4%ow+1FodΎ CZ_RKEFh$h|XkF|̥G")E5b>| zD7*˾e7& GW9ܾCDJ7f?|2Ve5xUUȪ [p5߈Z'gWm]jh*+0`.ayQ/Sć |-*vzvQɕX=dTDr;Л8# 
0pg,o"qp=Έb^aŶ|-HقE&2@O_fxE^6k/~J3GN"C(V%BzS㊬  ]G[(GVΌgq`|n1ҖN;:ߪ,hbb 12)RyMUqAfUn[ZoP[-R6H&SMon~e1i h#z2X 8ْvd{-iX,iW-i0h*?J7,_ь#odo0`'n'ɹʲŊ =Oʫ^2OlmR̰VY~K[eɩHȡU UpqT?VOJֿcȒ>džk?0ÉwaoSVq߶ˤE',wz֙z*P^2 +gu^iXEYmuz>ֹ?ˢk?ϯ3(14Ϛ~LgA5g5 5;~冫ݟzVW?2=准`g=vVegwH|fhR5]w@hF݃SvчLfT,l*_ ޟu>>S>1\Yɟ^75_QI-ITLs* MzV'MAG++Mޟ'&^NAVtY6Vޟ>Z/_%U{ߟ57ZI 6cCJV?CVq U tuZvg! {:g^\Ez2f7fmEzڰP.G_Y7-5sY6?k+ ?kIA?; vT zsgŪȮ=č Wܟu|~{䣺,2x8&YO|ԿcrxB phyxeOxmC=1~ȴW g3}1N| 22Z?oGxue,E-,ZFh-c\; m:2L"xwHXQV}2Z0!+aX;^Ƹ q5^XEyanCSy_|rYw2S /PʜJgUŋh>s~Ss/zď=#9Ӡ3cZ1}eK/=/Ϛ|[;>^|,&we}=&go`}魜 Am%t<#\x`7s@Z9^Ң!/N|az}u,m^"W դ,TzDK\SўK3nO^U%`B7e~KtWT:gAbhm|:`O;=fi`P Cuu{m.1C/8ަoLZQV xZ=D%t,$ѻCNST#;7-@K$W~U̔}=72g.w)T}tRފkJ9 y]^W#MvSNҁJ3.--RM,(fTz1yFxS,8ެK63h>ȝ1v݋Oo9HH`^;=ex ñP3ע6ۜr{h;\1pّӰtLtϰOn>5!eI/J]{O^4.{ߣ,+.kWNBj{Ftz"ʼIܱ+)2.“bvJ/a,&5HT Mv"HJK<#:JIHQłBNfDKh O%q|K9UQ!X8(jﻩ9ĩΜ&ޣ !d/|No=:>Ns>.z~秢6IPA5|}]DU*O.`tOOe)wȗ*ϴ=%Uo9<{;RKNb3}fHknNO93I*diokLge |d**7+7oVXUzNB TQ)ѝt3t4ݳ?.LIsGG69-دshpgP2c?w**| ^Oѣ4|x jd//~7YheNo`:=SoGQ+jd5}UOԤ8uyNgz3qJ"FS2tOHZ;FІ1vj]z/Oϡ jmPӑ;XlۗF}yӾj;ۗ}9ѾS4}Y/}viTyh_QcӾ_J\G8qT#20R*R_)③V~8үV~<үV~:>+%Sȼȼ'ȼ8_'A}~޻-mUUTU2*LۛvE~|@})k2i=,+ewGU*=Hߖ~-{Фl7mD~ u5A_~=u~7HF6o~ FM3oR7)R<(t=*wBX!m6)QiЌ*o&&Lԡ՜IҰNE[fmu6zS 4n$UMJWp~oɫ^;56qJ j\o(*`}/kV6nKn2NpG M~;ʯ~[C ;[3i?VU&. /qz/Xq~;)bP7zȋ ;}LoF;D*{xx=peބUf&Dm$yo8*MjspP<:! F[xWaimG6|M'b;iSWXްX,߶^Xlb:6`oM?Tӽ òQvIb+lKOHKbr_lb(lV̫Q²-/o={;fTry7 G%),lPht7QKRr|5Peleh cy#r{`ii &+G[}8V4ood855;6k5~cG kZ5#/%MVI3SIYN?+茣:^]HmeQ'^ewY8 HF0jUATLPQT(or Uh#6>w )<&!0⼠?Gf1#Vp"מme' Fr^B0x*=*61. 
o< clh$]JYO E;6qr j27Q"1AbhkWn°.C؄v EX&Ǭ%ճy!Y*D3cz79ãvbҨBA{elF)H޼ fW-kQ2H0M4;O?K-69Yc?جR, [&laR.}Xf=F)6%%U%tcfF}ipqhĦ8'ϛBl Uڰs)vݙoR}$,axaY!Ðdgԅ[B?h{[8A[ۂY'hQV0Z-SPb&s~)e*MJa.|8xeŌ#flZ-pB dDe&a;X{CKEVg=!A5u2UGƒ?Ddq[z,WnQ^.O#pM@!Kn!V̛4vgٞ6+M巁֐_c"#@jܮR԰(`hC(שF쒁CIMD[M!k~WTlR=(g3afRc,G\kC%Ng9QbAȻ$lIzm>%DEI1rT?{~/wQ6&rrS4b;J#TIkʱĬJH4]B۠hog<Ǯ/dn_=4,l!C@Hجۮ١5NXy4&e%$U}Zǯ]bU7ޤS7.e2I:=X RmPdCB=P752LV}tE?߄(xinDst2ij~{?F3L}pY\v)2&JYSt_[56;&_:z簜-۞MY" ~ݓq΍'yO ŰIQ4R9& X h @˵G[b);8K iM)D #?&u2()-"rіK0gA'i_r#5{{5v*KD{ WKf,GHIo(cy랳U[.٣l_ޛOpKES]èyw=*c'q>Rap]9{Q.42w.ѶVؓ=3CdOpޔ" o{nsv|GhoR58CTZG\&\TnP&Qެq >zzU66B=(+0HC^9ԨT\ 5P);yUj`ob30I-B˓&bWUVȏ7b0^[ryN]KRKɉMjěPC5JY -2T$띸.\]{ë&* ҵTkR^oY-[,T R};/K;{۩(%~ Lm{/$mirR&㒔Uδ̖qmݫĦĪvG%bl{*TҶ&odzr"zoC*l7Iځy265 oGm罭U}:4(.Q<!Lě*w:xx-R{V)q1czP[#g֥Jzl6NșAffK3 ?4UdzlR>>ҽ&ʺoj7irF:ɯ;U}2˿RmN𮗩':BiM}5o3b AOs{E~|F I)Lnj9-Vn\Q1EaTiJʰ)5`[*%=+șTo`кZlV䴢]ߏRRVh*TѸ" /$GN;Rr?M"U#JwJF\@% 9 7%7o5-k.nQKUs7Nn:qX'Uc~%۫Jr3Z0+ۛz`L65p;HlجMk쫱 6yoJJeeN5[jҒR<>'qLobOuo(۱>]&z]./>q˛\v%=jy%FTܨ>ih:,Z52U_;ڟ>8{\;Y>])fkYDu:Qە1SRY-k*M{ǰUrq. 
iMvsSδIj?6Ku|: [:Dũ+koj`G!"bi5`r7!W)!I 4r(&o3Hz&@΋7ngx~`C4黆ŎjMOc*ۤPa>}a}T* Q$<3 ͼ2Zq9)=uXQT))\3.xqs{3#tVkT :eQݎ!Ё\[!,ɼOd*OxipȢ0ȩ.]i3W҄ҹd|,@bĄ@r0wQq2#1pp㒣 ׭;yreQ-^\z=郚xqx]ZwMa:դlFN-`ˈhtL̛T9~AŞQ{_4RzIۡ/>"Цȏp'L4묣6 7roT]Mf$xGS`ߋv v*9ZU{ѽTw8 \6reŸ2G=+yR=2G?HAo1 G=e߄񈭧c7m1L\|hTր[4-Ujxk117΢]iYۇ>HlTW{B@BlLz(ݨ56Glw=5,2rroZ:Q\J2*ϰn|)*NҞ c{eɏ9E $' وƝnTM ^󼖗Oe]uCGR9c,9x6"ʋ ѽpv\Tr׃&<_ˏ`-^uȋpeedb\&#dƿ*_uK]0W2UM OPn/L~_;T6ryJqK7XmB;…/-{wKg aZmz)NmSt#yS=C$]!ު6n-i—|:dDNK)Y5Ӭaօۯh x䙤8ddMlⴉ^0٘n Q-5ގKK q|Giߵĭ\o62[%чTGS _hy:.(3b`~9՛ڵA{-Lw3nRԇMm \t?iv{)+jCr-4e3$=Z{Hӑӄ&T4+_5ZT=UH)ѻػn DU 3 .!'Ӳѝm(rpVkDJ=Itom[|EFIM¦׷SdPɤboRyْ GY{3,F֙]v9g6g6I8AYӄ~=P8E6Mɐ'I+ q& ADq`3FT(5BC48ozWw:Rh,y{w(~[遲_[Nzg>燥iC%\e]U֩N55m4ز1Ka LXDNYY9 aT%O( Z9Nxyo424E N"3 /!J_-bcȿXF;mQ *MK|=ONU T'BPr5Bo-&F~4}w/ (ݦ;fަ)K#!(a4ȜHICzzˁ@dh^*&HgM1): ʱzQ[4?;pWo+K+0N5&ꀌפL\8?yCiUDڗ IHֵĀJS֥9n뿨IFȗQD4 MA9#+9MKuƁ:_} ύ/:pW锩z(E53~MCJiNql_m̨QSoN8s [:zAg2$&Q/f<,"| 6mq?$>HO9(i"jDE) O1 ˉvʐ)Uod_}2\H=$oTET6ˠsS&oe?[]k:(4 8gLv2]y]WVVů=?l4QZj _77==95䘚W8/ԌC ix.pW.!ߪ,{nCZ=;+w]Bom, Ys.J -^j1z{rsrGҕ|Ab^w (7Kh~B5e3kLOM*շ3.UXi#?IAT<9>}z2ÿ}_ U-ٱ]Ŭ]I+KB 9zz{gcXd~jclΜN0O?[ű8-"<~[Ђ[24ʾ]p=.9/sq&+s`62Lۥdƾd4Xʼe:`Mi6MV3<v:]Pry[{ vަ &/@4UNXGFHL ǗR %`O7szmдi'~M9 c})T?IuzRY;N90lص8p2iF`SQg8PU<%j"w*[T3i$/mJ]iuSJߍw)i?1(6Q)r{zEK-Fn9G܃I&+%6a+6kS՚j0Mсh ý-O 45* iK=USU~-J_F\A_Imk2D8JV]~5pO=` %Uj7Dѭs=IKZ> U"5tA ׋WCʄh8w树`$:s6'l*rIM#2L?m؟~FV 'T#m L-lCJNre#iJA3FNR>+=>74I@G^Cq LRvd4k;m3_E'{U3z%(z8oǕ.p*gde-CvVR&8AB=:=sD"E֨:ל3"oQ"r*v%;!0ƬME!0O0jg/LN!"]C ocm]tЅ ~4$*Lh0}{x6 Db"4-D\] F Kj.oԮ;uLSKu-"%yc,7qs=xhեE)Wz7ij}%4Bc9TQ#)ؠMғ&!w $3~BWҠ.8M/.Zc7f4;WcS\'Fkv:Y ͙vBM3_)_UɯĴKȊ)8j';Ȗ[zO:O90= {_lrIIyOzp5yOz[oSI'^]ޓQ/IY=={R=UsxOiޓ:o't=>zO,{a s_}V{{ҹ',yO󠻼'jiSloc{q{ҹ9dΪÛʣo*\_~WEleŸ?O|_RlIZ.{ϕ{>?O+q?[g*KIC͉ĖJQй^'=ҿYQ_2+w'j}x |`JޓTV0ޓarzOQޓwm~hkԾ9gIWA^_UG2JNIwGז=!=i¿tw'MmA[>[o:c&יԙr\}"71>=]Ij|)29\N ~GN:S"'.ī.DNmGnC'wjpqhY䎨xђ7Te!r7D@FeױkJFjB9>p N9kfaW)3c7qX)18vza!b~aqEN+C[Q3ڭJj*k;}\R JkQ90cn_yOk杈Nzbgb=*=s ҝ-gsGlV|E8Ϲ/ؖ<e|`)ټH0vv@L]]1p1%g r|iZ} dca> 
bUtoy^M5.UPqT]B0ObBB7p2B**okʁ^fSRw^Go^o⡸čTߑDWJ6}wϪZE Q 1vrqH`%);nT*KI]aϷ( ޺&Kjz pMU'.TW]R&vqOB \:Z%7RM F&T"}uyf&墇T+69׸`l.⚮ĀjhDV؇xl|Li9J&u-Mt[V[52ndq% m~FF\#Bɵ/y\P}M.v= w1*2wq [ ~Sg.7Ab(V\t#RagnU/.+.n\HIg\YH6Jf}ʞ\H+T¢A=mR]ܠ^ea`3ukk(h3yY/<=`pOmv?$5^kX uOeTÃm@kH,\۳8%mcuT⚋ޤ:p*k2Io|,__*Y$?_L,0_O2wSY~o2ggS9WS.wp/?m T/U^^*k'M'4VeXLo@67:TicD&5n_(J vg% IujWgq߅(~7aG~w/+)}g;x: 6jf_59e`5,Ȼ u"OZD6tȿ ov>T|8ɫ2W;")WxhiBvox_S.9%~h?+#O99kqŋ캲~qQWQP2Y0{W6Y"ow8:OVXB?u,1b>\]\{J}߽XY7YloYlR[5˾lvڧK iM{EWWv×4~YSCQ/?y'ua!K}xp6V4R^2?Sf5U&҇ٻl$}˪9Wguj9=N.COTuaOG" UbXr,ޠ7~9|G֑񡪈ExŸ,eKY:{6ln*Wl[@$ ~Koa/[8, :FV^?XYV>T{ֳ~[S֓˨HN*oߝn{ rU{4$ڈhcM6#ڂh+^mmO#v"څh7AD'ڇh_D@&:p#":8N":TӉ :,Ht.D}Bo]DmGt ѥD +"Z_]Gt=n&v;&5}D=H4!z )>%z^&zU׉ zh@DCE4h Zhň Me'ZheUV'Zhmu'ڐh#6!ڌh z!G=юD$ډh݈>B^D{}h}Otg":PÉ$:N :dSN':L&"ѹD]@U Atѷ.&%D]F#+$k~AtDSn$Vۉ$D=@ CDӈ&z1'~OG&zy^"zW^'z- *D &J4h.QDGBD-Fh-K<ъD+J:њDkK>цDmL fD[mEK 8v$ NDF="ڛDK4h>CtDNt$QDGtID'Jt:DgEt6%:]H Mt1.!2]At%UD]K 눮'Bt#DNt'D&"F0#D=A{?=EG4ѳD@DJ:DonR'@4h0PDs"h>"Zh1%F-E4hYV$ZhUՉ$Zh]6$ڈhcM6#ڂh+^mmO#v"څh7AD'ڇh_D@&:p#":8N":TӉ :,Ht.D}Bo]DmGt ѥD +"Z_]Gt=n&v;&5}D=H4!z )>%z^&zU׉ zu?D 'hH.D}h&8>D'ڟDt0ѡDIt1D@tDNtљDgMEs'D}"o]L=K.%GDW]It5D:뉦Ht3ѭDIt7ѯ#zA=LcDODO=M,D/D2+DN[D,*D &J4h.QDGBD-Fh-K<ъD+J:њDkK>цDmL fD[mEK 8v$ NDF="ڛDK4h>CtDNt$QDGtID'Jt:DgEt6%:]H Mt1.!2]At%UD]K 눮'Bt#DNt'D&"F0#D=A{?=EG4ѳD@DJ:Do,7?D 'hH.D}h&8>D'ڟDt0ѡDIt1D@tDNtљDgMEs'D}"o]L=K.%GDW]It5D:뉦Ht3ѭDIt7ѯ#zA=LcDODO=M,D/D2+fi㒋e5}(Q;;o t20mLa´ 0-i(L`|AL09 &< LA2g|) f(f0i L8V5a49~L%`: S>. ˥L8+"L8az&)a fa.a¦O`z0-^Gq?o+e)ȍ a* SR LS?4&M0 S Lk`*VptQaNagt <ʄSOt 0${0턩Gp$~Lk`Z2N6Lʄ]k4 0Mi LH L`:S_"L Ls`´0 7SErL0U S8L`rL-7T(D~iLaz}0mi+L'`Z+TV0L3`L` p40U2Qp>#0u!LKa 92~0L0uT[(B.t0UR0L0mdneZ S?>i)La:ӫ0ʣLaT^i L` SS{z4N0m L|0Յ?LaJLT]0SL!ueL`: 0i=L`YLO0ML`:|.4\T0QcqLaj`zF0&Lsa*JJ|0 S(Lv!05ejY:=LO(iLaz /`Z vL?_az&Wa0E40 L),0L]`z8LT0Ui&L10S>) ` i3L7*.t&L`:SLa +0T50ՁiL-`z&\0͂i0La(^iLOaS L´&/L`j j+SkJKrIu-c>\q]mS&NP$p|*Ng@`"k__t l&@9սjHMm-5odCTf(փb5!Co28R:>Bz, "Ylx! 7Uҡn؛]qmL!Pe"y<|"d9-m7N.ppy!uxo]- h|j# :A% ufM ! 
{LP>)/urI.6h腬6W={R8l/V 'VaaigZ.Zqaִim6ѴhZ[)-ڟ6|9onQt@k܆NMkf|ieZ-ŴǴvEi`X, /ciq`iYP"4gbkςe7GP,KuM_=EX(xyVx zX#ib'X B`-_ [+-'5 ^ %L 4DhhMA 2hD.8h y 2,d>dY",n-܃ 6LOap3^k pki σS<İ İVhp֦^0 i;ZaZ+jX `"8mB k}Mk#$imim)8/hG)fXkfH{((;*r Emo{Og8T՜-h+495QyPm̓-kV:hN| 9<8 ֜ul]h)\gkW༂OO70^FO .6=l1j*Hphd pӶ=Z`./Zy+%訬ŢY_(2HyZj qF8 QQ=?Ds5Tt$%!J9@/ RԸ<Lػ62@"8 A1#`Xɲ[h ;P̌`.\_!`xw#;[[k]"i9xKcȂ 6ۓR ehQK4кKY%>6'"ݒ?'? w"[qGo = @.Gשּׂg+4!q.ÀB?k 0Ov@~d,lM,Vţ}MyIR8hZT$m#TY=׮# v ZwΫ&Sut쾳YTͣ uq&܃f I@eei⒳T| Q)pe4e[] e7+HС>UF߅Ę"3ULs#j FY-;R1c1X!s_J~+89si)2 Ke/oÀ5*dj->K9]BrS@B]@6G~@A()1\ -p*1} ;$1pZbJx-H$9S/q]eu\ҟU|y}6x <=%Y4M5'r($*IA{Zb<1N0%K xoK1n8ccuݑ?uYW;'Vt\W٪&::}ZF?`'/:gΚS\f3%dmpL6sg8U+{7Y1.n&թOSݥ$PNIY{_#Kl0} 0Q慾(I< jS4)/S1 6~`r"vbͣ&I>9ȽM2jpkr ီAqaqHXAyHBQ ! Z&)1eJhp\\\O诣tnCx c "v(V e2k*V=sdI͢O=+Jh%*D7pL!k'x<-|&~:2ʏr6e4dV >혵%Z>YV&7xDگ ?qxqQ]_񰕤͓U 1Md:NG.ǺM| bOI)0+u99wjWATҳIAK9𽀫tFvu0[Gă3LJ= Yn:8`˃g{d$k(QSa-Vrzpʑ5hȻ` x4pLxNQ#[jRvDɟӁB1ĥ23lܪB6q@kTllv?is,`Nx,S IK1mB6E\N`DmP.P.h(. P9sܴ eE/M0wKH&2 +)4V+IC4''Mi4}US;sY}= =3lv73Ouc{ٙ@Q/,f5:Zm[4/KAp-9\Z`xjV,y7-huWX\9>(%Zũ8ЮN6W,mS a<@ IqC[p/n&\ kI&i.2ŨKV[醈n@5@|EnD%kҸޗfG5Ұ`9|OuyDeN(IjV9PxEPe׈5rs@YZnYO2ZܧU0Xऌh4`v'X.~=rFT,z՜?f:멵lv,j}?;yIГyZ!̈́:SEG[u-M@4#زb5y3)P)9@yꄽP@ O?@Y+m5u|~"(ڵ|r琎SS̀z"@+d kY嵀Oat`x_XyG @tĩj1>3GȜ?ZHQ`@ ;iXO[oZ<hqz23_Teм A"SwC8߀!gZ X?m,N4ӟf*KP_Jq@FPhp#3Z9$iFP\F? w-4֜,[wIeZ:JBVTw,Hl߱6XJ_BUW.N\,n\>"sK*֕ hZIowcvށʅY3_8Z`OV9UdB/vϲ.P5s0,aM1SуDu|(Sm6ϰ]Cv1~JOO -E`چќZxOmWh 5z:_|=s2k6N(]/j 葠wQRsXwW-%kԪkwGau.H-,1bJ'<}o񎃛psTҽoЧ!#Q8U KH+;6N ? 
^oDIBs 9tUB+Þ̸>#q}-K4=FB], ,VllY8o@ZTJk VQMуOkNW'h^e+*NX(EKJ-6vquwgf[N(ԅ5g8"]V:>dprB-U (qӤ8,AGB4ZV-~hIfo6z%MEeW':5d;c˺0_JDd[n]tnI饝/tg1C NpIrS]ҍSLGqݙn1Y;OZƌ5$ӛ$ᔕ68hi7SJU mm)tU3`@a^K"G-c6 [ߚGz]j#vяӄaW 4gKY%( vJp6[XsJNÑ:/n?Rǣ: 6&fqRFHagdKN[i0TPNBa 2]#oWs6t7L qʾel?Ӷ=N_Pd!i :i5>XslD-T $FյO{љm#Uf5b(CZ;t.k&踢K$Jےk(qJ\1c€*WL⊩|:1K\g=$q'q%֨=ĕ\31jlvvhS ;WmTJ&A$l W9v{J8c!+jh 8Jmq<,C1g语,uư!!hPgw@c}r[bH-lo0 :4mqDx cΤQ!yA#6;Au€2ntO*H] e;-yT"ftVk[-7Ps02 VZ\+XC6G+IaLGm 3ZIbR4G"ҨOXz6;b4j4'1|Pi44s]fO%d g}?w̗`/h7hYi{?K8~!"DS84s6zy,~%a5 \`D6&k.˿2ZnU{(GVp{Z^Wubb#a>&g{ч4X5, cIӴvn{ efewq|L3n+(Xx[6ZZ Ǯ-$_`WΖ6_{)%6eau LҜ-npZVEMf mM& k%r?I}6h)t誝v%<[eN@3d*կ𔜁 ?C9{&J첀Df(ȡƅ V@9KQCL.̼v? g!!6c"q~pRPp =b=t(UpW!M`vQ ^DA#TKr+@< 360mQN* Ry!mP8OR7B S-< v|!`DG8y>"2VvUz$x.NO\F S. Me`a.#c5AqHAk*FDLͣ6GhE"]"쎔*J( բM{Sc$hf`c풸Ѷw=#dtG{RMK iD഑ `w_5xG!q9OB_VRYdJ,')8} c'M%pJ:zVZseH['*E9/Fwq66̢ڃ`z9 eE0`[ܔ 10(X.E8 n6Kj!HYQZP-(!7B'RXI*`,T&V@~L0`! O.CZY=Tg6 p# k 8~T#%KQ8WZmXS]=&܈yd>֞c̹d }9Pi{9{,Rm1W'"Y)m;CW{C Ha{[vW'-屺`+Nx" }nqUT td~4ci>~>ͱ+Xo%Yso Ai>Uܤ ܱsg3MY 7J $s7{ȭnYŸ,:s0'8M%f$Q7hɱ [-˿{V !c)-+9G<]FӤ ΋^_O=k8}V]4ɰ(dDO@'%zLeͽHL|U5lB/;A C9H%]+[5vO6N &, r?t%78Ux܀#"nnxN%O8]Fsp)E  'A1! Q0d#G gǫEm<#^'|[8O9g4Z88h7|!朑MĚb j^Q_0y8(~p q jsKb2C.tI(pNyKHgזĺӇ[|P"E~mI}`pa0Wn4?i_m|=IIMznOuRS=,f6')Lw~4%a-FSn6Rsؕf mB5ಿݶq$}%i\f(D1ܠ5Z[޶;6]>8Ow51l_0~S.[#HDBDwͅALVo&=D]󱔳>=Y {nM5n+ a¹ 0,g/ g 8zFN{4mbiK3MihGҔ.))GKom tFiuFI8\}uU޽USNM\Nҽ<4wj M#!n+H5rj8&#n\#DW|]0A=/JU]IJ"R'*R 7ΆRZqizG=mk`mͶmٶm62QlJ8bmHpn*m *ZHT\D # }L *M Iim&))i l!;Mgқ iΰ*:N sT4Y P:bmrI'.'k_;+dX_=.Y{د^H?a5gl)+)5q 8yByGa6)ƣD1ޔP-yl0C|$q o=5`ɪAhndTv~*/5pMyk7e +4W1GUm)&k%-U ^D#1^RkdU F^ D2xs@gwl6NmE@/`1721^N(w1 2VLk/p0̧<=оK ; #(VFzH'E; kk8[ xBi_{kJ;:HgAg>qWwK~a58\G@2Dn+@K=ik%nx:B 褫4q52aPRF%z=JsMqA$d,xYײh[9j1)~g@g*|Ĺ/+k*_*{s~kaR7UD`I&`)sZ=aI}Va]*[W)zxL}(K+ámW,zzNH! 
i"L"׶2/@O } 묜kC Y KЫS )1q8C$-)P03<}{wޭi紴;Z[hٜM+V순ڏ7?sk_GTo|"[h` _Bxt\"R"͎8H>0ICHI<E8Ҥz'4ύQИ P L!{aå=`lŜKpExgM$ ( ^&N`0oJ0#L*]/(©Slh =Z)epK^L`y[Uơ U#Kњ61ڟ9a\ ?g]{EU p18AR 6+;:s'xM4*~ vPZbOH;BE>X&H y& `X\8n.@W&'k"1!B(Y۹y&qn1qaN<86q:= y98>k>̂||,83 uK*)OxgT~gQ~^'Ώ!"f7Jp0'q>*ȡ ηHqz$❡I+փsO¼'89bœ,Z%U"i(S q S 079"={xq949Sx" Έh&Ipʕ48IgS9Ӡg0q ;p֕eE*͜KqmQ'/uz\2mcMMCWv51ܜjnnS9A5簵XМiBH u .>҂(5o]q~nx^g[!-P/5ȷ-y!@Vqzbj nVRZ.2ъr?ffbkq٭9riI,8ZsH/Y]!yywJ!v^F!^#yPAKpNq>p"nqn?m{[n]pm|9ρFp^{!|=&K'yEkl's\8 lp;338/t68g48.y WaTWlf7(RaGEKC ai~~X ( q#FCċ4. 1<՝@ݹ!= 1q1cg㆘8qCLgSqn3U8qn-`k< 1qf> 1 NpCLWpZ8$OrC<ԓrR œyIY+ @@!ЗBFe/W@3*Sx*Df:8k61/ۉ5+S [5DO0,B=EqKHܝ{2 qW O1 9`)3gƳ>Up&p@ZN='p#r׽Iek'2VyYpz3XTpGWB'1(1 E@ .0mk 8p^Q%0(P:"W="QK$uONrO2+' K{N h ]e )fMY~SŖpCQ^ ݦ1ir5͘8LT|(`. (5]* htZG!.\tH `p^NDMg8Gf0$Ob-կpf&q'Nf\i[L#y'$XKi\һb&o k[98Ԓe4p{R^xKjm2 g+w6r xT)^P~}nsPAc9fu$} 8pU@_:PK@ka/\l\8e"z{|# Q&gp>j&簅Q-2s*t~Q/Dbm bm *ւTLݮ| _ž,W P~A$@WZ&~*y`pRv AZטSkR`rJ!˘ @z"ĴFs vZǁGp{Nsƻ ]sͰ5mPk`⚳ZV96akG~mE`.`g+JL];oPt ~nݩT HԼDwYy{%pd܃u>c!8&(7s:[EnYpZ;z3+$#,gG ow svKq7ɞ ΅ æ @~58mhsJ3)D.iͣ'N0D!(p.Ҟ g+LEE749b %ipQ9.x͙Ŝog=*EBT[N?@R7tntOu4_-.  0VX(kl/8iWܲ[T# iޜ.0&v1Z==t=0 }€G)žK;1!ip_Ѯl06՜X̔]OV{It?6{Q^i4pGc2` $g4%6ˢbcUtX-N" ok\ ?D8\gKik$)/X"SnYM{I%`}|A;H,{sF!o!`c|P v9aSRE+ĹG6<Ђc[  7kupn~7@Wg#qj ?ՆA\߉pd"q~Fr<  ™ Knv"PA%@9cGְ+: /D@0v΋ء98r B _Q1- si/=ja,`;8~)~nx0@^!Th pP"F!`W "`%8LMr=GF2R)C% S6ThinQh3,Ip~Ώ(C@(./2̍be 8 N`n8r"of>mHs67o%N<NC@,58 Ɓ39! hC}Xnp:0,- ;t/~RЍ '!y h iAC" )Y!0 2UgU!ޔCNV!Ct,Gp)̢+l^T EX@MpjaZ1`{ <Θ"C"`87E\E u51&9\Ԩ aŘsxM_3Y |hr )8LNNF_*~/s7}I!r68SK K֗/usڒ#qN)pcr4'tcG Nh#Ds@3/:h#{D04,)XI`DIC+%Y6rXJKuRJ.h^d) D\OD6(Ӣ!ru8[JO2L7E!pb cd 8sbX 11, H ov s(Sypv Opt*+} @FWJ8sƬSc]Pw-nR)p7RTs;Bys{pL/qOYؼaoY"k5 " !77Y'I" B~o0Xd}0 W:׻DV 7$ g-OEwvǂ'3ZI?4`?P'Lބ]<}Euzi.vKq^wk]K;KB4KEH/!Qh/4M8g.  
HQz5JƅH?v0@Q)NژXUs lv3i߹e6z],5P6'$<Ĺ%h5JZ)qyF%QZvXqN@Yz%X/|4 qHl# 9~ 䧠>ݾȓP+gp5/p,Ys0tjQ0N}9`F!_]Tn !8@Kr(5ξ I(t8@PލD7h3c"8qvɤ6pltĉ5v29ysspzX1O Wߣm&IvJ=NyMüu85N!y+xѯ ~ `Lpg,|H:vy%(AhpjR9gS??+կg~ 0bZLEPvW+g<"OT[Eq!M-*;`ǢkbFpk4aS1 @X Q1N P8g}3nWwH U% Os)R`4'b k6'?/&kC!>h[^p.Gsqi VW?Ws g=8L¹ SO_Q)3B8SPr/P_WnL!" Ke:/ XX N. ?:ȅ1w#1 >B=SSvVrnCl>i"v ί!`~Żn%c[@/S=_dcp$Q3M0EC<.GAL ?]ji2LtP-xVdO@ջi2Lt@ "% ,x/'JK N?m%k`m9S{x;Q'㚏 #9ې?@S!~.|{/0Yh_#Ғy-\{5i 賖2;ie7tS6=墲(mwюܷ5h!ÔnղYf8Oơor wyB4nK3 t*˨r%7_Mw ?!dM}T|loLCw`n-b7=b5Y8 6v5W)(tB %|I;fEK]V B;rL=)떊nQ^$1b>v|H Nl*>L v.dRi[cqyR0ĵp+M)y.l MY\ލ_'!X?ʄjNf";ys}f>-vеLU5v0vt.dPpƂs䔗F /)XؼzE:S:x`E-@OR}G_q~E֐ݭUAVÀdN*糹Dڱ*𶞽,Sx:oG80vs`~Z6>8 57X8)[.H\P9l'<*Pr~Kp4 'q-UYפ>_> C=+P|-@IB ,9D؃a^ [,4ӟ.3N?8 h@_Um<(6XhE '+3zCu@'8cViVƏ >wm- %k;'v/ KNѨ?l%Qymmu9k,tӜ>Pm̔o.Z{iV.c2.CKprXWҶ _-)][wjmN_(D4zGCr M8bqHhiJzP\ԝ^ӠNkB-^|z71javGFK oN_-0ԆB~Gv1$[*cs˷ VI i#F@ @.ènDqGmpkSL}}tsEYz\/6_lN =`j(.ݵ>G6@Z^ $o$sµ}g$Opq V=O5Jг e,voou:‹t.f},I{`CG? ? CgDǦA؞Hħ 4Ay܏@)N^1L3оh\qr~/{7E4YE9fDC1mkzEzlzu9̿J!}{t'}h^W7X{ʙ2wS `1I_SOwpഀ['Ir$[;Pu,2~Pkfx0ӃVg̡"w#x{Ҿڢ?+7ǜ?} ]:ލ" {1[+*IhwĹxGI'TXzOڧ~Y&W(#P m["YudS>UVb]6b% m|Gx~7Rp`H4)A+fpND"(CBҎNmVx5'AY) xBp6]]\,,@! E_'\\mB)ˁZ'Gs Qd' Ep^7 .M"يl<'lшO\uEk9zQ]Eⷻ㰊v%QsdKw/FC4'Ж\؂D.(h Ռ^7Tܚ-*o.j9qaźF]OI%\w;p~'kdi8.5Dxƒ!B#,RkNpv-bvB)G):MRшQ)9:#G~0SZQQEGkRd@aȧ95nNh:,"P"r vDq"(-ape. #/,pNJb-ÃVRy dk%*_rS?Ш ܌=ChaAS0} % dI#$8If.MUp$SFr80qfH kKL6u+{/@!!9)/=YZsH7 kNQ)7b~R^jI<L,R~tgۤ +ׁg$CKv_ݥ~fy%&s??-/Y)(0fpf=[Zz\NnXi:T? Y;t O CG#CmkC6Bޙ*m,p^kJ1 =h{;.㰯䑮4@Ux&z[9%Ұ狛)9n37iHjiK&:x%ٱmwʩMvȿ5I n4*z yŭt# '85" e0℺؋bY>,v9ukl{<'[u@@HQ:U7@`! @q Pe 11t.[V~%c!3P)Y"dMn̓гD @83n36MCnv:gcbtHPIrwPdO|-N]µmklYʚP5n*thpkP'@&gp^Cᐧ¡- trm4rV TGi Y{,P?yP#@v3w?8S՗yΊerArMrb,f. 
(O`z{?Vd#O@?Dݤ琚jmTZA)K4{\)lg0>t<{3g%Iwc ݘr&# ~?Q< ɭsݶ}*]xzVXQbaG5gp0v\vpŹ.g!%MBp^ڮ-@;<~*e.A0Yt ۘy^D"e:zH g3GK< |!}QE*H-'z 0< 0RAr[elٿk.vtAlm2`uS6Ώ>op?]kiRonEBu4G@#3=R7JR7R7E/kN!EYn\bM=YvT(v̭۹Ʋxq)n6ts{XP8؆eh֍ k5ͺq.7ugL]p04ڰ( DD)%u i`7]Z:&a $햩 .[v)qn%`G`ߡ J{1C3hӽ\No2Hrz2>$ xjq3g1T~1]ר-8L?Rl8JDR.V7I𦛕2\j}"/I߽4} s$S(N Z`abkQfv ;Yg(5|} ʜE η-0$',ncW7-2gAūQ)%(N]H,3sկXJyrDDg66/hҰ !$S$0n9okk+zm =88 t^ER>te~Khi)f.f 3hqoFLZK='4*Ez6й:ְ 5]Á'ilnn0jcbq _},:I#}\kӦzN{?Ѳ|?@t7;睮MF{p!`PS,L 5=^P2hi&MHeg$t 5 |LU8" }0O} wcZS~4yʢu:-]T XXƖyd8Ǘ8÷p΂#4[.d|.aOy 7iX(Q}N2~;tS@v`W]F@+24@@e6u%™7*gkM)*x!y1qY?<7Ɓb{RBf{~sFy5€p&>b?)~ ױ`k6jw.džP1ShG1 b1Tg7u Z2kiK]ns3uT#q2Rj7i]CGxލ@{TǞi8iѓa?ϸymzt[ꤥ6('-1JGޕ?-5:!e~iSHz7*}q%!Xr&~e~{Do҈E,"#kȂ>eIVq_G̈́ M~28)iHg~qvSm2֖1C2C}?^vm9fVG_!%o\xmfS5 ZbiŃ:}a2{9/U8mv&XBdhrb* DX¡␊1CR`,~)ֈs\pNEo+=7<rC>U&dORnmgi?k*{d3L$@44@eQֈ!Z; nھ_Hn4AIȤ`c`NMSN/dۄ ~!FkySRM7'BŶ¡\) JrgPVX KGԉd 1p 1կ7 \x[*n(vzKk tU@`P\2\\B&ɏg?YF0m)PPF!.c@AKxAF1+Zn\l% J!0Yq9t;'P NoNNF8S_r@&m=\34jNuM8ꄭ,0Fc ;Ϭ<[*k''-cC_]`@#4@@-nPۑs48Hc49Mf*'8 mt c>ea[0= >DoJ `ј+ /g輪# F#-{ɉ-XCs L \7g)5nnXd/0A,*DiԻd~/Ȩ %nBX05dhGy2ιڗR-|_?z&V `a۔ ܒKjOXAdp [vY5g"}kWYzvo?€v6ыCgr m|g1wcc2)#99yPUq/ `*]wxQ,Y <:i05o!N\r@\]DC))\ .T6vǣ%MH6K4E5E ؏Q,oQ, +pDntn*uD@7137}iL0. <~aIʚՃ?Pp5D'e ӥz>IIE؟Jڃx,~qy|89k`-"? P1?_I 7!`p0?cb:[4,JiR:,H9eDW0<8bOAuth #DcED=Ȉ4<=+nNŞP֣Q;d-#.sIGx+7Gr7{ m'cx3{2+=Y`~^ S^t58YGt-|i5hq wТ7,lgqai]K KЩ.}H0{ "np~*žދ}.)ʞ7i[nX@Gd`B1).7,ƞޏ4[㎧ˊKqȅR\z` )nL.7 pΕ`Oc=+aO A|{Zw4 (08͞NmxhuIEڔ4<]U=WK}J0R;KfAV=m>}iӱ8]\+k)mTB1a$ՂN@\Q1ފ? =e8l{_SGye9>ԯEY#>r>XW @r1 y@@w}y`e9\8ZCT+!Xk=(@,:`by#gѿ<gLu+?`eU ,!@ X|\EWت!,XZ< P"~ࡊY',(ST4DWbџZJ,k{**Tf=.FXkS}^D" 2 P!Qe>Y,k'V6D̢G"`& !:Z@E%u g*n_C*CbXUJUYZUYtz tjU]yUTE-j֒j,&gUcCT3D?TEg\5ɒjȒjf蹰vS|΢(U̒, 89מY]u+8/k0Dۀӱ~a5 )5d3!g|.sdӠ&~hjO`mmM &p!7YZU@C\,t!Zy8=P 6mJmc=pڠ,nL7|J}pL8gn F[Gr:@O9 :[ 'Uh]eו(^k|.~8/"P,  hЮ[0B8^2 p~"B^US_,Qb-`pX%`=U870(B%[kA89vqbM(bf1{*%bh>PJf-Av@gFi99Omy26x+Ix 񲇍yInl$AWgbTR'hW)=-G? 
b>rwuaD<֣/ls)ahh%^ ۉ[]]+Rv;%t]#!6ڞrbjIrhSûJ7(/B$RI)A?B}6ٓ`.{Idw\M!BT'FoSz-^ Ҽ>"QȇNE|O nLkg1rCxB7L׼Qz w{Y7?Tt[6 &B1"SKvt"\H#eǺMool6hpS "CO #Ƨ}S8nD u~͕fľVRG95 ׊L8iBG~w1>}ޠC?b*\D‰7!ؕv_}g e,FQsP}@~T#?1Jv!h4#p[LE3 K<'!Bl1tQG46۸n JD`W<Ǔ--^EC/|TTm*%T2MLJ[$4(.e{NDE|,(l ur7 *Ě2Э.(DNSzvb| %dziˬa6qџф{j谖*rvҦ^XRNhsQ֫ӛ=h_N7paYc{ˍC@'ݣγS|K< =S _LXx$tRIĠ'I Ä>2-n"YCmCo>#nԔ(UJtC ۨhF9G9}=.\@ F"O8qD5S-B,%ط SSZ[J,8uJ̒[STiJdqi5ԩ(-B35Rֳ(}P4(WUZX\?wvl[qmMtJﺧ/`ؔg- #M9NLij§ht:f~=JٛxACuV$YzJ[dk[`0.3S\QP,:f' -rH̉yESLK85O=8 GeO'b+QoāA*N.R#Lj]a.V9-D$VEHߒ]1H>>Ү&H)K;aE~?h1t0|"/: sƖBz%CʙF!:JF zz\Oު oYrmr^ُyyrns%'q.Wu,*uhg%d,jc]/67o+ PXy]`5x@t7yjύ!eOQG2r-Ays`;#ɩMג0^,ξ^g^k:EF#ك<,E{Q䯧D> SJI7G3#a~/P@mdL Pb%ƥ |JA.chˉt%U }7QctUA،;%_ W 2ȝ6g{:Gr~'{C<{OzRRJ͞`BGtuShfKĄDu,'n6n4XTj{t-~kmQ3/M9kiˇ̨П=ѡ/iJAfIG؏uE2q#}UA1(kN=QL, Š`3LϽ۶pL3s3c@|3#lOFZ At %@G-JtsEdy"^+WC\b5ojܦ/u{uT+0´k\?!5PTV[.D\P3X(B%fV9D% -kʡ2O T-N} Z8W֎0}a{Jx=Ow:?zp%(URi|jZRn_7JOU2~Y$c]BrӜp ,K;BSr{J+C_r6,Z,tX67 3% | hX$2'N1:k ԧp:ѧ &Ӎ}҉8*$jPD:17>'RCnL裤)ADaMJƍ-?.X4ڛy?h+n(t#`;,80@{|vouRkB]* \ZR0N۾%-Jݜ]XN-+1ߒy~a|5 ::tYб//5Q:+m}}7|7x_792K-151UO;iǶH;)`O2l.2΢@l$s;z#G]<锜N0G8(%=N`r= 5TZj* [A{.P:@5GO@H+U7Dl hp[JH"k'crk',& v@`>;*tE*487m%\| ۇEJ(-틋%p$(!~% Q!KV &`Z6F)5CJ-o>6m~,|== Km.>8k+AY(AA,|2C߉ax<"(<MGgN]ߜ>3;9M G+WQ9NՍZ[+HwW"6ق?1X62(q_"6(!C,p̉~܎SKq1NGJУƅu QF>9:4@M)U1η[o}?_ݷuJYQJ ii]ã i J0əJjkɏN~LQ 4Dn)&JI >UgTֶI݊ܧ>q$N֭66LOļѥAtAOi/w>PZy >N^":5Ox+歈6;=yձ m{<;1.{+tmH=pA#1ZѿLJ wN(ުPo*OŘCۨ=Kxu%ѮZ1u OM3ʥB̦}ѱFa:M;Oܭ^*Dmcv<+Z1>ٯwu.y hX^^[@ᅹWUBt Eb? 
_[USs5:sϹ!a D,bk]ő-3o{Ac5[o9|>4"HAӽA铘/㧈_Zs$CwM{]~P~D?=:4iK3?|]x˓/d!{.{;w}?o>_ߗ_1{s[GOĴs5_~>_d|8oZ[M `Jda-pz9j o5aaߒUBKHu1Y[iyacߜ]2\}i.|ap=_k/3%?񂓍q=_~x;'l6egȢ^/ mRj]v9ħoʑ)~QaV;;ylF_3.pH%7oKϊ׉3gV!]t#;w9!C`^tcnɏgK*.Y1vLaSX٭yi4i:=FZeHF:s鑝+kw/QNa*?WGY)F19ˑ=/0KZUz@G.>ΐzry7ϛ{v\yVe+]~8w0o-?6pI,WJr?a[#_qhj &O1g2kgjLI.қ]ʶbm{47׳mF醍Ti˶mk׾ෘm^W|g;LN{WiΧV{vuC'g{꿽ޣʡ<5?/gB|-ިwg=tiT;=Wk *>o0!k՗ ՚j4 goK ]Zo# ~fkv}b2,Hc第7a tM#;4Z^K@;N;Vyoo.H}>ߘ,?;c~OEzqFzW~рP_I^Z b2l5՗k~dV_jpcK, ;Hzas鶛␟tkl6KUxwJwsT OH(æzV3'-KYkLq[Gvؖ+7׮[XCzv= Tg{>ظ7LaR:c|I06y|J2}h1+ #w7dǝʇz??Zr9O93] ;2K/>nOԱٞՙwDioҮXM8"M|sH~$}ws!lFƜ&ň6'ME =gjHO܄3b ?JI|ay,ie0|F6w$Uu5://Il=X Zʇ7)i$K@ﲾ&AZ+|+|`n͝ ZX-?UfNC<f2'Y;;h?XH"#w.d&O)a?_W/owَg_cI Gg4g~(_(dk^?;KNhZ޵zFKy+-E=d=0@9#=|$)QKT:9fIW+eʾ ky.nH?ҥ۶^Sָ,6 \g0+3Sr*"K5wKZ1 /7xWULώ+^gi'& O|{jz38Svc`Ƒ5~Ùl|'2&fqH4ݦ~a]Om{MܓkWd\mv)z~\8fڛ߷)yZa. 2 ˽צݹ}3tͯ_-ҶH̠R*tSߩ~4FKl<ڿJ }~ݞѩrTO ks?8{mFVW2y68:lV7e:|/Yi3l><ɋ˳+[-W|ok75]ٷ_?/pyf2wvV>%4tȻ|Sׇ5vWi~l݂~u?liT숞VrT*vCz1s6~hTЂl}fY kfݩrtn&66~v9xcx4#"X{p*:UUFٳa&6v{u<}5u|T0}|bXzn9(.b=p~R}g+;~ߝV/,UvƇAM'߭{KYO>bfP%Ioha9[r^읣%KGcY}UGGz64ݰl: Eo.;>q񭩬=l*z<+t3R5a-t9Kڗwu~ozzMsZ77obAHDWvF'MxEBF/?d_ur;Y[6'K M]Ϭ M .} u:ng^;"i ߞu֟E=/X3#?<}A~ZϺ7V'v> f[/.VeM2}<]{trwSl} ÷&Ͳ?Mlg1C}:zJ]r~[k\o[6Z+ cG_fkaC;qտ%+0%j_kb'`oZOO =l}yè fX2F.5I -?lѪҕ lmR L>ygHhvnswm!wоZMPvr Ƴ;V5I?:>s]SG ]UVn<tuޝ9aU&;KxE#B:t'm.د9ko'V}ݰ9e&[>9E-?k5ڇLH;3}>n+gH/ ]ǭAj+I'*e~Ys,KM[;%v?zl<kFΝyy(?荛1϶H(Y4y ;qq;X~Feiٌ3{uwٷ.NO҉k[W1{Ǯ[s$w2zIB^c/?ѦLTu;i'H}S ٯ_Gڳ1YuU˞6gYѤyˀ,W1;~CcK{7u[u/ǚm1{XңX}fJ]\?&kl\k}ߺ+rdYk6F l~4Zwu[ٿbk]WLtm_O|5ٓ2.Mв ~ d-޼q@ 1ڼ_ٺS֠?qV_3ـeEϪ1aF_=^m v=?ji"-z_Co1ݠQnR i2)ft%KC;gL?wxZP8wIr;]uG1!F7}qg^Z޷d2)n~5a1VrFi{seͳko}9A> m[FoPhsްun']sgNZ0|g+\RtҟFv]ZWݪ`jnaIYo|ObՕoo}?DcD79vTmw#1Kw;pӫn%?}''o>N ʹuxor ?a#vϾh-?1+yḘ^2}ۍg3 c>,/7qnf[{yt"orߝ`cѐ'4٧2E/>}v$w!{Ց?hXi%m^WWfߛ ?`#ܣ6^m]dW qxŖ[?Uv7{ݸҿȽN5;6ՃIW1|ݵ.ްڵ~\^T%R H;*JP0vݒ*^aǣVg IN.__*5I}? 
_b9O=xҷSk:6Um_zs?g 4Wإ2}d#_$[HMhup+ ?p3}tM1}^c5=h2=ϟ0Uٽ܆,ŤM~1${UK~f=.?S/< ~<6iȮ9N>@u7g]?Axzܰ}닶αw?ܡm9OH3K­—/ 杖5pӳ8h+/ƆpMfO|ؼr9Z;/|30_=wR'`fEV=weӼ#|#w[&̦?9]93էo'}{)=ו}sw"ZN2y.w/3~c߿ܐd>#ȧ3ytnS9&־]Fd{ZRtYyG׭;Xfj˸,\Y~ߞ|+YSK}ܑ,?\'~|UXY^of,w_X 0i%L,γ* asMg#W__uG=5Kj2ύJBg+ӎ1Q빇Ϳl0=s1;7Xx ^%c5ͼi[!ur,?/W>+;m2I~НнK_}7q ML3ZC~-ςo,ƫ/n7|ڹLӁ7kY"pvO@UlxՇ5Vٷd7P㧙|2 2O:(b]h6ѓ_dUU 76Hla?~҉0yk~P7i<精>r(kO?nigcX=~Xh(~kx`7LZ'zf`2oe-9g6^21{ޕ̃|p֢s8eR|9[=d5>_g-ۯg/˟'Ud:@g\aM&d7[P0RH/;Ƒl="5؆gniPlh|c6_m>KXVR1!ͭ֋.XZMǻUa?Azݬ/tr喯X#iVVn8d'koqjoX~iuyxfO X Gw_e/PM`^;߇닅.ev *{ &}mF_Zվ]K?,7X{Owjs.-c"|Z9PQ2K/rWtȰl/azݯY眙gt߳ww'! 6 M'X`xA[^b=ã \}27_BSWoμ\=l{AuKݰ[Zj W3GqΩ}_Yl=O=-̯l/_'Gowy>dq 1+ﲒ\M+k0䪩l72oݺ֣wM:Zq[/0O]A!e0~dXC?d43/z+_־^siWxeYss37vc@<߆烷ykZZCcń&v(Ra4 k=4>3iLH K O 1ѝBcbBH;|QQH_$OhD/wB+DņACnQ./C+&):1 D@pK\\-σmNLk۲Zt%WՉ619-& Wo-[}-bmPD쁿)oDl:J]Ge;XH˴~T#{<4!1ؽPFp{YW=x&ո|X\\Bb@ mH g7 7+F&cC˨ѿⓌ7]^>\l\bHx\141*,i462$vPk\>\Xq 1qre"GV(k([6bBx縸脐C14PS1gq1eʂyJnbEB 5nR&0 NI(PҰK9:\p.j (W\Ų7nV1D Xg*Bֺ5??5xthBBH}rrJ?c 4Q``zZ&*L@|tTBbH!4DCrbH5RTl!9$.)1$.2$,.)6"AMzS^\)>4[N'WSŧf4c7Ćˆh 鄨(N!4^xf#רĄzᴀB* yhb\<+Ae1_O3GQ<<볨@Hl5Hޕí?(+Yh\Qц4{!ٻױWvK{-a鯱;Pj{"J㝷Ю>ť ӄ~X Ukk5Lf)tL;jiuIUeםXtKڍ+5j[gTU{KѶ|~q.&`u4 \+O2D[U;c핌V-f_ lAikV;{Mv^gO5W\;M=W|?VM߄~iX^yQi3fPF}-whƙ7ZF3ųhd |Y:pLo^ka{yqkUGnZc90,5t9=+͡{Z^i26+ͻ-f/JѪB/^jm/5g )2dKMR&/5QzQ$ w收3w{ MBs[Wk ͘ƺ{e/45N~)x皚Fz\câ,\S{%~ij"2LSw>ta3MH^`;/<Մ$isSzT}1PS͛c'=4k7'ڏ%6D3LR[fO4= X<'S?'_zI :pcMނuyMҏ5իP#M%]H<0ش#͇-%H<*59wNq:5=OԨWuj[f.jگo~Fux1%~Бѧ3/yiibf^&}z%rn;e3e|nk:/s@ǥș^?;&BS%74[&iJwuy뫚֮^]4׼˚c5ڜ \мVWm5FR1 if^U)͖2o~^sT_g@@BY f-I 4C5ή:>1>u._w􎓋5s&iʌ8Uiz/[睘iVqNfN¿2GAy /,dZZ4^A+2.h^ Zj\[5ÛoܶRsUNҨc&'OyZ3Pwlk5{܎NS\o "n/5*ݼ{MWwsl %fMS٢;2jrڳmdcӔ80vMͶ[GFoݮ)}{Xة|q߲L|ulʿߥF.~{4tӪ$Sۆ\(5WH+ۿ_%r;4܁5:AM^ZtH3T1#ke8nCg҃CfѴ ߺ`׼x+vvC=9XvO3[]Z1M3~LSaG4KSks\sp~̾5ۢ] M/nOhf\MXb5jB⟚=mfBF7m>SHkSyhӲ]-eiM:SޜZR33'f}Fsg؝ag5z;<VX9yx#NӼ=sYx2&=s 7:M\cZ]l>5N/< X%M}u&E _(LM&s84%k/kE5ETiǯ5Z*˚? 
[flp̑+eYZpUaX%bjOl8wU;\-ӝ4jqn93^D?IwEfo yRohrF{G|SSZ䌸nj6ݔdY-[v|pKu55?xm,w(MqGjjFy,qi}daO+||~_sVfő?GxX5װ9ψИĸبf5!Km[[IӚ ~ҏ8; /ic=zF~TyZ?i߽\&"*ʢGZJ~ycvZ ~_환?s=~TX|_㞏f ͟pSNGE彟o[C㗲Ix ?eo/%F%z SI'cjjyEڀ#x֣ooftQT_9o5~*'?6 cD}0^FGwMA) _> @PH|rP)2*b  G|o8V&9n|uyPpaRg@TXL=BC{zEM"ˏ$?W)c0L5o6u qw 6!|;GzB*%jCS 1&3ҔIMwyn>vEs Qzz=_Ēb!\WC-oDz.PkxpLQ!|%{|P.;~<m F'z2!jGTpW&1..$Ѓs?NLd\|Lh-x?__1I]j+C8SfSm$&e10_ 3C%upSD_Y> 8aTj/?jtWTik0,.8.[K=QV؞o^sROj oﲷe` 3ǿmλ=݇Row҅~?c8m,^Z,Ty ^ <*cǽ-Bݍ_],pjTn3]S8ShǤJUy:X &ӟs=j_F9''~N7<1;M-y)M<9}c卧xѥWxkyțzTu󢥞xЦzefυl6xJ.wZnw:r=:UD mmvzJk˺hS~gfRE,otJ-c~ܽ;=ެ}Hk"BBuTݱ둎,Zje.d9ikZt^{ ΋NDzOydMw'utW}^Zppȃ.{1)Ðx65l '^>;(;g!ҟ*sYTҹRԹ{H0`gOHygl@zqLM3vqlEz(?+rӰar/#ܣ…km9ɼ%{^'WVëEw"];́{gu3Ϻ_^{HZo?BzYvrlr_ 8 ˬ~t>-rHu}.ILHW`9y/ّ>5;ɡZSFZӤn{ ~fmT=t/eܥ 2LjJH_rf,(ϚAښаA`U=I͊HW]vtU,l+˱I^: ҇qAO8e\.,~3"}buGPS~3749w@> Y8(aHK6>ytw3d^3< sΘo͂e틑.>?1Š/#ߺۄ-)e2UtڝH8ƯQa>Gzwf^֎tᇮp 騡ۉ<-.ŏ^z^אAXn3s5;}*gλ/^zVgOmOS.pHjЙtH}75s~˂9ڬɱh>Czͬ#N?ϛm^;q3e,ҏmUzAHw]kaUGwv]hl]&Vq׍^tTit\EmMrk{{ԾP. GZ_KN살_bmwc ='=X?+b_,.}ÿ mV֑HY<ϛU㑾g}iHGrTsl[YijEH]0ifzF>m:'-m5y ҃eRˋ>:ys+72Cae= ֗o@A§MtONs[ WW_ .'|47i#7K{ {/c;Lf'oB^;uU棋 eO;5ߘ鏴汗^d-'ϝ, җ{0눣&F9o]m|S,C.it{G똣nuB}&L!1/ӝebٲ;~F:bήL nBiƑu?j$҉+T7{xZS;4eus>r\-ZY{'{ui-ڍwV/=n8{e =eNw+/?]XDvϟG[^_9ur]穛"{nk9$R"DAܟDu Ocw̗=JBy!HlpW_HNoqÛkwj.=+~̎g4D_x}__A)S罗O3s\!kO(2`hA=| %%%.+.C*5C7&DҋwqI|\$@GRbIthLXDhb*X>RĐҝTCc (S#JUH59>G,w< a I:HRlX?.>$+Bppl$ w6IpL3EG%HJQ |h@bf ]"Ф(>2Ub$$HHp=Bcs\8'h9 /'$wC!=թ3pQrtmr .1TC %  *ua"X|ϸx^'AkOWC x] #pqrk4GEF"Ȁ^إ) 9*65gch"hAJ _~HE[$_hn!GRS{BZ>lG IB2ҋIpz$#B 0.*$1*"жBHuU$cn?B".}Z).Qw+(kM`b㌄_ OHSJ7lܨ(h@,Q"D 4hȭC hImb',%1$CK9, 6C$6.OB3dXyڀDC5]<)Z=IVO$&~yQ¾7wQby(O-W)E"螡Ğ6tT8H#"ehP.Z=i7ݚ9(U|v,C<0K 21e4HC8vZ{i m",c7dKAYu MLb ;{RrN4+DžQJuʻ>8 HՀV \4&`B# 4^=/d%%=7!hQU8"$.6'!brᡱT.nb|) WMhNq}kMx]@# |DA&1IK**XE ='Iz6MZ͂u5+Wo7 5iڜhQ]L*bE~Jb B wU -ƷkHQa=Kצ5tSHFn) )Ԑt4jJ(a(^Q:*(g0T.4&Ih;z "cX-2A)qh Ołp|Q4Dq\PPtXHd@*'$)N>&)!Z'=5QP8GAqP /%5XqD/:NU Tm2*BfKIlU[@T,+6 B0V>:IFg`DPWA,Yً.|0-$añ!:& 1OL||$5B !lM 1Ğ!B́U"i!eCGnހUc-M/q6t?zE>g""BXգVq4wהՐNh6\if .ɧ.E`**]j 
d :`ֱ'D"vL=% J+qpI@!=yhl2*vK).`KI"V{fat5IgȄG=\5^##1- >6PȖzw ƐxFhOCB1Ҝ#Iqt>b(#KIQ[7$ސްNmՈfPġл@AD@o_ħ,4@ta#V(6uH੭5`aՐ՟TB F b!V8/H €Y&k[y&M5޸_auL+ 0%v7X$9Rq!0NbҲb 0/sGIL BH`ut'$FA:: |<{#܄xg-,sd=Oȍ:"x% Y}QcCD:<+<֕"ĢSаh{nyòiŀSw1 fk%hեkѝT-P;Wap/;(j0KV@](6l0nv2 pn,%&[tOǹ nvuq@T ȳY=j]HH@A1t ? :rhS?^<A|T0O.@% ]VHC{Kɋ  `I841q2&!/{D(ua=5K"xŷhL)̒: +=BMGDj*C`܉b-}*9PI=NXT,}_#MaKŪ~l&@KP?B(%#+vQW`Q/5nܴNp-ѹV=kQzTq3]p5n*LM׬ QIP VzuE9F@M ,Մ)G(DCg\6 :zz/ %6}LaT|JM-xBbɍ*CO/jStHwTJsC<8| [*^$({R)èuHm\S0|N)+H *_TRۛFtŞIk/ZlJ { g$[W Xn?1xk=q9!ZxHg2Ps||\a]UtRL$}|I*%i=iB7a$JZ'>_j_U"a+Hj%"5862 6*p[Z$ēG  0'7Vt?h pJߒ_Y4/A?t]\pő>Gc[j׽t^@Ydqub{ KXIDX+&QO_X)BD""* /F^/<򬡈ʂn/[|의"n:@Oh8&ԭy|O\JXIB s4y.`a8(xkJՋCz ) [X݈$!OsT-*\|A4ʼnkpm-Ť[+^J}tYmX(BVi n0UKhtXeVJ .LX%u, tɘ ["}K)`ͽJ)|UMKQ``>Smu$fW@wQa1aJ8]i'e"HXJ>R"c,aBX=ƤcӃ*_@sV/ b17 %zfo_a;S/.6vܾ {Sk)3\Ci|T0n Rg|;Urלw&-Ո/YNqfz؊+_\1Tx71f=};t6;ʆԷy6k޴Yk{<6ĺP M^V0zaÛND%m)pك|tAAjL^;ꓞtcLoꐐ>+eG`~`cK$Jٚ{#r%**oWt\\WaFwQG " J9dbL׼œ'AASEMq -oz Kul*lpa^#BrUT4W"D N>@uKBUKGqF:ҧ:w!itCn:KFH2%loBN4! Sc7&?Cф_#-Q=JGLU9Rb:Ƒ{!#]+y hρOMLTP̑/ ?ɀhKw]4"@]}4 8b5tF *pOؗ#N@]?T IO& s$}TzbcbzȑӀA&'_8bT H h‘ n y.hзqmlqdGޭvLSZ@p8G'8R h =őBP>v\:σf2*Gn5ȉ -_K('5H綐%9L $sJHPla @1?nQ Q@b%t$`i i QK@] GBzlQ'PBIHrWZ$J4!!C-Ao(!:@ $!cAòTqn+ Y HBǃ\z i w"AB^RxI `.@G hz@zF w$d^2Ip=!>b~ |$bT <} !m28LB=e44N$$V_Hpe4Ok7IBJ:'^USA&t(><'o Q9@\H$D6-h@Bm1@7' ݥ@2 y hk0Ȼ y5ԭ9 ht= ж }Z-d4t# h4AM@QB9tOw > ߱"wp*rhP)ޑ&]v{.  
0P}C:jC}|>+ y+@DJnM>٤d7llH7DH*Jɚ?@$%ARPUH~*%& S+%TYH7@zKA~_Ky@mK))g%%%UqR2QJ+iAdOJ~o4_#dH,%6B: MRP;m|-ۀ")ct.΀?@'w#$%yEJ@ﻥ T|/B+U[FBh##^^H9@m9"~ p <ʈN 2#OO,e$P_IF-#v@S]Q)?'#j@[}ho(#N@m#WO42r P"#/`hICdd$n4o[} 61Gi@;qwC>A}2 ~ ޒ!9h+#3;9H89ɖI]@TN::Zr+''YNx{9ih)敓]|r" MV@{!99~JH@SI9Y {0}VrC@A>*@SE99Mi 9)h-H 9!^$w\4A/n ߁<h#'!^_9: @IN6-@ ᇁ 7u$ț3~Mc h)@9IS@Z3[r7A6>@.9YÃ{` r qCzrR=:OB9h9 ˉ0ĻE7 h Vygr]?r0n?oP[/(iZODVhRI4N4 /5*=ˠ ˂\d 4jBA s(H{@; WU)HE@[$!}@kSHPeP]o '_@~(mV'q~UE 7cᗀм:)JǍ rвIA:|AZw?vH2; Z@9_ bqh| HB`]@ ܃|mV{~5P }Z' Px?R} p -y ]A\>= >$:#>d$ZXP[ Z>d)u|KKBqV8/yCR>%7i>j`2PM!׆=هL 5Շ܀>8g^@>ͿX .T[|?v@~$q:4:g}\4@  h ]CnC9w@-yCq):ŇX|y@ zzt!'A)d{<\!" jB}t$Ai /`yۇTB9:C=Ї4;}@#Lp>|@>;`YԇXϠ|uϡ>/  gP> ht:M| 8Zw>08AG}6j/ hL?_Kd\:%> j@—4җة;KXh>>DLyCt(g@# ޗL$k!'#H2Fj/~&//y hA~@35+@/@&րv@ I4~LA}4>t9Z6B9PhG@95@{>?2]~0'`@RDG LoH9@ #CV@i@[?(䣸ZȾc~) _Ǐ=X׏$gwQz ' 0 `@> h \nr: U@N@gc?"; t~h3?s#@F[:@Y/`E@'P; Qw@+  ΁c8Pk#K-'h~9:#AA?Ε~ʏl~|yƵ~$%uKt~ zTG_XA9@!Vu;{ enG ]t2#C']*BR ӉDbfKGCE|Db{BLDf-Md|hDb$> XkbI|-gE`g &g0N]Ȝ ވȍMQªP™" {2{bqQ8{.Nals%7Ǚp>/q|>9-?>⠞qؐ뛃_KNaiq[s 'sdvԞ#wdNC|='rJm'Dp>|'w:@wEIs2Ǒ) ~64X89Q/<9q ǩ~r222N/Wp mՀ!k nd6Gh'1:@G@~9bz qp29c|i>%bTHdF_'𳥓p:%~Lb!!$wJ$2k>ܒ_" JBD/"hKJDfiKKݍ>ҭ$! 
jNpv ׀4jA:3YFCabB9[H|-%2gKDbn%ZK% sx:{Ȭl&VWe~R 3@"%2G'Lq\]!!NDJ8]Db(I'X _$2u<7*,v/>3$r@ //a }73 :J4ZFA{1Pfc %>x ݕI N\WA$fQ:I`R3J..s+'ty( ؃`rBM<`/#%X#?p!"WH\UlWHUʼnT?DjWHM%!mK'?ᬀvA 6 TK "[Wl2$_K{L{쑣7g"394N?As 1_|.!+簂]I|ܾljZl:&Bۀ q'r `׉|oBowJ tp-pf)磓k^-`78=V844&p{؎DN'IܝSzAd=9m/؛;p~/'6R= ᔪ` rPH{' tG} a`[q8g9u =[f;Eܾxotm~/}`OprY}Yrr%`;߁~i}Ɓ}HV*(  $P9_T"т3h}h`Z^E QlXMԒ_G Fl؄F5ZK+`%2}%`T`+B⣏H"%Jc$ _*]F]7앹L:(VqZ{OD6g ]Dnz.!`] ZRΨXRM)}R+}JRΙIp c)JL9DRb-9H9k^Tn䥄/V_J gy)*Uu)gW]T,HM#Z)g5#Tb%邥ЏBzR@&R_OJ*A(%FJFTf%~a@ʵ2^*I%R9gs Reĝ/ۗIRc}Ti2%*}uK*YHeƇR)@gR{ܡXdJJFYe̗,W_@FTd g1T[\&5?u g.!:%eLj,%ld;/#vXr2*ҪtLf +UܦHu16_Kѵmd09B?= &kdCe#}LƛdJ2xItC 087xϕerbJ d[##u2 A!1d2 ;q.C =)#'\.Q)0&Sz>Y=;`NB ܿcnҜ[NTYHc:p+jZr\׆ U]\i4Փ+ m\XΩ9[D:36sfrh.'rbePd"2\,g:J.5?u9g":]!h iECZ=d7 r.Q.7ve_2si4}V \+ERuI./rȥz_|OwE.^s**]᧾ziݒ=Hsއ0 C{+er:M`OLDUA9 %Pg> b/ *dB ]L! ^iT L JuD[)e m9g,)r (́ 5  4_*ZQt*A Bj~* |[5g:A )_]!5VWp s &-X!5?6?@ ?.Up?@u=Q5PH6t*8U#t uGOBj k zl :jZ)| m@*dvР:;*%D4EoBaikBAbR,sO :UHI~(ۃw9P(@zsO{pz((11p;r+OwJ!7P.++۫ &&-1; #~~ 'Pg~ r<2|r=v >Kk(7Bi%JG)s)%tJ%ɡyRJ)UPʭ[%%UU*U{#Ii[(%J%T}RCRj~P䜀0%̇1\)+9>ҋPr ?A4 NJRJQ/x*^JYoRJBxYgTÕRp%giR#HH{F>xYAJjX(~CJ ~C0簒s}D鏀G Jծ>Q $WQ)q\TX/~GSJީ)JR?U3=Wr:Y Rt~(8`*a}#7KHU|D죴D5wI磴5|f>Usi#gs<\/]æKV)SHUz#$_tQC!B܇XT1bX1b]Έ!&#EtφWxA0`y?Q#EC |߇{yJt//_]Qb8K]MCw+k1Z1| v+="q X7|P SDzFHܱYgqیHN}ez]Damb؏b9*h_06tb s9k1?Zt7Et1]10IHtz#= ]|#Ac|-ʁ٬G E~%؎TEttB8"5vՉ3ՃZ&یX/ шr/Y0W!_qiVc\B0=Dk,ATa|DY0f6َu;Pn jZ>pT0?*75Bl|lj؎|cU,&ݭvfDN鱽9`>Sa;4b|+f|1Vر:?-87b;VwG۱۱۱۱ ۱ ۱۱vlvlG{c}r`:|/7ó18n6y.c:ZCd-Nӈ0#O'"3b?vmB-?{L_h,Ei]@܇̏1фS)E,F| [#WAc߇Gl%`;t ߡXC[L2oָ|ׄO'"Z0 &MX85㼖 b41|ׂ8VW^{8u"qޫш]; F[P醣yфF7ڄz'*,/5шhF"ox8oV؈c#ΏoG';߈c3юDTa9pl_hGH{xd3c^"lf_zD-ifhD gv3@8.*"> Dg1MpV.jDgcu3q=.ȳ YD "qӧ#R!9N9+.-ӳт瑾Cy9!QNH ,ye5C!E~OhpOp{ӽ ӻ g3C'8p]Ō0.j,i"M g\|cFz(cEw&!ʨ ='rل\ "=.<;E="ҳn<HϨ ;QΣރ,?9:a\W:${HP ye+~xDDzRH煮L@oQ;@Dz&TsHAoӣgFp1>2'izVH t%;=+)"=+Gpǰ^EguybHgӳy{VL~u"ҳ;BU;рDw8knnhD_|Ո.AXXoOcFMYR!\EgJ>^w'CϚ b>a\c${oWWz6> )Qn&uK/L N1W^HLw}#Їo,w!j{a=/ӻD9~gz6"ҳ{7GU@pD܅rnѻ(_ǽo> ɀ 
Dz1Ŧ߈AnΘ`>'{w}`5<Ϡ}ˌ7ȇޥ!<>^umO~S;U0?NEH |a; Bb}).ӽ\“}vN^Ӈ΁vtNJ-@rlr؃T!] XkQ?Qo1b|m{g'~H;&`<"ΊBM^?l0 [a&g:ʻ()%>EX#0Őo,׊U;l.H3>wc!:hoa"ҳ~O OT w#Jb%}> qUmW Ea? 'b. 㱟+"=&z:|!#>W9w4[ ǃ}- Ow lt{a? 0/|OeL'W]OQPosA"Z>FW<],}K;,(+?DDw[Q1#{;N{˨X.^fQ@g!󅱜6E>ѝ/ E3Hc:_k߆NElNȆzY,;˪8,N~2کh֣Nxs[Y%ӡw c{[/y zSlOq^$l؏%'q:E O:kDxuf]BQ?V6oqD.υ$#})B>j!r88.{>+㙱ˈEX~yt,wW3a9^`}UqOJlf~k`c baw˹4@\M8ҹv;֫t((\gMѬ Dy"g7bEQhz| нEԾ~ q!#d@gyQHp!@=C{^OmL>(˭./rڂj<'`[vn*v7 Xʢ"fÃ#1bcֈaшM' B\ q3D;Yk_"Jʈ1"Xb5ĺM#F"{!B8qRu6'/"B|QQVD+"@lQ1/Pq3 Z7"B4{I9!X bڈ:ĶɈG"NB .A\q17e#B,X1QX9bGΈ}#!C\q!ē;O#bG̃#1bcֈaшM' B\ q3D;Yk-*,'D]f,؎ꬨ_D &D+QшhEt"9P.D3ωmW*_,V^nGd ~K-}*}þ?)G F~zDW2=0?eȟߞ:"? 6 |Y|숎/oMȟ_D/oam_ȟ/irl_ʍeu_ #eME?]? U_葿Y/6@~ND2ΟgH5]o3|O>oJ|TX?Y|Wg ع!#ʦ~STZDшhBdr:KzrG}X:̽TU;; e|xOԩ뉝ax!f-oGpth@D t4{-!n Ynlׅ~t-}O?,ODbD߀j"FnJc=BYIxn~;K@ܿX{g~K>?C~{KcO]g.KR/G>S1?3d71*UgSͳ|z[kz%k{'<1U#GT7Z%9(ƈ.4;;όr8gEږFZuO#S43wɳ{X}gK{e^Vs3 ,wD' _XX/y{Uo z|-ntesj1Gevmlgi਺#_D>왎gmR|K\OH[4xvGIan9o Mhcyz`ee.ܳh^o\a{,;wY ~fʂ?"M#=듥}dWЧngacb݌ގE}|G=#Q?_/$ yҳ'{ˏYٵXozX!uzx/w hF0\|6"M!)K4T/`NCThF4t"<lݵg3Hb8;}عK>;;f!o ƳG^-辨]I̟-wWX.,]w|W|}ݣ%x=zy4{JG.󾈟{_90B>xVG<>țλT8tW˭q9= V#ћ_gtWi=U\Msx? ]ؿgƋ $rъH/♾3^H x!b>pN g ҳw;ݏ&%<#Ι|x:GeOϵwρx_hf9~0埾2}~_v#~M|t1e?*\Nq~Yhf"{ճ=6<_|_`<D~x\ю|Ls=?)'S#?YQG4.jiX~SO-^~U*x/׼k/_|iWS_+Gp>Yf™X̅ZD;{^ZXONÁq>Jx?/ -7>W4ևe[NXݧ Xѿ`~OL/ zF3O.ʋ}^Cg?1y9g."}_ ȟaF9-/L/ WԿ o=럅Ӣ(Oa#7~FFَr:^~^UN<׽F>h=}}$x bY,9-ȏ|H}|9>z/uY|W|]/_x_ '3=xQhb}!ZY~L^cH?=U{;3ZƬU; ϖ?S}f+.+;QOt^Ż=ݧ_)3%gېg,>;¾Gꌩž¾oDT]óİ ^U*|cx$޿jɜzx]%jϒzxWNlG9=Ù2>]>_ Lt|{O&geIׯ m=}}MU.f։1ZϭgxN @|ti[gC9^\wLw6}9ѝtVVUőYX_~goM}ό}OW3Vٽu2!-sw,&cD'LY#S^{}]y^<Mf|ِf+Ƴ=ʓӍvHXR?˟ S1q|׿6>z_篍2cM=u eϾ ïVDӱMcԘ. 
gpu|{~*oϷ?NO߹0] ~Dyg}4{=ߞoϷ|{=ߞoϷ}?*kes;un:a IiSt]Z$`]yV\q;5 **yIW%#{ws=%=϶϶Oضs# |h;zz.B8y sޟ{l_}TlO;K> >>>>]q݊nO@=!uƵmmm?̶mmmmmmmy{]ڦw/0w}7|>s磶x~| +%v_i<U8tiٝ*](w!ش4cQ?7sR_P<]':=e8>}y϶ܶmmmm l`r^ }#} -D:պ|U_C:;z/IIǣɔngn  !Pl*4>Aޯa=x;GҬz1z.DnI_jLW'O޺cjǿug8Hs\}"+`GpG27!}#O.I$vDSIHg^?S~ 1b!e|Gl_v =0`a#?` [#ol˾ ރ:$зlށOehfeG/tx25L\YߎVkiD<۲#SbUDvywrdg "mg/pf0O'M+6,#S%?0\xw?"A`^BɎ(qd$ˊ[?Z| lP0=k8Y0KYꕦ>+=}G8=SUq"袙OE˥p#)3wD{%SO%709CqkK`]极I~l]s$wgg@yoXy!P^ӆށ ejFU5ե$ $U^uMس!eNguW__W_p Z0E-\=ʶ PB?X 4jq=ӢIFd5|*y>H}PKP³+H_L"EZxm1$RH^e]M6j_Ʊ)55z<"Y!c͍ &ꫪIu5VE ~f1M5F|],X3bXW1X_ol\QZ7ɗY:rpK|mZSD{I.2lR,Yo=:짷:(X,@])fdJfL&&%2*M: j+D]ЖGoݮ3A_!hwj_\<+ ۰]g3he/5RwE5svmݣSNlkMk!cMzE՛sEXWa;\;be%y0b-X2 v- {jX'E,<[/5/Gv.xP_)99%}Y*RV 5jݼQ9Kt7}COS_˿/}@oeXЛ@_خ5ءPE0vTY1D)O 3|4;`]i1Jf^ ?8HY,jā*Z_Cf)ߑSضЙьQKj|5fmIc=Ydy*}lf߯mOQؾO;F##10uq9pmdB6Ϻ>Lq_5/Xg~(ѳqюR~)PG*뚘 *}UMՍRiYLTPk= L*\sa-qnA*))V52Sͺ%Kղ#C+ӗ]Q =Js54=~ʝIK1חbc3t ܉L`fk+έkpʜga&]pq`S}.P_Ttx=`DE PtR]ev8dIWWVz{D@g '쌵RHHWYXfiWwj+vJKgb^#S(%VXMh l>6xЄ 3pJ tIV0.qyd_`c˚1ňWe4 8s2XOycg33 j][U eūhGvQMbJP q  㷾&vI~Ol 8"sZp錑uh9ouvioLc*\dԂ+vq +W(o#6,M9Vzo/Q9gaXiŷh4q.?!Vֻ 'k| rO'\nqe57:-h#(ܝ|[s#9goV%=uM*躦J1QkN8TH-|j~Ui0Cu#M2e1&j% <`),Uwb?joW9X|Ijmmqp'gb[+78K&"7&6z1'ܩlsWD$1e!eδz?tgO٤;[QKRh=8wbXi[7/,>*5_R5+9тe}a"`-L4O[}U% #?2thGc݆R71P-$yyJHem#l E \HQ?~jwX|] {M/M5'_Mbgⷶ[qܶeomc[ni*olMY;ggMVdOѯ[x|Q-[^n}3-*Udmma|)GWu'ctY*ML?t$hh[3x{wn,侫[/ۮ_غANs\a6Oh)!!KBEF2?=py(OLO͓N5Fj[^X9~fsXMhܧS' Wb =:U+8&BYۭ{kA7X EG۶~2pzcl 3ܔE[cU[\E2T_`uu[wc֔1f&#Jq71ؖpFo{J-r+4$ίLܝOzDX7IMćӣ;E-)FqiV~8){̶هh/=oc-"|p%|M -1@QBAjD+x%}M -W0;1{0SՌBŎ@b<]yuch̴ '-[!9/|-]?Ұ$|hAfiEuk-%jkz-9] 9}+wқ6 ?XX :oS">aYSl(&?[#JD㯭cq;<~X:.OTyuu*91 róm|Y֔NJ3rk"`ǭ[ŗ-lޣ?dϨumwXl/%C-&W0Fn _bVjyG", |V=ȭ.lJ[5h&ki;ks˯ob[/95uinu|Uu$gv宭]x;md*G٤g,F'S~mEN=UՂC~u)K5W2^댐.")Ҡ.H8UcNqxn՘ SN'Jrz/kD.IF$ 3AKn|_Uᅅ5pt>Ip9#M4`fo؞M&z,iJ՘)&OڝhN]yyX:>~o2Px{='k13aj9†;)dNpΡ]ZX$+,NZ}kCmTʶ_6ʧLYj'g:hD,bO@l>Fo0+jdt^3о#}#ֆ'Y ^PRuMim40pN(LMSV(.#4;*\dM)Y҄Τٚ)9i<Ǔ\Z~M6TRv &'9Kc&K4vjdVy)[s!-h[^J:^F@#!6}*f0./u&[]l_oL כ\G:cle+ҥYo]7IMb C;כ,KddN 
+MF{nKLm}yI}]^uAjõ_[r[’ [’GRG%W}U*2X~I rɼSf-(#OnU#;4②,K⑑E?So׌e]+2 IENkElҬILLdx/YfzyUw,!Wb6㲐%!;7/ gYpږ~\sYRXuB*aɔ >ژnǕ;H']q9)٬hdcBHWmLwv;/`%ųXgkXquk6m+5J]i4Hif4-ͨBiY߁m[K"]+:Mn栕+7\̴a\1ƁM* UMipFl0X᤺ʦj_["RVKyzy}+T! #m$j@c}ʃŕ>6S19{r]:KGYM:C]V 1yT+8,7xAbD|Yjfld.P,ZX-:rʡZg)66>Ū&r+4ȐO u܍=]^2r)q_! 5!.W^[UWFM.O]oLo)w`nr1ԦZLWNw/eoȶe<~TdX/l3Xf#CE5JrT J|T_+V{<TVV]{ئo{JJ?rR  Je:[/umzy, 6*ׯTx˳.^|3 L{4-zz _CߘϷ~x}ʬ#=#}; >*8i1{C]S} hލUkk@iqλkwmYڴ{Gm]KZ5c c 4>FyXfOs #+R˔ZA710x5JKG0Ϥ@t{ ]{_yp?vn4 Y"ۈ y@d|vaYDu5g+|UU p`O8PWqC2 7*}UMՍE7bsxݾD궁zLyMM9e8:68RT5:x:+W]^[qn]3O}WST *Q3a~IQs| F|YEP>n_oaMKSҫN,+mÉn>v:):mGaҎ|a"CY {jdsWaĞWW^:,$5r;jQVUbW[q\/dxG C#/ ҁ#Ohn2x*5tw [qhNݍLjaC~ͫ 7VkwEDR*1<#!kxi"?8UD+w&Һ*b1>L$;;*bc:Eվ ,(CSRHjv+x ʔl|qx?a"[qHe r`<Kyti;NժAH?"My_&!1)Mؒ0,6L^[>t<=~qGڎ^ +ȷ9AJ״ª9$!k 3qT{lHK'u|U,hN@6FM2*k~G9gqW^#F.Ho. wq"]hHWX0 _ٵIG0MYU]bCLmwmk"MvN4Px'D g ˌ$.H8b|~s)B9vaz554\.HVT Ғ凞Un5kҞ~tB6tB[P:BGR:.vQN Yh'E lԫT cH"1T;MsZqu'Ev ӊ-ӊ#5pQ\L:*Z;Vy/6"{XoDN?UT+SQdj<3Mu0{RQ&@H3[NfF !+jzOO r"(')$gY;ĉeM>0y:7t@֕2ϐ!ϒf𒖄V74SR~PT5q$CT)jcӶc\;諯2疭*{*&{`?_ih3UvI}c؉8SV#?M%▶>J2T^Ϫ=^^ow&zy[0`ľW93wgO=j1NEG=׉~)0oo_hrgU@I"n{٤S1vwgOkjU2sT:]Zb-<^Mqh1a]X P9biHPL6D5ֱsQeX\+)=4|` $\SOsBsX]'n;_ŸO!YŀD8.*)F ``$`M,ZAOp B]Dc XCh- 7,fhT}6hgADloS_xj*}uu*cAGw1m~ZaBKa!l4v#^r(%-@{2D{zB-W,#Xڴ'KΩ]ڏ6]Rm?ewP y=2rIϯiOsڣ=iͽb26{w~h>ػ@ޭM2>MT}xvڧyV.x|vZ3P7X}xrYVlq~0W$pJZSc]2v|OJɆl|$\%-|#QIEgĺoxqD3\=E-!<ڳTV;X֘L.S]'GZi֊zOsnIacvBSW&kLh#* '<1/?ɗ{Fwc><泾ʟ++v"j>[l֦[C9oGtzPJ#AGMq\AoQ=n;Pr"_Ͱ~5L_WX/S2dI~Ҿߏ9×Y6ķL _oeads' מ?R_Yiuvg*!+;)ʦڍi9y|I&_z )=XNm|˺iWdַy*l T^O?|m_6嬆$Ff"FYYApDo(Ͳ$O 循rg ۰,ۨFmI = bU54+'&x]3"?  
f[]lf͑WN%ٖ_̒9i٦}=K;͍]~8S6LUX{~ y^]slsq3TJLFMsß0'#Z w70wNBo.kԾϫѮsy6-?q+͌;IOHT>3ȫ3f5>W'=jL5ey 6,](.I)oCVL{5|o7'cs#K(nx"ugm7񋵬;-Pk9%;-R)3AT#36snk+G8tff4gAm]Gʤh;* |mV|8+}\F5V1|p֧1 Q-ڝ'h{˝՜Uέ|:M{y`&ÖoV%M"n= >ŷA뼥{DZ+KX%Ǘ'&ZfͨFO3A7MgTGWk\>y4HЯ|1)s- 62 /&b| W㏢ht8-7񙓰S[wb?y-͵h=JNS<}709%n*S:0v\KqlY PĄs}ĝD{Vձ8y Q{$véT{MeNXGܪ^-~mƏ?PjI))*/jc޶uɴ'h?Ί Trêażo;]}=օe#)Yg'SՏ.si!;xc:ݳi-WzF?疟&י_]QS6Juj+R%yNj Vo"Ndži3qY#VԖ1k8;x+KS ' Eog"`m\y4$ޭl~)јl΃p 4_q/"ڵվ(Q,-|mEn_rT*6tymgܿWW5]@'Pǟ4k~e>A*Pj|,*WȝkOf U|Z/ɷ O^C/fisyeyW_-=Dƀc ;0|i|T6%AR[2]n $e0-W7k44|RJƩ;$%F v4?1lz.DQg"d ~ivL}z٩]^#ſ\]h{.5tV֦qѦ[tcC BsY7^gӸ<풋yHmSlgpei?ţ=7>sy[nbRXMŅnj!yH[ˬc5kbkًvy]v-oD[WvMckV]λ 䓐b+\ߔ=6OfikSu08!OZ~ z9$䪻Ws szYZqwM|;>W3wh-yfy4ܖ\ 9u^|j-~ÆnJ[f=oxr˖0 ^l܇bU ۤL.'>E{BDyIےgya_y叽T}VZ[[e[=r goJܲ '|ԼU~( 'hK^f?b6 FK3|[!ZqQ _)O γbsb`Fy En8KRtɰo-Vhu{1ekpnd bzo."KnE<G$DI6ζZn?mm†MonɧE~Зjek;C㶞@X]K~8sóud(yN*-Yc+;a<"j=|EfTWPU6 uy{yb[c7}$Plb,"|Dl#kO cߴZ[(ۭ-Fs-b5wX/7yjk'Yji+ߔ[ިVP))u*9Ϙg?kgg|yC?_~*=;ۧ}&--fd-՞9׊~)+=.mkQ7 ~#mx8^< ڽwY*#ׯG+ˢ)&.1L{i%!{վR\cUS`~7P_T]_eycygq5l{_ڬ{,,)^W0˛P*LJG{{UH{,bb[^A:wX!_E5f|grUkk'<9\Ew׷{ }'_^ۑqkGs4kymŹu μj|WST}v\3䫗WUו7:ځWj֪=WVo0ծ]x +}EXkj9rXlҙAf891A.ZHm#R^yb_Ю\چ 3~[` <į$bCxOJ\z_cS}xbiHg32OR6?,qE֤ yH?Kx="RۋXEڞ2"3XGU9O/ba%E6|)w6Փingu+/؆Koin++GlELĜK2]5 J1L+I^[ˬXXˬUVNg`$T[|gLV8tmXŬVs?[trTV,Lk6yH{ek{hy[isZsYS-T1?~u _wy]J]9ݺe/2~˝,tk`ո/2zdfIG+40Ӻ-|$gl_k5,? 
.5}֭!Xhb\}>/7yO<5 u [Ҽov TҶl }u+Xj-.YeOx ~6SFeLzkn޷'pK[ ?fxGVj[}S[e_o[sï [6ْgOExDwۘb^FWn6.B%B$i^vb~arڛ/YVl~ekk^*k޴".Ӽe\V2=..i/[iI\vW/%q^OSLwodExW^[UWhj-ުjK}oˣ2PYtR9W:K1łj >mO=\e(mqceNψ?bCSZfVLλqwJg_>iMNq{Zn^*.V v;G>N׍JԪh^7੩7h2# <$]kC\*WHm;zV֓aR0"O_*c~zu-~]hϬZOq֡*-[m-/u&}t7sa"DMSoVjW mwDvFvF{wmȂa_9l X*Xa7n8l\G26r1m3a|%%gye?9$7wZ̪kNC=|wx| ߭Mg мA,o6$-bmخ2IAے3┙m+|gv~{w_dx;XhT<=+ώ`eHvڲNa3ݞ=FZsψ?'yDhͷVKYe4N"6_ڗ0_|֥J|mwGio_'zmc/'*cp9-3k^翬irnEox;q}lctO.zg)Ovya|uQlo|KD'x hOn9o' ϕ!qx R=sWS-W-9Xk!e+bWw`nP`}b=~.ںmbX^^8%K^ά]ySYMSsLI-xe/l|ejoN/ s|^ꍋ\Ise;g̗y)U SOjҤ+Ӌ.+qۯE#m\l߈fV\wE-:-)IovH4fḄenN/)<)sQ|w^wa5K;QXl#D6(F~6xQ|fʶ"ynui Y7ݨmEpBoEB]2'RĞ'/% Nۋ6$K&|dK\5l?:y8^{,sI'4i{"6. mĻ9x7\z7G3sŸ;LmõsF?Q{ZAڽY[sMD#ש=95' ~&LZ?mvEZk7I ϼYnw\,2 -OefcWc7 j~~l:?v]ɻLXl#D'XHt%M66C|L]lcWɮZ<8<g@!,ys~vqn}Ͽ_w/1^v<:ˏFD Q}c|ϋ|OpWl$8j_wo-0zKn-0zWZE y=K%ߺYr R~zC-)!S|;O7))S֯7{EcW/oѶr{haVOl#׉b^| FnF=<(,{b7 _yI~9^߿~̒G72pŞ||'&q G]-|MIߝpj?sـ2.(/W%֨}[=#="cc/cav7?ǖc ݣVL|[q]l'w՟6R$=%<-<陣E{Fj.)ƻ}55>Ouy_7Fҩ=PN4-"@oSuYz#N~驇s /h xL9t7P^JaE9W=K>ؠ.9w'gt2f܉f,٢;e&o9zȤ-SwCΧ ^9*گ9@ml ;+\mЦ,a[K%lxY$@eINvҜ"9k,)і,>D$LUj<xB/e M '''țbT uj1/GlcbmRVoR^ })X.1-mXlbZ#/޻^)|Χ.wWɋ`_>̿~yڷXQC_Tڥ3;ARY&8s'|%J$q w?*Fb#};k"]xRw,I}D\I䛗3N}-mh9s|=O=/eum;:<}gFڎoǣqc^I;{_/rs'~+$w^8|dYݹ"Iv og$? 
[F ؆6xv/f .-s:+>а~9]_I촎!5/@ќ"61I^pۣ|r2.ۈ؆.fO@p^w9xo{W䰰5r{v=d=3hYȁAKc ΍o%aaG=;r9[֨%j m\o}sl^6v8Xv˜?z.߆r"?7+%a簿,'gx؞XNkGwz=E9܃}"HOU& m%2XpPo#`% `n(+bְ#NneN`*v1Sp35؛}Z"9ܮrʉY#h+W0%¬lw[^,SaÇ}s:n;A rĬ\ֲ0kYXݼ=b9֐3Kz{1–!:a w.2!XNOOZ-^:s5 [cc,Pof^akO0'f P0'wrٶ6av^g%'G?/|=Z@%0[rOdzV_+k`ky;Y̛DyWps.S-B9ўQzpnGdN,3nh v`-͉^ ZV#[XoǶ{%i7QzNgY̪1++ΒD7(qDs^I Bۅ,F6zNWBJ|9NfUYyQSey1kˌ½aª:9n̫9%jeEں56ۆd#*z0 1'zs]4:qUg6aGbX$r1?ݜ9,Y=û2#;lx.o?n;Zتo*̼f=y{Gs< bެYzFў,>wcŵ;UTt*w*r "XnU.`nU";0iùzf^rVYh csY s&ڡCY=BZL 3a>,B`5Ú5aȜ>7; <ؑ#zfh.ܽ-vjJƘs;sDf`VNOVaݔ91"DEbm 2!bCE`vc?#tìYx1y}d VvYdncs ܇eǂ_-ϭe FYX`6f!ݗKa6(aVf\k dܾ,j=ؐ%eqY{qQp}»rm^vљD{؛THFt&l+`#ެAev`Rc}G ڃYKbDb<ذ o/Fֈ16 { xv{GhP.}L|in1~|w#ؿ\cV%L"O_UEۄ'?3 ٿ@f1b0we5N?FO<}p]92}뙛:}I?H\{NMO< қC$c{=ЫsG~zn%w$dwUe dza}1%y{ۧ.=Xd ˧!;t}\GJ|=S4HzX׎'84巐'a_!qx'oK/=NOxJmF]?|%jvE<^CN݉?'=ˈ/%R?ԣ 'c^P+r_>{!_#׈;}?{$5M㏤'cJ |~$~&$aO?1~&h1d,}x|~!?JuGpAF&?5opj{voXēQ_~0 pC5_M<? X7K|ܡo;#:﮳$2]Q|WÍ9i3C_1Oҫ翥Oޘ_27NS@zDC2#!wxs0Go^a>GGo䗌E{TKݑh#_jsz5&qŸ>=5^:g·̃=w2x H)<.sӋ!Yoxh1g>s&.BϘ;b~ćc>h1K68Xd,R7yQԌd,I;xuL%t{I\w 8LҳKǦdNbs|xu.> J>Fo_ AY hL/#.AWM_~qi/ 8D#)j%IF|aF'x5'q*G O3_ rx _Q|XF}x& '$d,ʏ_Li7$d,$v$Lj=P߇^lA?ħAR z?zMI~zՇOxs %M?VLsh:?\uˠW3ƓH_OƢ}2@/<+uzn2xloD`~|?_:x/c~XdS?O^2~+E/5/;W8x*/F}Do@^aM_/;Z Yi%c1H7҇ESzd_Jo#.;U/6z/cD#ȥ# gYcѾGMsg#ߍVawc|3v@pz.3W~77CεơOFD$ %uκ==Aw cw ~zIDM#&*Uc>`RѻE+$, g;L]?D%zMCA!1EBp.z7B& >$ߐITc&q/>Nvmo}&CD(!O^7awc}dqM7V:w6 ?0GN8;r~GgV >>>77Jzě л>@W U"7K CҜcL/?Nؗ'r߫F]Da8>nR=W8[}/TM؄oR )Jo'z;ѻWJ+o{FO^l$?+Do# OpT85G'z/J`6K_"^|NzYx/?0},/1~xџ.52)b|]Cڇ<g#Ie!~(}v॰<ܟ &mQ"E]ϻ gw>L#L#|9ʿ9ccc=\_>٤ ~TFJ#'-O:rQ#WNCy7M꓌E8@?^l(CCh??D#WNO3^UAz &LS!?}Hy4RߨG!}T½fvyCwԗD3!} gGNv W_eWNK'BJ)o2-٨o=zm1gJKel?Z0>7]mDo#z?\nRL5Ǎ&AAOs}!HZR?&Oɘ0, g[֭hL=X1ب4EO| z>S@$?yƟv܉}7܅^'z# ‹9p,:\np䎇zE8E+E;|1JA?ʜ?D z_-D{jNODi-w+hr?%.?E䯓8.x19B8!Կ?kT;-gU?r)U$iF}2'.u׉>8F<` A/Ǚo#ȿ?MZc ďZ?8_d;[?w97ǡo 'Ct4=dn'Di'c1GsWb<( pD#чϓsWXD'z|ɿzE.)?}J~3 '?ٶ 1 +Dg0 k* g_(g> ]HO,wDWXC>_ƿKv)/ 'O͒?wݗ!ptp_zG8^9EmQs#7C!zۈ> Gq>n2m*cUd>A?xv?"ٕz8n09^BCW!BpG˰LJBt~mƗWПfOWD zk% +D? 
D%0C&zѻ>Mu /Nahk$u5F}䟌E6 zoIyI0>wU53H}L7\s70^1׾11>aW8ѿI{$cQ$1N؏]DYW]/ "7Q[q'p\tJ(V?D܁w4It|);]̗4g]9 +}'x>_b7Wc| X|~&G#z!G?`:wֶH_Gz +' =v]1?σ^w~@~sx@]HOѾD8_]n<@ZķQ %+t?/ 1s?.{q9|Ow zջlRzS !z{M-Ϥ>GL}Ԥ>[L۾kOJ +8aR6'͕?~L毫CG}Ԥ^[)ˡW8k)z/mW?C0D%z/kOW8[O+g?Mnw=-!oDMc[F"ŨO +|c$'܏QdzGX,r|{X>O'峿"aNO'َU [!=xKk)QͨOb{[^lw$WXcD#?z>Nq'Cpgԇ3_m0NըgAp ~R~\uB}Do@Cp~?%cJ~hI~8_^D!O>AI58 +u$c>>5G#HyA?3A1_}3s9ߠW8[} }n1𫠏;JJκoRZsw}# g۟/%Rp7/͕? ^; mDB#&oJ?^lwێz.]D##-@pzmO>@(? O/$S[NhoPޱQE]C҇5>kwk>N' %wEkq?ޘ& 'cn?OG>#h.A{Q##(BVz䟌=1*2Z? C{!}QD+1#$d, # )o@һH]%1>ڜc$/_#kJJ,?2^?oM$$x"/оDo#z?I}uM#c0&1}M_g_DiW8zp"]}>Sɷbgz?7t7BpH_u(9Ҩw{=]$04d~&xԟq8SIͨO¿| X{>oF|#a-w#}a3>!2?Ib|;Og~NϲI-+ґ u._o 9~uoD:\|W&C zѻ> ~5aπ>j1>F˟_ ‰]/\ 4B!6O 'd>!7CDnD wx] u{d\?Rs!S$z[ ;wH8|aQGJǥ9+/+:Nc&w+m{1؊Bd>љrζůWX#QE%A҇v6ꓱ_ ~j.6 z'z;(\O#zۈDM<oRcџL/ tԇJ&V8lj^'?;%jc?}3~u?w‰mo %A I$W'7Y- !*=N(?Xc '?By\WXX2 szp8w~ؿ>O=AYLYlԇ ^ _>Q͘^2&J&퓌E|_;X=Fi^X^;o2`WyHz^_ݹ4>2 I/J/L_~*8wL'cP>c;Y|_77GD+?I} az|ġWct.=@߫}߮\z6}ugg:w wN:?lч>LB]E +}#D7W8[}1}D*+m_^l_J zч}Z(/l|ɟD ‰GA#@pDo#G#߱$rL׋$Fu?G>

J.w$zϺoR&.OO( +_2;\ nn>FV#/&#&Q.D ³/I-EGpt)fcz Si=U087D"i?z/=9pF|-=)i%n gjUHztJ[Կ0镓xKq+%ĕoO^i,{<-uy3ݟǢC/%KEI+7^3czv4EΕk,_&'w^DQ }D~OB{z^ERzٹ]ݗIWa<4ϟrbWI}Y8ߺ_-'Ap}H|rVg5>$ٰ/߂KWW_OlIc_c Q0X)O2{$`^sv-߀ Fݘͤ%cadzKo/ӹ"9[z%8?y\.N7X.܄\nG5> F0 FGw-ȟ3ͷ"+$[+z;?~_D"Z1g \#˽~yh"1ן }d=;.=ךz>S<տפ=H4j0>/F<\G?1Jޟ(=hLj& z fqC(?Kd[|Ef.x!Rd,e>Hɿaċs_I:D̕"6}5+?Mu~M(=L#j%_>CO1W>cD#zv_Qo'NW`Gɿ)俣Q''cQ$̧Q̵wgHw6$d,·l?ٌL߽_, ?LjޱxQ!O\q}:F`wwGўxB"^D\< K'{Wc/=7+Mnzk?ɿzo.8$ϼ 9}.>B$?AFC3ꓱ?7`4%`HX?K߂DڏD'!}?H~#!]Dհ_̵_cfR0!z'c&woC#$P7>KI>7מH~o0W8} {}cD=Rs??i?i-79sz aODOb<̓WW7iV^+DQh|HoXXdn|b=w?IXb[b/CDi~:J(s(W~D 0)_?Zo&w6A +{Xs'*7H}c1RKazf'cK~uOH4ésH\J"]2oNζ?G=`#|gn| VIN?wl,z#wIH0n" 1R!z7ѻV-^agR;D)9j$25dMq}\ ࿻nR ?%g,P'o$K$MOW8[s_OsDwh.q ~M4\ɿf_߹d<#z{#Ap9OBO̍綠d!GiH_DiJ܃^K^w;Gq"ɿ'.2Q<7ѻEɒadsԻLEn2I/;2sr_rQ GrsG~W)_@b]IW7?sB̿z>&a~OȜ?DD&tnΟQg3|b'cfJ[D\({Hx&q,w{z|0ןŒ2G!?n$C%?%3 cD#zRɯ;a`?v_NvRP_ ɟoG+̝H\G%_=j.cY!{?O+ ^sw$J?_ir>1DJlj>N$mS&SOK~1O.'E%z;>U~?z;i{A`~ި\ Jq8Qz~4?I#̍^a&b~Qbd,W?_Yz)ay+\9_D1]X/c}Dȕ7W8a!;\yR0۠}U_v!_sz}GɯGs$WX_48k3?.[w}$GvW8Q\R\ޱ?}1ϞeJ!Ow441bm_y) و_!ˌ %[N؟䟌?^$[=cdiC(-~?M^D{Uw >O$~VL[p>rcHFNGͨO"\`N6y>t/.oxHo'/E_wzd~AK =D&K$ζ<۹r/1b,OGC{x~goޒWX5F;|/WX3ݏ~*wi1槟kog_‰Cn7azszջ ID%z/kk%C(Zsz/{ך?B&(G>R^a]/͕_ I,Cec$~$cQ%9 gx̟>Hzg9\$qN?7G?77B[dn#c?#3Y|3?ʼn\1u?A"z,0>f|2=}w7O˰olOY43g/]>>6[| h2W F3iM Iy $;r ϧY//B :wGr˓K8_OuQi=cw䯂^aiF}2D%h61hOVz;ƻI/eg/B._" ^Fdgq?O엌?/AһH.v^U#H<%O5rT;YRgW2zzSɟSg/Y_KWUHUhwWWTI7?̍Wҏ5x&̵OVApVsW8_5> 3z‰3Ws$! g?]%/ >M$wBdB+?} O^DI1ۦKrV8)_)_ApS 3ŗ,[x0Z'vAF{ ?!7`>!4,}3-|_ JV2l +}a?cCبt}:@?^DXk+sQɿ[%zrqXă'$? 
}뿁$0_zxVˡW8IIK#_/Ҝ$; %R?/H+>۾DҿNo#$V7$ 'x{Ko ͨOƢ-AoG>JQWX]D"(ä$c៽% 'OO~}$`>FDbD%z/чC2?D#GwW8[}x{&̇7C8Q3/v4$~Aw"CʟHٜaIW8.+Onw}B5"Cpf{w1kJ~\W8[I}d7C{H:N?"SV`o>?PBhAso >llj~C$>RpBOʟEE=ۡb.~{W8?HmahM]KBVz/{^_7*#W8zI;5YI+ {fa];!~}DW‰d,HAI ɯRA?\*WC <ݓǞlgHYV@Q?͒+%~z =g2^'$ϠW8[}zɟm7J#+\g HƢ<зhn~#CۈF^sB|!z;ۉ>M0!wcσ>fR% g?/oRQs%Q!χ>4פ\sI&s,Apofs?}fs叁1z-~br~uD'0 wn3J~# +}^a'cw/}숟w{3_A#id,?h{|\/R?`聯88H%/O‰!z7ѻK_,7D @D/^D|#ѻ#dA{_W8HA+ | 7&3o% ez~ z>LOtO/^?oo^#k~xT^awcs2^ex^+/^a4^ƾDxW/+ }@A#^D!z;ѻπ>`R zs5o̵{xΜ>~u?m~1B[oo%-6&^}DNo^a}%|{s]D" Ї7tgB,GM5c&mW8[ C 6k/'B51Ї6k(Sn0i>f$Y6;_AodB B$ŨOƢ +䟌Ei&F zջ+Luσ^'z/{^$+W8C?ߤ^u}?Kd7b?~6_$Jζ se 9KЇLc_}cD#U{UGguTA~2ɑ 'c,mDv&^?Kv}&&:7A,A|5? goYo>ۤS^_}D%zW//Qz^D>Fڎw >~͵_WDD:#&󏂿 I} Kh$Apw n}赝%\)ggswC\ }#D!zMG̕?T6szf]0~b:Bb/]?v췫 Gw5W8WA\ûI1c]+|4q1d,RЉ:I/Bs%!=o^ˆ`~Nw}0̵o݆}>L& +}䟌E|Ó_1_qlj>Nq+V?Q W~rjt$=]C'6;\cX`^p`sj?D^lnov9o Wfiw)F< } G2J~߳!y*ÁK+nw}k!@pz/!^ԇ }D%zۑ g&CG>JQ0W8@w+)CۈFnO?_ }ؤޖw?d? ,F}2/'W8[H=u0Bgga^ g?#7ч>lj>zt0`<;'b(g1 g gO/3g\XɟǚO%Cp9Nvߩw&}_쯹$ gwnzѨI}D%z/ѻKV7Bo$qcQsw DCp_ 4:>@sIk+ M#O>BqQ'Bpz?i wЇAX 72X+W8Q~RD_ "C/G>Jg.C9ݨwhVzu!}c;\3% gkY1 'DH~i5_?kA_^s* g?:נԇ?7!cPo28Mo-+mvNnw} L#w6oRokW8[W1Dz{?Y|gO1'@p5̕_z_Bo2W7~&QgA5wz>3' +}D9I{C>oJ!剐$DcQ\zA3>॒$ӥ&cGW8#w]&_FCg^!Ap~h)~ScS%{V02+|JsqiЇ4~C!z7ѻ6 џkRI&ZH d|9{M#tg+m& sz}9 3̕6S[W8[}ϠGp!s4FfIC+>|"j~D&z za2o'#| WCk)p.~Go}.Fd.oFAIAv_j~:~;? +}?׊w)_&Dm'cQ{%+V??7c&z7ѻqzI}/H~ gூ>hR?rI/+D%zm ٖ +̕? Ї?o% κI} >F_2_/ g/W8>D􎥒ʕn?^᳧>͔^!Cig+~cӋ3d 1~6Mz֓f` cοtW@pUF{|~UjD3E^f~fy^"i~وNs}'dl_J}7`iCczXě厷‰xa=T?Gs*D}GMCpɿkI6d>!9ooCC}1>קvR`ף?'lj>ިt ^aϴzhɯ׋ q+Ic/!ӿXdcyQs1}sW8&]/EOnOF}䟌?z-:#D&z7W8[oA6&A +D(I}EG2i?K&D2c6W~z-6:Cl!?*&?>Bv} @}:r_%KVz)2{!nc;D7g?3 g]}{H~ f~LʓE<p>#Oi~S pd| %G܃eG7ꓱO'C҇=h%0x^#SO0?}>D%z/K(Vf胃͕? &&%PMMcDsw +}71W~P/YGC͕__^'zۈ> ~EC͕v䏇^a4>ŇI*VuK~_Q}S$z'IQ<px^K{Ӝ_e'c/d_Ƒ"K~㉿u:#z^.4gmh >wM/40g:]!AN??rW+6W+mq?d9Ѥ^Fu8C7wM.j.nR])VW?7A&(G> G0~O39~̕? Gb7Y~.`Y??7צK+v}S)=gǮ) +N:gKy2xI'zIzr>BΧ|*z_xQ$푌E#eO?!1eMhQL-?zp>M 62&GHV+DO;$OAۈ^'<8Ϩwcw| z-c_[@p"d? 
K[+'$;?~<{ͨO~I~5 'D^ ˠW8D#BBs"/^a׈4bP7y=ˣ_|ϋO>`Œ?zK.w .u߄^'z0y \#K+_0"_F~4/ПcH?=bn~?}ܤ^{T^l л>F#w &]%AVe&Q>jRM{͜>Ї>@vBp)&^D2&3aˠסW8[ w?ѻEa>lR_M߾7@\CO>dRvD&z7{K?Ǔ3(s.(<d~'c?(7O},J?W-qpx0-J|:\__ܻijn>x~^W? :A1XbD2O x($)*?9*!|=o.*xp?ODJ3uK|>awP&R/)({&c-?;Q#[<.J|.?x.U $~85*{|# \9| Z`?~>x: |t5{\Vs+gJTǁK0\`{=i 7P#_T5Ip%JZCGx|&bುo(*I_x2I1r`7{/ qA [~ $>cG>x'WB_>{^\ x1)C'G~1?pKV \p) ?cN8p:~?Wgw? L"} ? ?w| pd*?~WOoz:?p x>W 7DK> x-p9W |p% S^8o\lYxfÁ1Zsms  ף>{wA`?BO@>z?NB [Ko>8\}Je!?E#>!P% >r)gx`b# <xUEn!.#>m`;E7]\>?*ߢlـx?Uǀ>{>ǀ'6<xzীwk?7Z[O^vJ|>]Ӂ{VJGWzJ-Ϗ_ǁ^|:p _xwH "#)?'$. >x#-K| c+ o |ڎo~xϝ$>'77|plI|/PUo?p.[6!>8 | Õ~W}S'<x-\vxiO? :_YU{o;{GO'?b Bn{E> x5 ߪx7?p l |:5K/^O'_<x>J ځvMX$.> gˁO8<ˀ8_ ශ0+W#\w> ]@ x*ীsDv <G-{w`xh>?BZ?A3n^ 6} saoOvO.n>1`< 8|pxps?V?b'? |6Z\ <C?ޤޥġ%O|pp2#~./-$ x| _os<<_~ x#;J8ܣ7} ?O)E^ x` k>DjເUx\ t_l;;|đ8QBNA>LgF;^<x:x3xџ<$睅 |p=*8lT> F?U>.^/~ )>h;"^ G>`W ?p5SGoޡ9jg''ס>x"p -? < x?/=x$;׈ Մ> !7_ =G{?TǀjF]&*}W]WG/<[?E;O |=g+ TwAW_<v%ko}K]/?{w<("("ڢmv7"=m+f$nv&M ("("("("ϼ;y7$61Myygz)ONޕ>x0x%< ^v 7^ r5bz!VQQ58Sx9ZxE<w=x|^ '߀ upHh:7P"\N[?\~n߇ç}vG{G?c7g /K3[9728igp[(x\]lOg5bx1$ o û/Wvxl KE;ps%g*x6bA*8 'Uup 'NWXNσ'p/^ _ |n.w~%{Άƞ /Kp5uE( ?/WKO&W_#{8)pχ'CpFFxśз!Gzx!Ox 6Wãa'Kk&>~.[z=},=^ 'Ͼ ΂x>6\ AhC<^ë5;eo/}^'aRxvCspOطjG+WßC>e/o(yk?Dv= 7t0"nnᔡl7lkz7 <|{29%$+yz<[Ւ$zߛ~Ǭ*I+M%'y'$,ߪY#vKH^!+?go&OO|R"9_r$7H~o?R׏{% yh;0VIϦz8A&J3W%$yz,)z7Uo4OKhϗ_ͅ9F_z7C"yx'm?Io}Cﻊ^#y%_.};+Jѥ,I~_aR~HL+ʥBxH~GJvHRWIIγ)d7K{d+%,^ݒJKE3$BɟHvdO|% H$o'y$*9)$$/JK>J%7K=[3%?#yjK%?*CѻP䵒͕CedoIu-?u𾧰Ip=NwxS>p .SW9p =\O!=t,8.k﯉J~@7/3ΒOst<_Zߑ|zHË^/wCjKR /Hw.{+-{3%?+*sO-{5z$)rT$'oIUruz( HIrz~Q7=B:ެw-zyߑ|yoBrEz%&ZK^!H6|HvHLݒ_dz";(qoJZet|-Fwni,+9Wr%IRr{^(y}R^j?{zK~H~S>Y3|#z(J%yTj%Ԟ]?ޗH^=0@ܽ.[_%%0PJ$!9o_)?%"yɿKޗI~AWP$_*+%#yKK^ n/KN?@OɟK}3$ϗ]qC>[}zϑZ#Z]?}WHn|P)7LC9\_ɵ#;R NջȤ^f%?%yo%mIIvJU:H~\rrt|Z <5S*w{S$ l~J^"Q}$vgJ@r(Ozk%_}ޓ_Ks.1z.yX3^m6WIzV.WH~^ϒ %#y%(7!ɷI~QgΕwߑ|$'ɷO)R}s}]$_~SR{Xӧv_JaGOjo 
'/.yILI~Y򟒇$_H~BגQ$wI@ER%ϔ|W$-yCoE%R|aTR|K?R+r'R%zW!??$_/$_wSzoaɳBz_ےϯ|g'{R.S6*GNN;UrK$7H^rzO\#>Mw;GOr%')_rZJQ.|%zϝs\ߒ<<+%OeWKSrR{ y vH'y҅z@事.XZ_wez}k%/Bj{ލ]w5zV%?u}[AunW$uRYCnR{.y"ϼM=);>.O[jߗH?^tԞq;= _ڃz|H:>SÒyD:>H~So?gI _XOJ}J:^= zW=+O۳\%oT%IZr z%Q~/>+k%!T%R|f_ {+%?#;]9JT}_^璫%Ѩ+|['3 ^ 7/}Odx<Mv Mp΂s[>.m> Fx> +&x쇓yjQ y]p "/y-ZS1MI|b2vM[%5GJW9G}a0afo?=eƱOZoͰCvxcpص7\vIXcׄײSBɟ#+au _ 53/oEg_?saO 9 pϾ W`pr8 ïEO.`_'7ۃp>~nS.d΃s:^n-`OsSa;n?8N NM[p-&x> ?']n"EMR 6?Dz+a;|O[ڧK`>\3 g~aB{_ 1};'^7~nz\W P8M nµo_^ 7{Ʈy= na'y<6\;ã`;|\ 7‡˞ǾGlϟȏaSp[õW?>| 7g_d/hIp[o΁w؊mw.ؚGnî>/ې۱ƳO`vs p$;cӒ)Seg9pnXvg7M{"a1Ξ/;jd ;8w"'؟]_3˗9va K&!2b3^KC>?CٯC?(?S pl?\ws >&OGS`fByj{Q[3,XYy(i"ؿEQ?oWc} Px 7 ts&bIp%kZYX2 ucfc}W!%pUha; \79WFaװ/auݎobvMB-(OGbؿ$lݬM=(L_'}h׻_EKca=Yڰ@ N~}%?ov'mc'b3'~=^mf һo*{< _oSVf?@{ 7$l#p8. Nl-B~§ɟx ע90h//uwaWbkYp ]aZy >؉׳/k/fx [pwK> ?`[{v679N?8'fvc{[nzHOsҿ'ۇ}?{;~n:K7=wd/(?.kc'ξdQGWMAOۃDU/s=չ?`1]OEz&ϟ7߇s8ӝkfj'a?7\> &48%Mllv5Z6~ngkNao{;.:}-?\s!;\b^/Gy>d N}ho.b "~bVIwS/Ef79?M#?^?ƞ~%. ^O_õ`"냷û݈` RY7#}[~[>? }}ϱ߄w= 6Ǯ\}ĮS;Qn%p3`&p |=%b~3~-l {0|Eip<nkig͏a{H f?fOk/M~Go{B;^?Dz_ccuI?}SSO#sكٰ '\ W< "^7ŸS9/!*(/hW0=}\ 7W"~~۟ξ5cw39' _GzH#d?וgW/9g{kا<] 7]] ӯeg]lzv|7.ҿe5-p/u8!GP>3נ>>n|=Cia} c:H OU!e~/׭C~|&2z[,&[ ]=< nZ>;??c֪wKby+?8>#D !pl?O__w;iI/ނmߞvKLߑV좝؇wf? ''ۅ6ln߷e7ÃoNڃ}~vɎ7q fo ٟ6 [{CؗG[d7~/vAi{cp>zz ~z_ߡK#o`dqK#-t w0^=뇭`YðtP_{p7'b$ëRǙ}> FG3Ddb:C6owⓃx8s46t {QXqc9skJ)Ga>Sf1,cQazدٰ>XnW}o.GvOڷOtW۱j3G#|o.{S;"NG԰Ϝg7,=(g+N~ׁ wub]b}_*Czܘ4x?#+Pע<>x_ʾde{HBH2q%j*s>OA|eb.D}? 뻓= .+5HR0!gc<̶Csߏ 9硼5Q χe_u8o"}O[O_-C}ӟf?6?yeQCyc9땈 ˯B~÷]@ Z~=%7 /c߈ބ!p-[F_ow n|}8p{8>݁nSְZ΄S>֦75%]HwCFz~AE"鿡.A=p)=^`?}|\z?|on w)[6WI[G>[sb'>a[_Sa "}۱Տ!wbyMfsp.O ={φ.Cb_ '9$?23}SHHAOc}?=^7'=Lb濏}^!n~8{XM)~YOѾ7bp[/k? ß?e'} };߅a% _Ia"+[vcg_kٖo}/zg"~!?C_f/?``}?jǏmU`3ןJ'A6=Wo/p~߰Aᜃ/ /afg{lb? 
7p("@|b7a DZ CgyTm9]%}Vl{)L5dyٓ{ H.Ğf ;/BpEkٗɗbIk-w@dO}5vfpm(?;#a(7>]svϳ },n^ww77Q^@bτkfg ˞׭F_}^H{#Q>GC7nrO ߇]'m^7+mY [ᚾa}۱OMWp-\8߅G|#0NHPl?΁?óFx- {0?\Ewe?7 G^wg2ex$o*?ݠy2@ 3.`lo(x96?a;Upҡ"8;  g@z`?l΁m>\; 7pCۜ [<ܔ0y!p ?+#n!Nt =ZvP߯G~7bj/aM(_G ?Cx*\ 7æ:GbyQ>(3,^7[?[F+ <eE|ngρksaox'=냿kc /c1 I1pho`{fH?:2-nznCS_LCz[۱?a_|n;(/pg>n )l]T߲gRx}6l6?N1H b>?N Y2l n-GzvbWᤝ[1>Ïdv3\w]jBfWp# <(o?w@~> ~.3waæُp>7}i'c~;7S@ryp0pp 7#Q"Q_Sكgc9p\kbOFz3>߀-b6e}<2۝shg? ž|$z:{Cp7`qz@zp `؟p=|Xepv:xX8pnC|ƣ&8/'yϵ'E|q?n>w"\{#FA<.b_:0\u! 釿Pp | pX}\n8~Ù(_.p=RRds]P7ߗc}K&{+A/(sO=>r 9w5ÅOpe!w6bp=ϸcZ,WI8>EQap8^k`eho:l~ N}_4n}MX>uH5p pM]7{3w ނ߆@nE-fa??@vOz8Anc~ 7lC8^Ep=\n_erqv-_>nv9<ܼ\ mok`op'`ߡ~cO=6O |߇P^bf;د9;ox.AكbEpҞB|[gqa?} ?'v+p v棘n<f&=8g{I> l@\~n`2L? ;}?x<|>4/>Ie{}3 ?&Y /~x~88%}\op$sp TؔnxǛEpH?샓Sه@|p1 /LGy~۟~ž䗟]3^bk8B֯#p^ G4byN8"o`~48b:&%gn\ײguspҥmLn]s],=d5w#{؟)x[ҳw ϝp]("w#=Y^pc=|?;#C~6/e? 7Iϱ/M/so}?G~n=/Q^5߁_!Ap_x쇧~&i=>[ٿdAa{1OVӄoe};p= '{7G¦3Z8'g/o~Cx O*S`p-oЖߓ>"܇`[lπó`a x~n_P7Kbo#h! Qp!3|3}\3p5 ?%<^9y4zx<5큫3UepTmjQx4+^ ̞w4{Oxb>{ܯ}lKUǠY3DZ~^ /T`kOxb 8C^ž''p^]\ƾ^ ;ˑxZ|*(/ãU.\myb8 ;m:0^ _ 9Px?qSA/pl=o Q w QBaJHx,@'sa?|1nx1꿶%Z->6c^7#P+*x&|:>W>,\ֶwMڎ˝Paݨ%p=x轨pQq "} XAxRxCp#pᣨPk@ֶ?ix6Kpxo χ!ϡ ekPQqõOPbg}-pggEῡr8y x1 O[\?\_ GmOBmװx ֨[ap=<nmU >nЖߖ~neo Qxx-< Nَ}$Ep) //~ensrx$ cO3;OW—wbObWx}_)l <χ^.qUp-|\?Ԗߕ^#<;OW'iB\x5|-;?/~=| > ; *m}{Ípި~^o#l^ ? ëRx@noE/??CG x 4N:NO×p38 > χ5p< ΂og B}x9 dLx1\ óP-/ Qzx]YHx-<N9΁gtx%|\; n7i; w` 8΁>  ^B_CsPvC{*x|h>^>7΂P i"\x=|?\‹pu!6nO??j73<.<Ι_? e.:^?O= ^ o'7ETWK U_?<~[Q|xɨp!<_ Cja8n?Võ6 >pl ^{_1N>y5pʅ(_p\xO8e{8< ,x\7W/B}Ox||1m_:x(p/ࡗOK5O+Fx+^CfBQ8s 7<.z"(m~x%ux+zg)R8N-xnD gC8&g6f7Iul?|*|^  /~{/G1[9^ ʾ[[no}xpA!bH.7اv2^/Woog?p`,k8/*x6brVm~Q/Um;ٟµop!(w ΃f)p%}|{)<~ ^{ 7x_ [x)| }.lzx><\ }B ^_x2 ߁ophQ.nRp-|9 ^ ? 
>~S6= ^2# xc:]p3<88^Ïk23-~8 >`' /'KalZƮ}d k7' Cy'pNz}< pn>9D8a/>._^x,~^b8"{ ΁g5Lx1|_o~ o:x7u|0 .MO>f/sxh->i(l x*.|ʮƾHxkjQYVv=\nof"xd Ep3825ldznɇ_s`;ykOcbg>8`;KNWNcp< o$ 78>p>χ'&x} k`\7IVG`}p:\ cpR:8H N{%1p*8eWan`Kn^HcqpQvei4Y N[~ /{ =1w)A|a3\t{;} &x^')EzNbI kӋ2_^c:ߍ&'\ 9p}Wɥh?"4 酯G2'7I#<>|fWEp 'U|n);^_ xup \!pc6oG|ha{}6Tx{2C_p6}6{@遽pTk9k.7 眂nD8ۃמJ~#{rz? 7/bo=ۻ}l_~Nz:5A|f_79 hb}y}>? ۟e%3o}Na7C8>}d!xw:mp\p-lg/E:4xx>|\ /Ë5 7 t?<N^> ~>.z]缀)/k-@z;IWT$LÕYs}-!~ُ O٫Ou-"7 *86xDߎ!,>|.F|p<=U3_Mw]wBi1֯q'p<{Kٵe 7=2,1 È0ϻ O#vw%f8ﮁEz-:HYfLllv&\t_Ҽ/~ O 'MpZ7k_>>΁ρG+B<"/f`̿0| d߂L vIh&cH/~ŞO|!=+Kg.O—W4m߀OMpwȳ']sxr^<Nyۇ߁+0/aؿMq|y>-Uw_a5c3^/`[_C:ۇUobױz }Wa78Nzm-UyMw{o < kP쑚e'OUMyʤKa7`$؏gE\_u>Ci:JX^ac 'a!>f'ɞ e7ͣM0v&χMf4v'h?6[|.kp# g>C|;bfsR˴Y?gۑ#fw:\Ƒ wpl v3٫`;{!pM%{8q?W\0^fר?;ߋ]`;G}oGyo \=NnFGx<p O(3p|(}Q^'J޺&i?GͿưov kwcWd=o?-o=GW9pV<|a[c}M`:e7|޿?qnɸ?U=ۄYg|~f\8}8?bG|Lwfa4~b~[2NBzs #^~3 /n?W~&\o8^+ɸ;1.˯q?w[ã^8 nҖ1}{6?3p,#>da{و/ԌxGz1hL 7c}7c}rP0^ 7ݰ߅Wy >F3Wpsƍt['~Zqcdjkpogu͈xO\Oa;3ho\qp3?qO8 #="usSۑa?2\ 7"a8'aFo\;p3')?_`'LE~!?Bp3~pϟjF`i?,?nsZ<#dG~!~p2Exjˣ>x^9pٵs 0:BvS>SfO87/ 8y/`G:,6VAczx̃i5EO,_\7]͏VJ؏hsp-7gp2ַ6k?+=p-A~W? D\)˞XdaP˱~l&8놧Kg!MLw L~΋C?퇳!~9C dzr깨OؾͨKC|[Q`u>҇h pm19˅05k5aTWT Qߵx>bL9v%b I?c87!=/H"28<'xeX' Cz>/t 7\sd+>p 6u%҃EHyp =pS*Lp f,Ո?܄Gp[,ؿC$ԏp3TMZm}WAQv#=gõSp u_^7` *r8_sp}m&8a;|&^UB? 
7bKp &v_\o: NvOs~o@x3; w1g-οNь_9DGnQ7xzoҌ|˰>'I"x"S-ӦI^k< 'jm:C37_NQ%t2؏k|Vf̿# 7§8w lG~ux^wx^j(XX˟ 7b4cGk\=7o<SoS> .QPM~Vym>@y"LA||j8K]1~Ԣ&Lo)埄k<3^ywFϱ=x KuW(p=ۇ#p?0-~׌竖|ay{lڌb5c><_uH\_QWbZ{{1}; zL| cx^8ip-?^K qkѿPf^ѡ>bzA3`dg/lԇ]f~fʤbL ,H>Yϕ|$?$y7$>꽋!$%hŒ$_,&K$(yc_|4ɣ%O|rAgJ@Ւo|$?-eoKTvz.$&yɊ2Avg|'% J~PO;}$ ɫ$#y~z\)$oHN')4WIO ɟJGv+Q򷒷Y$|ZHMd/CvѻJ3wU$&Yݤ#yo%]oI^.oɣлZ=?zO|%#9eH@r_%KRH^'+VɿJG%@K>MRɫ$w_BђK%"Vm%H+FOJ~CkS.}#ЯP,i%_/ai%'?J]r 'I|Ւ`*y"s%?,GyCHSw3Sr@5p|iJ?#yo$%y׃>PaI.|KҧFiRi ߓ$y%"/[vw_@C$l|1'J&XɊ2AɧH>S/|[$%yg$(UoKPg\Bm4)SpR)b$/~6ϻ )b~:BG) Px1E<NryTaR[E?p$34w/+}=w8FQP|N`R|ۥ4=@v H?,ڿu+)I( (Ae R-KxLji7ϫ(K<…ŷ\S؎~SGa8GRPC!hˠE EOP8¡i0$)io RHПž4m VN!7Cž(RyWRȣNa 4p oy QȦ0ɥП~?D=5~(L0Q(e{BE\(eSxF~1--~?*>~pއ~>$94Q8¹v$@6vK (J-u)(l+1)Ka$6Piuޤ 8C)X()QI+G &1)yBa!~SLBNa*DߊDyK[N{V^*g)<#Lӟ+i0QG)uV_'ľQH߷ey)8P%Qx~CQ4oMQ@#;їe9])JB (H;q"e?&ʞ 4MEıQ~A/pK-wObM[)NOˬ}7~\(~ <~Da? ' %C}FZf > (Sxr Sx‹VPx^*(N.}@6w(Ka5(O}@:P(|BS QXKs _Pr~9 _Sf.3W~)BW Q@w Rk(K!i)lCa[ })lOa (Ha' ;SHv=) )Ca })Ga )0` C(H[vSFP )0B* 4 V 2(dߢ=B|} (QBa*i#ķ"mPH )K8 S8I E)PpR%##Kr WRRµp=)D·ߗ+(LT)Aa1;)En K(C^ Qp?()\@B (\Db Pp)(\>\|p5p-(\O 7RB)BV (Fv wPX<\|3U=pz,ՅE^{O@={z '{O@={sO %)pW9B*G qTg(54wvs3̚U5zM_705?T?9OIí7UFVS0%2H-#G#Rr衩UTNYV7J=es#iŎDC"DL[ ( $(75̛\Cl3J)s[1T" ̨u oEE-C-s2GECۜ)X𨹁#+L oGE7JB+n2DD{Q 4b_@)wxh;jm h"wp&Y9s8T*R 9lNj 1HPԒ32s\~G7Z+{ccwe2rF9l4Gq0m(cD]mKpfYDZ ^_H:gžJ4j-ZCH\qQ)ȝ xf9vDrx*]NOճӂ E=? q%bHg~0RwJ \ hg3nNt"E+~OePq\lSqDb|i"Q;J [ޔ<9S\ DI\ތ)P]Ro&fSQM'"R_io Χ<]T`\1AĤ;x&c8$-Vܞ)jHX֊'_p%`xM(8TJ!ӕئ)ܡnG++ݾ1pÛrVV%*3Gj٭CtitKm:~s3hԋ$$Ky]Aqo@\Nlv}o8U`¶(ٙx+XYLGp =,n9O-uW|~1G:4snoYn n#:nI;{wRK*MpGS/)؅d7j[r *8$rD4WOV7 v;6O۳YZ5;\mJ{i*u[RGylJmPOMPO;[IܽG8rX=o9= |6" BٮZ11H"_5%pnYAWM9'wȥR|ŨetCZwúA]Y=A+~Mӽ@pxgԓntD,RKnxLF1wVu/e ttlw5pI~-J/ɪhu\0&<%L?SЬ՗p0Ot8H J'/Rd*7ΝRB̝VMV.fMMW{Z’oTfFg2+"j " ޲@%uK&aVlMi`1[3Yiij*ΐmϐeD}]Vs( Wle;c:d95^1K]S-/2+"4YbR BR o xǣ8FoKI(V*,Z8EZWG YN'ZnTqT|ꋚjaxk Xb/Z3;{C}{R~NgfSAKtnDO|4%>yQ/SQqrG[ӕqU$]' yzaB,|*|kpѦY/q&NGa|k.? 
h'ЌG苶bEbqÎNBsrG?)VA31Lѓ=vș3NNoӬ8ͬo2;466-aigoݲIX4!#jO3U[m78f]%&in1Tu]F( >1 qtS 0sKۍ-hX`*b;FѭWu͸oA3v㢳4PY R>& }|&\JRU)uq+c:C+vX'8vbt``3xzxbjD%Kբi)ƸEmT]Z4P\jQxlVܙgzf1A~a7?N.ȓ_6N.8'b9Zh)uXq'W%1J8N(V &iOT'uoWWRѻZv#T TS8J")M6FA,V3lFˈxDpdY_ (<CغzUpm^ghiV7)oԎx۵]ץHQvffD>zP-<w]xj؇J=喦i qc:E]?s+9Ց ur\xfEўY`aϷ-kNT;N'2uquT"un8q\MטLW;umF*Q0>\8^$ș[ I1|%(sǖV1@ӥ`,@qp5bHHb<򘋳n`M;t$osb.OV6Rs i7r)~Yjlj݆Z3tƵZ'/[Lx1F+rv#1sl{J|I"==D^RB@\Z".tڴktt jP U;eRJ?54/.dRFB?s ݴް}RePR@FfTlFbn:kR~'7rN$*Fsjȉ*7Kt'\p9{zqyrTO>ӓWO))r BŔ3vRdeܱ/"D Ăb׺HU-i5Fr|IiJdcܜ(-h$Xh/6F[@tDdk&S56<oaTn8y 61$u <̌URP-o։KKWycXlUj:T3۸e6ށҥs[]WWLt v\xV7r㤿g#~Wک_Fƫʀ],i;֮5hA EKm[8ߣwԁ&7<2j#Ke5"7';]mt;䴖$=d o [P݊FrOvr;cfxbt&hI:vy܋ߝJ!*=|efnxV}4>fGJo:Mh0~gGmα7Fz[Y[(vS|=!D+'g'EZ9x"GFϗ}-1H Q.٥_1/ߓ_?wbXQvx=msO kQ -=xi 8Q,Ri-3C_k5Sx@ v@S- - V8!`C\ly*} [hk~I}aGE-xM[֏7L{FNɳǛ՛l^Ѳsx 6^l~j\^? g<Κ-֖]A1WiјL-& B劌g7ܴ|ssׅJ7^5 `򖘘^_H\ŸqcOgX\\7RB!<厐Oq粝G,fEWqK z|sU iC!ԕ/ R |#=2o)Nfo-]RvG-AVsnn{Z;{Żၐs餃񑗁lX]؋М7­o_Mt6BMfl/)?\0X[Wr7'lRs;{b-BfX[v1Zu&u&>O+ICbu 3<~ՍO rF< Z,W-ՕCא'}Q:GLj5zUd%Rˊ\|{qlQw#xfTlK,Ԇe뚝]߁aVf"4)097vXo,"KPo|kyvɹ`1sT.\4fc'O¸u۞=b3L-+C(sNԆ:K{zfte];aFɍ!;pKK%~vf]$~o (1~"fT9< ݞ:Qgjfj⶗Ƀ qj!ʘ~h [p<nx0KQ~UpR*ԇF*3'g,ڣAꭡ'=94&ړC[ 7mnE<h'ڜ9+ _mo#+)t+s): Qg(wԏ)S\ឆtXaYjj8WqA`fA~WK[KW:Ajdx wi #eTJ}VuK!Ǧ 3}E񮯺s6VR .qC[M@xXPRItGn1T)UzA-Y|zk4Vw<[Ef̀3.ٙf:jmQ5S}\먳+utS+N7SJB=%TJg|2.]x^llbԤCu#ۖ#jF#FGK5gLh<9fphٴ,&8ډiԖF(.j3"r&>ʈ2gx7_xD)R/"bRz^4ufw(ڊQ[*Rȃu7vبnUiʪh+HV*1AU܍>J<1vLk2-WupOkuk7ghR:TR[KlߖCǽߕƣtƸ;vgw2\^v, {;4А[ nA^?=Oxy(uVZ6S(id:r#_~hѭ\Y7dqoqƋw٤W ѿ6ވ5 qtuݲAtun@fnrsKP{6$ZwXvqΘfVcYSl^KmlrN} oP_Q[11pE+NR)SR<.28g5qF[pm _N4U93ZR{ݛ ]ݔ.|kKkRwtEN2fXML}N̴E d&-_lPŢϞYԥQVgp@-kXmą}vY\?,%-OdۻRMI%84yE["ΐZPKP@:A釷}}DF;'DfFEy4?oQGFKruj*n͟ 7gT7u:?S|ne1" FC'((i?!b"q|́Vh-GjyZ0ZU_)"OpkAn|?߳ԛZ^{t~{t{tt[>4܃24kp=-?^q[Oˈ6/h[Oˎ.Hr%v?-\J# p8!.̉o1xz6w˛1AV:'fdM 4x,z*P)vzQr.r)›Ng~G^LGr%⨶ҶF0Y}~WԃM-j%}@eA!Z IKo".!?.vwYW(FJӡUYResvy KGk1"r6U4Mf}a7gOgM{c}(_;VUmTFU[ iNmS]ឫN3[njP@%W$ X1|fb GB:FORWB>/~]8EJg8(}\JLv[[d4so&>C!{, O5h##o%%RwˉAzP@~Y̗Y)k,򊒄7_]i2mTwgAJy*GWE1*Cow2EFQNg:ݎ*G 
K8-].9iYN7-q-⭲(-l<"+ Zdï+1LͶw(j{ͻٖ5sӨbz7Y{  G \!,W|:-ܱLl/uhay!3bQSB,ԙ襻+ձ@eJ 4f/txَ|/Vi t"s}!/|/oߵn%L{=ݶ$.G4Af+=qC$Mu~u oVoP]]~q9_YvOVWvΜfaFjF`Y P󧭧 ׏߈`#\AIŜ*.a8"Hoa0EI=R;JRYSV[t%YM{)^ѪxsjS jMaJef|osө<6漾 V*^ٜS[-2O4e;rZ坔Nx$c}bĶH>tf6N=r3CN~OľMMHuZba Ǣ#W2_.3c\k dLߎ2WIg{anjrfse\4C {[>$q nwL_K]Ouʧ=v>嘉m99 5O՞T\”\f>C6yrVD~ī{D}W}t9[|qgIqNlwOHq{*"Lu`3P+TsNW5M? ݽ%*Dӗ NܺߗL,y\eG"r1S \R cٕ|=G.MXZbnSoꓜEJOK|YB]#. WǩnVT15=)*ӓc/N8!O)׈˓GY$Eq_9:YJŨim9;Tͦ^S)v8C8Afd/dW\ڛ(ӹd. DlأgZ`ɕr-M5}eQP'vxm&$[Zf-_ CG[Mqy\HMqc-j~)rybOlKcImVVMAU,A>M ۩l.S vrCQju9㫂=2 zaݟ,Х;LR7WqjuztfքՄ5!b=wZLjCyS UWc[i9!EmeL:|k^cLC~sw-mP A:IsKJ]t~V݅ƖpamL +p{,V  TKo6D_ܭ>^Y 3 6g: x,Ox ;@|of9ٶ>#̽;&gb|Ug7nYF#>ʅQRuehrŚH3S嬬VTss:һv *_h9\`6hp6Vbu 6/ZlL݄9ݥRGMOܴqt ;Eiޙv1h`q:"kbH񕬒*q90"Vp~7Z9YMr ɉZHQ"Vow9Jy<8ȅJFcmcO>}i;WQvE;]RK+]R_ Q\¡^Pouv͙XҢlNȈ Рwа~acanuV[(ȷ먤"$?<>c0>tfQi!4:~RN4JZ7bD<0:<ԀY}A#FtHv ŕnN].;3tD4-2Xghkя{Vc*ۻC۽v?k OIл5/SEwZ;n_P[7b#h.4ò!-rwoFN*hɌ(7̠:sȐߏ#UէY{˨T?%՛!^G? |"X5h8/]%.8-ڽ7 ƴ:B,w[a'lXMD%lPM= nww#\ʄzt6F֓A7+Ʒ~{ZV!k;+u)_y#B͍64?s D\leA ¥¥BMȰ!Bl0P |==0:9tO̩6|X7i{r+mC@7=sSfڻrg7ͦ)޽ogrD{ORYl8Cl5-b1*["?m E&al[6V6G[&0&sM;bm7#pD[Ո(e4"slwq~1f=Ǧoz#|l)gF7ջ[}}#|l9})h!O]K^?Tƞش4acO~[`:7\F|f͹ǘ+6B+6|2b2bnL'gCĈ=8+z\O mfEI?W^tX#V7scX̏Ԉ1V(Z0gL L̫'z^>uq􉞗a}TOdbn*`ϵ6 *`Ӿ̶g&8T&q޽igr ;=e0*GgKoML=NbqfrEڀX9[ 6^gV/LҊ:;Jf:h $X$6RǣYKGAQ3DuwşM.|kΛejmy窏nVٵG0"mm+mD<<4wu; zVuf(uu=f+ꮰ$["uywi #ЬR v 2UO+0g,:P*KBnZ^lNl(][@X.;6m6wkh_kKnj_Ե+P*?Sv%t D{ (PIH_olȩgҊqEJ ,s_ejԨ;ŶBOTS;3X Pތ,Pzug1*frݢCц6ŪD*YKbs>~ѫXFr#HVYTig*_S+M.6Sl6g~Ucșn"Ym[ry QƇ]Kƚf2WEtyʥxYk .9Cё,6#6J}&#^ܝt2*y@+mD]mٱ&,i;ZZWΑ#S27A-S\r3ؽB\O@[(l;r.2~H(Pl6᝟Os5[p#n"n,2\tL/8SM(ѳ3M.l H4oa+]V.:aF]D gN2u}1K7dWi#т{AkAUfw+*=!8lyo6zC =ts,8YY+{jA,dJ=КMW#zȤ1q{ eVK7z<Z bRwE+uwrl|9O7CԮֵKu.UM:t9=``.hTJ+Vffwzq&+ZºsX}-#ɊdJbL}ݪn['w,X'*JG9tnn#L-o &5FʹwD(ِ{6nr=n13ߏ7o*V4r^{Cecw:uNjxW$szc+⼘rZycԣĺt>qf{E[E9EJ_̺NfÕR]}|D5[ow ÷J[{$wJodEy$KFzGǷb9m S^^w ƣcx=܀] 21[=nUt{?]sx4VlQO?)ur#mwxZWG^?zҢdSx`\wQ:2ީťeAtãQ':B ;I@m$"Zy}"SiLv t2Gl_Iyכ]oyjuMp})Š[wvr%4c|HQs1\ + 
QߝyF#$VwwVlhţ/>_ХRvZE$7jCϫ;֜Z+>[9V՗Z?Ui\i#la#'j/L7Kh&<%}rYI1]nuwRJEb:/1ߑ!ZV c;2&c'mE#'F8gJx:NW0XdM5^~I Ci¿(:VMgi̜_ T8h47<Xaֈf9Sؖ9]7;j1_iuĂ,o^*|-Sb+x`}1Jy!ycS 7!- +qP ͱUb*f+as Bf- X8\&#^uˇ:5i),.'ʤxDwZ{u+/NWg\jl+ݜUv,ݲ)˺ȚN'_{+ 9lݺ5F9oO8HMEGՑic'Y GӬ9 7~\@\Wj[v~L#Pj?1⋔يBĪ K/[t7_V@M戊QU *%1O\`vCQVOMqR=< ./[LkC]x?"켐.y65yxb 1: (@MSZ.Xj1+GGц2 6Nu*+ek-_ݥi\p Vը *GDrdz ↝-u<ׂ,怯T*|ހOr!LP8"y>/X$_ {%~F:Ա;:NܩiB^*nmqRG'4 V6ΣCM2z++D(MYKtS=Ǐ6G>Am]0N3Q;W*$ιQl+^px#3Oq1NRHx*m6_eSv ns-@hګyyF7-wW]~lKpd;qxίt(-90mg-kq6ZP`K,_Uo#* tJ&b(wR^[-FLq;G< qc4u1݂惑 M&TS:NU=No9N3:6-! ڨJѕF ʇ\~A>3:3up*^>W. -?} xlr}6c':fKiɁ~ڱΦDM3-[%/Gt1g[W-׏~xLKmSht⩞: UTUL w31 (=EvW^Y=ϨȘYKG0[>up>ugdGOAԩ-*}*#nosrwb;h!RKnϜ59^[59KD*R#wr=NUZ{Vi'BjVb~i9^s;c4GYkOh7cn|ڏnw$oOw]LGVziv2w"AëiEq؀GP56; !"nL}5D:j2OY3WŭǪTU{d<`H J*nCx9ouK~f ljIky*v$-6_3WyԘ6*[^wLjH0gZj-QZF/OJEcy{18լ؋@vW@ZdUGuxwvur*h{/e)swdNʎnCmͪVnѸ(AqZ Ԉ^:Gվ,GqGHm$kLw ҍ8cͭPG[h^6bf2[1d6Uѐtg)\t'.)5L=֭_:! B|~Sx"gi+h%m+>Ub7;yBgi^xkof؜qL_ϙ(cz{|{ݛQ%1m Zύf#n:pXwۍ6{{= YMoEEBsEBbq錬pFGQ[ԫ٭"fy,m5V1!q|* -Mn\rYj$კ 1.b FoGAoɵWpcJa_jL㏯TG~e?pO]N|'SɎal݇qO!(Z={٧LV#RYm[iձ&1.w!vNls#t8=6iζ+pm3ܽ5{EqXֈAȦ}&^Q1:LrIDqS1m1|B.%7wwQ"nAlX\2t_L :L!_.c#>6gݍNfA,.nik:c}(SI՘G2RG%Lttx=JݡYf:]&kF_'u0Ob{wiX6k[xmܽ¹OJzv~d P[;L0:w )#SF oev|븫*p*|bǕ:EhkJmߍ\Xy99MVWek-'qQZ + _{)]ShXF7 8_};(x '17Elw*̃xYBx;y,&/D̡}jh1 0Fl FF(S1 (ԺC#Gr4zj:-Qgш#?euICS~IvBW'D^a}qy"\TGLzqɖ˹J:#xD1 @JC?Շ-BU]qhSM }Vt2: *ݗIE]QF} u1z}?sEdzI *Dž=4wPqP+\!w oTz +Z#n3CQ~WRD?J1[i҆7ծvs;tWB3\O{3<ڦ-l&9Iakp2ʐWUQg{[./ܡL6<3]7[;WtYgCwW=6ngf8EEU,bN!] 
.u{L8)[+#G Cr:5y'5lx8rik&ǐa-Log]eV=|.,s[c%c$ |{c1c̭2 @lKG?j=߭]捦N.{;qaj%jEnv5pߴTvh{G(u3B:Wͤ蛂qm3HxoPWHA"Bl2h/Ӣo +xjchA?G,bA >Rp.S!=^SZ 3Pj޺g߆x %Q/v{fuuwxDe~o;L/6 ǡMIX=|B-'"1Ų;uЭJe0LQg QS֙B[)^xu{Yb,n.aĶ7~#6qJzv8K[fT˺":hr;PX,ͭFͭ>v9DHnS`bSu$ҒJlBсĶ,ӑd,f+{GjhB-5{ƩIӺM;A)Von 7Px 7>djo' JD?lP%Tw/W 1D[ Ir+K|^Gt~/j I`\ɒܸ!eļr eAkV-r|/tl3uԧRoEՊb4dæ$Xlp7q 6}6*uYFeEN pς+ !QFC~_5vq?2Ef7i[Vf/\J"Gg1lYb{Rh؀悻;䍈̌rIuWgTz5"3nVEwdDNDdu#J2a$DCidMˀalA,}q#WfdU~52#=s9{=K搝¦.)XE(:x?t] ='3ciOno~M'Obk5 WH:BELhC#¥HYwb.)KRXze3xȤ SH'cS1a#˻mSq{#w;g\C<ʮ~fD݁t*z>csr=1B; Q,]TW:hos{P/:rH H L3q8%py؉bJZEH*@ѹE:/zk]TYRv !0J[1_O-LHtb-AMZ]h ȼA8.?4RI[aTZdj>`aj,qTk jdO9T9YLfϿόڹ&zhX#n4Okl -Yko1@F6੔cmw\ppRrr'HTrS zJfx?!0өzmTaXcň1a܉r%ePBnӵ37\Hh!Kv2[VosWW F5ܖj6~:5HME,=B'۴V:ߞlbpC=5LSKq򨪜sE= \ 9xX,;:C."uC؛ci҉v7t:gI^Ogwk飷dINn|6 _.YW#ȳBt/^xDѪD:g.xe"_0gj}6EhRǘ%r|L]-~!5h5B1++ ~=s*Y* _ \kN mLց"SZV6iy' l{Qȶ'z/ք4-L*n\J)pRF75GG҉W/^IDx6 ~|ؐc E,jPC

]lyG>UQ$ĚnpuI~ۑ? + ҳ KE/ ݎ3AWg|޲C!7}_߉2E~G@\ܕ:K0RHr(kB?Pڳnmd渉S {BR}|6׮Cc]tu zK~B'? Ml -6g 4p)j 9_bS 1O/^ L6=:Fb\&ݤΫ5 ̢5TFGefqɎvTEX#ry383͠>ֹ)y&Btӯ؊N/gHD=igяMBק Ve6!I{{銒de_Dtc^H`wd|1y.ʗ#؋ bZp;c▧4a1GU3@d`㙊X#jZkm#-Fh1` Ğ\Vm}HʡjEa j`B~m6nsἭv}ro>!@ZMd?66d?.X《Xf<0'D]\C/?jž"&B!%+!Hٔx5Vl :2rX}&<"AE2z]^8h0S[dW4Զ+5I7R+uc@`Y3 BLPZt,^XA bctAq,O!%|j3±_Սf r;PϔsdZ|]VtlF|lW];âvXvmrs#\sq3v \΅~xK\[41)!ڰgXH/H+b#<:GfQ]Wg鳥mckHK4.D|N0+mg~wC.qn5*J!v`B%^`mVq˃)s˕l}Oe}7h;;okn/E f6 W*yN\ޏVJWX@Ǯ\I}q,ЖF6+Z-Kݠ*OEkX`l,v݌< զ-lbX:QV-cyOE^!b zbI(tgAchc5 +p~JIIn>f(p \D3U 5<3cJۦ(3-{=K7eQYNErBl# z([۝Y6}@̰QHaY,q{&pp8~9 !`@bj.ab`AӼwkerYC޿ {bv1c(fS,mkA⃂pT0}R}wG'L]iDq(::'(>U\v*> JJ(.]J ܊hh29'tݍKDB DlY4_+3ub9^L,?bQcw9i3PDQzovsљzLfrb[5&-ql8G+k~|Naڞ # gq(?aGnyeFYځǡKd$0k2|g?rM6\I/43`n49eϷ- _C=%3kZ_Ŵ2<`o77SQ ߨo777<@<@

H=Rxy<^j/5Kau<{QQQQQ[[Hy$<z = =AyTyTyT"zAΰ!L\.  C*7? J<@GR) z.>KV+ >@Hz-EN"@4tB1Oo;64m BA.O:@[cyMT6QlD `)6QQ;vR5}plX ] T `_(}Q[(vHE+M ``T]qWl &M*`T6mR +k +W*`TSy`Th2s`_T}QE싪i~+F3/z z*שK&`*0SL +W*0Q6L &਀~! CZ'O& M`46i=CtZtZWW| -4@5@5C `4h={@*h*Z_4`s5 iO>̙4Fh  t.0vw ]`oCt1 tз.o]@ֺuYd +|ƣ.0tu. ]` G]` t x.0nuq G]`.]@{ƨ^Xvƭ==`s=@zƷ0==`cJSzƔ0['{;C <0~}ks=@z|o=O:=@{ƙ>[}`LcJЏ> }`^֑._3}~]Э>}[з>o}`lcYyYqj>}@W]_0>05hk Ht~=\}`l:0Ӂu$:0:`Ktc:0;vHl:`st}F9:`stFu]vu`sD7t;vRI:`u&M:`u?:Mԡ(0B (2t`ns{#Ё9k: ;>Ls0X1Ɗ7Ə0~ r)`ck`b`?1Ə0~ c`L~g83ƅ`9lNr`;s;9lwt`O=t7;lX6v`c@YPH`O=`c6u#K=҇@lK0!*}ķtÊ:M5(RŪvJB(6uKBCyӇP^@A{(juxׁ": !<gu:} Ӈ@BC!A>\pCy(,%v؁;Pb D@ф(v)'hM(6vр>Ȅb;P`4" hD@GM#hD@ЈA&A&A&A&A&A&A&A&.7d(l0 Hl0iYZЇPA FQEDC& l0 9}5X`5c YQ`5g Z`5|p`#ap#zP45}P@RLJ2P6mi#(P>ld2P7µmxP6>iP#(A!!x~2P6pkBP 6}!y d}U UYoXp!!@> d+W @_? xBP< Px#(:APC7ȣC!(x: *4v@!t ;z#(AQ GP=ܑ MmB K*dTAq FP7B덠`oE{Ӈ?!|#(> 2*eP!E#(A1 :GP9C"-T3 QGP:}! 2A*dvE#(TAq!@+jӇ@iT]J *5ȫ8u#(RAUGP:xu#(:AHHC.OC0NwF:OyM|^xdIJ|FԾ(M*QUkv;v9Ɣ(eņCL"ո/⹆h!W\l6ڔ?&sc\7wqB"4_唳J*FCxJaEA󗓀oz5PVaʹn,`1:5k 4 ׄ^|?JKkԫEML6 66*h^ $a's2I2srنs\މ$v8LrUkVYk0ԯaXMˬ]io'\Aɭ5eI HZV:#[.(a L.4 Df=sX|Ak:O >Q;-$X7ul~qf>M.e(ۙNޕy^?XØ tPnfnJFbs|l+Wo*p/lN)yÜ5RlDor w^f&@K 2\ &iIHa*[9{OvoxqWz@C{*Ԙ]=˹1lĆw'|>`gejg. ua嶈ax',EGVNXHd5geV{ (E K;>!JaRFN M$(AT39W>zƢփY|8A,%J"1X+/\Ǥ39'Dxda3 Faiv3SlzNy[-3./ v_bS\dSe]>7;@v u3.GLP%.Q fԡfi@#,K[uK)Q}p_>RyOSfǴ%Iu/)a$; ! " I 汜 lwʂT1. 
,5aGxBo?s5c}O: #/\e{݈;Z4k.a~sqz١^ilkԡtbX)mNUe5m'mH{ 䣂">^I<uE\r;@ˀͤMQnxػS+z}c>)ZVC߭P|.rN"w܈Emk2au׮Mwwy, e AoqpT`1ĢԄŠjOfX>_28XwjPP;܊e\|| ,_(cIEUJ)D Y&/p6r9'KʖpCOY `U)*k)*k˧AQ12pMk6t W iD7qo0'si n ,nϪwA0< 5:ņp/j!~n` ,ovjEER+imzZGjMD 'Ns ZHm_4c .C2DH*6HhB++YP;wufFnۉ.YRUvmcK~h*n N^@BKg[7zUv>Β30w]+w=VEĀ2mc CpSgB|m'-u l:Zje:[~K߮!92uu~ embSǿ0@'OKi+#`92E/G&hQ̮+l$0#]4*x.x*,s$sùzV} ڏ ;_~ō/1!{i0/{sзɍ7*]!X,|JŠ|SD{.#lȭ[GE!)z1*%Mg_37=JHf3QVX*la^-7ɇ7+\}BfuYe-`68ԫ;o\WԸDϧr>½\JH{)țV*#77xEϲRK-Zz﴿9~`Ҿ%z{̲Lp{>xt\z6]^YbC/CקBwA+EnxJ^̆~-fck`Ef\KG-9sԧZ $= Լ_MM@7GJ7,t cGi\]~؄PgPE0*=\{@˜E =,sILOeδ|lX8~^|X&ϲLJ$,sySyQҜIȤJ127LLzkG2遐- #!,l@EO ۞w~gч:7H:lY Y_jʴ>0:ng̱HkR)I*Cf඘w1թA񝆍za*91ElsPg4<5 <4<*/-sZ[!rFpxұx: ¨&\^IfΰNJ? 5ݧ$\wn6> 2m+br0o6ƜNm[ ϝ}ʝ+c oNC=|j+lQמּY))$EטnȭҶ)޵އM"=}j!KL#]R*ԴY ҵޮ]ϳ٥cq"sau b{A=Խa/<3]2)5.K3ʒƚ_c7Zr/_/ݥGQ}4h羍`{Vm&%׿wzɬ-]\"f_YGtWxjt//Fñgܕ^oWTD"n!f)B+[,ʡ j9'lx@^` P^Eio{ML$NAOԫ(S0 +lIW(:#BnZt <z[:̈o_5 4uXe֯v=UŌt-lưn,S(6-连/vhvdxxe0!OlS}"~qoM]x ٮ~EoLw9IIf4pۮ/;x!w /w_ϕ@zPr\A> w /w_ӑ+l!k ([?B 1 Vtv :S~r~S:MẂ i No*?~\hP! 
ut:cTA4Tѻ-6Hʵ?Bv"S(Nw :aZ[N\'LkK)km';rDzPHʵB~v"[(\c?oPEv[ἃ־\ktK1wRwo_UwRiu'eZ9}mӣg@k14;zFdgit¯iĐPk1t1+G)A A 7A A @P+8Ѡ|ccMlϺ1v{5_l` ~ v1Gۄ+%<,'3 ٝӌdf?Lv}D󞦨7/瘒S&0uYƴd13ow 2@gQ2a=rcKдhz5D֫5"oPzP1nVlTD?gB!'Ј.8{x .>X .>A;n >j0uM+#!^lnPq263V 2x%X )\Y[+B[Ʈڮeaa+5": t9C /Z)Snj5@=Po^c[S\Ę < є /x@vwjiJv6 u0+ +V԰ 5zr+)XsIp<㩻 篴b56z .KLAěS1}{}Nk@Fl[UyXŗj :G\<'3mo8Χ\SwqVn7;[ڶg84 HOw6o03EZT<ô)+ם^6aOpnU@"6dIš3wnd6c;AW) W޾R[x@YJKeϐӟT/ƣׯWϟ^07+ݳ/=uG^\|ګ˧7C\P@y?ů~ųEux>AbE_ׯOO/#N/^ |KI~~ k"^|x: {Cۂً|S&7W(JE%fKYo,}bʯ_~ WmD4%"_"F]'Bɮ/+s/~FD٧  4.);n[W=s8 ZEֿfp#w#2*y`on┍*384,7jš\bx8~'8# B~9<4]R?l;!sTbH˨ScsE5LBi3t#8TȈ!lψs-!1, mxC+Hk>z%]@ D ]s)gACj .B䙅 wCqEK}j|*s|uS@PN!A"iF{J0'&<պx#md'$܍6PKQ2&@ד}%7 V(O"j';bX͋a ăB ^p7zxZ!fIa1I[6#G'ɊrO{nS&$<@>L=IMۍ4KLȉ)_ww'{A?7*U{LLsgo^z^yO83*}^/|5c&r(͒n13Kde+nӾ'Dg9!C'*;6-NU\黟Qfڮ\[r= 'Ԃ`V+_u#ʏb&&t{6B-ϥr\2sXB$*rTi֩#/sgb$A5_olsםj &OMc}V$j84_uqz_ۑyf9hH]}S+&J[|/fzO0).(-(Й];9YmLaT&']vv36r8\3ϝsy]ng™abg[~Ȅ 6S!OKl}5A P<__ Oj˺/Њ/Qh4Kvr4 )uzUpW *]\6iX&vrV)SqE,_I6AQdJWT 6u*]&z6B Z}˙bIS3N%r:͌KV̨dM/Zw 4.WZU!an5ђ W"$S|͞KVTTROmùNLs1rFVLs8: S_[:L]>z1Bؿ>|R,WG݀1_H.q8/sR4 sZ6Ӟ1֘,JToYxV^ ΰ5k=(8L(fbvՙ~P IOT찊ـ-Ź5o-] 3 /)لI7—f1:_%`&8i\^K󚷔a jugM3:͛4̛`ҹBsZ C:>ԣH=[[1Fҏt\+`yfܣc1Ih<C5 b8H(|–X;a$tQsa&n!hRiTl|6ಀb`@"rLB.{'q׎WY6 =CiHC*dp"*j\k=4N,^΅>[q<7``AgH @1̀vR8`TF(jas, e K3Í}}O17Y]9:VmQ5vc~P!-Vw>:K`;j[;C`p׆ED|sCƎ؃*3B5<w) CPG&DhMQ5s4̲8Q(6/Q3(܌j: sQ$ǾU5]޾I(ݰpio#ܨK;12ަVBik&=Ol0g4_uWQ-OLOAW0q)=+sϽ;. 
5FKTU - /LIXT*{)r4dJo2á*'M5pōZL(Q24쨂'.@YPۢP*<SG'E:'uUTsKƹ׵Rf\s3zSaӢף3xs2yhY!m!~Ozz|*JaXݾ8 &Y8]< nܪ!ŏ;"P/Ϫ ǐ'HfP Ƹ8z«X5sZ= n|K4,ge^f\ۼ&l'n4$?QA*0c!BⱢ xkQ|Ov2ـdfœ۔!}-R]Ջ)ڰ**zQF54dzTy‡g9=б]Ӷ4ʎ;x ,:`MmsʜH#ۏ}9CxxnwVfɂFjZN)%ʝR9_PLyS% ]LHIEAtz FHK%(TTS|A R0 E9*JaF9tҁJ_KhaXވ]aZ}n?_VmL66GU^|WQFqۨGӣǝUvR:}v\*j䵒!qyɺ#;:\[s19ϲA9?,9tCj&sVY"UK1M<6a#<p:NRgV%d+6S*81WG1,TmƼTˊ=F%y` a<]thMlNUo7ҹ2sg-}[ ߶+mF?'Td 2X:&f ,̰<]ѳPɀ24^ &5E^ps~P#Y̆Ùv|̈́ #\XlWn}@vTEa@o,lfFY{{9;o[!ULCmRrD^(UxV ]& %v7֜V@DX+3.zsWjPW3:J;Bg]I"V:3sV B`H]d^;IS(y^x@ڱ :3s/߈s*cY=:鷮~6US+Ç>n~韢y0'?*uJSG:5,G(Èʉ+;aik4s+D,ԑՌ{l 4)ʺS*˓B_V9IȾ!ZHɾsp;.V>,v8| M4's_H@wh,,Z #*s\`.)90L\jWv{u$NZ'7klLq \ҕfەf!?b&W /{6ޔh i,5&OZ'YJ}ԫ(%ņwXskX={rL4SZQuȹ j6t`/ :p:"EKH o05`aK f-1}`l$+_0Uf{:r߯{jR,)޾iW!Շ˺I\7+ c3h??H+O~(=TEH\k%tԍ*7Yقٍ*g+Y6)m\Hay%eJ׵vbI an\IkgR -J]rAZ VsfSɑ96GFFL\C=aArMs0J_QOW_WgwEp-\NYr+tR#6}$/pɣ= s aF(13<$`ZVRfŋrH^Us"w9>OQ͋YqYmG #jjƼi.C'R ,|]^HKS3ے3>ɚ"´T{2 ?[vgy,:1MiL.jG`>y&S6<0rk(kTyOLT#'klʳj#MjmCF/f;􂲰HgD\ 1J쒝R:HJ>*IhEUG`Yؔ -ɈY ʂK9&j73X:P\=QK:~G/ùZTk.6y4ǔ}LDmXi_'3tӱm>]yL҆1P<;c2P_ [ #,kAWd h,xUDB6@}xtE>bs]\p7&rwS(rҭl_,wwbL 9>U:i'75#fp}¿)TD*Ia!u,UZ)jPƹȄ&3!6!*!2?]&hD沛y~سxFPk'!):v<úx^KWQy@!0Da=gy&*/YxWzY!^ k*IĢ5it$"ӂ҅hX 3Um0S|j()b\xsxiV#.N |"c/w| 3Y6]=EbqLS NVP -`Kyu RkDeܝް-Q %!ܯl&Y"7 '95OSA^pa|QTdv`\ yz5Qx<%dɢ;zeE l6qZU$ dJ}O9LYs檠t0ơ)nhPEq3`g8@e„Ak(uA%&lJqTrc RF V} ˋuʵ33l<[18|B̦t4ʕFylNn Z uE0e`FDI+3ٲ\5^jp`( > Uq#jy. j`,`X\[GF}6qdFŒcՑڒAcnفXFk[w[</Ȑ.?..:Ws08)Yfb+gMKx4SM:ΞD |Be=*Vl8 n0R8ȟ/bU;^b֞BmeZq++Gt5 =g]C)P(jmiEzL>χh={X=r"l \>Aj9 qte,C^ȫjRc Ω_«Gb&pq}!?-01 !Ⱦ;C|'$ kİÑ3`Spq؋ 292ygNs?GOl^?N:ȗGmkQ=uW-JCIU.h}lБM|+3z$ vxm)*M~򝧉7o=<ݛ@W˩\xx;>@\j]%uV?[63ϾOxzՔx'4ۙ+Ja1AJHulHQ22ɨ0mCcE*U9+6g(3nˡJCx,T[ʏM&%pp.IdKꊴB^d؊cScB|+jj?lUVjlP ʵr(oG_D+Kczgr+@TE1 >aҫ츦wMbwoy!thZdG 2edt=2ی,#;;;ed9YFvu9J&1Q2z{̯{LCgm{-v߻bYBn>>3[]53[1G lq MϏy_v+yvvR5Xzv[/UÎ JհKITT )U! 
x_XNPʊOrR5l'7.UvcR5(|gj‚W’ɀ .<ɼ3)\1iʹ ǰU.](F=J=[rF~v}8<;;F~;UP?h5(إ("(Zcр  8 3 O21|SSӦ|*>d,q0ﵨ3D1Jxw|^Ru3˱9iH,k+n"(A<.u<r;S^guaږ+ū%fucL(*laBZ>{3yx":PYsnT!x$ NF0-"x1],zog/ڹG|ݐ1}Lv+t D{ (`zƐ-ģ`c3>+2D\vn [:*na~lFD?~u~1<63nFXK9Y_YX,cnhkt[æR%ERiRJZHsVAf^=.kLSWlrܲ_.GcsR MBS_ۇoz̑PJn붓Vd~ZUC=Eqm&%`^ MOeX FgיQIKn2FDQsd1|:1w42^rYA(JB(}yq@u[WjdzM*:H3lh,Swܛ%Xw#@za7XmȮS\b:dSSvKCZKC;M^DyQdb/UִH@xin45 eS\N}̺[!Ä/AtWakڙv9ɈfM:vm84/Ngb(w9+ QvԸK O E 7VĎE lEEQ%qg>{6d_>1y$Xpwpf? Wed Bȫ]N\:"N-eM9ʣ{r'\/ء/Sw嶵~&LJNK]esS (Zw۵g#cbxLBvM\*vh7)@qϚm^^DrZtĤsQ)Eސgiw3R;:z%eq0+*|Kb #vLfcbawnx'ь:M,H|"ϾэYf7V6ޜÙY6'}֤^pfti MK4yvB]XDkVrζe O sw#Bs/lRaja@8E6NV Wkݦ~GR; 5RktBCLLkiyBGC˛|i'8oW)q`p DzcQ_flļPb>>:S*GF2!܁J UD̘I1OT2JmP`;Zf%{z1{xܒ]wsAydb`me^~)xkzoR7dlip Yu-K hX1Vf+lv{&˒沇ML>&i*p$)m=aRb}9/qmsF5.okySm1n wsqtּ3 K60퀒UEzb[pT AW3QH@%rtPOyH]Js\8Q\tI{,i>|6ےR+$`+ g[~=27$63pWvE͠:?kJ|1TQHR~.x41EK.=q${cNo< ⓹~^@wesAgRT0eb,ۑqT]D1e{8ƫY9ɽ3R֗ǕzV%Ωq`x/69| ht>K2Y ?wn|,1.5.I-lÅP}0؆7,Q&Y{j4%qѺ?.OSTk 뱍^/Uk(lPR|0͋B䥘[>pL-?ˋ=؉%-8VD^D'35#Ɣw{w#%Iq_ϡbѡ76mjTFb^Wcۡ}t/Ab{!66x-.N_sxTi-t/+n`L~@2Z_qKy W_sb;Kor÷Α <(sKhdF /˓_p8m|P쉺yOWH?X)  0oK/i -0pEK 2uqNjT+W}x* 5Cl!U9'!+mNel | m'%Pb2vmm&( f5i[Ԡq'ܢatҐ+8ɵtZ5MKm=Dc!:{qZ^;϶X#b%ս0# F\IG6+WdAߴ)uR@kÄY7%I/@MW9G) v8!paXU2VO)KQJ3'_@!ugͅR>[ѣbfoC Đ!OMPH0g ]jDi7); =ZSDlG+)%F@PDǪp<ɥdlmkp|rV [h]-l%nvYk15.7.ܫe-RIKu! 6\W<nB 1tn {2m>t̬`NH{j'Z Ǟ6(sXSh&b}ͭ&va7]8y/i¶(g;PӱG<,Z8Y ؇)UF:;Ytj;gZ]w/l]UVwoJRyoLe=א*M܈-TL1S**e:3 L&K j_STtxň!POxf90'n9 "$qIfSveK EEL<𾢾+Er=RP.d2Ơ:AZWԯb cLf-('Ţd鍜98]*N_Ŋc6 ׀N^Ȫu9@u9Ɩ[)UQۆD4y*MURws)/,~ǿ6LfAGS8T8Eb#M"B%M `*.M 6E6LͷORKzG !KSHr/c[o{pSYfCS0|&QcI6WR ljSj0:92RhZFẽ|*@ș"Qj~e~#e~(Z+6e-֛m8WW4\ӒmPImPM>*kՠV-ךˣ]/l@eZ:SR55!6SJ7 6ӄֽ̤8&ihRUԀ95. ;3TdNׄGQgmv22 !8JsPKv{ -(]:wdn̄p5ipxx  oG c.qRN(:.L. 
dά~,B_:A9klΛXK#w5NE oNQ$?+ɴpƞ*n~ 6 Tv3OCZ7~5hkb4rsήP,7F_Ģ6Xf}6(+gwyl<ܚBvgcg:fģ%%;I|mfL|δVo /FI[|n)""]ڕw\2hm okwNیvqvYsŖvUHkN{ԨAvdY:u߲ԫ%t>q_ߍ ]Ԏ[Ĺ{%2>SOCR;DQSJǚyMx|-=*~ =xs_ ߷ g#%ПXpn!]b]pOH䇓6Q@E a9K쳨GSuvI-=GԽZYNGgndVedgt8Aa\@'9gp8?3͠>0 a\bm]":ʘύ~SdEi8ŷ @QN\$.\Fp($9bKz-\!GG ApLqD.4aEx %\[oExZrǑ7~ mZH[ mbX6Ur>$~]"ðZS5R޾k!6ZlpV>7XݪTd& XL':w|P~&!xڒ.s5bty59$!eөⓠ>)[3ۊ\86E܋2&y C2>fBlʚvnPqe#Fjm&.l]0k%ZA;E+ha[ܮ΀n{:~CisOmnUks1=0Xn& bLq_/˼8垀)=G`*X=/މ ʯ"7y:9 #kÞaA/4En̺}p"7 }mlmiInt! tE F8 HVp(6 Lxl{?#j;y0E{yCmggc m{VZFw9Jy>G%/5w; c~G ȷ(i8ܪĽڒHufV[x`NP9Ebw݌z: '͎g/&'Jެi6|_P+WLA, /(TD!a%"5\wTV`mQ Zˑ17 Eý=U~&~oݾxý/\si)!W]4gƔMQfZ{t:oGJ"$!il/z([۝Y6}@̰QHaY,q{&Sp8~Ņ C>03\[+AS.wkeUYC޿ {m`vE+1c;*Oilׂ  Isͻ;:Ye $<(qcfb)2`sfQPREqӏV@VlVDIf"PBݍ@K|]`"pmL:wnaSsOK⟦fM WpbDE֊_5̌YZҤW\21Nq۷ܪ:Q15Re%#7Q+քYf|j-->'c5e2v#+sH(<J‧UR\r_T/NM^fPu ߠ7JV[J!B(

PW+:-F}@(7ZD"@hcl'04vm(h@@0\ t*v!+ ,6QlD `&*SCl8vR줢8kXJJh(}QD|/ =(QgA W `6)?^}Q;/O &M*`T6mRۤ W*0R@`TW*`T6B6d@}QE U4WPNf^*0R_O|=TS LT` +W*`T`6Ll +M*0QJ C`N M`46im{[hkk `4h={@ U4W_E| U.@h\Mj0WF| 3iAz 0O: ]`cw.0cr.o]@ߺuYd Z\W G]` ]@ZAZAX+~{. ]`V\ Ż1Q [=@?zoGc{=@?z.o=`|[{z8Ɣ01)=`LGзOwhZx?`z1=@zt9{:3}@Ɣ>}@#]^g_>[}[з>o}@Ʋ>0/>0/`>}@W]>a}`C;k~>0{ֹs/u`H:`t`\ՁqUUWu-сWZku`;vHwu}E:`st6G:0/~:Q:o耿vRI:`'u&M6Q/u&O|:`tC!Pa@1Pe^:Gs&t_v|̙7L`a b`o`SX1Ɗ:2?1Ə0~ c`c20 qf s`v9lws`;8{:`ov9l;",{:;l,F>{:ؖ`TC Uou>Bk:PjU֕>Pl.F> , Ї1!䡐ׁ:P^ @x( EuLBx>@{(|u!q>";OB}!!DӇPXbK@( !v8؁ ;P8a@(R !O(0x!ԛPl` } v4" hD@Ј />GЈA#L:L:L:L:L:L:L:L:\n[#΃Q`6 b`(6; OX[,-~ j#`MA`6sj #k0 0k0 #Pk Fò#0X FhjHLJ2 d*mn#(rAFP6}!d dTnk#(^A!% d?mEO#(|A FP5BCD;dTl[#(A!+Ɉdi^}QH*3ԝUX׮k1_SY:K/2)eW㾜Jt<^7Oɳ,H =$j?ғ%aT ބhEJi"8+t\8Ê/'ioz5PйVaʹn,`1:5k 4 ׄ^|?IkԫEM̗v`-ty=4'=@Z+N0ct-}3)e"!ď\vIp]"D(O\$z6eD+؉_pxQ;bhu{\Q8[usO,`\?Jg[\yKagoNió_cݞ$@o P;__ Oi&(,ÿ儷s=ED T\y#36 -1-vߙa~;hC ܀x*1ePuif]NPSGiVdFZ=P',rSͱڻoBawT R%9.S2,ڠ'duG 8ا,y&/Wd %yi,FL S}Ob:#%6NuBAR(!mҺ& FsFP/S/EKPٓi#˅~ToURL˸JI5|V2MQ^o:(\hN?@aR"w3)] 3y`&܋c]/QSVh?@7,+юW8"wCީB WxX&܏]p*QKiXE"ymO;޳CWćg-19Ld~Ŧ#"jʅ p0d.E7-N 3ȤȀKLPc{scؖ j9'L[1;}b~ZSv'WRuyr?KG^Xojܨ"˦KB]*F̢(#sĻ!cFXy^cEG9Ig,sBfuX\@j^:=ǍPb ֩7[zPeu/"w'2g)JۀvI P%"_"VRκlA* !ZYKʥFeϺ*h}_ސ["6Sy{AMl; }bcpshMo줔%omݎn߷ gA!>߇W"9+JuHp"&f㩻 7Cut~y Se|%KdAcẺ̅fI-Et$ToASnR?ڹixWdaX_}s ,%aWݡuE>MS┥ EfQ]Qq'W+y5]!_}L9C,Zh(PhEeSV* v r.DR/׍#OylVs@m^Bg:J:{y5q"=pR?/.}&;kptً}&PRA#mxWd1#aDz¨y~8a2Ya$i*sw2On~OId ,AK?UhL-cK{CD %}Vi _i5555m~x۴NV7PKotBSlJmSnҠޖ͑ j4nCi܆RZ֥+mTbzJ,j*ĢK,j,Ģ-6i!.q:iEx-C:!Qzlһ(uݶ:c!ۇNG!]cuHױۇ.*N:kۇ.*bCzvbRop I^;kmXkҵuH$M[tiې5:wQkM]ZSCzeW}X$kyۇupW|w}!O9#%sqqʮZP>]^Hr6(eEP*S&&DgxPf9"~ˀץ}egctᙘMGOݜ_r$Un:Y,=~ ux9e]nWfl]9Ĕ.;tMF5Vj ݈4H ]pN>Ӈ, m\gO+xdK lx=g!nR]LWL߮aP$fv-46-m;:VSTTꅕpfZs{hmUUȷSPca@MT5,P.. _{]qy[Y@1!ߦ/ah˝ޕ.h|VrE;<6$Bc9x٪枰!t6l-a+iT~-'ҧ$& "fG{Qߍ˞ w'- 5OիX*^㊧뵾;b]Ƶ!S);Vul E4/C Zf% V|ۼ]c,nJ3M:G'b? 
-h/uM˸rh'ap:ٽlzoޏw[ͤlm= 3صp`ڔ= .AH9Wg9tc9UP8 7(T4/t?霺 AưG<*ϣON.N Ⲩmbxvq{ޑ#ش~hd$kЬ&Zz gi/[3%;'e<|,$1>(Ň2V bq %Q6|?3|yAn,+B"{gJib`̈GXeޱHY#XmeO!gcu=>}c[.]:f J3F6F]~ģY'o`GTA,$9~O 8f.̶&7v0b lK%%4 뉍Eac=+ dZݙeӿk/R7b3̢KVcޢ$ˊZ<A*J\Njj:H?@ChiCuH_=9o3\ڷpi_;,d޵^nMa~ik;`ya2J"BmrkMi<4 |޶ w5RaSv3#f&N,;]t4E} q깤A Dn˙(9zm&|֟JxFcû2sù>Ќe* { @=vo)28׳+8lS;\o-! l`,ʈe2UZ ߠ5OU٢UAgX9+0Ф ~2+AYUd`:* 'ͫT7+D(s딾^gT \|% Q s1$Pau$U(N>/sNŅy1*tR|£Ew a}QPAh zZ["/x!Lyd&\&?CXD=Lgp+^d˟JϓQK .Q):+7Źn^VCvXNNQ1(X,=[t^Nbfte\ [DW_ id%^0bKsבϢ[̆4>fCpِ|8,e8lHlHk{͆4.ʆ4>lHc6Aِeِ i|̆ߋ!3Ҹ$.kkeCl61 i|ِ+̆TR>RZ-dCL Fճ!Ɇ4.ˆ$,ʆ$)Ά4^%RJ`6zlH5rِjT) Ά{ʆ[\7 )6 )ʆ*fCKeCJʆ*\ I.ِR#Rܽ?"Ȇ~zsِِRِs՜99iِ2?Rc6"Y9fCj9lH)b8{ (r̆1 )oZ !51dlH­=9fCG6ls6cvgCJΆ.gCJ9lHч~(ِDH )M$UIQ] )]0RXa6$XY6TlH " աeCiOeC_!a1҃qi2ِWlHwlH=fCz@6헶ِِ )i:RYl6겥ِT/Ȇ*ِڀ_ -Eِڂ_ 6*!DI6)ʆ`8R+lHm@.ȆlHm.ΆT̆TRy6fCZ iE`ِVX iexِB!ʆ   `lH+!(R Ru>ʆ$/ʆ$i'lHa:NC-ɴ.Ri)*Ti)[LKYmfZn7R{ 3-eضLK~ELK㚙 3-WȴTQ̴4~s:^ +$թPjNBqk %N(sԽ"< w)/\siTr1ĩ(1}Ljmt$C\orƲ+R#(S:*tvMαB=kʛje.}=* S }vm1NدkЄR֔XOxc{M%-Nӯh.h8.亪KoJ0eSWtճ]U2~͡_&M:C{ަ]=a.}+ڐ Ǹb9٦lA+3 r=TcBdLKK)-bڊV,4 NX`FK',6yNv l\fW4-BΙN]2YSblD\}eq̌`pH-rK)V;1l5RܹKv ̚h]NlzK~ām8ӯ])чq©X5q/UA ?T5@maGgofLYx00u-B{_kZqObyVWCykl Rs@]VZP@*O]7{GTRhkYg3pFHvgSĿ,<&؆wEVT@s{<|ՀxaXvb a&ca}$D>#Q#J=A{RPЧT:J'vwMӌDPl*BXijn ]DUMJxmB~NޚMvQhvBctNvݢbOdm-rO|ZD`B|+CP} /fffFm.~ҬQjμm֫ppmz[<,(R FmQ=Ie3}}P_Q3Dͤ5JL*QCD 5JL*ue}( ]Elʻ'_`T.ƈB:%zPmC:wO0j-C2]mʕ}(WJ\9>+[r}(U(ۇ_֡\}(Y0BwN4,n!;gul[gu(W֡\m^ݶ\cRu(,טlʻ'5f([r-wpA>pb>mS\d!2bz7՘4#A@XdRq B7/"2R|]]8nm(rHMm@JUD%MT ^!+ߖ(+IⓂ>Kcxdx~:)5x]pqVߛم]%pE2ˮ竃@Qi ^0ӯX*8V#`FWU=Z,Acg;JOCϖ",D?wʿ)B!1X)lR\zI1S|.Եթr Yi;!WIۈt޵ h6te}72wy˕Z]:U]]2#= 1fڃ6X,mV 4Z+%cX-HjAc?tчc5tNyrGՈ[NM>}7\Rn]׆'Ձk=ԅ+V*uתJ%B%^@-2H Mj XWt$;%W^]QQi*pJ}N+U*YTzJ%{J);*L$jB+Ere=SxePJ5}IGip0K|vOUj:צ;Tsg-}ϜSRwi3a󉸰0TfiY{`N̰ժܭ&xjR*Ho1R~ߝAx%ҟUd+̀p:|>K JdzӴepVtj?~+u|{P+jbVN)riUHޕ۞D-=.՜nO1cNic>r`[NĖZtn25@P9PN,IPPIfn]> 7*@娑9딏:LʞuzI3~&/%5aj].y*%O具iz-yZMg֋ŢEn1(zK^ IA\/߼BĺƔ~f[زo*IͼhsI}SWP{ oSЇoMa^;WzWZ˛v 7ꂸ*\*ܐ۫6 1|gkHRD ȭD$hZsz=]{1a\nQi^y4H'v jKw,J5bQvP^F+WxA~mS胏7OnS^O; 
Kx#VW""bHLu0;ty̵)GuEݵur~t]Ewq;?> Q󳳟Pr#M'\N3ɥ.z}9#GFoԇE.ȌxęDe? ~̂T, Uu-Q] GD!?NQ? ~t1]8t ht6E$E,~)'OUh`o)~UcRz2u:cojdaZbYZ h?K>ÉHĚ9xܸ/'6S,5)OH>7ݩ>죬gMޢ0i cшC <~Ee;@ ]d dЧ&-'RQYE ʤi~m=|SyEhҪҰ>[g=)xDs2|Dx[8rk 3;z2^$ͦ܇oeY Jh6YpJƵxVeOXq}7̹b]vƊL>K2y95]٨?lAI> ~0Eț7!Gr[n}?)muuŽyWӣVhjF Xmp&̛ܫO: {3=KrR{YN3Zg@⏥*+ٻ% pJ7][۵6-*E\g}}VVmmS.[ܷU]Q'֥ U*97R Ɣ7 {v!ZiG5seʢin֧|k͍;v;DᣨB;vEYO_ͬD(wssn.LGݘelѦX"!'%-2 ge>#.Gkc}YƔ<^^؝ӥ~?p"?߷fh?>-0UPy?Ǐ/iK|Ch1p1T zF .O+R$ D5=ٚߚFyu+D_u1!G#]d;wbC < >'\x8)Yqۖɶx{O_ߤyIWޏ G\Dm/\ǻ-EϘ e?p2EhΏ%p,,4PT+Ӧ#BSd ?&y&`3]mmsBrSH%S5dmv\j1Y^1׼I|#f"PcO ~t!)WʔJ77ߔ3#jG/>8% {ʵS,>5?b4Chlj@_ #G^`Uwl{1ᅮugx|LEQYɓ'98qҧr? R(|DŽ _06[:3uoFBk؁džcw>6|.a\`b aT<6F(lJ3h3}i9sj4qT *u4<{ɓ'?||cccG_3zcJa>^Hgsbr.'dIs1zrޒrϚ-g 9H&uzƾ6P[q%ɳeBN'-S>>$<5?m2K'f.M!-YY@s̘Ie|5V>Y kIp`̬dUI )(9/f\=KVGq| `Vچ_,U +."Q < WR4$/ѕ ":W q"afy~jC =d Xd).D!_F胬}~O"7{|#˽8Ü0RQ |~A3/maYh5,49:Yd6#Az"WL]W ⥋_Qyj*qjUM +8 fw yVNƦo1" ꨏ^"YTu!+( |ŔՏhpVP%+ Vj޼H\7Ͽn@T=f23L >`<xZ 8 sfIhC #ļJf3 #i~ xi6_?>5?,TLf 6.6^"##G{ ?~NPa̙rHHq'U %6O7I 7AM H ;qx $kn~ ϶dl o4@Җ99c^W"XLWCOu;c*f;l1⅓x.J n{8/mV]g+gfO:!^%<p\=]%"ߖ20߹W3^#?GOX4dyu/KZt(vxyb{7-'F59DBD0l),QׯkK mF[ʥJy?%IbߢÀ¶φ? N>gHIڨ*C<.܇k< GU#* Wka)_D1Z("Hœ Fߑ-2кTx2;[MLދuhZ h;r)ϫ$,v(Njp"d$ pd[rya2ԒėI"r+9RN!4 .UO?>fQ~/S6Qʩ>x7i7$}uN @԰1{@ѧqQtH8COI}x3.aߐLIr ̸ܖGRJ,˓L)$fѿ1KK6i*F[+bЇG ~nxeY}- K7]ZEx$!߆fxg ##P{oeD%^X̜=_g[hCߖ=KrTWFY'X[c]|cؗa BUʙ~x_C@NXN=1Qazri$|L >yņG¦|ŔY xbK!I_ OiSCb­Dfǖ_RrO?m'%9t6VC6$t͊(ץ<7|X%+o&+ld eԎV,1c5=DH =n H'; ƣ:Ć2|o/naGvr<=1A|}.osNA5*_fQVp'̰d>5GHl7a*R^L#I :?%?İI&*8-h8>t3܍8^]5&~.VN d̍hUH>ڋrT8Fhp^zEGob7CЫE"YeͤbZ6;&|;FWFA]~_Ӓo>^Zҹq~WRTőmʿ?g5RUK(jyGQM@ԥ8.P۫8.Exv`mu#!|g}51I`XHi|8A2:efȊ&)G'Բі]H5/XOT~:}?N[hN?O|ܴWcr4%^m7x:2l[zj@|F8Sag?yCpi#%;܆bu%LD~y׶Aet#<*:$כODUOwIz˯ Wwoz3ӳr0_Ԇ_}':KggĞ?# bS ŚG{KA|If3 Z8cAu$Z Mr\. 
z-_2"e/Á'?@gN|M/{r&5IS 8G+?@_2@*rG~_z,Oo2@ԊJ?(_y릤6/Kȑ?(JmA=mM6HjO8~&ZD+PӛhUa}@x~/q22Z1Zg<.h%Q~ymRA3{Rz vd1N+\woQMISɟ-)ײ$[%fKZw3 99JAYeLI'R2e,ïa*B*-Hg<%@ʾV<6|"rw})-Dq.6 zDpIZc- 1e-8P.8Y 2 /auIz]};j*u!Tuҡ뗩\^yt8g'b,sV%\HC_>ysP\.$2D O&m?[hпd`=j d0'0cf>[5aW?`G~_ȭŠa8I)٠I| h+Z_In+BOkM7p[r??ɶۤwp8dt]kMb;_H[0tv+_}fO v!m| I,d.?Q#>}a9'>(}Yњ\^# 'SG"Ry[.afթ\}RWdSgf-F)UBP{g i51/\"%S(_*=yoxN|+y ?o.6 MęOtYut~~q|| ?` vGsTv&\1ln=EfU#;yΚJsn?=HGs<Y&$KrR '_W(ۿ&#Զ%&iO.a2aVlj*CCCĶ_&aUz %qDG9SȢ\ + T? ?$E {ɿ/?YVQ/a# 'uHCi:v[*̿Z.fː,Jr~-_*` tDs$8^E j%|,K>OP ;&PMS6X qќgF5<ϸFzO30 mW.*C,f,~us:=Fh&'a,3M" PF=#5sr˖x̟_~ ji\%rdžBٓ]s_ T[Nٝe!Qro|.-cט|e-zdAJ.P,N,^hG ~$J"7$;Uu} U78+P7kԚOW[oDB,FhDˆDⷚw_H~n8ʞo-=9ys"KJ%+bTRfIѐ HV2"h;|KG^ȇ5h0JD%rMUrv*zvCCKqĥ[}y248_i++]E'ke<>HR(g ül0P<a0_,/af+a80t}a>64̃g&n m0ǥ<>[\ KdP/F%mHf7_ƶͣ!90$fHR/4(,}m hFf,H,d ėOmrCll)l>p֎JK8!\H[2ക~}o;.,jy&]]qUͣJ;u/Pw;> ~=%b޾{POK((? ;TA["h!dM@^_ Uw!2|4cn[ ig/pL8&6夝6rs-7wj4c??H_zSt Xg ĮcdMrB/F4.Yw93w˭K գy90/i\ɾx Lʸ|vÛ^oa<QyJ%-dLSvn ӣY9 "T(+{m\Dt.b4ar hiŕȹE`ûeK`˖y|y(-c[ɠwo svOÛR㻴sFVރtVgKT3[Uf)l|"[ƸL'`fCP^Gԏd^NQ6M>M'|e f e{!8$$f C﨔Jx*u߮N i3[]!~Eh T }rRB2= ~M6'ֺWu;nHlz$hH! I$~YxŐw|!6-I գ990'i\ɦx_ Kθ|kB˂(UVqt ~*` ރ`vCw(aòW=ئhǀ}ERfk@Ն*9ȑih÷ύ`qa9Eي"B[ɈH/$e<^Ode=9ys• $d`~wٗz NInpf/#o\2mٹN_-_k.{w^=>3 ]0^5. :ޗ-ۨף[8]Jrt.r}]kE^}ѮVN9uU6EwssAf޳f|B3}B۱<יSVS8pU:dd*?%F-{iַϿ 5F` ioeL_Wn<b~ ao!&1-T>Gf^F檭N].vX:\+uc۳cQ_=W~kZO>}O 3tɼtddjA>@3 /}BQ/guwX$)W&d|$&?XCKzixfbN>gYm} ,-99*lPB;ӛ t+phxoTOW7!YT{*Er33[t|WU.x|\K}>|rwTà&㰽߿IMK H&YK)Nn[ Hj w47X~۷v` qpglp}GWwE0y[= =X+3XMZ"@h!g摰.4;q\8!\Va?~NY?#`}/K_XH7{v8A"EJ$K%$8( )  uPԨwwwٙK"uLKh+-ɶ,RP8J|)qG;Il'qbtzg3f{WUzWyWCYW=PІkV`[ o=}ۧptMP\0+ݡ5lp`C#:Jsukӏy\>.ys0F/ѴBKv瀞 Er1Ee_占D@}#'oC7tŤՒ_cįqO>JU+km2釒3iAyK:Go:UTfm% 6V PRNx{qVoA13 xy7bxq׍QyِӰM>dg{!tWlCK?!oǔϤ7wpFHHZO3ys;34` pդ}]A[!Ds AqH@흀v LBy!Dй}gJPcف0,E.nC ,ݏB)Hw!([QT}u_{84g?BX`?@韄{/lAq9>@֋r20qxCڳp=cy<ЦGfv]>8;Sv]ueA] *AeM͹ay\ ߧk@YZ> ?D5PGP4\}7FP~e}O>׏CO@Ai? i^y>y#ipG!a~! 
Wofm}z*/Jy/@;~tMA K#(~} VN7y.1!7at&ѸYSW\"/ƹD4'mWAWq kv)P;kϟT >Tzccj}_O?1Eֻ{lг!Kk*µ惹]ZG.͕sJ smi kJzbs׾˵zVnTl ~-fQTvC\Y/J*[E$n >($:J?d]`&X*WwF/pM0L=c+j$\kDdt:BmJ:؊oCRW+br'"/suj Jn JW]IINõ'Rnf HVaUV1[,igYƈ&#wh!L>޸ަ\ qK:GϿtn ^е6eרvǴO9ϝmڵ+]hډv](0T7-S ^;:Q$1R =^YnZ {G>WgV#ZN?Wnۈ=F섥mG:<-UPǴkKjfT=]Zl92iVKcSTlYMT O'i?wwlHW;^1rЈK)KS`zujӮC^o[Jh5풆H]ex>b$gJ^Z|,hO|03b5LhD|)Iw!cMDz#v 1#3njF<0=8F*ys!秂'ޣ$cG*p0gj !H+R x%8"t}\|xA}Ltק+"n~Mf yN&'uHU=-M8t1_u5u詃94̹`m|JU!᝖2R'QIڰ1;:S^cFCS[U#k1tE{sp:Uu"!YoX"0b%CDH_"x#a$j}+kS .\U!ݬbj+j`zu}94i#zxXDI)W]O]v$fޫSڙ*z\˵᠋/ i;e-EP5bC*V.ˋ8 S<3/$\ݵT3$drٛup]kg`ٓJNmt9?K/XH؂ o35WߗMp,_ -4OxfHjDl,+VmW :މiYh4,"$/_W "3<܁=k,%NyԄvqU(\o?n%m//w\oKqe!‘ۛ8t8pNïK^ ^P&rR:qߕp{v NNnڗL<6 }Ͱwx@iHz+b炙QswGz{Nz^A Ӕʧ Vs ?3ޱ Q&;u*nD3QS}w@8A`m2]:N~M _j<<J6@6hVwiz=-yZz= 7ڏnU(CjRe47J_m3qhG{R14MSLӠf`x\m!ɧ$o58aKcxh:,m\D$ 7|d dCeץSҽ #!}^@z.u_|dt*~KG;-p}3\cp}Hjp}B0m^K tN ]bƍ& Fy6d1Zp%_y\癀R't'G/E y̒Gqgu}zMh9sÿ۵hܺ2n걌?GߏU5_OT#_'v̒>pJ2Re,\mڳUaJ"DR:@ZM,yܘQ"x=nC˴w="hiW ǞU'|,ZJ1cƸ1]xqHϥ9ؐdDנ,T1{.F|TkU5lhU2<'ҖJz}JAt!n\;!erΦ9N\Q%wIa>chj\K+WEwt"VkP.TT)֏ڑ.R 3Nk&d{|7VsjE`R#855G(k|K1)ErӜX(+U!6>B-F[msU7XdtM(M_¼˟u1l9ly6l]غ.Jz$ xsݿ>$p۟hD@O^B% fwX5ǁUfKee l74hXjn5>2g[I<[*CHx@MK/³i\³O&lW֗Fx6³a;f~Hx6³ #gkOlߎ"<BxOY³ -x <Fx6 ֳ Ax6³Z<[(g GY7ֳpx[cڢ@mj# @m=j#P۪2.MgT[C;a<㨶4PmM!T2;^j 7r ֞a PmvPm}9lj#T6ê9l)hЎj#TڰPm=2xvՖM)B9-jT'T@>ڢBPmj# Pm=j#T۪E2\T[5@X/%G64v1m~qL[CH@MK/´VJikωߟ'LeŴp 0mi0arF6lvUsSР F6´aW1azdl -[Ris|[Ӗ8+O}´Ei#LaFaz6!FUiKce 1mWհ 84pl+cKc h±iy)E8 C)pl9)pj~"d,±pl氙A;plca± "[淡Ǧ>-SqVğpl%c6F86±,±lBplǖ.@ض)8BP4Ɠl+ eKc iy)EP"C)l9)pj~"(d,l氙,A;leb"([wʦ>Ƴ-SqVğl%e6FP6,lBlʖ.@BٮCP5o -%0ۊB$lZ^x-P%0[{N8q5d? 2rF`6lvUsSϠ F`6a1zdlm-{Qfs|#Z̖8+O`}Ef#0Fz6!F`U fKce l 0e [I@1d8 ٴ" [hK@8q5d? 
2rF@6lvUsSΠ F@6a0zdlm-Pds|YȖ8+O@}Ed# Fz6!F@U dKce lRl}-%,ۊbB²"lZ^x-P%,[{N| l=X4l}9le#,6ê9l)kgЎe#,aٰl=2x˖(²9-eT',@>a٢²le# l=e#,۪Ų2XW#,[\hKqP gKcL_dΖ?@)gRfNjlau,l9RgDp+Xga#8ٰVa3OY>v$8Άg%DpFM}i!8[⬈?J m<lg#8Ygل 8V--]ƅ \-ԭ/4m+hKc Biy)E: K)pj@~"Dd,Bm氙A;mhÎbB"D[7Ѧ>-SqVğm%h6F6B,BlBmі.@"nWmv^=B&>1p3pKc 0iy)EC)n9)pj~"d,n氙,A;np~c$"[>F-SqVğn%p6F7,lBn.@nzj"~ؖƨ -8-!$@[RЦ̎4Xn#"Z-c+`xU{}D]l*XGڴu몭yB.eFZ*{w }qsƭݿ1LftO):tU * oKjk&Jh Q`(?zO-Ef3|s@~*EEUK*XZ"֙Q O(ѳ Jfx!76zS&1"窆=#,Lvi’دt/f Ttn{0\*-{lUdl6ln5fm^l5vt7k.כ͚+Ϙl hJUvdrjZ*2JI'MIdo9MV@+PMUn^Q , Bj|sΒ?MǮLӭ&O\5k-XkUJ擼ڂh\*7m(ɼ^ߡnxnKVTTP*옺13Z=T\*+O1pO"Nu^ mUORh)V-\^﨡{u0)Ҥ?TL?)o$Ote{MTW&%=)sAiTg_By. W]1fis*UW5&:m2isSlܰKP+_T Ow(y܉ySgcmakSSәZ=gkVV7퍘ߥ9;ٙ2P܏缞̖qzQɦMqHΣN޴/!zUklZUM'7k%-Vf}rܑ.WUӺP .%KJϿj*\1,[)m=έ"IG留u SRIf. <BMPQzN׻6J~Rc|g'sVɖvV;g V棢Ely a)7تPx!]K~CO/, ( b1fFcsvը:`:P5.нZmBL{fUďZ7~}\1fW<>+c1{v۬9'Zx::n-i5SmFܚFuT:I=OsbwmOWC(͋4zt#wn IwkU,mMGP(WJdr.o2M]TLd̉|8m߉_sJevyvWbk[@>t˵*[soX4jl_s}8[lXIkŨU,;ilJ>gWL9?3}vfΤhٞvh>=#n?{fЬinϴrpT։dgDrd~ n?w= aL\b#3p==a|Npd͓UnŮ(ӭY;2̀2˽}>9 cܕ!s%[:۹VY{ =Ȍ }^w/M 6OR+*>45EafCJY\8 slzdx]kM*}dI2Yj7&(0Z/7HsV5\824^|@망g#fFn0㈙UR;@"bGs7t!x#?f;^؈\Q,|}\Q'f9үJg(R),Tna#&ckQ4n9 *K1JCJ&30ޡٟ~"t@F ⒮޸E$%"ƚ 4I;@ΣE]n2q'324:@j 抇&HcH'48n 9f]{of]3/tsZ%u#gca#ZAza|rmGB8/!#(O>XM[TqnIr;q(\ c;mN[$K۪IM!~,"Ft5DE 5U*X1hE~4p$??fޯ!>c& 5\ 0>;an 3Fa&| 3CA??gE)@>6('\?;", kBPlr_Q4q x4vQ='oYwTl4퐞KsH׹v5(K!UBbrBOi\/gNdw|~.4U߫ґۇ34tt#\ DfX =po3~"m1h7qD\ ЅXڲՏR_ Φ9NpΑJĸ=;q؏*W=qD֠,]HȖ(/=E`)RՀqt00i0arHGlnUrؼS|* HG8h0=2x {0qA$R5]C ACOY81x #a HG س AG8Zc(q!݄fƠ1bZfHB91m'c`̎4X.s⟥(J3@[`(B9ʑPyxU@+ʑPr4{dlD ؃Ij)/ڇPٟ"r('c@(GB9ʑPdAʱgPr\(4vQ8D9F䬂c=&S zI==!z"chYK)pj=~R@i0rHGlnUrؼS, HG=h/@=2x6{@zA$R5]c ACOY=1x # HG س AG=Zc( z=6RR-:1uL(uL[ %:jy)EPbT)9/Rut_GP4}9lu$#6*9l)Њu$#A @P<[ۅ=QAPG:f ˠ!c⬈?AJ m<ԑu$#Yuل #AW-1]:nwc`Z:&QOˌuIX::!ua"ch5KXSՀuti0arHXGlnUrؼS|+ HXG:h =2x {uA$R5]C ACXOY:1x #a HXG س AXG:Zc(u:V[NZqdtLc` D9-3Q$!hG:!t!"ch-KH?RՀtHGppoh#c EHǾ6B:a6 hEB:ґ@ c ž #!{P3TM@CeP1SqVğ%t6HHGB:ґ,B:lBґ.@"vuaJyЍi &(eF7ʁ$t#pB7FЍZ^x1T%tc{N)pj@7~R G4Ѝ}9ln$t#6*9l)Њn$t# 
i@<[;=QAFB7f ˠ!tc⬈?J m<Ѝn$t#Ynل t#W-1]D7nSЍZi'e8ʁ$#p8LZ^x1U%c{N8q5e?])h#c EǾ68a6 hE87 c Mž #{P3TMCeP1SqVğ%p6HG8,8lB.@CǙ5o 1%cG9q$cN*QK/8Jqlω+N\ GOW {Cı/ qvX%;ťZ q$v#gk'*HL"U5B>qT'@>A8đ q$ =q$㪅82kE81$xcF9o$xcN)QK/7Jolω;N\ FOW ޘC/ovX%;ŝZo$xv #gk'*HL"U5:>oT'x@>7o$ =o$x㪅72W> ƀ1RZfHB81m'c̎!CkP\B8ĿJWQӲ"91 "c_! JwG"! HGm±G.aOT‘=Dk!2}8+Oǁ}B8Fp$#! H!{6!HUpLce FZ,.[fƘ1^ZfH9.?181 'c@̎4X.s7Rrt@!@[`(9ȑ@y8U@+ȑ@r4{dlmD ؃Ij(/ڇ@ٟ"r('c@ G9ȑ@dAȱg@r\ 4vQȺo 4rPNV9&QMˌsI8G9-s"cX%c{Nf p1 "c_ Jw_" H8GmαG^aOTΑp=Dk!2}8+O8ǁ}9Fs$# H{6!H8UsLce q8GT@/vfe=&T {I==!{"chaK)pj=~R`i0rHGlnUrؼS, HG=h7`=2x{`{A$R5] ACOY=1x # HG س AG=Zc( {Q%i䫬%cJ0$iar Hp9X`Z^x1N IVA[d˺RpzŃ~lꆻGk2۰Jхz6-ujk-8%#eFZ*{wM)[{c薟pQ r2ܪ'7D(׹˩\7SC_^͌?Z#9gtGŗSPJPa[uőx>(&jjI=KIV2b=pɛ~ j4\p)Œ5SA$R2}Jw"7+OzF $p& $`f x利GQfr?&32Lc7߁&JZgoht)) 㴆*IG<Aa{U-̔k%@q޻fo_8jJ(𳱌o1#|9mF@!#9{ګ 0IzȄ!FP^Zo Qp7!?hWM*m^mZѾ\.3w5@Ru{bKQVęk5ywêj[RY^ tj,_qz]\ eժpv /I>]nX4H_5.:MuvܷZ@.^$&b"5՜E\<]]l%;1Ϊx}ΐ|?}PdIU3ӵI&k4 X8Yd.U*9rP,ve%=TWr}8Ɠ| I&XQ]Iq[!|_ Z ruv.TkM7d2 KK/B,JrI'_1S^3yzU,T;x784ͭzIEL9\'\ۅpI%Jz(h9FLV1ar'/+Rױƾ\o%Ī$$IPjB5`HҭXfh>"iwIr8E8t $hQw\pU "!v;i\J Mc[[ \<}#@;0\%q:`N'`;I /WdYn!sX"鋓L*}j|d*FP%.~ \O{[IbۈT'/^)ە(`2K8HlwY{ciˁCMq!Q9QG0D/.{# |spyH{ 廬w8f1Y9vN싘,\R>uC렌g]6TϏtąn(YBzfs/'hn̒=ݚoј5`٦Se6|%ST>im1΋L tK7g1Uh(gQ(kp^yәLn1\ l܇҄TdZM>tg?`v qZz~ܣ'̱EZYsk>$eʳf=hٓhվ4:nGs<3Sy6$tdVVL6yɿ F9HTY0^-$;\^XJHxG?@aDjYHk,byqNgu )VTx(8}W#:=NRE>#(qW?#1韊J^WjcҔcO{hh{r^~ *FIX&.ȬZN4W?MPYG!K&gIKDm+zb43 ntΗ >V8|j fvKS&t'|Pθ3,h{wM0["VjVDuA+lb!r&48csvը:JpŚ(rҷV0KLʹgf6rӱ+3$2E]]Rn3Jy}*C a/:ctьF/tS TnXuu3Z)j44+ḡ^exf%voAiה:F .XKXuzPĪX3ޚrsMQxKPYvӯ|Q%?Atv w`|ghb;-F>v!'"'Qui\c;nJ ߑ`_elCL]|/60/ѭJh]:Pchv̖*wy4C8wsv?·n:6\E~M<;PI0V<;=65ԔT@4C"HyP𻆎qG컆B.n/C|/]NXH[/Gt썻c-1M,gLgt<|vq!λK܄z_;qUft q .lbV88o0Uu˝ڥtlMQFeupǁ;xj5]W) d'+m.2BRޕYTxS"AXf{ZrOc_@ٛ{Q$Yy"y]/w+P|ujSx'񤷺pGckDwx|Ij(d:цӒfC>>ӒH%=n+rUd[K^l|c'=jE>ĵ<|>Ge)P"7KX;$1XPW'qO}S}d&צ2>@-K>7EqO%_\"ݫ 6q<iǀ{5^͗'Zt"Hr~)ĮV Κ*/ly&m┽T8x6W)$-;TG74hm4;@l_O@}JQ벌PJ kgңAuuoa1;q['.q iH :q%9WƂ2bIQwRFQ Ud_$m젭e"wX3Cm [Иu1jf+kd~Ux@[Ei\SGqG2 kAj)\gY\O@H _PV'*q'S &L$c 
jӖS.J8X`Kf˪-{^Jh;>EQ%Gw5/,)=f`.FVӈQ5st|1:{p՚t'(oz7.=U|.Q~ZՒ 8a pƭ0Yd//ʞ{!UƌRgcH5\gzwrک̜RѴE_5tJrrI "X '1'%Mͮ&l+kՙJ +=o2ݚ#WilMwtڮtrؕj|i?ɵmϸvARkb;wjmOlʀ6?\|֨ٿO[{T& MDH wX!?:WJ߫;uWߊs5ğc runvi6gF&Li:B[U?/pňEy?IhAہߡiEٷu:6"qZo'c= 0WKoU1vtꨯލ䌈IFtl&d.fNg3 {rz-[ˋnO (!: -2i֬E3J"J_QQݦE+YP0D槂=3dLfW/Wk]N}&q'U 3I[Aq mͭՕ&8SU@Z-B jVOw):6ΟYufQk1f+u螺+u$(E $[Ѽvi5NX6y}csT]|6;~ΠAsM.KtHB 5[Fù:GK Cf0Ee?CߥkH+[ %h+=)+|z{)>_nFƶ-Փ:;VtU-sB[K[Ή\Kp={Uq::Ap%Y5RDdpĵVQ,j4EZtTݴ??YȂ]l$|gR=ښgb&y-)ZW6YpÒeqaGP Um z p^=<0tmji6Zfau6Д{2iY:|G.͕dmuے-̍yƲ6eے᝵Wa-"u2xӈ:8J."JTMa3^9!Nxю:DYo xt"N(5}j̫?fR6VFmHNm`!Uk:6Xڂ59['Ac%ƒC9ciMW(buIgYC1:c8JWLW )D M+)\]tJ޻Jsqz4cJT\>^Wz ׍L&ːz\xwY`>e]g[ ȹ}4sU[s{4ܞinzs0insmY|~J ӓ9?Rו h9vf^sE4NJh́5X5Ǽ eǴ2=⪆]5Jv^>NJ( R!F4e2ue=-ְw"M"s`WjDeA!\Lf] 7)I/ݎKcP {Atj,|9R$HVBX+}bAS+\өa՟,O$%$GX[슓@ m.E(3/&j#Gr, ,佨+ EP$s&=0W%\ ˚W}ߕ5JC1̓m+ܞWkuutԵT%#6i5-\3w}>!-0+%dHR*̎+(&SWBbT!5#5; )xC#j{hP\UngC z#uNUy@]U-V^Qd ] %>ޫ6_o2wYΘxeϸyN▽@Ҝke֪Eg<}ЖumiYȚ+]}5蹇HB2PBn]"׾A}3b*d #Ms#QO`X5:ECAVG]&X9`q6:7i% +4Xi~ 8ka2w E,wuZ51-ҌY7^~kPm!5ʿFX~AS@ i*6f5?o53V+R' bJ@ Co$cx#¼{d5ər5 ӓ>8ZY+bAQ!X.dc*^fՙ%+K<ؕ2 iI^uMnαFqi">uA;HCjY y)b )X^e f5**y =4HH! !h$|'hd[ժ ,4&48`n!6fZ[&g+V7xk ` D4vA;>#ON[ցE3d ҳTA`8!#/ \вRAzו 0YPqN,b:CPGȉa+_D4Gu :jH3NݬU37N&`߹dժ)ːX_\Ø({_ H /da)1zx"N +P!i&4I[A_X^[BHfhӖFhDeln͂mӾZvֳ\={ ;Ru˼fO/ ձ"C_w36.d]ml,`tHwX HEHMX4Ia}tp^gf;oĕR"]t V0ݽ\>CI AǑXg\Q2 +G,[/ob!դdX0_W1( 0ޚu ^'0IHsP+c%IkTcZWl˚]x+-]*uDvAX^ -!gx/dZ-Jhh~QW?8L*G]&X9Y7} N.aiXݻݒlӍT6b]9{=ؾJ&Y=F:Ϻ** gum0JE`|wEx7"Jl t [MKO藔%na(3Wd#0 J?W NӪf]{of]3/tsZ%j6O>XM[o1 #L9U>߃%Ul'fIwE.̮篘r kBMt`$*w"Zkd׷MA-.]#7Eq׋E!C m+!}."Mu\/WͲU)?E{/(h` r2 nGnYPd#n?':r#>ҿӿAZT?{|8 0JipͲ炭1\VdxMh"}m-}LCy[]x23vը%vY4mqox~Un2_Pȼs&;>Hi)UġKvI-Z\ &;PC<[؀%u+'Iqnݰ^FЪZN`VJh䄈=㖖rIeE[s1\eįj >sBReνQ*S1F -co)vY#pym[nl.oV—xޥ%%lPIj:71eMA¿\D*y#7Q?Q-2} \FDzlb}+8f|>)gQ:mS1y?V9-`d 1n\h\eu`"W=;6˽gн+xV#ϴִY/nyvi5 KߗTسzi;j[cyFL:>|Fׄh=p5:s,t~i杰~l#jz6LW@~K?Y?%tѪVkMպY/ORvL>[[MԯޗϹVq`?S㼝 VR.ZM$KzLE:>tν[%ujC]'Q]=!JrȔDXQn6gj Q㌻pFhƱfD5mK#^YmxTer8c(b2#}KT#SAM.Ŵ!ʿ&?}'~\W出{"< ۊ5}"xH. 
%eB~/c/⽌LZ:}|F3U{Qܩ&I˃O*:3p8$¾(o:Aj7fqY U)#ю8UG8b5+#ͻLDO/! 3)n^_ӪeCq$6$˩;32[㭒|E`}^kx YA*#3~W[WCBS!U~JpF!?O{  P6*s\?}r'k=P^(f< nh=Ys 휂zAÆ4q}>u~睆??^cqLN@ \n do,bpL8ذQ;ORM-) f_+_NV-k=̱i5f[P[eDŽ]|5vgM|r|4/D)jU;[flFC6VnymG%P8jK3MǮLӭ&O\5k-XkUJ擼ڂh\*7m(<꺿C]{ߒ<|P딡qU峔|GzQC"}YR ;鏿q.|F}>|6^H`ȜJժq%'j b<+^='|nգϺ׻K 8]ڎc8 :֬=eq0v8%ٰE "e1@׎+4(~OpPc06qX?STjIԹww(uݭ~^7 V7nw~t`|Nki&Ρrc q͊ߟ,gӔ=tA[pa+;37s);!lx}־\vA1zGMªfK5&}VRK:!rkS>ևЃm@W'\/x@55AS.v(oKʉn5{K ׾2ܖ6|hI3V\K[Ѐҟ-wҜs0Gu V;tEZ:'3s-GH SbS& Y^>>Cǒ-D9x1j,qg=:",NOQ-wbBR+;Z#!1||HlSVbvZÉo*N'΢o">\zԧ,2%4V]Ͱ myP__BnBFеp 5Fu5lIw\gn7>⏻vA;B,hcj) CxE'0Rzh?/6hH)cP&eB 8Ƅ#7?c2!1ʓ#MiGrZ+*ү0 `qqBG"w#]W?qGJ=B=SJ=z CJ{y_0(y>Kr{33 B#QJI^׬u;HЃx0 YtdHjn9M{RVb81J/$*YEWwFոA'NC5Ic3Itx. n4GtƺΆxcSF6mťtjb%֢kU/wТC,3%ĸ-da _ U&Y~8K+LՇ\w~SU+PCgzzSMVx-$#Jq͞pd(6d ҁ}۱`zU|-wqڃ5Z`]*ݝgƬxXaB;ΫZU]PAOnXѫFm[]y *:4 ]g$n>A`N_"XU"TMej1>N䊗}`UAup-j։ȽY0fj@C矌z5NǐoON 7t^qBGnRZaHthMkf>|ghyj1E SI HJz<&kƧ580kfk=+|Kz#Xݺiĵ7O^޼# ] tVxy9;I$K"Q As K@xg10BwH3yF4I0e=p,1`Cfb$k l"08=7ڗesup]lG|؛f1~B=aNU՟Fq;SE}3"@W!q4t riJZ~<T*xuStNM;%Bɛvh k*OIqpX+,U2sBC֬Jzq*,[@$RRkZJ&D8U0OC6VbqLZL3ګ%Q2/){̞09UW͙V-ɼ ,bŬRa,M*٪n\q^FKWY 0덍{xXMbW 底F?G:M%Y q,#:~ 婫-bÆÍ$p@}5)6BWMp um1|tp'WJy(5N2ݵX`TkvQk֚27|8)dCҁ\UV;񀮏q9>yhh a+xP6xDT4h8SQ;[up1:hH dë̔ C?9PAr]z 6K{.CL'u"eQ0s5*wS}POjL͟DEt_+~(=D ?,;gs3xJbK:2h%#Z75ߕ93|^ޯVNF]08xjV1֪/ Aom5>Q#(M5~2x՘bś'y1uӁ.J J&u o y6:ZYj#8aFԖivĵ+fHdk1Mv\}u{sFW17wy$b»:Ϙx5k;&BB7Bl5,Zó l'/>4yv#n5y pʂfƨ-د/4$cתVNӈftC4zU03ުu} ˺VٰvV4HL}_eD iscLo8(Xk>*zeC\Zܝm'ߥ3?%t p8(NA|Q ?#q^ Tvo ]~Ӳ !+dz&׿y5*?]н7TwBr6{`io4C}lOnt&#?\ nCyw@PޝocGqv?-陀 =GoB7s=}ʘ zG ߿k뗴:9okHzHs ?r\go0|[5ߝ4)r>>r|Ng~;c>; t=>]˳}F<;Ci局yҰVExujvËoΞf- 35Ӟq=@n^~\Yy\T+F jOm6|i1n_z͏k~?4>?LѸCPLzWz zvEj ^H8? 
*85;I5;[56ھf`zt=^7sM0}<ծd/Ҷ˹.>(LC\y]r5r .GyOtCVq;<#wm1Ujb?aWAa4<`!ߓ 4x?-$?>$9PtN#.޹ξ2eTm,jHb6@`U%sk|6S& jƔl/[+@EGܑ֤l֐>nn{Fo7ͦ';4IwIer<=jPm>[|=[|z׈W->k ^+v z[|n->c[|^ܸ[@_ouМ&KUm/f#rN7,l&/ac_o7o)(skțvC1ͪb[cice Iܭ3+% }NvP7[nlm"EERGU:Im {,=]uظ{lFZ7SiMռbӗ$TmgRk\q(Z8%# p ⬱4ŸtW^(<unzwx/uG}-C~B|||d'T76ҌVHwGΠzo~f_&68 /vO|yI'˜ =Oj 9~'5<{ {vo5B}pݏr|ϓ[}ǡ _Kh xv 4k*?}PNQF04w@b:y.=^1_xz/רN w\ӟ {?mS: OAve\~+zJߗ igYHs Cy r޼y+J} cǿ}Pf4|ȟqx=SqcFO4 K iQdY*myYO4_ }B2|=\gzMqo:t|`|F_3=CFW1(d4Q뼎5^/q\LV7xFB MFYVBJ Ay~e7>42zh7_CdƏXL0,^?Ҡ컫P{]oWUmQZʆWYp*)4+1qݛx؂nsevhr|{L^aBEn3:Rm#+ᡈhWClT[]urvPVvѨe Lj^ 쇫8f#s#zDX>l'z=>lUxף9uP۰q]qǺpGԼOd帊=X2#W΋c&p%$Wgx;\ȜhyS_2Q/Z=~l9CzQvDHվ'do!הg}97g}  L?!jQbr XZ 官voQsw0 sO0:`;- ZAVNf4*eP $3 I25١؈QZ /*U5lG)f+IX^}|pԈV[E2MȗΈ#Z35Z>u#x"h+Y@jrԓ$W-{ˆ3|~0_E{]6j+N-ڴUyjkh}ѹ7"MӷB)P[laypT!Ifz׊Oh 8p^[Fk#WWV#+94Ԇ1gj&ߏE HSCA^cD  Cro@%E}q`ƫh`@ JСM"0 X~P M${'}qc$LL#NO$K'872<^;':ǯp]cp&ok L^u3p4p t=4|ƧPDesf 8Mi\:xT4U+#ZֳF0 .蠞Ϫ(pe l0_Ol`_M 0ƈ^ 's5) pzuK: h vpAUpAP\| *6jDDqѶ&+6dn8 sf#9;!:l6r!Hp#p?$~#u/ wߟa:|nD;@֋z6A#lyB?Mj::K[c~=b&i)6+u-\;͛rkO3GX l3`UVzeJI:]ԗ4u3j4 u(nA f8Ivdme,@-JI\>1qbW#oB|.B('KGp,W|!߼b'J4> Lg{s;%Bb8'pQ*v=ӆz9>Aj urm@?$9p_']}P`u乨aW5uv4m%nl0'\CDAW9t2ʑNJ:_ ~!csZQܐ%FIF$k'/Ge}#Fnq:%~}sbh0þ-d|/%so` 6K#ӆsaBC?+^' 34{J՚\CJ$Rs/oiT+ 򓪁+8gk a<">c{ =y2N~ 9t|Arvq&[>]5lL*nf".ĔGhq"錨>- ;j'{1_=Ԇ.[L$kc~-6 4ft\͌]x\u yy\HQ% x m6Up= n6u д]c6ѵP6Hw~]F&گJvD&Nk:I%+q(ψ[&,cLS˰i:_N*`d_: پ;@QK]edo*q!{3֯ȡfm-7H!2fo\1Wn51{uolϷJK_~Hφ2#Oenm>\{{twbɉQNbQ<1|h!߰y7*y?Que"?)}R^MF](֬#ѭQ*H8zNP/BڝIJ(/}c o$/(YF{kxc:NЍ&6ҲC%Y7t# JNPrǖ_!š 2 41;ԭ(,G{ojobeLj~OGukV"~YЯ-!soY2Y aU~Z?!#"l*7J.ؑiYhvή7ldĒ|xQw -t98Cr x ?d-ztoZBiC?YB6#L:P O|2smhIL7z *kYPʇ)ZP)oBpi:S!Xx]+hQK(w7h^wTÔ47\0驟~q^cӞW|&K9TΈږ( M~z ma>5MJzoJ厢LFk޷o3T{6-ͽ6帯HtO9EL{gs|~~д*~N@eSj=j&T\F.[Є>I Mkmȋ)7ۨO|9CV :U( vȰ]IʋZ4EOL:1,Rڪ]m岍2y[Dw Mގ+iU 3Zɮ=P7{:ŹZO>{GJ,G.bG'gjpBJjNO A|}ݾd'5]9ͻVT߷_ĸZ 4 vu]N9 j5سlŦU <={8q=o{O8q{O4F+Moyn{ZY;k&e/=oUg+vZے3 q]-wVA}=*l%kc6:ݻӹ[3R1%ښQQ<9 Yw8k#BzK8p]3&MpFw VekE\2/Unr%gg]eg4ayE]nbݿdv FBYNFzK4D6ACGIkCS@AdCK԰?RPIQ.v7"ָ'|,ZJ1f4MCܯBa^^( 
vZk.%#y4d8|X@Ú/&gݏ Ҝ'>g;Vh;+8If`w~mIQtVso*-'Zb.9ZxՙiA9~I;-?%|KUrjg,l98?]jFӈuVĔR"Tm͟x|F ~d7:e1XZapV=FL[a2̚H2&Ls&<7۰JJmPoԦӈOv N0Z,{&؋UfVͣc\~ 3W'L+KaψV/;vtNZ88*;rԭfu"n<p=??^4'RQR1,دPWrY.]&?HPHcO6?f{ClF[@u4zj9n[\,+w8&1- 3eӶ b3UYQX츚PU6'{}d') lM>4;\e9ŋ&:LZ6X(,A䰨6ԕFDzx} Y1bE$C _O9}Kgw}/K -AxEJ3SQ vRJva|M@me.ϗX2?22e.\__~Po!`l{>Zw<crnZsQ t α~Zꎢ]mH]1ynz+8a6ݮڬY!#)iGZ>ؖ :7.x,d% -YhBKZВ7hBKL0|6]15:Uhp۔٪Dv* hm;@RvFʫ6QѡeFo)W^S'׏3}JUWCF)1/u>Ҕ|rcѭ3劽@i3{w}`b鮽}/tyP wk/:٭nZRn$3tMфnAtwM)&S,=h>z_,e.\.sZ2\.s?n2c\O,s_hjW2!‚?NA?>mȮF;|4W(qje88X7EH6Z @:9j|mV՗PE֯V<ݻrXilǜiMRzTb>ϟ%*npBs\۬eփSS[ˇKtŞDžjbQSPX`fܫd=j6j 3+%ͻy)yG-2~iVfX?67\W ❍1iNuV3+56Fy5U:~bڻNy(VC? ^hڗ;MR(L%?Ojwn7K`1{gm"{s/Qgʼnոdr^ģQlW?4j8d2f\?F55i ȎyjT? M^sX+%Ka6G[@BvS5p*o&i}7-X~ <:aXgmָȌb}ϒQI٣@+tJ@Y;xv,[tM95[XeC^g͓BZ/xB9;5!nQ/?_ |Y>??51HT{8!;X<~B@C;v=Jk<1q7.Dc&r|cK8NWnd_|lZٓFMS-2 ޽&t EENq/jNPh5Iyч~K cW[v\=ȁzU3֙J9SB,kx9Эf-9"}b;\[ӱgUidaZ"/XD,9sLxnE;li+VuvjVZܼԊ'"YТ3M'`S:K0g:7ݬZiVk&ըVY vQcs'0ȣ' o8y~<{ON}~>h̋edʱ"?hbiKI(i3(rygX;$yTyq3t”5y"SM&[N h3l)QߨK2KϬ?|C 9},Լ4s1߬r՝g[ދ+_9_vzj4+fH" ϱ= XP-?9?6J5u8Z%|y ǔ9:naړf&c'l[UjKX7VG`e,S0z iu,[\VWҒ:]J0=SG(-ʿxy)eLFF" N^VXI\JWѦ10 }kw[EǮ^g4\)Hn'O d1$3f4OwQV"!V+,Ox#^J^Vifr`|at>ike/L}>:Bp˾/ۘ87^6NJ,5q:x䞀]l9ٚoU,S<{/` UPrҸ5Q _%F[ֲ[Rai #;23}Y\4I+UAsc'ͪ6ء{)7FX;$f`ӥ]ۅ+bMv?b7. JPE߷u|[(!IQo9sixq,F˒LXB#Gs2~(e7vH̔+1 epa#%ne`Wzœ!v9|DCOٶBʌYձYG-[9д''F/{'LcM/$d*S.lr!n6 cqo%LHbCu ʄDm&CJv6jp57x&x?hZ *N Sx}@48ДI p-$qwfL*>YQC ?'<&CvoFs)m&-akgkk;?n6U6OR_fLuycq`SY1 򉠇@(ZiOr7eyxSY4] pd7[jQ/nyq8ȍq: 4->tO'&Mz>% Q+(B&^$9ctڐx{[_Tk;wIB%s)#j4L: i2{)[ZC.{P\ude\Mpdwv{oϛjG0EP/k8#rM@S5 FQ5 mDVQh#_P52eHP5 F(Pj@MC(ZbQ5 F%Zbz6p GA(h}R>C!)h}Qо(v}Q> GA(hKQ> GFA(hGAY)h}R> GA(h}O@2>MS> GAQ> EVQ8-_P>2eHP> GA(h}OCA(h_Q> GAzLZ$#y]DȻnɍ!B_qݐF5aⅎ5.}`,YK&Uqo~!w7[UHְwΚw>ϼmGE7"n^oxr "cLI2ąTLI6}FJ֎Zڂns=S{Mk۳.%'5*{S>If*ky%T෣ \=7}Q,0#0?`6mUNT#[b*s>)`DoSuJjFDg\հgv'LD{׉m(0$r7M 7 @(p^,.pKM6rf)"[tya5w?js3DCg}N( EM wlmi%cz%$[NU9Wk5yWFy \u~q~V95bRc;d,Ӄ$f32 ˙E1ȹ߁/h&"uRO"?!ob ϫѮ.gq_Hy4IˍZ@v^;yմ?*[7zU? 
\{;~.Fl ߏZ5%Te]!j씹rjnq;i#xZ.,yiή"9g9۹1k"w*;cn -{ܣ'̱]nbݿdv F{_}ſI`LE{Z߮P|DX/TWMvbt5GHн,84TOpI0$ˊDvWнOlc+_ XC#_QgOG&ғr:_gȿRI6,كر3]k8\O9%ݿԥ{Ea §XG;~ocpVnu"ket|{WE T#l]8Wm:"֠ՓW+?'+broom/inst/doc/0000755000177700017770000000000013204542661014447 5ustar herbrandtherbrandtbroom/inst/doc/broom.html0000644000177700017770000007323113204542661016461 0ustar herbrandtherbrandt broom: let's tidy up a bit

broom: let's tidy up a bit

The broom package takes the messy output of built-in functions in R, such as lm, nls, or t.test, and turns them into tidy data frames.

The concept of “tidy data”, as introduced by Hadley Wickham, offers a powerful framework for data manipulation and analysis. That paper makes a convincing statement of the problem this package tries to solve (emphasis mine):

While model inputs usually require tidy inputs, such attention to detail doesn't carry over to model outputs. Outputs such as predictions and estimated coefficients aren't always tidy. This makes it more difficult to combine results from multiple models. For example, in R, the default representation of model coefficients is not tidy because it does not have an explicit variable that records the variable name for each estimate, they are instead recorded as row names. In R, row names must be unique, so combining coefficients from many models (e.g., from bootstrap resamples, or subgroups) requires workarounds to avoid losing important information. This knocks you out of the flow of analysis and makes it harder to combine the results from multiple models. I'm not currently aware of any packages that resolve this problem.

broom is an attempt to bridge the gap from untidy outputs of predictions and estimations to the tidy data we want to work with. It centers around three S3 methods, each of which take common objects produced by R statistical functions (lm, t.test, nls, etc) and convert them into a data frame. broom is particularly designed to work with Hadley's dplyr package (see the broom+dplyr vignette for more).

broom should be distinguished from packages like reshape2 and tidyr, which rearrange and reshape data frames into different forms. Those packages perform critical tasks in tidy data analysis but focus on converting data frames from one specific format into another. In contrast, broom is designed to take a format that is not a data frame (sometimes not anywhere close) and convert it to a tidy data frame.

Tidying model outputs is not an exact science, and it's based on a judgment of the kinds of values a data scientist typically wants out of a tidy analysis (for instance, estimates, test statistics, and p-values). You may lose some of the information in the original object that you wanted, or keep more information than you need. If you think the tidy output for a model should be changed, or if you're missing a tidying function for an S3 class that you'd like, I strongly encourage you to open an issue or a pull request.

Tidying functions

This package provides three S3 methods that do three distinct kinds of tidying.

  • tidy: constructs a data frame that summarizes the model's statistical findings. This includes coefficients and p-values for each term in a regression, per-cluster information in clustering applications, or per-test information for multtest functions.
  • augment: add columns to the original data that was modeled. This includes predictions, residuals, and cluster assignments.
  • glance: construct a concise one-row summary of the model. This typically contains values such as R2, adjusted R2, and residual standard error that are computed once for the entire model.

Note that some classes may have only one or two of these methods defined.

Consider as an illustrative example a linear fit on the built-in mtcars dataset.

lmfit <- lm(mpg ~ wt, mtcars)
lmfit
## 
## Call:
## lm(formula = mpg ~ wt, data = mtcars)
## 
## Coefficients:
## (Intercept)           wt  
##      37.285       -5.344
summary(lmfit)
## 
## Call:
## lm(formula = mpg ~ wt, data = mtcars)
## 
## Residuals:
##     Min      1Q  Median      3Q     Max 
## -4.5432 -2.3647 -0.1252  1.4096  6.8727 
## 
## Coefficients:
##             Estimate Std. Error t value Pr(>|t|)    
## (Intercept)  37.2851     1.8776  19.858  < 2e-16 ***
## wt           -5.3445     0.5591  -9.559 1.29e-10 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 3.046 on 30 degrees of freedom
## Multiple R-squared:  0.7528, Adjusted R-squared:  0.7446 
## F-statistic: 91.38 on 1 and 30 DF,  p-value: 1.294e-10

This summary output is useful enough if you just want to read it. However, converting it to a data frame that contains all the same information, so that you can combine it with other models or do further analysis, is not trivial. You have to do coef(summary(lmfit)) to get a matrix of coefficients, the terms are still stored in row names, and the column names are inconsistent with other packages (e.g. Pr(>|t|) compared to p.value).

Instead, you can use the tidy function, from the broom package, on the fit:

library(broom)
tidy(lmfit)
##          term  estimate std.error statistic      p.value
## 1 (Intercept) 37.285126  1.877627 19.857575 8.241799e-19
## 2          wt -5.344472  0.559101 -9.559044 1.293959e-10

This gives you a data.frame representation. Note that the row names have been moved into a column called term, and the column names are simple and consistent (and can be accessed using $).

Instead of viewing the coefficients, you might be interested in the fitted values and residuals for each of the original points in the regression. For this, use augment, which augments the original data with information from the model:

head(augment(lmfit))
##           .rownames  mpg    wt  .fitted   .se.fit     .resid       .hat
## 1         Mazda RX4 21.0 2.620 23.28261 0.6335798 -2.2826106 0.04326896
## 2     Mazda RX4 Wag 21.0 2.875 21.91977 0.5714319 -0.9197704 0.03519677
## 3        Datsun 710 22.8 2.320 24.88595 0.7359177 -2.0859521 0.05837573
## 4    Hornet 4 Drive 21.4 3.215 20.10265 0.5384424  1.2973499 0.03125017
## 5 Hornet Sportabout 18.7 3.440 18.90014 0.5526562 -0.2001440 0.03292182
## 6           Valiant 18.1 3.460 18.79325 0.5552829 -0.6932545 0.03323551
##     .sigma      .cooksd  .std.resid
## 1 3.067494 1.327407e-02 -0.76616765
## 2 3.093068 1.723963e-03 -0.30743051
## 3 3.072127 1.543937e-02 -0.70575249
## 4 3.088268 3.020558e-03  0.43275114
## 5 3.097722 7.599578e-05 -0.06681879
## 6 3.095184 9.210650e-04 -0.23148309

Note that each of the new columns begins with a . (to avoid overwriting any of the original columns).

Finally, several summary statistics are computed for the entire regression, such as R2 and the F-statistic. These can be accessed with the glance function:

glance(lmfit)
##   r.squared adj.r.squared    sigma statistic      p.value df    logLik
## 1 0.7528328     0.7445939 3.045882  91.37533 1.293959e-10  2 -80.01471
##        AIC      BIC deviance df.residual
## 1 166.0294 170.4266 278.3219          30

This distinction between the tidy, augment and glance functions is explored in a different context in the k-means vignette.

Other Examples

Generalized linear and non-linear models

These functions apply equally well to the output from glm:

glmfit <- glm(am ~ wt, mtcars, family="binomial")
tidy(glmfit)
##          term estimate std.error statistic     p.value
## 1 (Intercept) 12.04037  4.509706  2.669879 0.007587858
## 2          wt -4.02397  1.436416 -2.801396 0.005088198
head(augment(glmfit))
##           .rownames am    wt    .fitted   .se.fit     .resid       .hat
## 1         Mazda RX4  1 2.620  1.4975684 0.9175750  0.6353854 0.12577908
## 2     Mazda RX4 Wag  1 2.875  0.4714561 0.6761141  0.9848344 0.10816226
## 3        Datsun 710  1 2.320  2.7047594 1.2799233  0.3598458 0.09628500
## 4    Hornet 4 Drive  0 3.215 -0.8966937 0.6012064 -0.8271767 0.07438175
## 5 Hornet Sportabout  0 3.440 -1.8020869 0.7486164 -0.5525972 0.06812194
## 6           Valiant  0 3.460 -1.8825663 0.7669573 -0.5323012 0.06744101
##      .sigma     .cooksd .std.resid
## 1 0.8033182 0.018405616  0.6795582
## 2 0.7897742 0.042434911  1.0428463
## 3 0.8101256 0.003942789  0.3785304
## 4 0.7973421 0.017706938 -0.8597702
## 5 0.8061915 0.006469973 -0.5724389
## 6 0.8067014 0.005901376 -0.5512128
glance(glmfit)
##   null.deviance df.null    logLik      AIC      BIC deviance df.residual
## 1      43.22973      31 -9.588042 23.17608 26.10756 19.17608          30

Note that the statistics computed by glance are different for glm objects than for lm (e.g. deviance rather than R2):

These functions also work on other fits, such as nonlinear models (nls):

nlsfit <- nls(mpg ~ k / wt + b, mtcars, start=list(k=1, b=0))
tidy(nlsfit)
##   term  estimate std.error statistic      p.value
## 1    k 45.829488  4.249155 10.785554 7.639162e-12
## 2    b  4.386254  1.536418  2.854858 7.737378e-03
head(augment(nlsfit, mtcars))
##           .rownames  mpg cyl disp  hp drat    wt  qsec vs am gear carb
## 1         Mazda RX4 21.0   6  160 110 3.90 2.620 16.46  0  1    4    4
## 2     Mazda RX4 Wag 21.0   6  160 110 3.90 2.875 17.02  0  1    4    4
## 3        Datsun 710 22.8   4  108  93 3.85 2.320 18.61  1  1    4    1
## 4    Hornet 4 Drive 21.4   6  258 110 3.08 3.215 19.44  1  0    3    1
## 5 Hornet Sportabout 18.7   8  360 175 3.15 3.440 17.02  0  0    3    2
## 6           Valiant 18.1   6  225 105 2.76 3.460 20.22  1  0    3    1
##    .fitted     .resid
## 1 21.87843 -0.8784251
## 2 20.32695  0.6730544
## 3 24.14034 -1.3403437
## 4 18.64115  2.7588507
## 5 17.70878  0.9912203
## 6 17.63177  0.4682291
glance(nlsfit)
##     sigma isConv      finTol    logLik      AIC      BIC deviance
## 1 2.77405   TRUE 2.87694e-08 -77.02329 160.0466 164.4438 230.8606
##   df.residual
## 1          30

Hypothesis testing

The tidy function can also be applied to htest objects, such as those output by popular built-in functions like t.test, cor.test, and wilcox.test.

tt <- t.test(wt ~ am, mtcars)
tidy(tt)
##   estimate estimate1 estimate2 statistic     p.value parameter  conf.low
## 1 1.357895  3.768895     2.411  5.493905 6.27202e-06  29.23352 0.8525632
##   conf.high                  method alternative
## 1  1.863226 Welch Two Sample t-test   two.sided

Some cases might have fewer columns (for example, no confidence interval):

wt <- wilcox.test(wt ~ am, mtcars)
tidy(wt)
##   statistic      p.value                                            method
## 1     230.5 4.347026e-05 Wilcoxon rank sum test with continuity correction
##   alternative
## 1   two.sided

Since the tidy output is already only one row, glance returns the same output:

glance(tt)
##   estimate estimate1 estimate2 statistic     p.value parameter  conf.low
## 1 1.357895  3.768895     2.411  5.493905 6.27202e-06  29.23352 0.8525632
##   conf.high                  method alternative
## 1  1.863226 Welch Two Sample t-test   two.sided
glance(wt)
##   statistic      p.value                                            method
## 1     230.5 4.347026e-05 Wilcoxon rank sum test with continuity correction
##   alternative
## 1   two.sided

There is no augment function for htest objects, since there is no meaningful sense in which a hypothesis test produces output about each initial data point.

Conventions

In order to maintain consistency, we attempt to follow some conventions regarding the structure of returned data.

All functions

  • The output of the tidy, augment and glance functions is always a data frame.
  • The output never has rownames. This ensures that you can combine it with other tidy outputs without fear of losing information (since rownames in R cannot contain duplicates).
  • Some column names are kept consistent, so that they can be combined across different models and so that you know what to expect (in contrast to asking “is it pval or PValue?” every time). The examples below are not all the possible column names, nor will all tidy output contain all or even any of these columns.

tidy functions

  • Each row in a tidy output typically represents some well-defined concept, such as one term in a regression, one test, or one cluster/class. This meaning varies across models but is usually self-evident. The one thing each row cannot represent is a point in the initial data (for that, use the augment method).
  • Common column names include:
    • term: the term in a regression or model that is being estimated.
    • p.value: this spelling was chosen (over common alternatives such as pvalue, PValue, or pval) to be consistent with functions in R's built-in stats package
    • statistic a test statistic, usually the one used to compute the p-value. Combining these across many sub-groups is a reliable way to perform (e.g.) bootstrap hypothesis testing
    • estimate
    • conf.low the low end of a confidence interval on the estimate
    • conf.high the high end of a confidence interval on the estimate
    • df degrees of freedom

augment functions

  • augment(model, data) adds columns to the original data.
    • If the data argument is missing, augment attempts to reconstruct the data from the model (note that this may not always be possible, and usually won't contain columns not used in the model).
  • Each row in an augment output matches the corresponding row in the original data.
  • If the original data contained rownames, augment turns them into a column called .rownames.
  • Newly added column names begin with . to avoid overwriting columns in the original data.
  • Common column names include:
    • .fitted: the predicted values, on the same scale as the data.
    • .resid: residuals: the actual y values minus the fitted values
    • .cluster: cluster assignments

glance functions

  • glance always returns a one-row data frame.
    • The only exception is that glance(NULL) returns an empty data frame.
  • We avoid including arguments that were given to the modeling function. For example, a glm glance output does not need to contain a field for family, since that is decided by the user calling glm rather than the modeling function itself.
  • Common column names include:
    • r.squared the fraction of variance explained by the model
    • adj.r.squared R2 adjusted based on the degrees of freedom
    • sigma the square root of the estimated variance of the residuals
broom/inst/doc/kmeans.R0000644000177700017770000000365113204542661016055 0ustar herbrandtherbrandt## ---- echo=FALSE--------------------------------------------------------- library(knitr) opts_chunk$set(message=FALSE, warning=FALSE) ## ------------------------------------------------------------------------ library(dplyr) set.seed(2014) centers <- data.frame(cluster=factor(1:3), size=c(100, 150, 50), x1=c(5, 0, -3), x2=c(-1, 1, -2)) points <- centers %>% group_by(cluster) %>% do(data.frame(x1=rnorm(.$size[1], .$x1[1]), x2=rnorm(.$size[1], .$x2[1]))) library(ggplot2) ggplot(points, aes(x1, x2, color=cluster)) + geom_point() ## ------------------------------------------------------------------------ points.matrix <- cbind(x1 = points$x1, x2 = points$x2) kclust <- kmeans(points.matrix, 3) kclust summary(kclust) ## ------------------------------------------------------------------------ library(broom) head(augment(kclust, points.matrix)) ## ------------------------------------------------------------------------ tidy(kclust) ## ------------------------------------------------------------------------ glance(kclust) ## ------------------------------------------------------------------------ kclusts <- data.frame(k=1:9) %>% group_by(k) %>% do(kclust=kmeans(points.matrix, .$k)) ## ------------------------------------------------------------------------ clusters <- kclusts %>% group_by(k) %>% do(tidy(.$kclust[[1]])) assignments <- kclusts %>% group_by(k) %>% do(augment(.$kclust[[1]], points.matrix)) clusterings <- kclusts %>% group_by(k) %>% do(glance(.$kclust[[1]])) ## ------------------------------------------------------------------------ p1 <- ggplot(assignments, aes(x1, x2)) + geom_point(aes(color=.cluster)) + facet_wrap(~ k) p1 ## ------------------------------------------------------------------------ p2 <- p1 + geom_point(data=clusters, size=10, shape="x") p2 ## ------------------------------------------------------------------------ ggplot(clusterings, aes(k, 
tot.withinss)) + geom_line() broom/inst/doc/broom_and_dplyr.html0000644000177700017770000024006113204542661020512 0ustar herbrandtherbrandt broom and dplyr

broom and dplyr

While broom is useful for summarizing the result of a single analysis in a consistent format, it is really designed for high-throughput applications, where you must combine results from multiple analyses. These could be subgroups of data, analyses using different models, bootstrap replicates, permutations, and so on. In particular, it plays well with the group_by and do functions in dplyr.

Let's try this on a simple dataset, the built-in Orange data.frame.

library(broom)
library(dplyr)
data(Orange)

dim(Orange)
## [1] 35  3
head(Orange)
## Grouped Data: circumference ~ age | Tree
##   Tree  age circumference
## 1    1  118            30
## 2    1  484            58
## 3    1  664            87
## 4    1 1004           115
## 5    1 1231           120
## 6    1 1372           142

This contains 35 observations of three variables: Tree, age, and circumference. Tree is a factor with five levels describing five trees. As might be expected, age and circumference are correlated:

cor(Orange$age, Orange$circumference)
## [1] 0.9135189
library(ggplot2)
ggplot(Orange, aes(age, circumference, color = Tree)) + geom_line()

plot of chunk unnamed-chunk-1

Suppose you want to test for correlations individually within each tree. You can do this with dplyr's group_by:

Orange %>% group_by(Tree) %>% summarize(correlation = cor(age, circumference))
## # A tibble: 5 x 2
##   Tree  correlation
##   <ord>       <dbl>
## 1 3           0.988
## 2 1           0.985
## 3 5           0.988
## 4 2           0.987
## 5 4           0.984

(Note that the correlations are much higher than the aggregated one, and furthermore we can now see it is similar across trees).

Suppose that instead of simply estimating a correlation, we want to perform a hypothesis test with cor.test:

cor.test(Orange$age, Orange$circumference)
## 
##  Pearson's product-moment correlation
## 
## data:  Orange$age and Orange$circumference
## t = 12.9, df = 33, p-value = 1.931e-14
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
##  0.8342364 0.9557955
## sample estimates:
##       cor 
## 0.9135189

This contains multiple values we could want in our output. Some are vectors of length 1, such as the p-value and the estimate, and some are longer, such as the confidence interval. broom's tidy S3 method, combined with dplyr's do, makes it easy to summarize the information about each test:

Orange %>% group_by(Tree) %>% do(tidy(cor.test(.$age, .$circumference)))
## # A tibble: 5 x 9
## # Groups: Tree [5]
##   Tree  estimate statistic   p.value parameter conf.low conf.… meth… alte…
##   <ord>    <dbl>     <dbl>     <dbl>     <int>    <dbl>  <dbl> <fct> <fct>
## 1 3        0.988      14.4 0.0000290         5    0.919  0.998 Pear… two.…
## 2 1        0.985      13.0 0.0000485         5    0.901  0.998 Pear… two.…
## 3 5        0.988      14.1 0.0000318         5    0.916  0.998 Pear… two.…
## 4 2        0.987      13.9 0.0000343         5    0.914  0.998 Pear… two.…
## 5 4        0.984      12.5 0.0000573         5    0.895  0.998 Pear… two.…

This becomes even more useful when applied to regressions, which give more than one row of output within each model:

Orange %>% group_by(Tree) %>% do(tidy(lm(age ~ circumference, data=.)))
## # A tibble: 10 x 6
## # Groups: Tree [5]
##    Tree  term          estimate std.error statistic   p.value
##    <ord> <chr>            <dbl>     <dbl>     <dbl>     <dbl>
##  1 3     (Intercept)    -210       85.3     - 2.46  0.0574   
##  2 3     circumference    12.0      0.835    14.4   0.0000290
##  3 1     (Intercept)    -265       98.6     - 2.68  0.0436   
##  4 1     circumference    11.9      0.919    13.0   0.0000485
##  5 5     (Intercept)    - 54.5     76.9     - 0.709 0.510    
##  6 5     circumference     8.79     0.621    14.1   0.0000318
##  7 2     (Intercept)    -132       83.1     - 1.59  0.172    
##  8 2     circumference     7.80     0.560    13.9   0.0000343
##  9 4     (Intercept)    - 76.5     88.3     - 0.867 0.426    
## 10 4     circumference     7.17     0.572    12.5   0.0000573

You can just as easily perform multiple regressions within each group, as shown here on the mtcars dataset. We group the data into automatic and manual cars (the am column), then perform the regression within each.

data(mtcars)
head(mtcars)
##                    mpg cyl disp  hp drat    wt  qsec vs am gear carb
## Mazda RX4         21.0   6  160 110 3.90 2.620 16.46  0  1    4    4
## Mazda RX4 Wag     21.0   6  160 110 3.90 2.875 17.02  0  1    4    4
## Datsun 710        22.8   4  108  93 3.85 2.320 18.61  1  1    4    1
## Hornet 4 Drive    21.4   6  258 110 3.08 3.215 19.44  1  0    3    1
## Hornet Sportabout 18.7   8  360 175 3.15 3.440 17.02  0  0    3    2
## Valiant           18.1   6  225 105 2.76 3.460 20.22  1  0    3    1
mtcars %>% group_by(am) %>% do(tidy(lm(wt ~ mpg + qsec + gear, .)))
## # A tibble: 8 x 6
## # Groups: am [2]
##      am term        estimate std.error statistic  p.value
##   <dbl> <chr>          <dbl>     <dbl>     <dbl>    <dbl>
## 1  0    (Intercept)   4.92      1.40      3.52   0.00309 
## 2  0    mpg          -0.192     0.0443   -4.33   0.000591
## 3  0    qsec          0.0919    0.0983    0.935  0.365   
## 4  0    gear          0.147     0.368     0.398  0.696   
## 5  1.00 (Intercept)   4.28      3.46      1.24   0.247   
## 6  1.00 mpg          -0.101     0.0294   -3.43   0.00750 
## 7  1.00 qsec          0.0398    0.151     0.264  0.798   
## 8  1.00 gear         -0.0229    0.349    -0.0656 0.949

What if you want not just the tidy output, but the augment and glance outputs as well, while still performing each regression only once? First, save the modeling result into a column fit.

regressions <- mtcars %>% group_by(cyl) %>%
    do(fit = lm(wt ~ mpg + qsec + gear, .))
regressions
## Source: local data frame [3 x 2]
## Groups: <by row>
## 
## # A tibble: 3 x 2
##     cyl fit     
## * <dbl> <list>  
## 1  4.00 <S3: lm>
## 2  6.00 <S3: lm>
## 3  8.00 <S3: lm>

This creates a rowwise data frame. Tidying methods are designed to work seamlessly with rowwise data frames, grouping them and performing tidying on each row:

regressions %>% tidy(fit)
## # A tibble: 12 x 6
## # Groups: cyl [3]
##      cyl term        estimate std.error statistic p.value
##    <dbl> <chr>          <dbl>     <dbl>     <dbl>   <dbl>
##  1  4.00 (Intercept) -0.773      2.23    -0.347   0.739  
##  2  4.00 mpg         -0.0818     0.0238  -3.44    0.0109 
##  3  4.00 qsec         0.217      0.0759   2.85    0.0245 
##  4  4.00 gear         0.267      0.245    1.09    0.310  
##  5  6.00 (Intercept) -7.79       3.35    -2.32    0.103  
##  6  6.00 mpg          0.0433     0.0520   0.833   0.466  
##  7  6.00 qsec         0.422      0.0914   4.62    0.0191 
##  8  6.00 gear         0.638      0.205    3.11    0.0529 
##  9  8.00 (Intercept)  0.00597    4.27     0.00140 0.999  
## 10  8.00 mpg         -0.177      0.0557  -3.18    0.00989
## 11  8.00 qsec         0.369      0.193    1.91    0.0848 
## 12  8.00 gear         0.143      0.317    0.451   0.662
regressions %>% augment(fit)
## # A tibble: 32 x 12
## # Groups: cyl [3]
##      cyl    wt   mpg  qsec  gear .fitt… .se.…   .resid  .hat .sig… .cooksd
##    <dbl> <dbl> <dbl> <dbl> <dbl>  <dbl> <dbl>    <dbl> <dbl> <dbl>   <dbl>
##  1  4.00  2.32  22.8  18.6  4.00   2.46 0.142 -1.43e⁻¹ 0.197 0.339 1.54e⁻²
##  2  4.00  3.19  24.4  20.0  4.00   2.63 0.120  5.56e⁻¹ 0.141 0.243 1.45e⁻¹
##  3  4.00  3.15  22.8  22.9  4.00   3.39 0.299 -2.43e⁻¹ 0.876 0.199 8.24e⁺⁰
##  4  4.00  2.20  32.4  19.5  4.00   1.86 0.174  3.36e⁻¹ 0.296 0.304 1.66e⁻¹
##  5  4.00  1.62  30.4  18.5  4.00   1.82 0.148 -2.07e⁻¹ 0.213 0.332 3.62e⁻²
##  6  4.00  1.84  33.9  19.9  4.00   1.83 0.210  5.05e⁻⁴ 0.432 0.345 8.36e⁻⁷
##  7  4.00  2.46  21.5  20.0  3.00   2.61 0.249 -1.41e⁻¹ 0.610 0.332 1.94e⁻¹
##  8  4.00  1.94  27.3  18.9  4.00   2.16 0.105 -2.23e⁻¹ 0.107 0.331 1.64e⁻²
##  9  4.00  2.14  26.0  16.7  5.00   2.06 0.218  8.49e⁻² 0.464 0.342 2.86e⁻²
## 10  4.00  1.51  30.4  16.9  5.00   1.74 0.201 -2.25e⁻¹ 0.396 0.324 1.35e⁻¹
## # ... with 22 more rows, and 1 more variable: .std.resid <dbl>
regressions %>% glance(fit)
## # A tibble: 3 x 12
## # Groups: cyl [3]
##     cyl r.squ… adj.…  sigma stat… p.value    df  logLik   AIC   BIC devia…
##   <dbl>  <dbl> <dbl>  <dbl> <dbl>   <dbl> <int>   <dbl> <dbl> <dbl>  <dbl>
## 1  4.00  0.780 0.686 0.319   8.27 0.0106      4 - 0.567  11.1  13.1 0.714 
## 2  6.00  0.970 0.940 0.0873 32.3  0.00874     4  10.1   -10.2 -10.5 0.0229
## 3  8.00  0.652 0.548 0.511   6.25 0.0116      4 - 8.10   26.2  29.4 2.61  
## # ... with 1 more variable: df.residual <int>

By combining the estimates and p-values across all groups into the same tidy data frame (instead of, for example, a list of output model objects), a new class of analyses and visualizations becomes straightforward. This includes

  • Sorting by p-value or estimate to find the most significant terms across all tests
  • P-value histograms
  • Volcano plots comparing p-values to effect size estimates

In each of these cases, we can easily filter, facet, or distinguish based on the term column. In short, this makes the tools of tidy data analysis available for the results of data analysis and models, not just the inputs.

broom/inst/doc/broom.Rmd0000644000177700017770000002304613204542661016236 0ustar herbrandtherbrandt ```{r setup, echo=FALSE} library(knitr) opts_chunk$set(warning=FALSE, message=FALSE) ``` broom: let's tidy up a bit ===================== The broom package takes the messy output of built-in functions in R, such as `lm`, `nls`, or `t.test`, and turns them into tidy data frames. The concept of "tidy data", [as introduced by Hadley Wickham](http://www.jstatsoft.org/v59/i10), offers a powerful framework for data manipulation and analysis. That paper makes a convincing statement of the problem this package tries to solve (emphasis mine): > **While model inputs usually require tidy inputs, such attention to detail doesn't carry over to model outputs. Outputs such as predictions and estimated coefficients aren't always tidy. This makes it more difficult to combine results from multiple models.** For example, in R, the default representation of model coefficients is not tidy because it does not have an explicit variable that records the variable name for each estimate, they are instead recorded as row names. In R, row names must be unique, so combining coefficients from many models (e.g., from bootstrap resamples, or subgroups) requires workarounds to avoid losing important information. **This knocks you out of the flow of analysis and makes it harder to combine the results from multiple models. I'm not currently aware of any packages that resolve this problem.** broom is an attempt to bridge the gap from untidy outputs of predictions and estimations to the tidy data we want to work with. It centers around three S3 methods, each of which take common objects produced by R statistical functions (`lm`, `t.test`, `nls`, etc) and convert them into a data frame. broom is particularly designed to work with Hadley's [dplyr](https://github.com/hadley/dplyr) package (see the [broom+dplyr](broom_and_dplyr.html) vignette for more). 
broom should be distinguished from packages like [reshape2](https://CRAN.R-project.org/package=reshape2) and [tidyr](https://github.com/hadley/tidyr), which rearrange and reshape data frames into different forms. Those packages perform critical tasks in tidy data analysis but focus on manipulating data frames in one specific format into another. In contrast, broom is designed to take format that is *not* in a data frame (sometimes not anywhere close) and convert it to a tidy data frame. Tidying model outputs is not an exact science, and it's based on a judgment of the kinds of values a data scientist typically wants out of a tidy analysis (for instance, estimates, test statistics, and p-values). You may lose some of the information in the original object that you wanted, or keep more information than you need. If you think the tidy output for a model should be changed, or if you're missing a tidying function for an S3 class that you'd like, I strongly encourage you to [open an issue](http://github.com/dgrtwo/broom/issues) or a pull request. Tidying functions ----------------- This package provides three S3 methods that do three distinct kinds of tidying. * `tidy`: constructs a data frame that summarizes the model's statistical findings. This includes coefficients and p-values for each term in a regression, per-cluster information in clustering applications, or per-test information for `multtest` functions. * `augment`: add columns to the original data that was modeled. This includes predictions, residuals, and cluster assignments. * `glance`: construct a concise *one-row* summary of the model. This typically contains values such as R^2, adjusted R^2, and residual standard error that are computed once for the entire model. Note that some classes may have only one or two of these methods defined. Consider as an illustrative example a linear fit on the built-in `mtcars` dataset. 
```{r lmfit} lmfit <- lm(mpg ~ wt, mtcars) lmfit summary(lmfit) ``` This summary output is useful enough if you just want to read it. However, converting it to a data frame that contains all the same information, so that you can combine it with other models or do further analysis, is not trivial. You have to do `coef(summary(lmfit))` to get a matrix of coefficients, the terms are still stored in row names, and the column names are inconsistent with other packages (e.g. `Pr(>|t|)` compared to `p.value`). Instead, you can use the `tidy` function, from the broom package, on the fit: ```{r} library(broom) tidy(lmfit) ``` This gives you a data.frame representation. Note that the row names have been moved into a column called `term`, and the column names are simple and consistent (and can be accessed using `$`). Instead of viewing the coefficients, you might be interested in the fitted values and residuals for each of the original points in the regression. For this, use `augment`, which augments the original data with information from the model: ```{r} head(augment(lmfit)) ``` Note that each of the new columns begins with a `.` (to avoid overwriting any of the original columns). Finally, several summary statistics are computed for the entire regression, such as R^2 and the F-statistic. These can be accessed with the `glance` function: ```{r} glance(lmfit) ``` This distinction between the `tidy`, `augment` and `glance` functions is explored in a different context in the [k-means vignette](kmeans.html). Other Examples -------------- ### Generalized linear and non-linear models These functions apply equally well to the output from `glm`: ```{r glmfit} glmfit <- glm(am ~ wt, mtcars, family="binomial") tidy(glmfit) head(augment(glmfit)) glance(glmfit) ``` Note that the statistics computed by `glance` are different for `glm` objects than for `lm` (e.g. 
deviance rather than R^2): These functions also work on other fits, such as nonlinear models (`nls`): ```{r} nlsfit <- nls(mpg ~ k / wt + b, mtcars, start=list(k=1, b=0)) tidy(nlsfit) head(augment(nlsfit, mtcars)) glance(nlsfit) ``` ### Hypothesis testing The `tidy` function can also be applied to `htest` objects, such as those output by popular built-in functions like `t.test`, `cor.test`, and `wilcox.test`. ```{r ttest} tt <- t.test(wt ~ am, mtcars) tidy(tt) ``` Some cases might have fewer columns (for example, no confidence interval): ```{r} wt <- wilcox.test(wt ~ am, mtcars) tidy(wt) ``` Since the `tidy` output is already only one row, `glance` returns the same output: ```{r} glance(tt) glance(wt) ``` There is no `augment` function for `htest` objects, since there is no meaningful sense in which a hypothesis test produces output about each initial data point. Conventions ------------ In order to maintain consistency, we attempt to follow some conventions regarding the structure of returned data. ### All functions * The output of the `tidy`, `augment` and `glance` functions is *always* a data frame. * The output never has rownames. This ensures that you can combine it with other tidy outputs without fear of losing information (since rownames in R cannot contain duplicates). * Some column names are kept consistent, so that they can be combined across different models and so that you know what to expect (in contrast to asking "is it `pval` or `PValue`?" every time). The examples below are not all the possible column names, nor will all tidy output contain all or even any of these columns. ### tidy functions * Each row in a `tidy` output typically represents some well-defined concept, such as one term in a regression, one test, or one cluster/class. This meaning varies across models but is usually self-evident. The one thing each row cannot represent is a point in the initial data (for that, use the `augment` method). 
* Common column names include: * `term`"" the term in a regression or model that is being estimated. * `p.value`: this spelling was chosen (over common alternatives such as `pvalue`, `PValue`, or `pval`) to be consistent with functions in R's built-in `stats` package * `statistic` a test statistic, usually the one used to compute the p-value. Combining these across many sub-groups is a reliable way to perform (e.g.) bootstrap hypothesis testing * `estimate` * `conf.low` the low end of a confidence interval on the `estimate` * `conf.high` the high end of a confidence interval on the `estimate` * `df` degrees of freedom ### augment functions * `augment(model, data)` adds columns to the original data. * If the `data` argument is missing, `augment` attempts to reconstruct the data from the model (note that this may not always be possible, and usually won't contain columns not used in the model). * Each row in an `augment` output matches the corresponding row in the original data. * If the original data contained rownames, `augment` turns them into a column called `.rownames`. * Newly added column names begin with `.` to avoid overwriting columns in the original data. * Common column names include: * `.fitted`: the predicted values, on the same scale as the data. * `.resid`: residuals: the actual y values minus the fitted values * `.cluster`: cluster assignments ### glance functions * `glance` always returns a one-row data frame. * The only exception is that `glance(NULL)` returns an empty data frame. * We avoid including arguments that were *given* to the modeling function. For example, a `glm` glance output does not need to contain a field for `family`, since that is decided by the user calling `glm` rather than the modeling function itself. 
* Common column names include: * `r.squared` the fraction of variance explained by the model * `adj.r.squared` R^2 adjusted based on the degrees of freedom * `sigma` the square root of the estimated variance of the residuals broom/inst/doc/kmeans.Rmd0000644000177700017770000001124613204542661016375 0ustar herbrandtherbrandt Tidying k-means clustering =================================== ```{r, echo=FALSE} library(knitr) opts_chunk$set(message=FALSE, warning=FALSE) ``` K-means clustering serves as a very useful example of tidy data, and especially the distinction between the three tidying functions: `tidy`, `augment`, and `glance`. Let's start by generating some random 2d data with three clusters, within which points are distributed according to a multivariate gaussian: ```{r} library(dplyr) set.seed(2014) centers <- data.frame(cluster=factor(1:3), size=c(100, 150, 50), x1=c(5, 0, -3), x2=c(-1, 1, -2)) points <- centers %>% group_by(cluster) %>% do(data.frame(x1=rnorm(.$size[1], .$x1[1]), x2=rnorm(.$size[1], .$x2[1]))) library(ggplot2) ggplot(points, aes(x1, x2, color=cluster)) + geom_point() ``` This is an ideal case for k-means clustering. Let's examine what the built-in `kmeans` function returns. ```{r} points.matrix <- cbind(x1 = points$x1, x2 = points$x2) kclust <- kmeans(points.matrix, 3) kclust summary(kclust) ``` The output is a list of vectors, where each component has a different length. There's one of length `r nrow(points)`: the same as our original dataset. There are a number of elements of length 3: `withinss`, `tot.withinss`, and `betweenss`- and `centers` is a matrix with 3 rows. And then there are the elements of length 1: `totss`, `tot.withinss`, `betweenss`, and `iter`. These differing lengths have a deeper meaning when we want to tidy our dataset: they signify that each type of component communicates a *different kind* of information. 
* `cluster` (`r nrow(points.matrix)` values) contains information about each *point* * `centers`, `withinss` and `size` (3 values) contain information about each *cluster* * `totss`, `tot.withinss`, `betweenss`, and `iter` (1 value) contain information about the *full clustering* Which of these do we want to extract? There is no right answer: each of them may be interesting to an analyst. Because they communicate entirely different information (not to mention there's no straightforward way to combine them), they are extracted by separate functions. `augment` adds the point classifications to the original dataset: ```{r} library(broom) head(augment(kclust, points.matrix)) ``` The `tidy` function summarizes on a per-cluster level: ```{r} tidy(kclust) ``` And as it always does, the `glance` function extracts a single-row summary: ```{r} glance(kclust) ``` broom and dplyr for exploratory clustering --------------------------------------- While these summaries are useful, they would not have been too difficult to extract out from the dataset yourself. The real power comes from combining their analyses with dplyr. Let's say we want to explore the effect of different choices of `k`, from 1 to 9, on this clustering. First cluster the data 9 times, each using a different value of k: ```{r} kclusts <- data.frame(k=1:9) %>% group_by(k) %>% do(kclust=kmeans(points.matrix, .$k)) ``` Then tidy the clusterings three ways: using `tidy`, using `augment`, and using `glance`. Each of these goes into a separate dataset as they represent different types of data. 
```{r} clusters <- kclusts %>% group_by(k) %>% do(tidy(.$kclust[[1]])) assignments <- kclusts %>% group_by(k) %>% do(augment(.$kclust[[1]], points.matrix)) clusterings <- kclusts %>% group_by(k) %>% do(glance(.$kclust[[1]])) ``` Now we can plot the original points, with each point colored according to the original cluster: ```{r} p1 <- ggplot(assignments, aes(x1, x2)) + geom_point(aes(color=.cluster)) + facet_wrap(~ k) p1 ``` Already we get a good sense of the proper number of clusters (3), and how the k-means algorithm functions when k is too high or too low. We can then add the centers of the cluster using the data from `tidy`: ```{r} p2 <- p1 + geom_point(data=clusters, size=10, shape="x") p2 ``` The data from `glance` fits a different but equally important purpose: it lets you view trends of some summary statistics across values of k. Of particular interest is the total within sum of squares, saved in the `tot.withinss` column. ```{r} ggplot(clusterings, aes(k, tot.withinss)) + geom_line() ``` This represents the variance within the clusters. It decreases as k increases, but one can notice a bend (or "elbow") right at k=3. This bend indicates that additional clusters beyond the third have little value. (See [here](http://web.stanford.edu/~hastie/Papers/gap.pdf) for a more mathematically rigorous interpretation and implementation of this method). Thus, all three methods of tidying data provided by broom are useful for summarizing clustering output. broom/inst/doc/broom_and_dplyr.Rmd0000644000177700017770000000723413204542661020273 0ustar herbrandtherbrandt ```{r opts_chunk, echo=FALSE} library(knitr) opts_chunk$set(message=FALSE, warning=FALSE) ``` broom and dplyr =============== While broom is useful for summarizing the result of a single analysis in a consistent format, it is really designed for high-throughput applications, where you must combine results from multiple analyses. 
These could be subgroups of data, analyses using different models, bootstrap replicates, permutations, and so on. In particular, it plays well with the `group_by` and `do` functions in `dplyr`. Let's try this on a simple dataset, the built-in `Orange` data.frame. ```{r setup} library(broom) library(dplyr) data(Orange) dim(Orange) head(Orange) ``` This contains 35 observations of three variables: `Tree`, `age`, and `circumference`. `Tree` is a factor with five levels describing five trees. As might be expected, age and circumference are correlated: ```{r} cor(Orange$age, Orange$circumference) library(ggplot2) ggplot(Orange, aes(age, circumference, color = Tree)) + geom_line() ``` Suppose you want to test for correlations individually *within* each tree. You can do this with dplyr's `group_by`: ```{r} Orange %>% group_by(Tree) %>% summarize(correlation = cor(age, circumference)) ``` (Note that the correlations are much higher than the aggregated one, and furthermore we can now see it is similar across trees). Suppose that instead of simply estimating a correlation, we want to perform a hypothesis test with `cor.test`: ```{r} cor.test(Orange$age, Orange$circumference) ``` This contains multiple values we could want in our output. Some are vectors of length 1, such as the p-value and the estimate, and some are longer, such as the confidence interval. broom's `tidy` S3 method, combined with dplyr's `do`, makes it easy to summarize the information about each test: ```{r} Orange %>% group_by(Tree) %>% do(tidy(cor.test(.$age, .$circumference))) ``` This becomes even more useful when applied to regressions, which give more than one row of output within each model: ```{r} Orange %>% group_by(Tree) %>% do(tidy(lm(age ~ circumference, data=.))) ``` You can just as easily perform multiple regressions within each group, as shown here on the `mtcars` dataset. We group the data into automatic and manual cars (the `am` column), then perform the regression within each. 
```{r} data(mtcars) head(mtcars) mtcars %>% group_by(am) %>% do(tidy(lm(wt ~ mpg + qsec + gear, .))) ``` What if you want not just the `tidy` output, but the `augment` and `glance` outputs as well, while still performing each regression only once? First, save the modeling result into a column `fit`. ```{r} regressions <- mtcars %>% group_by(cyl) %>% do(fit = lm(wt ~ mpg + qsec + gear, .)) regressions ``` This creates a rowwise data frame. Tidying methods are designed to work seamlessly with rowwise data frames, grouping them and performing tidying on each row: ```{r} regressions %>% tidy(fit) regressions %>% augment(fit) regressions %>% glance(fit) ``` By combining the estimates and p-values across all groups into the same tidy data frame (instead of, for example, a list of output model objects), a new class of analyses and visualizations becomes straightforward. This includes * Sorting by p-value or estimate to find the most significant terms across all tests * P-value histograms * Volcano plots comparing p-values to effect size estimates In each of these cases, we can easily filter, facet, or distinguish based on the `term` column. 
In short, this makes the tools of tidy data analysis available for the *results* of data analysis and models, not just the inputs.broom/inst/doc/bootstrapping.R0000644000177700017770000000346513204542661017475 0ustar herbrandtherbrandt## ----setup, echo=FALSE--------------------------------------------------- library(knitr) opts_chunk$set(message=FALSE) ## ------------------------------------------------------------------------ library(ggplot2) data(mtcars) ggplot(mtcars, aes(mpg, wt)) + geom_point() ## ------------------------------------------------------------------------ nlsfit <- nls(mpg ~ k / wt + b, mtcars, start=list(k=1, b=0)) summary(nlsfit) ggplot(mtcars, aes(wt, mpg)) + geom_point() + geom_line(aes(y=predict(nlsfit))) ## ------------------------------------------------------------------------ library(dplyr) library(broom) set.seed(2014) bootnls <- mtcars %>% bootstrap(100) %>% do(tidy(nls(mpg ~ k / wt + b, ., start=list(k=1, b=0)))) ## ------------------------------------------------------------------------ bootnls ## ------------------------------------------------------------------------ alpha = .05 bootnls %>% group_by(term) %>% summarize(low=quantile(estimate, alpha / 2), high=quantile(estimate, 1 - alpha / 2)) ## ------------------------------------------------------------------------ library(ggplot2) ggplot(bootnls, aes(estimate)) + geom_histogram(binwidth=2) + facet_wrap(~ term, scales="free") ## ------------------------------------------------------------------------ bootnls_aug <- mtcars %>% bootstrap(100) %>% do(augment(nls(mpg ~ k / wt + b, ., start=list(k=1, b=0)), .)) ggplot(bootnls_aug, aes(wt, mpg)) + geom_point() + geom_line(aes(y=.fitted, group=replicate), alpha=.2) ## ------------------------------------------------------------------------ smoothspline_aug <- mtcars %>% bootstrap(100) %>% do(augment(smooth.spline(.$wt, .$mpg, df=4), .)) ggplot(smoothspline_aug, aes(wt, mpg)) + geom_point() + geom_line(aes(y=.fitted, group=replicate), 
alpha=.2) broom/inst/doc/broom.R0000644000177700017770000000236413204542661015715 0ustar herbrandtherbrandt## ----setup, echo=FALSE--------------------------------------------------- library(knitr) opts_chunk$set(warning=FALSE, message=FALSE) ## ----lmfit--------------------------------------------------------------- lmfit <- lm(mpg ~ wt, mtcars) lmfit summary(lmfit) ## ------------------------------------------------------------------------ library(broom) tidy(lmfit) ## ------------------------------------------------------------------------ head(augment(lmfit)) ## ------------------------------------------------------------------------ glance(lmfit) ## ----glmfit-------------------------------------------------------------- glmfit <- glm(am ~ wt, mtcars, family="binomial") tidy(glmfit) head(augment(glmfit)) glance(glmfit) ## ------------------------------------------------------------------------ nlsfit <- nls(mpg ~ k / wt + b, mtcars, start=list(k=1, b=0)) tidy(nlsfit) head(augment(nlsfit, mtcars)) glance(nlsfit) ## ----ttest--------------------------------------------------------------- tt <- t.test(wt ~ am, mtcars) tidy(tt) ## ------------------------------------------------------------------------ wt <- wilcox.test(wt ~ am, mtcars) tidy(wt) ## ------------------------------------------------------------------------ glance(tt) glance(wt) broom/inst/doc/broom_and_dplyr.R0000644000177700017770000000303713204542661017747 0ustar herbrandtherbrandt## ----opts_chunk, echo=FALSE---------------------------------------------- library(knitr) opts_chunk$set(message=FALSE, warning=FALSE) ## ----setup--------------------------------------------------------------- library(broom) library(dplyr) data(Orange) dim(Orange) head(Orange) ## ------------------------------------------------------------------------ cor(Orange$age, Orange$circumference) library(ggplot2) ggplot(Orange, aes(age, circumference, color = Tree)) + geom_line() ## 
------------------------------------------------------------------------ Orange %>% group_by(Tree) %>% summarize(correlation = cor(age, circumference)) ## ------------------------------------------------------------------------ cor.test(Orange$age, Orange$circumference) ## ------------------------------------------------------------------------ Orange %>% group_by(Tree) %>% do(tidy(cor.test(.$age, .$circumference))) ## ------------------------------------------------------------------------ Orange %>% group_by(Tree) %>% do(tidy(lm(age ~ circumference, data=.))) ## ------------------------------------------------------------------------ data(mtcars) head(mtcars) mtcars %>% group_by(am) %>% do(tidy(lm(wt ~ mpg + qsec + gear, .))) ## ------------------------------------------------------------------------ regressions <- mtcars %>% group_by(cyl) %>% do(fit = lm(wt ~ mpg + qsec + gear, .)) regressions ## ------------------------------------------------------------------------ regressions %>% tidy(fit) regressions %>% augment(fit) regressions %>% glance(fit) broom/inst/doc/kmeans.html0000644000177700017770000200433113204542661016616 0ustar herbrandtherbrandt Tidying k-means clustering

Tidying k-means clustering

K-means clustering serves as a very useful example of tidy data, and especially the distinction between the three tidying functions: tidy, augment, and glance.

Let's start by generating some random 2d data with three clusters, within which points are distributed according to a multivariate gaussian:

library(dplyr)

set.seed(2014)
centers <- data.frame(cluster=factor(1:3), size=c(100, 150, 50), x1=c(5, 0, -3), x2=c(-1, 1, -2))
points <- centers %>% group_by(cluster) %>%
    do(data.frame(x1=rnorm(.$size[1], .$x1[1]),
                  x2=rnorm(.$size[1], .$x2[1])))

library(ggplot2)
ggplot(points, aes(x1, x2, color=cluster)) + geom_point()

plot of chunk unnamed-chunk-2

This is an ideal case for k-means clustering. Let's examine what the built-in kmeans function returns.

points.matrix <- cbind(x1 = points$x1, x2 = points$x2)
kclust <- kmeans(points.matrix, 3)
kclust
## K-means clustering with 3 clusters of sizes 99, 151, 50
## 
## Cluster means:
##           x1         x2
## 1  5.1791917 -0.9865170
## 2  0.1583797  0.9797098
## 3 -3.0653196 -2.0887225
## 
## Clustering vector:
##   [1] 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
##  [36] 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
##  [71] 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 2 1 1 1 1 1 1 1 1 1 1 2 2 2 2 2
## [106] 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2
## [141] 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2
## [176] 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2
## [211] 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2
## [246] 2 2 2 2 2 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3
## [281] 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3
## 
## Within cluster sum of squares by cluster:
## [1] 204.96483 257.20360  78.64255
##  (between_SS / total_SS =  85.1 %)
## 
## Available components:
## 
## [1] "cluster"      "centers"      "totss"        "withinss"    
## [5] "tot.withinss" "betweenss"    "size"         "iter"        
## [9] "ifault"
summary(kclust)
##              Length Class  Mode   
## cluster      300    -none- numeric
## centers        6    -none- numeric
## totss          1    -none- numeric
## withinss       3    -none- numeric
## tot.withinss   1    -none- numeric
## betweenss      1    -none- numeric
## size           3    -none- numeric
## iter           1    -none- numeric
## ifault         1    -none- numeric

The output is a list of vectors, where each component has a different length. There's one of length 300: the same as our original dataset. There are a number of elements of length 3: withinss, tot.withinss, and betweenss- and centers is a matrix with 3 rows. And then there are the elements of length 1: totss, tot.withinss, betweenss, and iter.

These differing lengths have a deeper meaning when we want to tidy our dataset: they signify that each type of component communicates a different kind of information.

  • cluster (300 values) contains information about each point
  • centers, withinss and size (3 values) contain information about each cluster
  • totss, tot.withinss, betweenss, and iter (1 value) contain information about the full clustering

Which of these do we want to extract? There is no right answer: each of them may be interesting to an analyst. Because they communicate entirely different information (not to mention there's no straightforward way to combine them), they are extracted by separate functions. augment adds the point classifications to the original dataset:

library(broom)
head(augment(kclust, points.matrix))
##         x1         x2 .cluster
## 1 4.434320  0.5416470        1
## 2 5.321046 -0.9412882        1
## 3 5.125271 -1.5802282        1
## 4 6.353225 -1.6040549        1
## 5 3.712270 -3.4079344        1
## 6 5.322555 -0.7716317        1

The tidy function summarizes on a per-cluster level:

tidy(kclust)
##           x1         x2 size  withinss cluster
## 1  5.1791917 -0.9865170   99 204.96483       1
## 2  0.1583797  0.9797098  151 257.20360       2
## 3 -3.0653196 -2.0887225   50  78.64255       3

And as it always does, the glance function extracts a single-row summary:

glance(kclust)
##     totss tot.withinss betweenss iter
## 1 3629.67      540.811  3088.859    2

broom and dplyr for exploratory clustering

While these summaries are useful, they would not have been too difficult to extract out from the dataset yourself. The real power comes from combining their analyses with dplyr.

Let's say we want to explore the effect of different choices of k, from 1 to 9, on this clustering. First cluster the data 9 times, each using a different value of k:

kclusts <- data.frame(k=1:9) %>% group_by(k) %>% do(kclust=kmeans(points.matrix, .$k))

Then tidy the clusterings three ways: using tidy, using augment, and using glance. Each of these goes into a separate dataset as they represent different types of data.

clusters <- kclusts %>% group_by(k) %>% do(tidy(.$kclust[[1]]))
assignments <- kclusts %>% group_by(k) %>% do(augment(.$kclust[[1]], points.matrix))
clusterings <- kclusts %>% group_by(k) %>% do(glance(.$kclust[[1]]))

Now we can plot the original points, with each point colored according to the original cluster:

p1 <- ggplot(assignments, aes(x1, x2)) + geom_point(aes(color=.cluster)) + facet_wrap(~ k)
p1

plot of chunk unnamed-chunk-9

Already we get a good sense of the proper number of clusters (3), and how the k-means algorithm functions when k is too high or too low. We can then add the centers of the cluster using the data from tidy:

p2 <- p1 + geom_point(data=clusters, size=10, shape="x")
p2

plot of chunk unnamed-chunk-10

The data from glance fits a different but equally important purpose: it lets you view trends of some summary statistics across values of k. Of particular interest is the total within sum of squares, saved in the tot.withinss column.

ggplot(clusterings, aes(k, tot.withinss)) + geom_line()

plot of chunk unnamed-chunk-11

This represents the variance within the clusters. It decreases as k increases, but one can notice a bend (or “elbow”) right at k=3. This bend indicates that additional clusters beyond the third have little value. (See here for a more mathematically rigorous interpretation and implementation of this method). Thus, all three methods of tidying data provided by broom are useful for summarizing clustering output.

broom/inst/doc/bootstrapping.html0000644000177700017770000075666713204542661020263 0ustar herbrandtherbrandt Tidy bootstrapping with dplyr+broom

Tidy bootstrapping with dplyr+broom

Another place where combining model fits in a tidy way becomes useful is when performing bootstrapping or permutation tests. These approaches have been explored before, for instance by Andrew MacDonald here, and Hadley has explored efficient support for bootstrapping as a potential enhancement to dplyr. broom fits naturally with dplyr in performing these analyses.

Bootstrapping consists of randomly sampling a dataset with replacement, then performing the analysis individually on each bootstrapped replicate. The variation in the resulting estimate is then a reasonable approximation of the variance in your estimate.

Let's say you want to fit a nonlinear model to the weight/mileage relationship in the mtcars dataset.

library(ggplot2)
data(mtcars)
ggplot(mtcars, aes(mpg, wt)) + geom_point()

plot of chunk unnamed-chunk-1

You might use the method of nonlinear least squares (nls function) to fit a model.

nlsfit <- nls(mpg ~ k / wt + b, mtcars, start=list(k=1, b=0))
summary(nlsfit)
## 
## Formula: mpg ~ k/wt + b
## 
## Parameters:
##   Estimate Std. Error t value Pr(>|t|)    
## k   45.829      4.249  10.786 7.64e-12 ***
## b    4.386      1.536   2.855  0.00774 ** 
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 2.774 on 30 degrees of freedom
## 
## Number of iterations to convergence: 1 
## Achieved convergence tolerance: 2.877e-08
ggplot(mtcars, aes(wt, mpg)) + geom_point() + geom_line(aes(y=predict(nlsfit)))

plot of chunk unnamed-chunk-2

While this does provide a p-value and confidence intervals for the parameters, these are based on model assumptions that may not hold in real data. Bootstrapping is a popular method for providing confidence intervals and predictions that are more robust to the nature of the data. The function bootstrap in broom can be used to sample bootstrap replications. First, we construct 100 bootstrap replications of the data, each of which has been randomly sampled with replacement.

We use do to perform an nls fit on each replication, using tidy to recombine:

library(dplyr)
library(broom)
set.seed(2014)
bootnls <- mtcars %>% bootstrap(100) %>%
    do(tidy(nls(mpg ~ k / wt + b, ., start=list(k=1, b=0))))

This produces a summary of each replication, combined into one data.frame:

bootnls
## # A tibble: 200 x 6
## # Groups: replicate [100]
##    replicate term  estimate std.error statistic           p.value
##        <int> <chr>    <dbl>     <dbl>     <dbl>             <dbl>
##  1         1 k        46.6       4.03    11.6   0.00000000000134 
##  2         1 b         4.36      1.54     2.83  0.00813          
##  3         2 k        54.2       4.96    10.9   0.00000000000576 
##  4         2 b         1.00      1.90     0.530 0.600            
##  5         3 k        43.3       3.56    12.1   0.000000000000422
##  6         3 b         4.83      1.30     3.72  0.000810         
##  7         4 k        48.5       4.46    10.9   0.00000000000607 
##  8         4 b         3.51      1.69     2.08  0.0464           
##  9         5 k        52.6       5.66     9.29  0.000000000247   
## 10         5 b         3.34      2.30     1.45  0.156            
## # ... with 190 more rows

You can then calculate confidence intervals (using what is called the percentile method):

alpha = .05
bootnls %>% group_by(term) %>% summarize(low=quantile(estimate, alpha / 2),
                                         high=quantile(estimate, 1 - alpha / 2))
## # A tibble: 2 x 3
##   term     low  high
##   <chr>  <dbl> <dbl>
## 1 b      0.214  6.54
## 2 k     39.7   58.7

Or you can use histograms to give you a more detailed idea of the uncertainty in each estimate:

library(ggplot2)
ggplot(bootnls, aes(estimate)) + geom_histogram(binwidth=2) + facet_wrap(~ term, scales="free")

plot of chunk unnamed-chunk-6

Or you can use augment to visualize the uncertainty in the curve:

bootnls_aug <- mtcars %>% bootstrap(100) %>%
    do(augment(nls(mpg ~ k / wt + b, ., start=list(k=1, b=0)), .))

ggplot(bootnls_aug, aes(wt, mpg)) + geom_point() +
    geom_line(aes(y=.fitted, group=replicate), alpha=.2)

plot of chunk unnamed-chunk-7

With only a few small changes, one could easily perform bootstrapping with other kinds of predictive or hypothesis testing models, since the tidy and augment functions works for many statistical outputs. As another example, you could use smooth.spline:

smoothspline_aug <- mtcars %>% bootstrap(100) %>%
    do(augment(smooth.spline(.$wt, .$mpg, df=4), .))

ggplot(smoothspline_aug, aes(wt, mpg)) + geom_point() +
    geom_line(aes(y=.fitted, group=replicate), alpha=.2)

plot of chunk unnamed-chunk-8

broom/inst/doc/bootstrapping.Rmd0000644000177700017770000000720113204542661020006 0ustar herbrandtherbrandt ```{r setup, echo=FALSE} library(knitr) opts_chunk$set(message=FALSE) ``` Tidy bootstrapping with dplyr+broom =================================== Another place where combining model fits in a tidy way becomes useful is when performing bootstrapping or permutation tests. These approaches have been explored before, for instance by [Andrew MacDonald here](http://rstudio-pubs-static.s3.amazonaws.com/19698_a4c472606e3c43e4b94720506e49bb7b.html), and [Hadley has explored efficient support for bootstrapping](https://github.com/hadley/dplyr/issues/269) as a potential enhancement to dplyr. broom fits naturally with dplyr in performing these analyses. Bootstrapping consists of randomly sampling a dataset with replacement, then performing the analysis individually on each bootstrapped replicate. The variation in the resulting estimate is then a reasonable approximation of the variance in your estimate. Let's say you want to fit a nonlinear model to the weight/mileage relationship in the `mtcars` dataset. ```{r} library(ggplot2) data(mtcars) ggplot(mtcars, aes(mpg, wt)) + geom_point() ``` You might use the method of nonlinear least squares (`nls` function) to fit a model. ```{r} nlsfit <- nls(mpg ~ k / wt + b, mtcars, start=list(k=1, b=0)) summary(nlsfit) ggplot(mtcars, aes(wt, mpg)) + geom_point() + geom_line(aes(y=predict(nlsfit))) ``` While this does provide a p-value and confidence intervals for the parameters, these are based on model assumptions that may not hold in real data. Bootstrapping is a popular method for providing confidence intervals and predictions that are more robust to the nature of the data. The function `bootstrap` in **broom** can be used to sample bootstrap replications. First, we construct 100 bootstrap replications of the data, each of which has been randomly sampled with replacement. 
We use `do` to perform an `nls` fit on each replication, using `tidy` to recombine: ```{r} library(dplyr) library(broom) set.seed(2014) bootnls <- mtcars %>% bootstrap(100) %>% do(tidy(nls(mpg ~ k / wt + b, ., start=list(k=1, b=0)))) ``` This produces a summary of each replication, combined into one data.frame: ```{r} bootnls ``` You can then calculate confidence intervals (using what is called the [percentile method](https://www.uvm.edu/~dhowell/StatPages/Randomization%20Tests/ResamplingWithR/BootstMeans/bootstrapping_means.html)): ```{r} alpha = .05 bootnls %>% group_by(term) %>% summarize(low=quantile(estimate, alpha / 2), high=quantile(estimate, 1 - alpha / 2)) ``` Or you can use histograms to give you a more detailed idea of the uncertainty in each estimate: ```{r} library(ggplot2) ggplot(bootnls, aes(estimate)) + geom_histogram(binwidth=2) + facet_wrap(~ term, scales="free") ``` Or you can use `augment` to visualize the uncertainty in the curve: ```{r} bootnls_aug <- mtcars %>% bootstrap(100) %>% do(augment(nls(mpg ~ k / wt + b, ., start=list(k=1, b=0)), .)) ggplot(bootnls_aug, aes(wt, mpg)) + geom_point() + geom_line(aes(y=.fitted, group=replicate), alpha=.2) ``` With only a few small changes, one could easily perform bootstrapping with other kinds of predictive or hypothesis testing models, since the `tidy` and `augment` functions works for many statistical outputs. 
As another example, you could use `smooth.spline`: ```{r} smoothspline_aug <- mtcars %>% bootstrap(100) %>% do(augment(smooth.spline(.$wt, .$mpg, df=4), .)) ggplot(smoothspline_aug, aes(wt, mpg)) + geom_point() + geom_line(aes(y=.fitted, group=replicate), alpha=.2) ``` broom/tests/0000755000177700017770000000000013204276216014067 5ustar herbrandtherbrandtbroom/tests/testthat/0000755000177700017770000000000013204523725015727 5ustar herbrandtherbrandtbroom/tests/testthat/test-lme4.R0000644000177700017770000000451013204276216017670 0ustar herbrandtherbrandt# test tidy, augment, glance methods from lme4-tidiers.R if (require(lme4, quietly = TRUE)) { context("lme4 models") d <- as.data.frame(ChickWeight) colnames(d) <- c("y", "x", "subj", "tx") fit <- lmer(y ~ tx*x + (x | subj), data=d) test_that("tidy works on lme4 fits", { td <- tidy(fit) }) test_that("scales works", { t1 <- tidy(fit,effects="ran_pars") t2 <- tidy(fit,effects="ran_pars",scales="sdcor") expect_equal(t1$estimate,t2$estimate) expect_error(tidy(fit,effects="ran_pars",scales="varcov"), "unrecognized ran_pars scale") t3 <- tidy(fit,effects="ran_pars",scales="vcov") expect_equal(t3$estimate[c(1,2,4)], t2$estimate[c(1,2,4)]^2) expect_error(tidy(fit,scales="vcov"), "must be provided for each effect") }) test_that("tidy works with more than one RE grouping variable", { dd <- expand.grid(f=factor(1:10),g=factor(1:5),rep=1:3) dd$y <- suppressMessages(simulate(~(1|f)+(1|g),newdata=dd, newparams=list(beta=1,theta=c(1,1)), family=poisson, seed=101))[[1]] gfit <- glmer(y~(1|f)+(1|g),data=dd,family=poisson) expect_equal(as.character(tidy(gfit,effects="ran_pars")$term), paste("sd_(Intercept)",c("f","g"),sep=".")) }) test_that("augment works on lme4 fits with or without data", { au <- augment(fit) au <- augment(fit, d) }) dNAs <- d dNAs$y[c(1, 3, 5)] <- NA test_that("augment works on lme4 fits with NAs", { fitNAs <- lmer(y ~ tx*x + (x | subj), data = dNAs) au <- augment(fitNAs) expect_equal(nrow(au), 
sum(complete.cases(dNAs))) }) test_that("augment works on lme4 fits with na.exclude", { fitNAs <- lmer(y ~ tx*x + (x | subj), data = dNAs, na.action = "na.exclude") #expect_error(suppressWarnings(augment(fitNAs))) au <- augment(fitNAs, dNAs) # with na.exclude, should have NAs in the output where there were NAs in input expect_equal(nrow(au), nrow(dNAs)) expect_equal(complete.cases(au), complete.cases(dNAs)) }) test_that("glance works on lme4 fits", { g <- glance(fit) }) } broom/tests/testthat/test-tidy.R0000644000177700017770000000676313204276216020014 0ustar herbrandtherbrandt# test individual tidy.ZZZ methods from stats package context("tidying models") test_that("tidy.lm works", { lmfit <- lm(mpg ~ wt, mtcars) td = tidy(lmfit) check_tidy(td, exp.row=2) expect_equal(td$term, c("(Intercept)", "wt")) lmfit2 <- lm(mpg ~ wt + disp, mtcars) td2 = tidy(lmfit2) check_tidy(td2, exp.row=3) expect_equal(td2$term, c("(Intercept)", "wt", "disp")) }) test_that("tidy.glm works", { glmfit <- glm(am ~ wt, mtcars, family="binomial") td = tidy(glmfit) check_tidy(td, exp.row=2, exp.col=5) expect_equal(td$term, c("(Intercept)", "wt")) glmfit2 <- glm(cyl ~ wt + disp, mtcars, family="poisson") td2 = tidy(glmfit2) check_tidy(td2, exp.row=3, exp.col=5) expect_equal(td2$term, c("(Intercept)", "wt", "disp")) }) test_that("tidy.anova and tidy.aov work", { anovafit = anova(lm(mpg ~ wt + disp, mtcars)) td = tidy(anovafit) check_tidy(td, exp.row=3, exp.col=6) expect_true("Residuals" %in% td$term) aovfit = aov(mpg ~ wt + disp, mtcars) td = tidy(anovafit) check_tidy(td, exp.row=3, exp.col=6) expect_true("Residuals" %in% td$term) }) test_that("tidy.nls works", { nlsfit = nls(wt ~ a + b * mpg + c / disp, data=mtcars, start=list(a=1, b=2, c=3)) td = tidy(nlsfit) check_tidy(td, exp.row=3, exp.col=5) expect_equal(td$term, c("a", "b", "c")) }) test_that("tidy.survreg works", { # prepare data df <- mtcars df$lwr <- floor(mtcars$mpg) df$upr <- ceiling(mtcars$mpg) # weibull fit (has an extra scale 
parameter) weibull.fit <- survival::survreg(Surv(lwr, upr, type = "interval2") ~ wt, data = df, dist = "weibull") td = tidy(weibull.fit) check_tidy(td, exp.row = 3) expect_equal(td$term, c("(Intercept)", "wt", "Log(scale)")) # exponential fit (scale = 1) exp.fit <- survival::survreg(Surv(lwr, upr, type = "interval2") ~ wt, data = df, dist = "exponential") td2 = tidy(exp.fit) check_tidy(td2, exp.row = 2) expect_equal(td2$term, c("(Intercept)", "wt")) }) context("tidying hypothesis tests") test_that("tidy.htest works on correlation tests", { pco = cor.test(mtcars$mpg, mtcars$wt) td = tidy(pco) n = c("estimate", "p.value", "statistic", "conf.high", "conf.low") check_tidy(td, exp.row=1, exp.names=n) # suppress warning about ties sco = suppressWarnings(cor.test(mtcars$mpg, mtcars$wt, method="spearman")) td = tidy(sco) check_tidy(td, exp.row=1, exp.names=c("estimate", "p.value")) }) test_that("tidy.htest works on t-tests", { tt = t.test(mpg ~ am, mtcars) td = tidy(tt) n = c("estimate1", "estimate2", "p.value", "statistic", "conf.high", "conf.low") check_tidy(td, exp.row=1, exp.names=n) }) test_that("tidy.htest works on wilcoxon tests", { # suppress warning about ties wt = suppressWarnings(wilcox.test(mpg ~ am, mtcars)) td = tidy(wt) n = c("p.value", "statistic") check_tidy(td, exp.row=1, exp.names=n) }) test_that("tidy.summary works (even with NAs)", { df <- data.frame(group = c(rep('M', 6), 'F', 'F', 'M', 'M', 'F', 'F'), val = c(6, 5, NA, NA, 6, 13, NA, 8, 10, 7, 14, 6)) td <- tidy(summary(df$val)) expect_is(td, "data.frame") expect_equal(nrow(td), 1) expect_equal(td$minimum, 5) expect_equal(td$q1, 6) expect_equal(td$median, 7) expect_lt(abs(td$mean - 25 / 3), .001) expect_equal(td$q3, 10) expect_equal(td$maximum, 14) expect_equal(td$na, 3) }) broom/tests/testthat/test-vectors.R0000644000177700017770000000211613204300607020504 0ustar herbrandtherbrandtcontext("vector tidiers") test_that("tidying numeric vectors works", { vec <- 1:10 tidy_vec <- tidy(vec) 
check_tidy(tidy_vec, exp.row = 10, exp.col = 1) # test with names vec2 <- vec names(vec2) <- LETTERS[1:10] tidy_vec2 <- tidy(vec2) check_tidy(tidy_vec2, exp.row = 10, exp.col = 2) expect_true(all(c("names", "x") %in% names(tidy_vec2))) }) test_that("tidying logical vectors works", { vec <- rep(c(TRUE, FALSE), 5) tidy_vec <- tidy(vec) check_tidy(tidy_vec, exp.row = 10, exp.col = 1) # test with names vec2 <- vec names(vec2) <- 1:10 tidy_vec2 <- tidy(vec2) check_tidy(tidy_vec2, exp.row = 10, exp.col = 2) expect_true(all(c("names", "x") %in% names(tidy_vec2))) }) test_that("tidying character vectors works", { vec <- LETTERS[1:10] tidy_vec <- tidy(vec) check_tidy(tidy_vec, exp.row = 10, exp.col = 1) # test with names vec2 <- vec names(vec2) <- 1:10 tidy_vec2 <- tidy(vec2) check_tidy(tidy_vec2, exp.row = 10, exp.col = 2) expect_true(all(c("names", "x") %in% names(tidy_vec2))) }) broom/tests/testthat/test-survdiff.R0000644000177700017770000000436513204276216020667 0ustar herbrandtherbrandt if( require("survival", quietly = TRUE)) { # Examples from survival::survdiff help page ex1 <- survdiff(Surv(futime, fustat) ~ rx,data=ovarian) ex2 <- survdiff(Surv(time, status) ~ pat.karno + strata(inst), data=lung) # More factors and strata ex2a <- survdiff(Surv(time, status) ~ pat.karno + ph.ecog + strata(inst) + strata(sex), data=lung) expect <- survexp(futime ~ ratetable( age=(accept.dt - birth.dt), sex=1, year=accept.dt, race="white" ), jasa, cohort=FALSE, ratetable=survexp.usr) ex3 <- survdiff(Surv(jasa$futime, jasa$fustat) ~ offset(expect)) ex4 <- survdiff( Surv(futime, fustat) ~ rx + ecog.ps, data=ovarian) rm(expect) context("Testing tidy() of 'survdiff' objects") tidy_names <- c("N", "obs", "exp") test_that("tidy works in 2-group case", { td <- tidy(ex1) check_tidy(td, exp.names=tidy_names) }) test_that("tidy works in 7-group stratified case", { td <- tidy(ex2) check_tidy(td, exp.names = tidy_names) }) test_that("tidy works for ex2a", { td <- tidy(ex2a) check_tidy(td, 
exp.names=tidy_names) }) test_that("tidy works for ex3", { td <- tidy(ex3) check_tidy(td, exp.names=tidy_names) }) test_that("tidy works for ex4", { td <- tidy(ex4) check_tidy(td, exp.names=tidy_names) }) context("Testing glance() for 'survdiff' objects") glance_names <- c("statistic", "df", "p.value") test_that("glance works in 2-group case", { gl <- glance(ex1) check_tidy(gl, exp.names = glance_names) }) test_that("glance works in 7-group stratified case", { gl <- glance(ex2) check_tidy(gl, exp.names = glance_names) }) test_that("glance works for ex2a", { gl <- glance(ex2a) check_tidy(gl, exp.names = glance_names) }) test_that("glance works in ex3", { gl <- glance(ex3) check_tidy(gl, exp.names = glance_names) }) test_that("glance works in ex4", { gl <- glance(ex4) check_tidy(gl, exp.names = glance_names) }) } broom/tests/testthat/test-speedlm.R0000644000177700017770000000207113204317475020463 0ustar herbrandtherbrandt# test tidy, augment, glance from speedlm objects if (require("speedglm", quietly = TRUE)) { context("speedlm tidiers") test_that("tidy works on speedlm", { speedlmfit <- speedlm(mpg ~ wt, mtcars) td = tidy(speedlmfit) check_tidy(td, exp.row=2) expect_equal(td$term, c("(Intercept)", "wt")) speedlmfit2 <- lm(mpg ~ wt + disp, mtcars) td2 = tidy(speedlmfit2) check_tidy(td2, exp.row=3) expect_equal(td2$term, c("(Intercept)", "wt", "disp")) }) test_that("glance works on speedlm", { speedlmfit <- speedlm(mpg ~ wt, mtcars) glance <- glance(speedlmfit) expect_equal(nrow(glance), 1) }) test_that("augment works on speedlm", { speedlmfit <- speedlm(mpg ~ wt, mtcars) # we don't do check_augment_NAs because speedlm doesn't accept a na.action argument au <- augment(speedlmfit) check_augment(au, mtcars) au2 <- augment(speedlmfit, mtcars) check_augment(au2, mtcars, same = colnames(mtcars)) }) } broom/tests/testthat/test-lm.R0000644000177700017770000000355513204301355017440 0ustar herbrandtherbrandt# test tidy, augment, glance from lm objects context("lm tidiers") 
test_that("tidy.lm works", { lmfit <- lm(mpg ~ wt, mtcars) td <- tidy(lmfit) check_tidy(td, exp.row = 2) expect_equal(td$term, c("(Intercept)", "wt")) lmfit2 <- lm(mpg ~ wt + disp, mtcars) td2 <- tidy(lmfit2) check_tidy(td2, exp.row = 3) expect_equal(td2$term, c("(Intercept)", "wt", "disp")) expect_warning(tidy(lmfit2, exponentiate = TRUE)) }) test_that("tidy.lm with confint = TRUE works even if rank-deficient", { d <- data.frame(y = rnorm(4), x = letters[seq_len(4)]) expect_is(tidy(lm(y ~ x, data = d), confint = TRUE), "data.frame") }) test_that("tidy.glm works", { glmfit <- glm(am ~ wt, mtcars, family = "binomial") td <- tidy(glmfit) check_tidy(td, exp.row = 2, exp.col = 5) expect_equal(td$term, c("(Intercept)", "wt")) # check exponentiation works check_tidy(tidy(glmfit, exponentiate = TRUE), exp.row = 2, exp.col = 5) glmfit2 <- glm(cyl ~ wt + disp, mtcars, family = "poisson") td2 <- tidy(glmfit2) check_tidy(td2, exp.row = 3, exp.col = 5) expect_equal(td2$term, c("(Intercept)", "wt", "disp")) # check exponentiation works check_tidy(tidy(glmfit2, exponentiate = TRUE), exp.row = 3, exp.col = 5) }) test_that("glance.glm works", { glmfit <- glm(am ~ wt, mtcars, family = "binomial") td <- glance(glmfit) check_tidy(td, exp.row = 1, exp.col = 7) glmfit2 <- glm(cyl ~ wt + disp, mtcars, family = "poisson") td2 <- glance(glmfit2) check_tidy(td2, exp.row = 1, exp.col = 7) }) test_that("tidy.lm works with quick", { lmfit <- lm(mpg ~ wt, mtcars) td <- tidy(lmfit, quick = TRUE) check_tidy(td, exp.row = 2, exp.col = 2) lmfit2 <- lm(mpg ~ wt + disp, mtcars) td2 <- tidy(lmfit2, quick = TRUE) check_tidy(td2, exp.row = 3, exp.col = 2) }) broom/tests/testthat/test-muhaz.R0000644000177700017770000000052013204320671020143 0ustar herbrandtherbrandtif (requireNamespace("muhaz")) { context("Kernel based hazard rate estimates via muhaz") data(ovarian, package = "survival") mz <- muhaz::muhaz(ovarian$futime, ovarian$fustat) test_that("tidy works on muhaz objects", { tidy(mz) }) 
test_that("glance works on muhaz objects", { glance(mz) }) } broom/tests/testthat/test-gam.R0000644000177700017770000000131013204276216017566 0ustar herbrandtherbrandtif (requireNamespace("gam")) { context("gam models from package gam") data(kyphosis, package = "gam") g <- gam::gam(Kyphosis ~ gam::s(Age,4) + Number, family = binomial, data = kyphosis) test_that("tidy works on gam models", { tidy(g) }) test_that("glance works on gam models", { glance(g) }) } if (requireNamespace("mgcv")) { context("gam models from package mgcv") d <- as.data.frame(ChickWeight) g <- mgcv::gam(weight ~ s(Time) + factor(Diet), data = d) test_that("tidy works on mgcv::gam models", { tidy(g) tidy(g, parametric = TRUE) }) test_that("glance works on mgcv::gam models", { glance(g) }) } broom/tests/testthat/test-rstanarm.R0000644000177700017770000000312713204276216020661 0ustar herbrandtherbrandt# test tidy and glance methods from rstanarm_tidiers.R context("rstanarm tidiers") suppressPackageStartupMessages(library(rstanarm)) if (require(rstanarm, quietly = TRUE)) { set.seed(2016) capture.output( fit <- stan_glmer(mpg ~ wt + (1|cyl) + (1+wt|gear), data = mtcars, iter = 200, chains = 2) ) context("rstanarm models") test_that("tidy works on rstanarm fits", { td1 <- tidy(fit) td2 <- tidy(fit, parameters = "varying") td3 <- tidy(fit, parameters = "hierarchical") td4 <- tidy(fit, parameters = "auxiliary") expect_equal(colnames(td1), c("term", "estimate", "std.error")) }) test_that("tidy with multiple 'parameters' selections works on rstanarm fits", { td1 <- tidy(fit, parameters = c("varying", "auxiliary")) expect_true(all(c("sigma", "mean_PPD") %in% td1$term)) expect_equal(colnames(td1), c("term", "estimate", "std.error", "level", "group")) }) test_that("intervals works on rstanarm fits", { td1 <- tidy(fit, intervals = TRUE, prob = 0.8) td2 <- tidy(fit, parameters = "varying", intervals = TRUE, prob = 0.5) nms <- c("level", "group", "term", "estimate", "std.error", "lower", "upper") 
expect_equal(colnames(td2), nms) }) test_that("glance works on rstanarm fits", { g1 <- glance(fit) g2 <- glance(fit, looic = TRUE, cores = 1) expect_equal(colnames(g1), c("algorithm", "pss", "nobs", "sigma")) expect_equal(colnames(g2), c(colnames(g1), "looic", "elpd_loo", "p_loo")) }) } broom/tests/testthat/test-bootstrap.R0000644000177700017770000000250713204276216021050 0ustar herbrandtherbrandtcontext("bootstrapping") test_that("bootstrap works with by_group and grouped tbl", { df <- data_frame(x = c(rep("a", 3), rep("b", 5)), y = rnorm(length(x))) df_reps <- df %>% group_by(x) %>% bootstrap(20, by_group = TRUE) %>% do(tally(group_by(., x))) expect_true(all(filter(df_reps, x == "a")$n == 3)) expect_true(all(filter(df_reps, x == "b")$n == 5)) }) test_that("bootstrap does not sample within groups if by_group = FALSE", { set.seed(12334) df <- data_frame(x = c(rep("a", 3), rep("b", 5)), y = rnorm(length(x))) df_reps <- df %>% group_by(x) %>% bootstrap(20, by_group = FALSE) %>% do(tally(group_by(., x))) expect_true(!all(filter(df_reps, x == "a")$n == 3)) expect_true(!all(filter(df_reps, x == "b")$n == 5)) }) test_that("bootstrap does not sample within groups if no groups", { set.seed(12334) df <- data_frame(x = c(rep("a", 3), rep("b", 5)), y = rnorm(length(x))) df_reps <- df %>% ungroup() %>% bootstrap(20, by_group = TRUE) %>% do(tally(group_by(., x))) expect_true(!all(filter(df_reps, x == "a")$n == 3)) expect_true(!all(filter(df_reps, x == "b")$n == 5)) }) broom/tests/testthat/test-ivreg.R0000644000177700017770000000256713204276216020155 0ustar herbrandtherbrandt# test tidy, augment, glance methods from lme4-tidiers.R if (require(AER, quietly = TRUE)) { context("AER::ivreg models") data("CigarettesSW", package = "AER") CigarettesSW$rprice <- with(CigarettesSW, price/cpi) CigarettesSW$rincome <- with(CigarettesSW, income/population/cpi) CigarettesSW$tdiff <- with(CigarettesSW, (taxs - tax)/cpi) ivr <- ivreg(log(packs) ~ log(rprice) + log(rincome) | log(rincome) + 
tdiff + I(tax/cpi), data = CigarettesSW, subset = year == "1995") test_that("tidy works on AER::ivreg fits", { td <- tidy(ivr) td2 <- tidy(ivr, conf.int = TRUE) expect_warning(tidy(ivr, exponentiate = TRUE)) # warning as we didn't use a link function, maybe this is bad? }) test_that("augment works on ivreg fits", { au <- augment(ivr) expect_true(all(c('.resid', '.fitted') %in% names(au))) expect_equivalent(au$.resid, residuals(ivr)) expect_equivalent(au$.fitted, fitted(ivr)) old_cigs <- CigarettesSW[CigarettesSW$year == "1985" & CigarettesSW$tax < 40, ] au2 <- augment(ivr, newdata = old_cigs) expect_true('.fitted' %in% names(au2)) expect_equivalent(au2$.fitted, predict(ivr, newdata = old_cigs)) }) test_that("glance works on ivreg fits", { g <- glance(ivr) g <- glance(ivr, diagnostics = TRUE) }) } broom/tests/testthat/test-nlme.R0000644000177700017770000000531213204276216017763 0ustar herbrandtherbrandt# test tidy, augment, glance methods from nlme-tidiers.R if (suppressPackageStartupMessages(require(nlme, quietly = TRUE))) { context("nlme models") d <- as.data.frame(ChickWeight) colnames(d) <- c("y", "x", "subj", "tx") fit <- lme(y ~ tx * x, random = ~x | subj, data = d) test_that("tidy works on nlme/lme fits", { td <- tidy(fit) }) test_that("augment works on lme4 fits with or without data", { au <- augment(fit) au <- augment(fit, d) }) dNAs <- d dNAs$y[c(1, 3, 5)] <- NA test_that("augment works on lme fits with NAs and na.omit", { fitNAs <- lme(y ~ tx*x, random = ~x | subj, data = dNAs, na.action = "na.omit") au <- augment(fitNAs) expect_equal(nrow(au), sum(complete.cases(dNAs))) }) test_that("augment works on lme fits with na.omit", { fitNAs <- lme(y ~ tx*x, random = ~x | subj, data = dNAs, na.action = "na.exclude") au <- augment(fitNAs, dNAs) # with na.exclude, should have NAs in the output where there were NAs in input expect_equal(nrow(au), nrow(dNAs)) expect_equal(complete.cases(au), complete.cases(dNAs)) }) test_that("glance works on nlme fits", { g <- 
glance(fit) }) testFit <- function(fit, data = NULL){ test_that("Pinheiro/Bates fit works", { tidy(fit, "fixed") tidy(fit) glance(fit) if (is.null(data)) { augment(fit) } else { augment(fit, data) } }) } testFit(lme(score ~ Machine, data = Machines, random = ~1 | Worker)) testFit(lme(score ~ Machine, data = Machines, random = ~1 | Worker)) testFit(lme(score ~ Machine, data = Machines, random = ~1 | Worker / Machine)) testFit(lme(pixel ~ day + day ^ 2, data = Pixel, random = list(Dog = ~day, Side = ~1))) testFit(lme(pixel ~ day + day ^ 2 + Side, data = Pixel, random = list(Dog = ~day, Side = ~1))) testFit(lme(yield ~ ordered(nitro)*Variety, data = Oats, random = ~1/Block/Variety)) # There are cases where no data set is returned in the result # We can do nothing about this inconsitency but give a useful error message in augment fit = nlme(conc ~ SSfol(Dose, Time, lKe, lKa, lCl), data = Theoph, random = pdDiag(lKe + lKa + lCl ~ 1)) test_that( "Fit without data in returned structure works when data are given", { testFit(fit, Theoph) }) # When no data are passed, a meaningful message is issued expect_error(augment(fit), "explicit") } broom/tests/testthat/test-rowwise.R0000644000177700017770000000366713204276216020542 0ustar herbrandtherbrandt# test tidiers for rowwise data frames (that contain individual # objects as a list column, see ?rowwise_df_tidiers) context("rowwise tidiers") mods <- mtcars %>% group_by(cyl) %>% do(mod = lm(mpg ~ wt + qsec, .)) test_that("rowwise tidiers can be applied to sub-models", { expect_is(mods, "rowwise_df") tidied <- mods %>% tidy(mod) augmented <- mods %>% augment(mod) glanced <- mods %>% glance(mod) expect_equal(nrow(augmented), nrow(mtcars)) expect_equal(nrow(glanced), 3) expect_true(!("disp" %in% colnames(augmented))) }) test_that("rowwise tidiers can be given additional arguments", { augmented <- mods %>% augment(mod, newdata = head(mtcars, 5)) expect_equal(nrow(augmented), 3 * 5) }) test_that("rowwise augment can use a column as 
the data", { mods <- mtcars %>% group_by(cyl) %>% do(mod = lm(mpg ~ wt + qsec, .), data = (.)) expect_is(mods, "rowwise_df") augmented <- mods %>% augment(mod, data = data) # order has changed, but original columns should be there expect_true(!is.null(augmented$disp)) expect_equal(sort(mtcars$disp), sort(augmented$disp)) expect_equal(sort(mtcars$drat), sort(augmented$drat)) expect_true(!is.null(augmented$.fitted)) # column name doesn't have to be data mods <- mtcars %>% group_by(cyl) %>% do(mod = lm(mpg ~ wt + qsec, .), original = (.)) augmented <- mods %>% augment(mod, data = original) expect_true(!is.null(augmented$disp)) expect_equal(sort(mtcars$disp), sort(augmented$disp)) }) test_that("rowwise tidiers work even when an ungrouped data frame was used", { one_row <- mtcars %>% do(model = lm(mpg ~ wt, .)) tidied <- one_row %>% tidy(model) expect_equal(nrow(tidied), 2) augmented <- one_row %>% augment(model) expect_equal(nrow(augmented), nrow(mtcars)) glanced <- one_row %>% glance(model) expect_equal(nrow(glanced), 1) }) broom/tests/testthat/test-dplyr.R0000644000177700017770000000334213204276216020163 0ustar herbrandtherbrandtcontext("dplyr and broom") suppressPackageStartupMessages(library(dplyr)) # set up the lahman batting table, and filter to make it faster batting <- tbl(src_df("Lahman"), "Batting") batting <- batting %>% filter(yearID > 1980) lm0 <- failwith(NULL, lm, quiet = TRUE) test_that("can perform regressions with tidying in dplyr", { regressions <- batting %>% group_by(yearID) %>% do(tidy(lm0(SB ~ CS, data=.))) expect_lt(30, nrow(regressions)) expect_true(all(c("yearID", "estimate", "statistic", "p.value") %in% colnames(regressions))) }) test_that("tidying methods work with rowwise_df", { regressions <- batting %>% group_by(yearID) %>% do(mod = lm0(SB ~ CS, data=.)) tidied <- regressions %>% tidy(mod) augmented <- regressions %>% augment(mod) glanced <- regressions %>% glance(mod) num_years <- length(unique(batting$yearID)) expect_equal(nrow(tidied), 
num_years * 2) expect_equal(nrow(augmented), sum(!is.na(batting$SB) & !is.na(batting$CS))) expect_equal(nrow(glanced), num_years) }) test_that("can perform correlations with tidying in dplyr", { cor.test0 <- purrr::possibly(cor.test, NULL) pcors <- batting %>% group_by(yearID) %>% do(tidy(cor.test0(.$SB, .$CS))) expect_true(all(c("yearID", "estimate", "statistic", "p.value") %in% colnames(pcors))) expect_lt(30, nrow(pcors)) scors <- suppressWarnings(batting %>% group_by(yearID) %>% do(tidy(cor.test0(.$SB, .$CS, method="spearman")))) expect_true(all(c("yearID", "estimate", "statistic", "p.value") %in% colnames(scors))) expect_lt(30, nrow(scors)) expect_false(all(pcors$estimate == scors$estimate)) }) broom/tests/testthat/test-data.frame.R0000644000177700017770000000071013204300607021017 0ustar herbrandtherbrandtcontext("data.frame tidiers") test_that("tidy.data.frame works", { tidy_df <- tidy(mtcars) check_tidy(tidy_df, exp.row = 11, exp.col = 13) expect_false("var" %in% names(tidy_df)) expect_equal(names(tidy_df)[1], "column") }) test_that("augment.data.frame throws an error", { expect_error(augment(mtcars)) }) test_that("glance.data.frame works", { glance_df <- glance(mtcars) check_tidy(glance_df, exp.row = 1, exp.col = 4) }) broom/tests/testthat/test-glmnet.R0000644000177700017770000000231213204300607020303 0ustar herbrandtherbrandtcontext("glmnet tidiers") glm_td <- function() { cars_matrix <- model.matrix(mpg ~ wt + disp, data = mtcars) glm_fit <- glmnet::glmnet(cars_matrix[, -1], mtcars$mpg) glm_fit } cv_glm_td <- function() { set.seed(1234) cars_matrix <- model.matrix(mpg ~ wt + disp, data = mtcars) glm_fit <- glmnet::cv.glmnet(cars_matrix[, -1], mtcars$mpg) glm_fit } test_that("tidy.glmnet works", { td <- tidy(glm_td()) tidy_names <- c("term", "step", "estimate", "lambda", "dev.ratio") check_tidy(td, exp.col = 5, exp.names = tidy_names) expect_true(all(c("(Intercept)", "wt", "disp") %in% td$term)) }) test_that("glance.glmnet works", { td <- glance(glm_td()) 
tidy_names <- c("nulldev", "npasses") check_tidy(td, exp.row = 1, exp.col = 2, exp.names = tidy_names) }) test_that("tidy.cv.glmnet works", { td <- tidy(cv_glm_td()) tidy_names <- unlist(strsplit("lambda estimate std.error conf.high conf.low nzero", split = "[ ]")) check_tidy(td, exp.col = 6, exp.names = tidy_names) }) test_that("glance.cv.glmnet works", { td <- glance(cv_glm_td()) tidy_names <- c("lambda.min", "lambda.1se") check_tidy(td, exp.row = 1, exp.col = 2, exp.names = tidy_names) }) broom/tests/testthat/test-survival.R0000644000177700017770000000214313204302342020670 0ustar herbrandtherbrandtif(require("survival", quietly = TRUE)) { context("test survival tidiers") surv_fit <- survreg(Surv(futime, fustat) ~ ecog.ps + rx, ovarian, dist = "exponential") coxph_fit <- coxph(Surv(time, status) ~ age + sex, lung) test_that("tidy.survreg works", { tidy_names <- unlist(strsplit("term estimate std.error statistic p.value conf.low conf.high", " ")) td <- tidy(surv_fit) check_tidy(td, exp.row = 3, exp.col = 7, exp.names = tidy_names) }) test_that("glance.survreg works", { tidy_names <- unlist(strsplit("iter df chi p.value logLik AIC BIC df.residual", " ")) td <- glance(surv_fit) check_tidy(td, exp.row = 1, exp.col = 9, exp.names = tidy_names) }) test_that("tidy.coxph works", { tidy_names <- unlist(strsplit("term estimate std.error statistic p.value conf.low conf.high", " ")) td <- tidy(coxph_fit) check_tidy(td, exp.row = 2, exp.col = 7, exp.names = tidy_names) }) test_that("glance.coxph works", { td <- glance(coxph_fit) check_tidy(td, exp.row = 1) }) } broom/tests/testthat/helper-checkers.R0000644000177700017770000001031613204276216021117 0ustar herbrandtherbrandt#' test the basics of tidy/augment/glance output: is a data frame, no row names check_tidiness <- function(o) { expect_is(o, "data.frame") expect_equal(rownames(o), as.character(seq_len(nrow(o)))) } #' check the output of a tidy function check_tidy <- function(o, exp.row = NULL, exp.col = NULL, exp.names = 
NULL) { check_tidiness(o) if (!is.null(exp.row)) { expect_equal(nrow(o), exp.row) } if (!is.null(exp.col)) { expect_equal(ncol(o), exp.col) } if (!is.null(exp.names)) { expect_true(all(exp.names %in% colnames(o))) } } #' check the output of an augment function check_augment <- function(au, original = NULL, exp.names = NULL, same = NULL) { check_tidiness(au) if (!is.null(original)) { # check that all rows in original appear in output expect_equal(nrow(au), nrow(original)) # check that columns are the same for (column in same) { expect_equal(au[[column]], original[[column]]) } } if (!is.null(exp.names)) { expect_true(all(exp.names %in% colnames(au))) } } #' add NAs to a vector randomly #' #' @param v vector to add NAs to #' @param number number of NAs to add #' #' @return vector with NAs added randomly add_NAs <- function(v, number) { if (number >= length(v)) { stop("Would replace all or more values with NA") } v[sample(length(v), number)] <- NA v } #' check an augmentation function works as expected when given NAs #' #' @param func A modeling function that takes a dataset and additional #' arguments, including na.action #' @param .data dataset to test function on; must have at least 3 rows #' and no NA values #' @param column a column included in the model to be replaced with NULLs #' @param column2 another column in the model; optional #' @param ... extra arguments, not used #' #' @export check_augment_NAs <- function(func, .data, column, column2 = NULL, ...) 
{ # test original version, with and without giving data to augment obj <- class(func(.data))[1] test_that(paste("augment works with", obj), { m <- func(.data) au <- augment(m) check_augment(au, .data) au_d <- augment(m, .data) check_augment(au_d, .data, same = colnames(.data)) }) # add NAs if (nrow(.data) < 3) { stop(".data must have at least 3 rows in NA testing") } check_omit <- function(au, dat) { NAs <- is.na(dat[[column]]) check_augment(au, dat[!NAs, ], same = c(column, column2)) if (!is.null(au$.rownames)) { expect_equal(rownames(dat)[!NAs], au$.rownames) } } check_exclude <- function(au, dat) { check_augment(au, dat, colnames(dat)) # check .fitted and .resid columns have NAs in the right place for (col in c(".fitted", ".resid")) { if (!is.null(au[[col]])) { expect_equal(is.na(au[[col]]), is.na(dat[[column]])) } } } # test augment with na.omit (factory-fresh setting in R) # and test with or without rownames num_NAs <- min(5, nrow(.data) - 2) .dataNA <- .data .dataNA[[column]] <- add_NAs(.dataNA[[column]], num_NAs) test_that(paste("augment works with", obj, "with na.omit"), { .dataNA_noname <- unrowname(.dataNA) for (d in list(.dataNA, .dataNA_noname)) { m <- func(d, na.action = "na.omit") au <- augment(m) check_omit(au, d) au_d <- augment(m, d) check_omit(au_d, d) } }) for(rnames in c(TRUE, FALSE)) { msg <- paste("augment works with", obj, "with na.exclude") if (!rnames) { msg <- paste(msg, "without rownames") } test_that(msg, { d <- if (rnames) { .dataNA } else { unrowname(.dataNA) } m <- func(d, na.action = "na.exclude") # without the data argument, it works like na.exclude expect_warning(au <- augment(m), "na.exclude") check_omit(au, d) # with the data argument, it keeps the NAs au_d <- augment(m, d) check_exclude(au_d, d) }) } } broom/tests/testthat/test-augment.R0000644000177700017770000000241313204276216020467 0ustar herbrandtherbrandt# test the augment method of lm, glm, nls, lmer, coxph, and survreg # (note that test_that cases contained within the 
check_augment_NAs # function) context("lm augment") lm_func <- function(.data, ...) lm(mpg ~ wt, .data, ...) check_augment_NAs(lm_func, mtcars, "mpg", "wt") context("glm augment") glm_func <- function(.data, ...) glm(am ~ wt, .data, family = "poisson", ...) check_augment_NAs(glm_func, mtcars, "am", "wt") context("nls augment") nls_func <- function(.data, ...) { nls(mpg ~ k * e ^ wt, data = .data, start = list(k = 50, e = 1), ...) } check_augment_NAs(nls_func, mtcars, "mpg", "wt") if (require("lme4", quietly = TRUE)) { context("lme4 augment") lmer_func <- function(.data, ...) { lmer(Reaction ~ Days + (Days | Subject), .data, ...) } check_augment_NAs(lmer_func, sleepstudy, "Reaction", "Days") } if (require("survival", quietly = TRUE)) { context("survival augment") coxph_func <- function(.data, ...) { coxph(Surv(time, status) ~ age + sex, .data, ...) } check_augment_NAs(coxph_func, lung, "age", "sex") survreg_func <- function(.data, ...) { survreg(Surv(futime, fustat) ~ ecog.ps + rx, .data, dist = "exponential", ...) 
} check_augment_NAs(survreg_func, ovarian, "ecog.ps", "rx") }broom/tests/test-all.R0000644000177700017770000000004613204276216015737 0ustar herbrandtherbrandtlibrary(testthat) test_check("broom") broom/NAMESPACE0000644000177700017770000001211713204526663014152 0ustar herbrandtherbrandt# Generated by roxygen2: do not edit by hand S3method(augment,"NULL") S3method(augment,Mclust) S3method(augment,betareg) S3method(augment,coxph) S3method(augment,data.frame) S3method(augment,decomposed.ts) S3method(augment,default) S3method(augment,felm) S3method(augment,glmRob) S3method(augment,ivreg) S3method(augment,kmeans) S3method(augment,lm) S3method(augment,lmRob) S3method(augment,lme) S3method(augment,loess) S3method(augment,merMod) S3method(augment,mlm) S3method(augment,nlrq) S3method(augment,nls) S3method(augment,plm) S3method(augment,poLCA) S3method(augment,prcomp) S3method(augment,rowwise_df) S3method(augment,rq) S3method(augment,rqs) S3method(augment,smooth.spline) S3method(augment,speedlm) S3method(augment,stl) S3method(augment,survreg) S3method(augment,tbl_df) S3method(augment_,rowwise_df) S3method(glance,"NULL") S3method(glance,Arima) S3method(glance,Mclust) S3method(glance,aareg) S3method(glance,betareg) S3method(glance,biglm) S3method(glance,binDesign) S3method(glance,cch) S3method(glance,coxph) S3method(glance,cv.glmnet) S3method(glance,data.frame) S3method(glance,default) S3method(glance,ergm) S3method(glance,felm) S3method(glance,fitdistr) S3method(glance,gam) S3method(glance,glm) S3method(glance,glmRob) S3method(glance,glmnet) S3method(glance,gmm) S3method(glance,htest) S3method(glance,ivreg) S3method(glance,kmeans) S3method(glance,list) S3method(glance,lm) S3method(glance,lmRob) S3method(glance,lme) S3method(glance,lmodel2) S3method(glance,matrix) S3method(glance,merMod) S3method(glance,mlm) S3method(glance,muhaz) S3method(glance,multinom) S3method(glance,nlrq) S3method(glance,nls) S3method(glance,orcutt) S3method(glance,plm) S3method(glance,poLCA) 
S3method(glance,pyears) S3method(glance,ridgelm) S3method(glance,rlm) S3method(glance,rowwise_df) S3method(glance,rq) S3method(glance,rqs) S3method(glance,smooth.spline) S3method(glance,speedlm) S3method(glance,stanreg) S3method(glance,summary.lm) S3method(glance,summaryDefault) S3method(glance,survdiff) S3method(glance,survexp) S3method(glance,survfit) S3method(glance,survreg) S3method(glance,tbl_df) S3method(glance_,rowwise_df) S3method(tidy,"NULL") S3method(tidy,Arima) S3method(tidy,Line) S3method(tidy,Lines) S3method(tidy,Mclust) S3method(tidy,Polygon) S3method(tidy,Polygons) S3method(tidy,SpatialLinesDataFrame) S3method(tidy,SpatialPolygons) S3method(tidy,SpatialPolygonsDataFrame) S3method(tidy,TukeyHSD) S3method(tidy,aareg) S3method(tidy,acf) S3method(tidy,anova) S3method(tidy,aov) S3method(tidy,aovlist) S3method(tidy,betareg) S3method(tidy,biglm) S3method(tidy,binDesign) S3method(tidy,binWidth) S3method(tidy,boot) S3method(tidy,brmsfit) S3method(tidy,btergm) S3method(tidy,cch) S3method(tidy,character) S3method(tidy,cld) S3method(tidy,coeftest) S3method(tidy,confint.glht) S3method(tidy,coxph) S3method(tidy,cv.glmnet) S3method(tidy,data.frame) S3method(tidy,default) S3method(tidy,density) S3method(tidy,dgCMatrix) S3method(tidy,dgTMatrix) S3method(tidy,dist) S3method(tidy,emmGrid) S3method(tidy,ergm) S3method(tidy,felm) S3method(tidy,fitdistr) S3method(tidy,ftable) S3method(tidy,gam) S3method(tidy,gamlss) S3method(tidy,geeglm) S3method(tidy,glht) S3method(tidy,glmRob) S3method(tidy,glmnet) S3method(tidy,gmm) S3method(tidy,htest) S3method(tidy,ivreg) S3method(tidy,kappa) S3method(tidy,kde) S3method(tidy,kmeans) S3method(tidy,list) S3method(tidy,lm) S3method(tidy,lmRob) S3method(tidy,lme) S3method(tidy,lmodel2) S3method(tidy,logical) S3method(tidy,lsmobj) S3method(tidy,manova) S3method(tidy,map) S3method(tidy,matrix) S3method(tidy,merMod) S3method(tidy,mle2) S3method(tidy,muhaz) S3method(tidy,multinom) S3method(tidy,nlrq) S3method(tidy,nls) S3method(tidy,numeric) 
S3method(tidy,orcutt) S3method(tidy,pairwise.htest) S3method(tidy,plm) S3method(tidy,poLCA) S3method(tidy,power.htest) S3method(tidy,prcomp) S3method(tidy,pyears) S3method(tidy,rcorr) S3method(tidy,ref.grid) S3method(tidy,ridgelm) S3method(tidy,rjags) S3method(tidy,roc) S3method(tidy,rowwise_df) S3method(tidy,rq) S3method(tidy,rqs) S3method(tidy,sparseMatrix) S3method(tidy,spec) S3method(tidy,speedlm) S3method(tidy,stanfit) S3method(tidy,stanreg) S3method(tidy,summary.glht) S3method(tidy,summary.lm) S3method(tidy,summaryDefault) S3method(tidy,survdiff) S3method(tidy,survexp) S3method(tidy,survfit) S3method(tidy,survreg) S3method(tidy,table) S3method(tidy,tbl_df) S3method(tidy,ts) S3method(tidy,zoo) S3method(tidy_,rowwise_df) export(augment) export(augment_columns) export(bootstrap) export(confint_tidy) export(finish_glance) export(fix_data_frame) export(glance) export(inflate) export(tidy) export(tidyMCMC) import(dplyr) import(tidyr) importFrom(nlme,VarCorr) importFrom(nlme,ranef) importFrom(plyr,ldply) importFrom(plyr,rbind.fill) importFrom(psych,describe) importFrom(reshape2,melt) importFrom(stats,AIC) importFrom(stats,coef) importFrom(stats,confint) importFrom(stats,fitted) importFrom(stats,logLik) importFrom(stats,model.frame) importFrom(stats,na.omit) importFrom(stats,predict) importFrom(stats,qnorm) importFrom(stats,qt) importFrom(stats,residuals) importFrom(stats,setNames) importFrom(stats,var) importFrom(tidyr,gather) importFrom(tidyr,spread) importFrom(utils,head) broom/R/0000755000177700017770000000000013204523664013130 5ustar herbrandtherbrandtbroom/R/stats_tidiers.R0000644000177700017770000002520213204276216016133 0ustar herbrandtherbrandt### tidy methods for S3 classes used by the built-in stats package ### This file is only for miscellaneous methods that have *only* a tidy ### method (not augment or glance). 
In general, tidiers belong in in ### a file of "_tidiers.R" #' tidy a table object #' #' A table, typically created by the \link{table} function, contains a #' contingency table of frequencies across multiple vectors. This directly #' calls the \code{\link{as.data.frame.table}} method, which melts it into a #' data frame with one column for each variable and a \code{Freq} column. #' #' @param x An object of class "table" #' @param ... Extra arguments (not used) #' #' @examples #' #' tab <- with(airquality, table(cut(Temp, quantile(Temp)), Month)) #' tidy(tab) #' #' @seealso \code{\link{as.data.frame.table}} #' #' @export tidy.table <- function(x, ...) { as.data.frame(x) } #' tidy an ftable object #' #' An ftable contains a "flat" contingency table. This melts it into a #' data.frame with one column for each variable, then a \code{Freq} #' column. It directly uses the \code{stats:::as.data.frame.ftable} function #' #' @param x An object of class "ftable" #' @param ... Extra arguments (not used) #' #' @examples #' #' tidy(ftable(Titanic, row.vars = 1:3)) #' #' @seealso \code{\link{ftable}} #' #' @export tidy.ftable <- function(x, ...) { as.data.frame(x) } #' tidy a density objet #' #' Given a "density" object, returns a tidy data frame with two #' columns: points x where the density is estimated, points y #' for the estimate #' #' @param x an object of class "density" #' @param ... extra arguments (not used) #' #' @return a data frame with "x" and "y" columns #' #' d <- density(faithful$eruptions, bw = "sj") #' head(tidy(d)) #' #' library(ggplot2) #' ggplot(tidy(d), aes(x, y)) + geom_line() #' #' @seealso \code{\link{density}} #' #' @export tidy.density <- function(x, ...) { as.data.frame(x[c("x", "y")]) } #' Tidy a distance matrix #' #' Tidy a distance matrix, such as that computed by the \link{dist} #' function, into a one-row-per-pair table. If the distance matrix #' does not include an upper triangle and/or diagonal, this will #' not either. 
#' #' @param x A "dist" object #' @param diag Whether to include the diagonal of the distance #' matrix. Defaults to whether the distance matrix includes it #' @param upper Whether to include the upper right triangle of #' the distance matrix. Defaults to whether the distance matrix #' includes it #' @param ... Extra arguments, not used #' #' @return A data frame with one row for each pair of #' item distances, with columns: #' \describe{ #' \item{item1}{First item} #' \item{item2}{Second item} #' \item{distance}{Distance between items} #' } #' #' @examples #' #' iris_dist <- dist(t(iris[, 1:4])) #' iris_dist #' #' tidy(iris_dist) #' tidy(iris_dist, upper = TRUE) #' tidy(iris_dist, diag = TRUE) #' #' @export tidy.dist <- function(x, diag = attr(x, "Diag"), upper = attr(x, "Upper"), ...) { m <- as.matrix(x) ret <- reshape2::melt(m, varnames = c("item1", "item2"), value.name = "distance") if (!upper) { ret <- ret[!upper.tri(m), ] } if (!diag) { # filter out the diagonal ret <- filter(ret, item1 != item2) } ret } #' tidy a spec objet #' #' Given a "spec" object, which shows a spectrum across a range of frequencies, #' returns a tidy data frame with two columns: "freq" and "spec" #' #' @param x an object of class "spec" #' @param ... extra arguments (not used) #' #' @return a data frame with "freq" and "spec" columns #' #' @examples #' #' spc <- spectrum(lh) #' tidy(spc) #' #' library(ggplot2) #' ggplot(tidy(spc), aes(freq, spec)) + geom_line() #' #' @export tidy.spec <- function(x, ...) { as.data.frame(x[c("freq", "spec")]) } #' tidy a TukeyHSD object #' #' Returns a data.frame with one row for each pairwise comparison #' #' @param x object of class "TukeyHSD" #' @param separate.levels Whether to separate comparison into #' \code{level1} and \code{level2} columns #' @param ... 
additional arguments (not used) #' #' @return A data.frame with one row per comparison, containing columns #' \item{term}{Term for which levels are being compared} #' \item{comparison}{Levels being compared, separated by -} #' \item{estimate}{Estimate of difference} #' \item{conf.low}{Low end of confidence interval of difference} #' \item{conf.high}{High end of confidence interval of difference} #' \item{adj.p.value}{P-value adjusted for multiple comparisons} #' #' If \code{separate.levels = TRUE}, the \code{comparison} column will be #' split up into \code{level1} and \code{level2}. #' #' @examples #' #' fm1 <- aov(breaks ~ wool + tension, data = warpbreaks) #' thsd <- TukeyHSD(fm1, "tension", ordered = TRUE) #' tidy(thsd) #' tidy(thsd, separate.levels = TRUE) #' #' # may include comparisons on multiple terms #' fm2 <- aov(mpg ~ as.factor(gear) * as.factor(cyl), data = mtcars) #' tidy(TukeyHSD(fm2)) #' #' @seealso \code{\link{TukeyHSD}} #' #' @export tidy.TukeyHSD <- function(x, separate.levels = FALSE, ...) { ret <- plyr::ldply(x, function(e) { nn <- c("estimate", "conf.low", "conf.high", "adj.p.value") fix_data_frame(e, nn, "comparison") }, .id = "term") if (separate.levels) { ret <- tidyr::separate(ret, comparison, c("level1", "level2"), sep = "-") } ret } #' tidy a MANOVA object #' #' Constructs a data frame with one row for each of the terms in the model, #' containing the information from \link{summary.manova}. #' #' @param x object of class "manova" #' @param test one of "Pillai" (Pillai's trace), "Wilks" (Wilk's lambda), "Hotelling-Lawley" (Hotelling-Lawley trace) or "Roy" (Roy's greatest root) indicating which test statistic should be used. Defaults to "Pillai" #' @param ... 
additional arguments passed on to \code{summary.manova} #' #' @return A data.frame with the columns #' \item{term}{Term in design} #' \item{statistic}{Approximate F statistic} #' \item{num.df}{Degrees of freedom} #' \item{p.value}{P-value} #' #' Depending on which test statistic is specified, one of the following columns is also included: #' \item{pillai}{Pillai's trace} #' \item{wilks}{Wilk's lambda} #' \item{hl}{Hotelling-Lawley trace} #' \item{roy}{Roy's greatest root} #' #' @examples #' #' npk2 <- within(npk, foo <- rnorm(24)) #' npk2.aov <- manova(cbind(yield, foo) ~ block + N*P*K, npk2) #' #' @seealso \code{\link{summary.manova}} #' #' @export tidy.manova <- function(x, test = "Pillai", ...) { # match test name (default to 'pillai') # partially match the name so we're consistent with the underlying function test.pos <- pmatch(test, c("Pillai", "Wilks", "Hotelling-Lawley", "Roy")) test.name <- c("pillai", "wilks", "hl", "roy")[test.pos] nn <- c("df", test.name, "statistic", "num.df", "den.df", "p.value") ret <- fix_data_frame(summary(x, test = test, ...)$stats, nn) # remove residuals row (doesn't have useful information) ret <- ret[-nrow(ret), ] ret } #' tidy a ts timeseries object #' #' Turn a ts object into a tidy data frame. Right now simply uses #' \code{as.data.frame.ts}. #' #' @param x a "ts" object #' @param ... extra arguments (not used) #' #' @return a tidy data frame #' #' @seealso \link{as.data.frame.ts} #' #' @export tidy.ts <- function(x, ...) { as.data.frame(x) } #' tidy a pairwise hypothesis test #' #' Tidy a pairwise.htest object, containing (adjusted) p-values for multiple #' pairwise hypothesis tests. #' #' @param x a "pairwise.htest" object #' @param ... 
extra arguments (not used) #' #' @return A data frame with one row per group/group comparison, with columns #' \item{group1}{First group being compared} #' \item{group2}{Second group being compared} #' \item{p.value}{(Adjusted) p-value of comparison} #' #' @details Note that in one-sided tests, the alternative hypothesis of each #' test can be stated as "group1 is greater/less than group2". #' #' Note also that the columns of group1 and group2 will always be a factor, #' even if the original input is (e.g.) numeric. #' #' @examples #' #' attach(airquality) #' Month <- factor(Month, labels = month.abb[5:9]) #' ptt <- pairwise.t.test(Ozone, Month) #' tidy(ptt) #' #' attach(iris) #' ptt2 <- pairwise.t.test(Petal.Length, Species) #' tidy(ptt2) #' #' tidy(pairwise.t.test(Petal.Length, Species, alternative = "greater")) #' tidy(pairwise.t.test(Petal.Length, Species, alternative = "less")) #' #' tidy(pairwise.wilcox.test(Petal.Length, Species)) #' #' @seealso \link{pairwise.t.test}, \link{pairwise.wilcox.test} #' #' @export tidy.pairwise.htest <- function(x, ...) { data.frame(group1 = rownames(x$p.value)) %>% cbind(as.data.frame(x$p.value)) %>% tidyr::gather(group2, p.value, -group1) %>% stats::na.omit() } #' tidy a power.htest #' #' @param x a power.htest object #' @param ... extra arguments, not used #' #' @return A data frame with one row per parameter passed in, with #' columns \code{n}, \code{delta}, \code{sd}, \code{sig.level}, and #' \code{power} (from the \code{power.htest} object). #' #' @seealso \link{power.t.test} #' #' @examples #' #' ptt <- power.t.test(n = 2:30, delta = 1) #' tidy(ptt) #' #' library(ggplot2) #' ggplot(tidy(ptt), aes(n, power)) + geom_line() #' #' @export tidy.power.htest <- function(x, ...) 
{ cols <- compact(x[c("n", "delta", "sd", "sig.level", "power", "p1", "p2")]) as.data.frame(cols) } #' Tidying method for the acf function #' #' Tidy an "acf" object, which is the output of \code{acf} and the #' related \code{pcf} and \code{ccf} functions. #' #' @name acf_tidiers #' #' @param x acf object #' @param ... (not used) #' #' @return \code{data.frame} with columns #' \item{lag}{lag values} #' \item{acf}{calucated correlation} #' #' @examples #' #' # acf #' result <- acf(lh, plot=FALSE) #' tidy(result) #' #' # ccf #' result <- ccf(mdeaths, fdeaths, plot=FALSE) #' tidy(result) #' #' # pcf #' result <- pacf(lh, plot=FALSE) #' tidy(result) #' #' # lag plot #' library(ggplot2) #' result <- tidy(acf(lh, plot=FALSE)) #' p <- ggplot(result, aes(x=lag, y=acf)) + #' geom_bar(stat='identity', width=0.1) + #' theme_bw() #' p #' #' # with confidence intervals #' conf.level <- 0.95 #' # from \code{plot.acf} method #' len.data <- length(lh) # same as acf$n.used #' conf.int <- qnorm((1 + conf.level) / 2) / sqrt(len.data) #' p + geom_hline(yintercept = c(-conf.int, conf.int), #' color='blue', linetype='dashed') #' #' @export tidy.acf <- function(x, ...) { ret <- data.frame(lag = x$lag, acf = x$acf) return(ret) } # todo? # tidy.infl # tidy.stepfun broom/R/mle2_tidiers.R0000644000177700017770000000172613204276216015641 0ustar herbrandtherbrandt#' Tidy mle2 maximum likelihood objects #' #' Tidy mle2 objects from the bbmle package. #' #' @param x An "mle2" object #' @param conf.int Whether to add \code{conf.low} and \code{conf.high} columns #' @param conf.level Confidence level to use for interval #' @param ... 
Extra arguments, not used #' #' @examples #' #' if (require("bbmle", quietly = TRUE)) { #' x <- 0:10 #' y <- c(26, 17, 13, 12, 20, 5, 9, 8, 5, 4, 8) #' d <- data.frame(x,y) #' #' fit <- mle2(y ~ dpois(lambda = ymean), #' start = list(ymean = mean(y)), data = d) #' #' tidy(fit) #' } #' #' @name mle2_tidiers #' #' @export tidy.mle2 <- function(x, conf.int = FALSE, conf.level = .95, ...) { co <- bbmle::coef(bbmle::summary(x)) nn <- c("estimate", "std.error", "statistic", "p.value") ret <- fix_data_frame(co, nn) if (conf.int) { CI <- confint_tidy(x, conf.level = conf.level, func = bbmle::confint) ret <- cbind(ret, CI) } ret } broom/R/sparse_tidiers.R0000644000177700017770000000172713204276216016300 0ustar herbrandtherbrandt#' Tidy a sparseMatrix object from the Matrix package #' #' Tidy a sparseMatrix object from the Matrix package into #' a three-column data frame, row, column, and value (with #' zeros missing). If there are row names or column names, #' use those, otherwise use indices #' #' @param x A Matrix object #' @param ... Extra arguments, not used #' #' @name sparse_tidiers #' #' @export tidy.dgTMatrix <- function(x, ...) { s <- Matrix::summary(x) row <- s$i if (!is.null(rownames(x))) { row <- rownames(x)[row] } col <- s$j if (!is.null(colnames(x))) { col <- colnames(x)[col] } ret <- data.frame(row = row, column = col, value = s$x, stringsAsFactors = FALSE) ret } #' @rdname sparse_tidiers #' @export tidy.dgCMatrix <- function(x, ...) { tidy(methods::as(x, "dgTMatrix")) } #' @rdname sparse_tidiers #' @export tidy.sparseMatrix <- function(x, ...) { tidy(methods::as(x, "dgTMatrix")) } broom/R/ergm_tidiers.R0000644000177700017770000001737513204276216015743 0ustar herbrandtherbrandt#' Tidying methods for an exponential random graph model #' #' These methods tidy the coefficients of an exponential random graph model #' estimated with the \pkg{ergm} package into a summary, and construct #' a one-row glance of the model's statistics. 
The methods should work with #' any model that conforms to the \pkg{ergm} class, such as those #' produced from weighted networks by the \pkg{ergm.count} package. #' #' @return All tidying methods return a \code{data.frame} without rownames. #' The structure depends on the method chosen. #' #' @references Hunter DR, Handcock MS, Butts CT, Goodreau SM, Morris M (2008b). #' \pkg{ergm}: A Package to Fit, Simulate and Diagnose Exponential-Family #' Models for Networks. \emph{Journal of Statistical Software}, 24(3). #' \url{http://www.jstatsoft.org/v24/i03/}. #' #' @seealso \code{\link[ergm]{ergm}}, #' \code{\link[ergm]{control.ergm}}, #' \code{\link[ergm]{summary.ergm}} #' #' @name ergm_tidiers #' #' @param x an \pkg{ergm} object #' @examples #' #' if (require("ergm")) { #' # Using the same example as the ergm package #' # Load the Florentine marriage network data #' data(florentine) #' #' # Fit a model where the propensity to form ties between #' # families depends on the absolute difference in wealth #' gest <- ergm(flomarriage ~ edges + absdiff("wealth")) #' #' # Show terms, coefficient estimates and errors #' tidy(gest) #' #' # Show coefficients as odds ratios with a 99% CI #' tidy(gest, exponentiate = TRUE, conf.int = TRUE, conf.level = 0.99) #' #' # Take a look at likelihood measures and other #' # control parameters used during MCMC estimation #' glance(gest) #' glance(gest, deviance = TRUE) #' glance(gest, mcmc = TRUE) #' } NULL #' @rdname ergm_tidiers #' #' @param conf.int whether to include a confidence interval #' @param conf.level confidence level of the interval, used only if #' \code{conf.int=TRUE} #' @param exponentiate whether to exponentiate the coefficient estimates #' and confidence intervals #' @param quick whether to compute a smaller and faster version, containing #' only the \code{term} and \code{estimate} columns. #' #' @details There is no \code{augment} method for \pkg{ergm} objects. 
#' #' @return \code{tidy.ergm} returns one row for each coefficient, with five columns: #' \item{term}{The term in the model being estimated and tested} #' \item{estimate}{The estimated coefficient} #' \item{std.error}{The standard error} #' \item{mcmc.error}{The MCMC error} #' \item{p.value}{The two-sided p-value} #' #' If \code{conf.int=TRUE}, it also includes columns for \code{conf.low} and #' \code{conf.high}. #' #' @export tidy.ergm <- function(x, conf.int = FALSE, conf.level = .95, exponentiate = FALSE, quick = FALSE, ...) { if (quick) { co <- x$coef ret <- data.frame(term = names(co), estimate = unname(co)) return(process_ergm(ret, conf.int = FALSE, exponentiate = exponentiate)) } co <- ergm::summary.ergm(x, ...)$coefs nn <- c("estimate", "std.error", "mcmc.error", "p.value") if (inherits(co, "listof")) { # multiple response variables ret <- plyr::ldply(co, fix_data_frame, nn[1:ncol(co[[1]])], .id = "response") ret$response <- stringr::str_replace(ret$response, "Response ", "") } else { ret <- fix_data_frame(co, nn[1:ncol(co)]) } process_ergm(ret, x, conf.int = conf.int, conf.level = conf.level, exponentiate = exponentiate) } #' @rdname ergm_tidiers #' #' @param deviance whether to report null and residual deviance for the model, #' along with degrees of freedom; defaults to \code{FALSE} #' @param mcmc whether to report MCMC interval, burn-in and sample size used to #' estimate the model; defaults to \code{FALSE} #' @param ... 
extra arguments passed to \code{\link[ergm]{summary.ergm}} #' #' @return \code{glance.ergm} returns a one-row data.frame with the columns #' \item{independence}{Whether the model assumed dyadic independence} #' \item{iterations}{The number of iterations performed before convergence} #' \item{logLik}{If applicable, the log-likelihood associated with the model} #' \item{AIC}{The Akaike Information Criterion} #' \item{BIC}{The Bayesian Information Criterion} #' #' If \code{deviance=TRUE}, and if the model supports it, the #' data frame will also contain the columns #' \item{null.deviance}{The null deviance of the model} #' \item{df.null}{The degrees of freedom of the null deviance} #' \item{residual.deviance}{The residual deviance of the model} #' \item{df.residual}{The degrees of freedom of the residual deviance} #' #' Last, if \code{mcmc=TRUE}, the data frame will also contain #' the columns #' \item{MCMC.interval}{The interval used during MCMC estimation} #' \item{MCMC.burnin}{The burn-in period of the MCMC estimation} #' \item{MCMC.samplesize}{The sample size used during MCMC estimation} #' #' @export glance.ergm <- function(x, deviance = FALSE, mcmc = FALSE, ...) { # will show appropriate warnings about standard errors, pseudolikelihood etc. s <- ergm::summary.ergm(x, ...) 
# dyadic (in)dependence and number of MCMLE iterations ret <- data.frame(independence = s$independence, iterations = x$iterations) # log-likelihood ret$logLik <- tryCatch(as.numeric(ergm::logLik.ergm(x)), error = function(e) NULL) # null and residual deviance if (deviance & !is.null(ret$logLik)) { dyads <- ergm::get.miss.dyads(x$constrained, x$constrained.obs) dyads <- statnet.common::NVL(dyads, network::network.initialize(1)) dyads <- network::network.edgecount(dyads) dyads <- network::network.dyadcount(x$network, FALSE) - dyads ret$null.deviance <- ergm::logLikNull(x) ret$null.deviance <- ifelse(is.na(ret$null.deviance), 0, -2 * ret$null.deviance) ret$df.null <- dyads ret$residual.deviance <- -2 * ret$logLik ret$df.residual <- dyads - length(x$coef) } # AIC and BIC ret$AIC <- tryCatch(stats::AIC(x), error = function(e) NULL) ret$BIC <- tryCatch(stats::BIC(x), error = function(e) NULL) if (mcmc) { ret <- cbind(ret, data.frame( MCMC.interval = x$control$MCMC.interval, MCMC.burnin = x$control$MCMC.burnin, MCMC.samplesize = x$control$MCMC.samplesize)) } ret <- unrowname(ret) ret } #' helper function to process a tidied ergm object #' #' Optionally exponentiates the coefficients, and optionally adds #' a confidence interval, to a tidied ergm object. 
#' #' @param ret data frame with a tidied version of a coefficient matrix #' @param x an "ergm" object #' @param conf.int whether to include a confidence interval #' @param conf.level confidence level of the interval, used only if #' \code{conf.int=TRUE} #' @param exponentiate whether to exponentiate the coefficient estimates #' and confidence intervals (typical for logistic regression) process_ergm <- function(ret, x, conf.int = FALSE, conf.level = .95, exponentiate = FALSE) { if (exponentiate) { # save transformation function for use on confidence interval if (is.null(x$glm) || (x$glm$family$link != "logit" && x$glm$family$link != "log")) { warning(paste("Exponentiating coefficients, but model did not use", "a log or logit link function")) } trans <- exp } else { trans <- identity } if (conf.int) { z <- stats::qnorm(1 - (1 - conf.level) / 2) CI <- cbind(conf.low = ret$estimate - z * ret$std.error, conf.high = ret$estimate + z * ret$std.error) ret <- cbind(ret, trans(unrowname(CI))) } ret$estimate <- trans(ret$estimate) ret } broom/R/felm_tidiers.R0000644000177700017770000001565413204276216015732 0ustar herbrandtherbrandt#' Tidying methods for models with multiple group fixed effects #' #' These methods tidy the coefficients of a linear model with multiple group fixed effects #' #' @template boilerplate #' #' @name felm_tidiers #' #' @param x felm object #' @param data Original data, defaults to extracting it from the model #' @examples #' #' if (require("lfe", quietly = TRUE)) { #' N=1e2 #' DT <- data.frame( #' id = sample(5, N, TRUE), #' v1 = sample(5, N, TRUE), #' v2 = sample(1e6, N, TRUE), #' v3 = sample(round(runif(100,max=100),4), N, TRUE), #' v4 = sample(round(runif(100,max=100),4), N, TRUE) #' ) #' #' result_felm <- felm(v2~v3, DT) #' tidy(result_felm) #' augment(result_felm) #' result_felm <- felm(v2~v3|id+v1, DT) #' tidy(result_felm, fe = TRUE) #' augment(result_felm) #' v1<-DT$v1 #' v2 <- DT$v2 #' v3 <- DT$v3 #' id <- DT$id #' result_felm <- 
felm(v2~v3|id+v1) #' tidy(result_felm) #' augment(result_felm) #' glance(result_felm) #' } NULL #' @rdname felm_tidiers #' #' @param conf.int whether to include a confidence interval #' @param conf.level confidence level of the interval, used only if #' \code{conf.int=TRUE} #' @param fe whether to include estimates of fixed effects #' @param fe.error whether to include standard error of fixed effects #' #' @details If \code{conf.int=TRUE}, the confidence interval is computed #' #' @return \code{tidy.felm} returns one row for each coefficient. If \code{fe=TRUE}, it also includes rows for for fixed effects estimates. #' There are five columns: #' \item{term}{The term in the linear model being estimated and tested} #' \item{estimate}{The estimated coefficient} #' \item{std.error}{The standard error from the linear model} #' \item{statistic}{t-statistic} #' \item{p.value}{two-sided p-value} #' #' If \code{cont.int=TRUE}, it also includes columns for \code{conf.low} and \code{conf.high}, computed with \code{\link{confint}}. #' @export tidy.felm <- function(x, conf.int=FALSE, conf.level=.95, fe = FALSE, fe.error = fe, ...) { nn <- c("estimate", "std.error", "statistic", "p.value") ret <- fix_data_frame(stats::coef(summary(x)), nn) if (conf.int) { # avoid "Waiting for profiling to be done..." 
message CI <- suppressMessages(stats::confint(x, level = conf.level)) colnames(CI) = c("conf.low", "conf.high") ret <- cbind(ret, unrowname(CI)) } if (fe){ ret <- mutate(ret, N = NA, comp = NA) object <- lfe::getfe(x) if (fe.error){ nn <- c("estimate", "std.error", "N", "comp") ret_fe <- lfe::getfe(x, se = TRUE, bN = 100) %>% select(effect, se, obs, comp) %>% fix_data_frame(nn) %>% mutate(statistic = estimate/std.error) %>% mutate(p.value = 2*(1-stats::pt(statistic, df = N))) } else{ nn <- c("estimate", "N", "comp") ret_fe <- lfe::getfe(x) %>% select(effect, obs, comp) %>% fix_data_frame(nn) %>% mutate(statistic = NA, p.value = NA) } if (conf.int){ ret_fe <- ret_fe %>% mutate(conf.low=estimate-stats::qnorm(1-(1-conf.level)/2)*std.error, conf.high=estimate+stats::qnorm(1-(1-conf.level)/2)*std.error ) } ret <- rbind(ret, ret_fe) } ret } # Things it does not do (no simple way to get it) # 1. does not work if new data # 2. does not give SE of the fit #' @rdname felm_tidiers #' @return \code{augment.felm} returns one row for each observation, with multiple columns added to the original data: #' \item{.fitted}{Fitted values of model} #' \item{.resid}{Residuals} #' If fixed effect are present, #' \item{.comp}{Connected component} #' \item{.fe_}{Fixed effects (as many columns as factors)} #' @export augment.felm <- function(x, data = NULL, ...) 
{ if (is.null(data)){ if (is.null(x$call$data)){ list <- lapply(all.vars(x$call$formula), as.name) data <- eval(as.call(list(quote(data.frame),list)), parent.frame()) } else{ data <- eval(x$call$data,parent.frame()) } if (!is.null(x$na.action)){ data <- slice(data,- as.vector(x$na.action)) } } data <- fix_data_frame(data, newcol = ".rownames") y <- eval(x$call$formula[[2]], envir = data) data$.fitted <- c(x$fitted.values) data$.resid <- c(x$residuals) object <- lfe::getfe(x) if (!is.null(object)){ fe_list <- levels(object$fe) object <- object %>% mutate(effect = as.numeric(effect))%>% mutate(fe = as.character(fe)) length <- length(object) for (fe in names(x$fe)){ if ("xnam" %in% names(attributes(x$fe[[fe]]))){ factor_name <- attributes(x$fe[[fe]])$fnam } else{ factor_name <- fe } formula1 <- stats::as.formula(paste0("~fe==","\"",fe,"\"")) ans <- object %>% filter_(formula1) if (is.character(data[,factor_name])){ ans <- ans %>% mutate_(.dots = stats::setNames(list(~as.character(idx)), factor_name)) } else if (is.numeric(data[,factor_name])){ ans <- ans %>% mutate_(.dots = stats::setNames(list(~as.numeric(as.character(idx))), factor_name)) } else{ ans <- ans %>% mutate_(.dots = stats::setNames(list(~idx), factor_name)) } if (fe==names(x$fe)[1]){ ans <- select_(ans, .dots= c("effect", "comp", "obs", factor_name)) names(ans) <- c(paste0(".fe.",fe), ".comp", ".obs", factor_name) } else{ ans <- select_(ans, .dots= c("effect", factor_name)) names(ans) <- c(paste0(".fe.",fe), factor_name) } data <- left_join(data, ans , factor_name) } } return(data) } #' @rdname felm_tidiers #' #' @param ... 
extra arguments (not used) #' #' @return \code{glance.lm} returns a one-row data.frame with the columns #' \item{r.squared}{The percent of variance explained by the model} #' \item{adj.r.squared}{r.squared adjusted based on the degrees of freedom} #' \item{sigma}{The square root of the estimated residual variance} #' \item{statistic}{F-statistic} #' \item{p.value}{p-value from the F test} #' \item{df}{Degrees of freedom used by the coefficients} #' \item{df.residual}{residual degrees of freedom} #' #' @export glance.felm <- function(x, ...) { s <- summary(x) ret <- with(s, data.frame(r.squared=r2, adj.r.squared=r2adj, sigma = rse, statistic=fstat, p.value = pval, df=df[1], df.residual = rdf )) ret } broom/R/loess_tidiers.R0000644000177700017770000000252213204276216016122 0ustar herbrandtherbrandt#' Augmenting methods for loess models #' #' This method augments the original data with information #' on the fitted values and residuals, and optionally the #' standard errors. #' #' @param x A "loess" object #' @param data Original data, defaults to the extracting it from the model #' @param newdata If provided, performs predictions on the new data #' @param ... 
extra arguments #' #' @name loess_tidiers #' #' @template augment_NAs #' #' @return When \code{newdata} is not supplied \code{augment.loess} #' returns one row for each observation with three columns added #' to the original data: #' \item{.fitted}{Fitted values of model} #' \item{.se.fit}{Standard errors of the fitted values} #' \item{.resid}{Residuals of the fitted values} #' #' When \code{newdata} is supplied \code{augment.loess} returns #' one row for each observation with one additional column: #' \item{.fitted}{Fitted values of model} #' \item{.se.fit}{Standard errors of the fitted values} #' #' @examples #' #' lo <- loess(mpg ~ wt, mtcars) #' augment(lo) #' #' # with all columns of original data #' augment(lo, mtcars) #' #' # with a new dataset #' augment(lo, newdata = head(mtcars)) #' #' @export augment.loess <- function(x, data = stats::model.frame(x), newdata, ...){ augment_columns(x, data, newdata, se.fit = FALSE, se = TRUE, ...) } broom/R/fitdistr_tidiers.R0000644000177700017770000000253713204276216016633 0ustar herbrandtherbrandt#' Tidying methods for fitdistr objects from the MASS package #' #' These methods tidies the parameter estimates resulting #' from an estimation of a univariate distribution's parameters. #' #' @param x An object of class "fitdistr" #' @param ... extra arguments (not used) #' #' @template boilerplate #' #' @name fitdistr_tidiers #' #' @examples #' #' set.seed(2015) #' x <- rnorm(100, 5, 2) #' #' library(MASS) #' fit <- fitdistr(x, dnorm, list(mean = 3, sd = 1)) #' #' tidy(fit) #' glance(fit) NULL #' @rdname fitdistr_tidiers #' #' @return \code{tidy.fitdistr} returns one row for each parameter that #' was estimated, with columns: #' \item{term}{The term that was estimated} #' \item{estimate}{Estimated value} #' \item{std.error}{Standard error of estimate} #' #' @export tidy.fitdistr <- function(x, ...) 
{ data.frame(term = names(x$estimate), estimate = unname(x$estimate), std.error = unname(x$sd)) } #' @rdname fitdistr_tidiers #' #' @return \code{glance.fitdistr} returns a one-row data.frame with the columns #' \item{n}{Number of observations used in estimation} #' \item{logLik}{log-likelihood of estimated data} #' \item{AIC}{Akaike Information Criterion} #' \item{BIC}{Bayesian Information Criterion} #' #' @export glance.fitdistr <- function(x, ...) { finish_glance(data.frame(n = x$n), x) } broom/R/svd_tidiers.R0000644000177700017770000000544413204276216015577 0ustar herbrandtherbrandt#' Tidying methods for singular value decomposition #' #' These methods tidy the U, D, and V matrices returned by the #' \code{\link{svd}} function into a tidy format. Because #' \code{svd} returns a list without a class, this function has to be #' called by \code{\link{tidy.list}} when it recognizes a list as an #' SVD object. #' #' @return An SVD object contains a decomposition into u, d, and v matrices, such that #' \code{u \%\*\% diag(d) \%\*\% t(v)} gives the original matrix. This tidier gives #' a choice of which matrix to tidy. 
#' #' When \code{matrix = "u"}, each observation represents one pair of row and #' principal component, with variables: #' \item{row}{Number of the row in the original data being described} #' \item{PC}{Principal component} #' \item{loading}{Loading of this principal component for this row} #' #' When \code{matrix = "d"}, each observation represents one principal component, #' with variables: #' \item{PC}{Principal component} #' \item{d}{Value in the \code{d} vector} #' \item{percent}{Percent of variance explained by this PC, which is #' proportional to $d^2$} #' #' When \code{matrix = "v"}, each observation represents a pair of a principal #' component and a column of the original matrix, with variables: #' \item{column}{Column of original matrix described} #' \item{PC}{Principal component} #' \item{value}{Value of this PC for this column} #' #' @seealso \code{\link{svd}}, \code{\link{tidy.list}} #' #' @name svd_tidiers #' #' @param x list containing d, u, v components, returned from \code{svd} #' @param matrix which of the u, d or v matrix to tidy #' @param ... Extra arguments (not used) #' #' @examples #' #' mat <- as.matrix(iris[, 1:4]) #' s <- svd(mat) #' #' tidy_u <- tidy(s, matrix = "u") #' head(tidy_u) #' #' tidy_d <- tidy(s, matrix = "d") #' tidy_d #' #' tidy_v <- tidy(s, matrix = "v") #' head(tidy_v) #' #' library(ggplot2) #' library(dplyr) #' #' ggplot(tidy_d, aes(PC, percent)) + #' geom_point() + #' ylab("% of variance explained") #' #' tidy_u %>% #' mutate(Species = iris$Species[row]) %>% #' ggplot(aes(Species, loading)) + #' geom_boxplot() + #' facet_wrap(~ PC, scale = "free_y") tidy_svd <- function(x, matrix = "u", ...) 
{ if (matrix == "u") { # change into a format with three columns: # row, column, loading ret <- x$u %>% reshape2::melt(varnames = c("row", "PC"), value.name = "loading") ret } else if (matrix == "d") { # return as a data.frame data.frame(PC = seq_along(x$d), d = x$d, percent = x$d ^ 2 / sum(x$d ^ 2)) } else if (matrix == "v") { ret <- x$v %>% reshape2::melt(varnames = c("column", "PC"), value.name = "loading") ret } } broom/R/arima_tidiers.R0000644000177700017770000000414013204276216016064 0ustar herbrandtherbrandt#' Tidying methods for ARIMA modeling of time series #' #' These methods tidy the coefficients of ARIMA models of univariate time #' series. #' #' @param x An object of class "Arima" #' #' @details \code{augment} is not currently implemented, as it is not clear #' whether ARIMA predictions can or should be merged with the original #' data frame. #' #' @template boilerplate #' #' @seealso \link{arima} #' #' @examples #' #' fit <- arima(lh, order = c(1, 0, 0)) #' tidy(fit) #' glance(fit) #' #' @name Arima_tidiers NULL #' @rdname Arima_tidiers #' #' @param conf.int whether to include a confidence interval #' @param conf.level confidence level of the interval, used only if #' \code{conf.int=TRUE} #' #' @return \code{tidy} returns one row for each coefficient in the model, #' with five columns: #' \item{term}{The term in the nonlinear model being estimated and tested} #' \item{estimate}{The estimated coefficient} #' \item{std.error}{The standard error from the linear model} #' #' If \code{conf.int = TRUE}, also returns #' \item{conf.low}{low end of confidence interval} #' \item{conf.high}{high end of confidence interval} #' #' @export tidy.Arima <- function(x, conf.int=FALSE, conf.level=.95, ...) 
{ coefs <- stats::coef(x) # standard errors are computed as in stats:::print.Arima ses <- rep.int(0, length(coefs)) ses[x$mask] <- sqrt(diag(x$var.coef)) ret <- unrowname(data.frame(term = names(coefs), estimate = coefs, std.error = ses)) if (conf.int) { ret <- cbind(ret, confint_tidy(x)) } ret } #' @rdname Arima_tidiers #' #' @param ... extra arguments (not used) #' #' @return \code{glance} returns one row with the columns #' \item{sigma}{the square root of the estimated residual variance} #' \item{logLik}{the data's log-likelihood under the model} #' \item{AIC}{the Akaike Information Criterion} #' \item{BIC}{the Bayesian Information Criterion} #' #' @export glance.Arima <- function(x, ...) { ret <- unrowname(data.frame(sigma = sqrt(x$sigma2))) finish_glance(ret, x) } broom/R/bingroup_tidiers.R0000644000177700017770000000556613204276216016635 0ustar herbrandtherbrandt#' Tidy a binWidth object #' #' Tidy a binWidth object from the "binGroup" package, #' which calculates the expected width of a confidence #' interval from a binomial test. #' #' @param x A "binWidth" object #' @param ... Extra arguments (not used) #' #' @return A one-row data.frame with columns: #' \item{ci.width}{Expected width of confidence interval} #' \item{alternative}{Alternative hypothesis} #' \item{p}{True proportion} #' \item{n}{Total sample size} #' #' @examples #' #' if (require("binGroup", quietly = TRUE)) { #' bw <- binWidth(100, .1) #' bw #' tidy(bw) #' #' library(dplyr) #' d <- expand.grid(n = seq(100, 800, 100), #' p = .5, #' method = c("CP", "Blaker", "Score", "Wald"), #' stringsAsFactors = FALSE) %>% #' group_by(n, p, method) %>% #' do(tidy(binWidth(.$n, .$p, method = .$method))) #' #' library(ggplot2) #' ggplot(d, aes(n, ci.width, color = method)) + #' geom_line() + #' xlab("Total Observations") + #' ylab("Expected CI Width") #' } #' #' @name binWidth_tidiers #' #' @export tidy.binWidth <- function(x, ...) 
{ ret <- as.data.frame(unclass(x)) dplyr::rename(ret, ci.width = expCIWidth) } #' Tidy a binDesign object #' #' Tidy a binDesign object from the "binGroup" package, #' which determines the sample size needed for #' a particular power. #' #' @param x A "binDesign" object #' @param ... Extra arguments (not used) #' #' @template boilerplate #' #' @return The \code{tidy} method returns a data.frame #' with one row for each iteration that was performed, #' with columns #' \item{n}{Number of trials in this iteration} #' \item{power}{The power achieved for this n} #' #' The \code{glance} method returns a one-row data.frame #' with columns #' \item{power}{The power achieved by the analysis} #' \item{n}{The sample size used to achieve this power} #' \item{power.reached}{Whether the desired power was reached} #' \item{maxit}{Number of iterations performed} #' #' @examples #' #' if (require("binGroup", quietly = TRUE)) { #' des <- binDesign(nmax = 300, delta = 0.06, #' p.hyp = 0.1, power = .8) #' #' glance(des) #' head(tidy(des)) #' #' # the ggplot2 equivalent of plot(des) #' library(ggplot2) #' ggplot(tidy(des), aes(n, power)) + #' geom_line() #' } #' @name binDesign_tidiers #' #' @export tidy.binDesign <- function(x, ...) { ret <- data.frame(n = x$nit, power = x$powerit) # only up to the number of iterations performed head(ret, x$maxit) } #' @rdname binDesign_tidiers #' @export glance.binDesign <- function(x, ...) { with(unclass(x), data.frame(power = powerout, n = nout, power.reached, maxit = maxit)) } broom/R/lme4_tidiers.R0000644000177700017770000002534313204276216015644 0ustar herbrandtherbrandt#' Tidying methods for mixed effects models #' #' These methods tidy the coefficients of mixed effects models, particularly #' responses of the \code{merMod} class #' #' @param x An object of class \code{merMod}, such as those from \code{lmer}, #' \code{glmer}, or \code{nlmer} #' #' @return All tidying methods return a \code{data.frame} without rownames. 
#' The structure depends on the method chosen. #' #' @name lme4_tidiers #' #' @examples #' #' if (require("lme4")) { #' # example regressions are from lme4 documentation #' lmm1 <- lmer(Reaction ~ Days + (Days | Subject), sleepstudy) #' tidy(lmm1) #' tidy(lmm1, effects = "fixed") #' tidy(lmm1, effects = "fixed", conf.int=TRUE) #' tidy(lmm1, effects = "fixed", conf.int=TRUE, conf.method="profile") #' tidy(lmm1, effects = "ran_modes", conf.int=TRUE) #' head(augment(lmm1, sleepstudy)) #' glance(lmm1) #' #' glmm1 <- glmer(cbind(incidence, size - incidence) ~ period + (1 | herd), #' data = cbpp, family = binomial) #' tidy(glmm1) #' tidy(glmm1, effects = "fixed") #' head(augment(glmm1, cbpp)) #' glance(glmm1) #' #' startvec <- c(Asym = 200, xmid = 725, scal = 350) #' nm1 <- nlmer(circumference ~ SSlogis(age, Asym, xmid, scal) ~ Asym|Tree, #' Orange, start = startvec) #' tidy(nm1) #' tidy(nm1, effects = "fixed") #' head(augment(nm1, Orange)) #' glance(nm1) #' } NULL #' @rdname lme4_tidiers #' #' @param effects A character vector including one or more of "fixed" (fixed-effect parameters), "ran_pars" (variances and covariances or standard deviations and correlations of random effect terms) or "ran_modes" (conditional modes/BLUPs/latent variable estimates) #' @param conf.int whether to include a confidence interval #' @param conf.level confidence level for CI #' @param conf.method method for computing confidence intervals (see \code{lme4::confint.merMod}) #' @param scales scales on which to report the variables: for random effects, the choices are \sQuote{"sdcor"} (standard deviations and correlations: the default if \code{scales} is \code{NULL}) or \sQuote{"vcov"} (variances and covariances). \code{NA} means no transformation, appropriate e.g. for fixed effects; inverse-link transformations (exponentiation #' or logistic) are not yet implemented, but may be in the future. 
#' @param ran_prefix a length-2 character vector specifying the strings to use as prefixes for self- (variance/standard deviation) and cross- (covariance/correlation) random effects terms #' #' @return \code{tidy} returns one row for each estimated effect, either #' with groups depending on the \code{effects} parameter. #' It contains the columns #' \item{group}{the group within which the random effect is being estimated: \code{"fixed"} for fixed effects} #' \item{level}{level within group (\code{NA} except for modes)} #' \item{term}{term being estimated} #' \item{estimate}{estimated coefficient} #' \item{std.error}{standard error} #' \item{statistic}{t- or Z-statistic (\code{NA} for modes)} #' \item{p.value}{P-value computed from t-statistic (may be missing/NA)} #' #' @importFrom plyr ldply rbind.fill #' @import dplyr #' @importFrom tidyr gather spread #' @importFrom nlme VarCorr ranef ## FIXME: is it OK/sensible to import these from (priority='recommended') ## nlme rather than (priority=NA) lme4? #' #' @export tidy.merMod <- function(x, effects = c("ran_pars","fixed"), scales = NULL, ## c("sdcor",NA), ran_prefix=NULL, conf.int = FALSE, conf.level = 0.95, conf.method = "Wald", ...) { effect_names <- c("ran_pars", "fixed", "ran_modes") if (!is.null(scales)) { if (length(scales) != length(effects)) { stop("if scales are specified, values (or NA) must be provided ", "for each effect") } } if (length(miss <- setdiff(effects,effect_names))>0) stop("unknown effect type ",miss) base_nn <- c("estimate", "std.error", "statistic", "p.value") ret_list <- list() if ("fixed" %in% effects) { # return tidied fixed effects rather than random ret <- stats::coef(summary(x)) # p-values may or may not be included nn <- base_nn[1:ncol(ret)] if (conf.int) { cifix <- confint(x,parm="beta_",method=conf.method,...) 
ret <- data.frame(ret,cifix) nn <- c(nn,"conf.low","conf.high") } if ("ran_pars" %in% effects || "ran_modes" %in% effects) { ret <- data.frame(ret,group="fixed") nn <- c(nn,"group") } ret_list$fixed <- fix_data_frame(ret, newnames = nn) } if ("ran_pars" %in% effects) { if (is.null(scales)) { rscale <- "sdcor" } else rscale <- scales[effects=="ran_pars"] if (!rscale %in% c("sdcor","vcov")) stop(sprintf("unrecognized ran_pars scale %s",sQuote(rscale))) ret <- as.data.frame(VarCorr(x)) ret[] <- lapply(ret, function(x) if (is.factor(x)) as.character(x) else x) if (is.null(ran_prefix)) { ran_prefix <- switch(rscale, vcov=c("var","cov"), sdcor=c("sd","cor")) } pfun <- function(x) { v <- na.omit(unlist(x)) if (length(v)==0) v <- "Observation" p <- paste(v,collapse=".") if (!identical(ran_prefix,NA)) { p <- paste(ran_prefix[length(v)],p,sep="_") } return(p) } rownames(ret) <- paste(apply(ret[c("var1","var2")],1,pfun), ret[,"grp"], sep = ".") ## FIXME: this is ugly, but maybe necessary? ## set 'term' column explicitly, disable fix_data_frame ## rownames -> term conversion ## rownames(ret) <- seq(nrow(ret)) if (conf.int) { ciran <- confint(x,parm="theta_",method=conf.method,...) 
ret <- data.frame(ret,ciran) nn <- c(nn,"conf.low","conf.high") } ## replicate lme4:::tnames, more or less ret_list$ran_pars <- fix_data_frame(ret[c("grp", rscale)], newnames = c("group", "estimate")) } if ("ran_modes" %in% effects) { ## fix each group to be a tidy data frame nn <- c("estimate", "std.error") re <- ranef(x,condVar=TRUE) getSE <- function(x) { v <- attr(x,"postVar") setNames(as.data.frame(sqrt(t(apply(v,3,diag)))), colnames(x)) } fix <- function(g,re,.id) { newg <- fix_data_frame(g, newnames = colnames(g), newcol = "level") # fix_data_frame doesn't create a new column if rownames are numeric, # which doesn't suit our purposes newg$level <- rownames(g) newg$type <- "estimate" newg.se <- getSE(re) newg.se$level <- rownames(re) newg.se$type <- "std.error" data.frame(rbind(newg,newg.se),.id=.id, check.names=FALSE) ## prevent coercion of variable names } mm <- do.call(rbind,Map(fix,coef(x),re,names(re))) ## block false-positive warnings due to NSE type <- spread <- est <- NULL mm %>% gather(term, estimate, -.id, -level, -type) %>% spread(type,estimate) -> ret ## FIXME: doesn't include uncertainty of population-level estimate if (conf.int) { if (conf.method != "Wald") stop("only Wald CIs available for conditional modes") mult <- qnorm((1+conf.level)/2) ret <- transform(ret, conf.low=estimate-mult*std.error, conf.high=estimate+mult*std.error) } ret <- dplyr::rename(ret,group=.id) ret_list$ran_modes <- ret } return(rbind.fill(ret_list)) } #' @rdname lme4_tidiers #' #' @param data original data this was fitted on; if not given this will #' attempt to be reconstructed #' @param newdata new data to be used for prediction; optional #' #' @template augment_NAs #' #' @return \code{augment} returns one row for each original observation, #' with columns (each prepended by a .) added. 
Included are the columns #' \item{.fitted}{predicted values} #' \item{.resid}{residuals} #' \item{.fixed}{predicted values with no random effects} #' #' Also added for "merMod" objects, but not for "mer" objects, #' are values from the response object within the model (of type #' \code{lmResp}, \code{glmResp}, \code{nlsResp}, etc). These include \code{".mu", #' ".offset", ".sqrtXwt", ".sqrtrwt", ".eta"}. #' #' @export augment.merMod <- function(x, data = stats::model.frame(x), newdata, ...) { # move rownames if necessary if (missing(newdata)) { newdata <- NULL } ret <- augment_columns(x, data, newdata, se.fit = NULL) # add predictions with no random effects (population means) predictions <- stats::predict(x, re.form = NA) # some cases, such as values returned from nlmer, return more than one # prediction per observation. Not clear how those cases would be tidied if (length(predictions) == nrow(ret)) { ret$.fixed <- predictions } # columns to extract from resp reference object # these include relevant ones that could be present in lmResp, glmResp, # or nlsResp objects respCols <- c("mu", "offset", "sqrtXwt", "sqrtrwt", "weights", "wtres", "gam", "eta") cols <- lapply(respCols, function(n) x@resp[[n]]) names(cols) <- paste0(".", respCols) cols <- as.data.frame(compact(cols)) # remove missing fields cols <- insert_NAs(cols, ret) if (length(cols) > 0) { ret <- cbind(ret, cols) } unrowname(ret) } #' @rdname lme4_tidiers #' #' @param ... extra arguments (not used) #' #' @return \code{glance} returns one row with the columns #' \item{sigma}{the square root of the estimated residual variance} #' \item{logLik}{the data's log-likelihood under the model} #' \item{AIC}{the Akaike Information Criterion} #' \item{BIC}{the Bayesian Information Criterion} #' \item{deviance}{deviance} #' #' @export glance.merMod <- function(x, ...) 
{ # We cannot use stats::sigma or lme4::sigma here, even in an # if statement, since that leads to R CMD CHECK warnings on 3.2 # or dev R, respectively sigma <- if (getRversion() >= "3.3.0") { get("sigma", asNamespace("stats")) } else { get("sigma", asNamespace("lme4")) } ret <- unrowname(data.frame(sigma = sigma(x))) finish_glance(ret, x) } broom/R/lm_tidiers.R0000644000177700017770000002507613204302367015413 0ustar herbrandtherbrandt#' Tidying methods for a linear model #' #' These methods tidy the coefficients of a linear model into a summary, #' augment the original data with information on the fitted values and #' residuals, and construct a one-row glance of the model's statistics. #' #' @details If you have missing values in your model data, you may need to refit #' the model with \code{na.action = na.exclude}. #' #' @return All tidying methods return a \code{data.frame} without rownames. #' The structure depends on the method chosen. #' #' @seealso \code{\link{summary.lm}} #' #' @name lm_tidiers #' #' @param x lm object #' @param data Original data, defaults to the extracting it from the model #' @param newdata If provided, performs predictions on the new data #' @param type.predict Type of prediction to compute for a GLM; passed on to #' \code{\link{predict.glm}} #' @param type.residuals Type of residuals to compute for a GLM; passed on to #' \code{\link{residuals.glm}} #' #' @examples #' #' library(ggplot2) #' library(dplyr) #' #' mod <- lm(mpg ~ wt + qsec, data = mtcars) #' #' tidy(mod) #' glance(mod) #' #' # coefficient plot #' d <- tidy(mod) %>% mutate(low = estimate - std.error, #' high = estimate + std.error) #' ggplot(d, aes(estimate, term, xmin = low, xmax = high, height = 0)) + #' geom_point() + #' geom_vline(xintercept = 0) + #' geom_errorbarh() #' #' head(augment(mod)) #' head(augment(mod, mtcars)) #' #' # predict on new data #' newdata <- mtcars %>% head(6) %>% mutate(wt = wt + 1) #' augment(mod, newdata = newdata) #' #' au <- augment(mod, data 
= mtcars) #' #' plot(mod, which = 1) #' qplot(.fitted, .resid, data = au) + #' geom_hline(yintercept = 0) + #' geom_smooth(se = FALSE) #' qplot(.fitted, .std.resid, data = au) + #' geom_hline(yintercept = 0) + #' geom_smooth(se = FALSE) #' qplot(.fitted, .std.resid, data = au, #' colour = factor(cyl)) #' qplot(mpg, .std.resid, data = au, colour = factor(cyl)) #' #' plot(mod, which = 2) #' qplot(sample =.std.resid, data = au, stat = "qq") + #' geom_abline() #' #' plot(mod, which = 3) #' qplot(.fitted, sqrt(abs(.std.resid)), data = au) + geom_smooth(se = FALSE) #' #' plot(mod, which = 4) #' qplot(seq_along(.cooksd), .cooksd, data = au) #' #' plot(mod, which = 5) #' qplot(.hat, .std.resid, data = au) + geom_smooth(se = FALSE) #' ggplot(au, aes(.hat, .std.resid)) + #' geom_vline(size = 2, colour = "white", xintercept = 0) + #' geom_hline(size = 2, colour = "white", yintercept = 0) + #' geom_point() + geom_smooth(se = FALSE) #' #' qplot(.hat, .std.resid, data = au, size = .cooksd) + #' geom_smooth(se = FALSE, size = 0.5) #' #' plot(mod, which = 6) #' ggplot(au, aes(.hat, .cooksd)) + #' geom_vline(xintercept = 0, colour = NA) + #' geom_abline(slope = seq(0, 3, by = 0.5), colour = "white") + #' geom_smooth(se = FALSE) + #' geom_point() #' qplot(.hat, .cooksd, size = .cooksd / .hat, data = au) + scale_size_area() #' #' # column-wise models #' a <- matrix(rnorm(20), nrow = 10) #' b <- a + rnorm(length(a)) #' result <- lm(b ~ a) #' tidy(result) NULL #' @rdname lm_tidiers #' #' @param conf.int whether to include a confidence interval #' @param conf.level confidence level of the interval, used only if #' \code{conf.int=TRUE} #' @param exponentiate whether to exponentiate the coefficient estimates #' and confidence intervals (typical for logistic regression) #' @param quick whether to compute a smaller and faster version, containing #' only the \code{term} and \code{estimate} columns. 
#' #' @details If \code{conf.int=TRUE}, the confidence interval is computed with #' the \code{\link{confint}} function. #' #' While \code{tidy} is supported for "mlm" objects, \code{augment} and #' \code{glance} are not. #' #' @return \code{tidy.lm} returns one row for each coefficient, with five columns: #' \item{term}{The term in the linear model being estimated and tested} #' \item{estimate}{The estimated coefficient} #' \item{std.error}{The standard error from the linear model} #' \item{statistic}{t-statistic} #' \item{p.value}{two-sided p-value} #' #' If the linear model is an "mlm" object (multiple linear model), there is an #' additional column: #' \item{response}{Which response column the coefficients correspond to #' (typically Y1, Y2, etc)} #' #' If \code{conf.int=TRUE}, it also includes columns for \code{conf.low} and #' \code{conf.high}, computed with \code{\link{confint}}. #' #' @export tidy.lm <- function(x, conf.int = FALSE, conf.level = .95, exponentiate = FALSE, quick = FALSE, ...) { if (quick) { co <- stats::coef(x) ret <- data.frame(term = names(co), estimate = unname(co), stringsAsFactors = FALSE) return(process_lm(ret, x, conf.int = FALSE, exponentiate = exponentiate)) } s <- summary(x) ret <- tidy.summary.lm(s) process_lm(ret, x, conf.int = conf.int, conf.level = conf.level, exponentiate = exponentiate) } #' @rdname lm_tidiers #' @export tidy.summary.lm <- function(x, ...) 
{ co <- stats::coef(x) nn <- c("estimate", "std.error", "statistic", "p.value") if (inherits(co, "listof")) { # multiple response variables ret <- plyr::ldply(co, fix_data_frame, nn[1:ncol(co[[1]])], .id = "response") ret$response <- stringr::str_replace(ret$response, "Response ", "") } else { ret <- fix_data_frame(co, nn[1:ncol(co)]) } ret } #' @rdname lm_tidiers #' #' @template augment_NAs #' #' @details Code and documentation for \code{augment.lm} originated in the #' ggplot2 package, where it was called \code{fortify.lm} #' #' @return When \code{newdata} is not supplied \code{augment.lm} returns #' one row for each observation, with seven columns added to the original #' data: #' \item{.hat}{Diagonal of the hat matrix} #' \item{.sigma}{Estimate of residual standard deviation when #' corresponding observation is dropped from model} #' \item{.cooksd}{Cooks distance, \code{\link{cooks.distance}}} #' \item{.fitted}{Fitted values of model} #' \item{.se.fit}{Standard errors of fitted values} #' \item{.resid}{Residuals} #' \item{.std.resid}{Standardised residuals} #' #' (Some unusual "lm" objects, such as "rlm" from MASS, may omit #' \code{.cooksd} and \code{.std.resid}. "gam" from mgcv omits #' \code{.sigma}) #' #' When \code{newdata} is supplied, \code{augment.lm} returns one row for each #' observation, with three columns added to the new data: #' \item{.fitted}{Fitted values of model} #' \item{.se.fit}{Standard errors of fitted values} #' \item{.resid}{Residuals of fitted values on the new data} #' #' @export augment.lm <- function(x, data = stats::model.frame(x), newdata, type.predict, type.residuals, ...) { augment_columns(x, data, newdata, type.predict = type.predict, type.residuals = type.residuals) } #' @rdname lm_tidiers #' #' @param ... 
extra arguments (not used) #' #' @return \code{glance.lm} returns a one-row data.frame with the columns #' \item{r.squared}{The percent of variance explained by the model} #' \item{adj.r.squared}{r.squared adjusted based on the degrees of freedom} #' \item{sigma}{The square root of the estimated residual variance} #' \item{statistic}{F-statistic} #' \item{p.value}{p-value from the F test, describing whether the full #' regression is significant} #' \item{df}{Degrees of freedom used by the coefficients} #' \item{logLik}{the data's log-likelihood under the model} #' \item{AIC}{the Akaike Information Criterion} #' \item{BIC}{the Bayesian Information Criterion} #' \item{deviance}{deviance} #' \item{df.residual}{residual degrees of freedom} #' #' @export glance.lm <- function(x, ...) { # use summary.lm explicity, so that c("aov", "lm") objects can be # summarized and glanced at s <- stats::summary.lm(x) ret <- glance.summary.lm(s, ...) ret <- finish_glance(ret, x) ret } #' @rdname lm_tidiers #' @export glance.summary.lm <- function(x, ...) { ret <- with(x, cbind(data.frame(r.squared=r.squared, adj.r.squared=adj.r.squared, sigma=sigma), if (exists("fstatistic")) { data.frame( statistic=fstatistic[1], p.value=pf(fstatistic[1], fstatistic[2], fstatistic[3], lower.tail=FALSE))} else { data.frame( statistic=NA_real_, p.value=NA_real_) }, data.frame( df=df[1]))) unrowname(ret) } #' @export augment.mlm <- function(x, ...) { stop("augment does not support multiple responses") } #' @export glance.mlm <- function(x, ...) { stop("glance does not support multiple responses") } #' helper function to process a tidied lm object #' #' Adds a confidence interval, and possibly exponentiates, a tidied #' object. Useful for operations shared between lm and biglm. 
#' #' @param ret data frame with a tidied version of a coefficient matrix #' @param x an "lm", "glm", "biglm", or "bigglm" object #' @param conf.int whether to include a confidence interval #' @param conf.level confidence level of the interval, used only if #' \code{conf.int=TRUE} #' @param exponentiate whether to exponentiate the coefficient estimates #' and confidence intervals (typical for logistic regression) process_lm <- function(ret, x, conf.int = FALSE, conf.level = .95, exponentiate = FALSE) { if (exponentiate) { # save transformation function for use on confidence interval if (is.null(x$family) || (x$family$link != "logit" && x$family$link != "log")) { warning(paste("Exponentiating coefficients, but model did not use", "a log or logit link function")) } trans <- exp } else { trans <- identity } if (conf.int) { # avoid "Waiting for profiling to be done..." message CI <- suppressMessages(stats::confint(x, level = conf.level)) # Handle case if regression is rank deficient p <- x$rank if (!is.null(p) && !is.null(x$qr)) { piv <- x$qr$pivot[seq_len(p)] CI <- CI[piv, , drop = FALSE] } colnames(CI) = c("conf.low", "conf.high") ret <- cbind(ret, trans(unrowname(CI))) } ret$estimate <- trans(ret$estimate) ret } broom/R/list_tidiers.R0000644000177700017770000000372413204276216015755 0ustar herbrandtherbrandt#' Tidiers for return values from functions that aren't S3 objects #' #' This method handles the return values of functions that return lists #' rather than S3 objects, such as \code{optim}, \code{svd}, or #' \code{\link[akima]{interp}}, and therefore cannot be handled by #' S3 dispatch. #' #' @param x list object #' @param ... extra arguments, passed to the tidying function #' #' @details Those tiders themselves are implemented as functions of the #' form tidy_ or glance_ that are not exported. #' #' @seealso \link{optim_tidiers}, \link{xyz_tidiers}, #' \link{svd_tidiers}, \link{orcutt_tidiers} #' #' @name list_tidiers #' #' @export tidy.list <- function(x, ...) 
{ if (all(c("par", "value", "counts", "convergence", "message") %in% names(x))) { # returned from optim tidy_optim(x, ...) } else if (all(c("x", "y", "z") %in% names(x)) & is.matrix(x$z)) { if ( length(x$x) != nrow(x$z) ) { stop("The list looks like an x,y,z list but is not. Element x of the list needs to be the same length as the number of rows of element z") } if ( length(x$y) != ncol(x$z) ) { stop("The list looks like an x,y,z list but is not. Element y of the list needs to be the same length as the number of columns of element z") } # xyz list suitable for persp, image, etc. tidy_xyz(x, ...) } else if (all(sort(names(x)) == c("d", "u", "v"))) { tidy_svd(x, ...) } else if ("Cochrane.Orcutt" %in% names(x)) { tidy.orcutt(x, ...) } else { stop("No tidying method recognized for this list") } } #' @rdname list_tidiers #' #' @export glance.list <- function(x, ...) { if (all(c("par", "value", "counts", "convergence", "message") %in% names(x))) { glance_optim(x, ...) } else if ("Cochrane.Orcutt" %in% names(x)) { glance.orcutt(x, ...) } else { stop("No glance method recognized for this list") } } broom/R/map_tidiers.R0000644000177700017770000000233713204276216015556 0ustar herbrandtherbrandt#' Tidy method for map objects. #' #' This function turns a map into a data frame. #' #' This code and documentation originated in ggplot2, but was called "fortify." #' In broom, "fortify" became "augment", which is reserved for functions that *add* #' columns to existing data (based on a model fit, for example) so these functions #' were renamed as "tidy." #' #' @param x map object #' @param ... 
not used by this method #' #' @examples #' if (require("maps") && require("ggplot2")) { #' ca <- map("county", "ca", plot = FALSE, fill = TRUE) #' head(tidy(ca)) #' qplot(long, lat, data = ca, geom = "polygon", group = group) #' #' tx <- map("county", "texas", plot = FALSE, fill = TRUE) #' head(tidy(tx)) #' qplot(long, lat, data = tx, geom = "polygon", group = group, #' colour = I("white")) #' } #' #' @export tidy.map <- function(x, ...) { df <- as.data.frame(x[c("x", "y")]) names(df) <- c("long", "lat") df$group <- cumsum(is.na(df$long) & is.na(df$lat)) + 1 df$order <- 1:nrow(df) names <- do.call("rbind", lapply(strsplit(x$names, "[:,]"), "[", 1:2)) df$region <- names[df$group, 1] df$subregion <- names[df$group, 2] df[stats::complete.cases(df$lat, df$long), ] } broom/R/lmtest_tidiers.R0000644000177700017770000000170613204276216016310 0ustar herbrandtherbrandt#' Tidying methods for coeftest objects #' #' This tidies the result of a coefficient test, from the \code{coeftest} #' function in the \code{lmtest} package. #' #' @param x coeftest object #' @param ... extra arguments (not used) #' #' @return A \code{data.frame} with one row for each coefficient, with five columns: #' \item{term}{The term in the linear model being estimated and tested} #' \item{estimate}{The estimated coefficient} #' \item{std.error}{The standard error} #' \item{statistic}{test statistic} #' \item{p.value}{p-value} #' #' @examples #' #' if (require("lmtest", quietly = TRUE)) { #' data(Mandible) #' fm <- lm(length ~ age, data=Mandible, subset=(age <= 28)) #' #' coeftest(fm) #' tidy(coeftest(fm)) #' } #' #' @export tidy.coeftest <- function(x, ...) 
{ co <- as.data.frame(unclass(x)) nn <- c("estimate", "std.error", "statistic", "p.value")[1:ncol(co)] ret <- fix_data_frame(co, nn) ret } broom/R/orcutt_tidiers.R0000644000177700017770000000356713204276216016327 0ustar herbrandtherbrandt#' Tidiers for Cochrane Orcutt object #' #' Tidies a Cochrane Orcutt object, which estimates autocorrelation #' and beta coefficients in a linear fit. #' #' @param x An "orcutt" object returned by \code{cochrane.orcutt} #' @param ... Extra arguments passed on to \code{\link{tidy.lm}} #' #' @template boilerplate #' #' @return \code{tidy} returns the same information as #' \code{\link{tidy.lm}}, though without confidence interval options. #' #' @return \code{glance}{} #' #' @name orcutt_tidiers #' #' @examples #' #' reg <- lm(mpg ~ wt + qsec + disp, mtcars) #' tidy(reg) #' #' if (require("orcutt", quietly = TRUE)) { #' co <- cochrane.orcutt(reg) #' co #' #' tidy(co) #' glance(co) #' } #' #' @export tidy.orcutt <- function(x, ...) { tidy.lm(x, ...) } #' @rdname orcutt_tidiers #' #' @return \code{glance} returns a one-row data frame with the following columns: #' \item{r.squared}{R-squared} #' \item{adj.r.squared}{Adjusted R-squared} #' \item{rho}{Spearman's rho autocorrelation} #' \item{number.interaction}{Number of interactions} #' \item{dw.original}{Durbin-Watson statistic of original fit} #' \item{p.value.original}{P-value of original Durbin-Watson statistic} #' \item{dw.transformed}{Durbin-Watson statistic of transformed fit} #' \item{p.value.transformed}{P-value of autocorrelation after transformation} #' #' @export glance.orcutt <- function(x, ...) 
{ ret <- data.frame(r.squared = x$r.squared, adj.r.squared = x$adj.r.squared, rho = x$rho, number.interaction = x$number.interaction, dw.original = x$DW[1], p.value.original = x$DW[2], dw.transformed = x$DW[3], p.value.transformed = x$DW[4]) ret$rho <- x$rho ret$number.interaction <- x$number.interaction ret } broom/R/plm_tidiers.R0000644000177700017770000000513613204276216015571 0ustar herbrandtherbrandt#' Tidiers for panel regression linear models #' #' @param x a "plm" object representing a panel object #' @param data original dataset #' @param conf.int whether to include a confidence interval #' @param conf.level confidence level of the interval, used only if #' \code{conf.int=TRUE} #' @param exponentiate whether to exponentiate the coefficient estimates #' and confidence intervals #' #' @template boilerplate #' #' @return \code{tidy.plm} returns a data frame with one row per #' coefficient, of the same form as \code{\link{tidy.lm}}. #' #' @seealso \code{\link{lm_tidiers}} #' #' @name plm_tidiers #' #' @examples #' #' if (require("plm", quietly = TRUE)) { #' data("Produc", package = "plm") #' zz <- plm(log(gsp) ~ log(pcap) + log(pc) + log(emp) + unemp, #' data = Produc, index = c("state","year")) #' #' summary(zz) #' #' tidy(zz) #' tidy(zz, conf.int = TRUE) #' tidy(zz, conf.int = TRUE, conf.level = .9) #' #' head(augment(zz)) #' #' glance(zz) #' } #' #' @export tidy.plm <- function(x, conf.int = FALSE, conf.level = .95, exponentiate = FALSE, ...) { tidy.lm(x, conf.int = conf.int, conf.level = conf.level, exponentiate = exponentiate) } #' @rdname plm_tidiers #' #' @return \code{augment} returns a data frame with one row for each #' initial observation, adding the columns #' \item{.fitted}{predicted (fitted) values} #' \item{.resid}{residuals} #' #' @export augment.plm <- function(x, data = as.data.frame(stats::model.frame(x)), ...) 
{ # Random effects and fixed effect (within model) have individual intercepts, # thus we cannot take the ususal procedure for augment(). # Also, there is currently no predict() method for plm objects. augment_columns(x, data, ...) } #' @rdname plm_tidiers #' #' @param ... extra arguments, not used #' #' @return \code{glance} returns a one-row data frame with columns #' \item{r.squared}{The percent of variance explained by the model} #' \item{adj.r.squared}{r.squared adjusted based on the degrees of freedom} #' \item{statistic}{F-statistic} #' \item{p.value}{p-value from the F test, describing whether the full #' regression is significant} #' \item{deviance}{deviance} #' \item{df.residual}{residual degrees of freedom} #' #' @export glance.plm <- function(x, ...) { s <- summary(x) ret <- with(s, data.frame(r.squared = r.squared[1], adj.r.squared = r.squared[2], statistic = fstatistic$statistic, p.value = fstatistic$p.value )) finish_glance(ret, x) } broom/R/anova_tidiers.R0000644000177700017770000000662413204276216016110 0ustar herbrandtherbrandt#' Tidying methods for anova and AOV objects #' #' Tidies the result of an analysis of variance into an ANOVA table. #' Only a \code{tidy} method is provided, not an \code{augment} or #' \code{glance} method. #' #' @param x An object of class "anova", "aov", or "aovlist" #' @param ... extra arguments (not used) #' #' @return A data.frame with columns #' \item{term}{Term within the model, or "Residuals"} #' \item{df}{Degrees of freedom used by this term in the model} #' \item{sumsq}{Sum of squares explained by this term} #' \item{meansq}{Mean of sum of squares among degrees of freedom} #' \item{statistic}{F statistic} #' \item{p.value}{P-value from F test} #' #' In the case of an \code{"aovlist"} object, there is also a \code{stratum} #' column describing the error stratum #' #' @details Note that the "term" column of an ANOVA table can come with #' leading or trailing whitespace, which this tidying method trims. 
#' #' @examples #' #' a <- anova(lm(mpg ~ wt + qsec + disp, mtcars)) #' tidy(a) #' #' a <- aov(mpg ~ wt + qsec + disp, mtcars) #' tidy(a) #' #' al <- aov(mpg ~ wt + qsec + Error(disp / am), mtcars) #' tidy(al) #' #' @name anova_tidiers NULL #' @rdname anova_tidiers #' #' @import dplyr #' #' @export tidy.anova <- function(x, ...) { # x is stats::anova # there are many possible column names that need to be transformed renamers <- c("Df" = "df", "Sum Sq" = "sumsq", "Mean Sq" = "meansq", "F value" = "statistic", "Pr(>F)" = "p.value", "Res.Df" = "res.df", "RSS" = "rss", "Sum of Sq" = "sumsq", "F" = "statistic", "Chisq" = "statistic", "P(>|Chi|)" = "p.value", "Pr(>Chi)" = "p.value", "Pr..Chisq." = "p.value", "Pr..Chi." = "p.value", "p.value" = "p.value", "Chi.sq" = "statistic", "edf" = "edf", "Ref.df" = "ref.df") names(renamers) <- make.names(names(renamers)) x <- fix_data_frame(x) unknown_cols <- setdiff(colnames(x), c("term", names(renamers))) if (length(unknown_cols) > 0) { warning("The following column names in ANOVA output were not ", "recognized or transformed: ", paste(unknown_cols, collapse = ", ")) } ret <- plyr::rename(x, renamers, warn_missing = FALSE) if (!is.null(ret$term)) { # if rows had names, strip whitespace in them ret <- ret %>% mutate(term = stringr::str_trim(term)) } ret } #' @rdname anova_tidiers #' #' @import dplyr #' #' @export tidy.aov <- function(x, ...) { s <- summary(x) tidy.anova(s[[1]]) } #' @rdname anova_tidiers #' @export tidy.aovlist <- function(x, ...) 
{ # must filter out Intercept stratum since it has no dimensions if (names(x)[1L] == "(Intercept)") { x <- x[-1L] } # ret <- plyr::ldply(x, tidy, .id = "stratum") ret <- lapply(x, function(a) tidy(stats::anova(a))) ret <- lapply(names(ret), function(a) dplyr::mutate(ret[[a]], stratum = a)) ret <- do.call("rbind", ret) # get rid of leading and trailing whitespace in term and stratum columns ret <- ret %>% mutate(term = stringr::str_trim(term), stratum = stringr::str_trim(stratum)) ret } broom/R/glance.R0000644000177700017770000000110213204326676014502 0ustar herbrandtherbrandt#' Construct a single row summary "glance" of a model, fit, or other #' object #' #' glance methods always return either a one-row data frame (except on NULL, which #' returns an empty data frame) #' #' @param x model or other R object to convert to single-row data frame #' @param ... other arguments passed to methods #' @export glance <- function(x, ...) UseMethod("glance") #' @export glance.NULL <- function(x, ...) data.frame() #' @export glance.default <- function(x, ...) { stop("glance doesn't know how to deal with data of class ", class(x), call. = FALSE) } broom/R/mcmc_tidiers.R0000644000177700017770000001072413204276216015717 0ustar herbrandtherbrandt#' Tidying methods for MCMC (Stan, JAGS, etc.) fits #' #' @param x an object of class \sQuote{"stanfit"} #' @param pars (character) specification of which parameters to include #' @param estimate.method method for computing point estimate ("mean" or median") #' @param conf.int (logical) include confidence interval? #' @param conf.level probability level for CI #' @param conf.method method for computing confidence intervals #' ("quantile" or "HPDinterval") #' @param droppars Parameters not to include in the output (such #' as log-probability information) #' @param rhat,ess (logical) include Rhat and/or effective sample size estimates? #' @param ... 
unused #' #' @name mcmc_tidiers #' #' @examples #' #' \dontrun{ #' #' # Using example from "RStan Getting Started" #' # https://github.com/stan-dev/rstan/wiki/RStan-Getting-Started #' #' model_file <- system.file("extdata", "8schools.stan", package = "broom") #' #' schools_dat <- list(J = 8, #' y = c(28, 8, -3, 7, -1, 1, 18, 12), #' sigma = c(15, 10, 16, 11, 9, 11, 10, 18)) #' #' if (requireNamespace("rstan", quietly = TRUE)) { #' set.seed(2015) #' rstan_example <- stan(file = model_file, data = schools_dat, #' iter = 100, chains = 2) #' } #' #' } #' #' if (requireNamespace("rstan", quietly = TRUE)) { #' # the object from the above code was saved as rstan_example.rda #' infile <- system.file("extdata", "rstan_example.rda", package = "broom") #' load(infile) #' #' tidy(rstan_example) #' tidy(rstan_example, conf.int = TRUE, pars = "theta") #' #' td_mean <- tidy(rstan_example, conf.int = TRUE) #' td_median <- tidy(rstan_example, conf.int = TRUE, estimate.method = "median") #' #' library(dplyr) #' library(ggplot2) #' tds <- rbind(mutate(td_mean, method = "mean"), #' mutate(td_median, method = "median")) #' #' ggplot(tds, aes(estimate, term)) + #' geom_errorbarh(aes(xmin = conf.low, xmax = conf.high)) + #' geom_point(aes(color = method)) #' } #' #' #' @export tidyMCMC <- function(x, pars, ## ?? do other estimate.method = "mean", conf.int = FALSE, conf.level = 0.95, conf.method = "quantile", droppars = "lp__", rhat = FALSE, ess = FALSE, ...) 
{ stan <- inherits(x, "stanfit") ss <- if (stan) as.matrix(x, pars = pars) else as.matrix(x) ss <- ss[, !colnames(ss) %in% droppars, drop = FALSE] ## drop log-probability info if (!missing(pars) && !stan) { if (length(badpars <- which(!pars %in% colnames(ss))) > 0) { stop("unrecognized parameters: ", pars[badpars]) } ss <- ss[, pars] } estimate.method <- match.arg(estimate.method, c("mean", "median")) m <- switch(estimate.method, mean = colMeans(ss), median = apply(ss, 2, stats::median)) ret <- data.frame(estimate = m, std.error = apply(ss, 2, stats::sd)) if (conf.int) { levs <- c((1 - conf.level) / 2, (1 + conf.level) / 2) conf.method <- match.arg(conf.method, c("quantile", "HPDinterval")) ci <- switch(conf.method, quantile = t(apply(ss, 2, stats::quantile, levs)), coda::HPDinterval(coda::as.mcmc(ss), prob = conf.level)) colnames(ci) <- c("conf.low", "conf.high") ret <- data.frame(ret, ci) } if (rhat || ess) { if (!stan) warning("ignoring 'rhat' and 'ess' (only available for stanfit objects)") summ <- rstan::summary(x, pars = pars, probs = NULL)$summary[, c("Rhat", "n_eff"), drop = FALSE] summ <- summ[!dimnames(summ)[[1L]] %in% droppars,, drop = FALSE] if (rhat) ret$rhat <- summ[, "Rhat"] if (ess) ret$ess <- as.integer(round(summ[, "n_eff"])) } return(fix_data_frame(ret)) } ##' @rdname mcmc_tidiers ##' @export tidy.rjags <- function(x, pars, ## ?? do other estimate.method = "mean", conf.int = FALSE, conf.level = 0.95, conf.method = "quantile", ...) { tidyMCMC(coda::as.mcmc(x$BUGS), pars, estimate.method, conf.int, conf.level, conf.method, droppars = "deviance") } ##' @rdname mcmc_tidiers ##' @export tidy.stanfit <- tidyMCMC broom/R/summary_tidiers.R0000644000177700017770000000223413204276216016472 0ustar herbrandtherbrandt#' Tidiers for summaryDefault objects #' #' Tidy a summary of a vector. #' #' @param x summaryDefault object #' @param ... 
extra arguments, not used #' #' @return Both \code{tidy} and \code{glance} return the same object: #' a one-row data frame with columns #' \item{minimum}{smallest value in original vector} #' \item{q1}{value at the first quartile} #' \item{median}{median of original vector} #' \item{mean}{mean of original vector} #' \item{q3}{value at the third quartile} #' \item{maximum}{largest value in original vector} #' \item{NAs}{number of NA values (if any)} #' @seealso \code{\link{summary}} #' #' @examples #' #' v <- rnorm(1000) #' s <- summary(v) #' s #' #' tidy(s) #' glance(s) #' #' v2 <- c(v,NA) #' tidy(summary(v2)) #' #' @name summary_tidiers #' #' @export tidy.summaryDefault <- function(x, ...) { ret <- as.data.frame(t(as.matrix(x))) cnms <- c("minimum", "q1", "median", "mean", "q3", "maximum") if ("NA's" %in% names(x)) { cnms <- c(cnms, "na") } return(setNames(ret,cnms)) } #' @rdname summary_tidiers #' #' @export glance.summaryDefault <- function(x, ...) { tidy.summaryDefault(x, ...) } broom/R/vector_tidiers.R0000644000177700017770000000177113204276216016304 0ustar herbrandtherbrandt#' Tidy atomic vectors #' #' Turn atomic vectors into data frames, where the names of the vector (if they #' exist) are a column and the values of the vector are a column. #' #' @param x An object of class "numeric", "integer", "character", or "logical". #' Most likely a named vector #' @param ... Extra arguments (not used) #' #' @examples #' #' x <- 1:5 #' names(x) <- letters[1:5] #' tidy(x) #' @export #' @rdname vector_tidiers tidy.numeric <- function(x, ...) { if (!is.null(names(x))) dplyr::data_frame(names = names(x), x = unname(x)) else dplyr::data_frame(x = x) } #' @export #' @rdname vector_tidiers tidy.character <- function(x, ...) { if (!is.null(names(x))) dplyr::data_frame(names = names(x), x = unname(x)) else dplyr::data_frame(x = x) } #' @export #' @rdname vector_tidiers tidy.logical <- function(x, ...) 
{ if (!is.null(names(x))) dplyr::data_frame(names = names(x), x = unname(x)) else dplyr::data_frame(x = x) } broom/R/zoo_tidiers.R0000644000177700017770000000301713204276216015604 0ustar herbrandtherbrandt#' Tidying methods for a zoo object #' #' Tidies \code{zoo} (Z's ordered observations) time series objects. #' \code{zoo} objects are not tidy by default because they contain one row #' for each index and one series per column, rather than one row per #' observation per series. #' #' @param x An object of class \code{"zoo"} #' @param ... extra arguments (not used) #' #' @return \code{tidy} returns a data frame with one row for each observation #' in each series, with the following columns: #' \item{index}{Index (usually date) for the zoo object} #' \item{series}{Name of the series} #' \item{value}{Value of the observation} #' #' @name zoo_tidiers #' #' @examples #' #' if (require("zoo", quietly = TRUE)) { #' set.seed(1071) #' #' # data generated as shown in the zoo vignette #' Z.index <- as.Date(sample(12450:12500, 10)) #' Z.data <- matrix(rnorm(30), ncol = 3) #' colnames(Z.data) <- c("Aa", "Bb", "Cc") #' Z <- zoo(Z.data, Z.index) #' #' tidy(Z) #' #' if (require("ggplot2", quietly = TRUE)) { #' ggplot(tidy(Z), aes(index, value, color = series)) + geom_line() #' ggplot(tidy(Z), aes(index, value)) + geom_line() + #' facet_wrap(~ series, ncol = 1) #' #' Zrolled <- rollmean(Z, 5) #' ggplot(tidy(Zrolled), aes(index, value, color = series)) + geom_line() #' } #' } #' #' @export tidy.zoo <- function(x, ...) { ret <- data.frame(as.matrix(x), index = zoo::index(x)) ret <- tidyr::gather(ret, series, value, -index) ret } broom/R/glm_tidiers.R0000644000177700017770000000235013204276216015553 0ustar herbrandtherbrandt#' Tidying methods for a glm object #' #' Tidy a \code{glm} object. The \code{tidy} and \code{augment} methods are handled #' by \link{lm_tidiers}. #' #' @param x glm object #' @param ... 
extra arguments, not used #' #' @return \code{tidy} and \code{augment} return the same values as do #' \code{\link{tidy.lm}} and \code{\link{augment.lm}}. #' #' @seealso \code{\link{tidy.lm}} and \code{\link{augment.lm}}. Also \code{\link{glm}}, which #' computes the values reported by the \code{glance} method. #' #' @name glm_tidiers #' #' @examples #' #' g <- glm(am ~ mpg, mtcars, family = "binomial") #' glance(g) #' #' @export #' @rdname glm_tidiers #' #' @return \code{glance} returns a one-row data.frame with the columns #' \item{null.deviance}{the deviance of the null model} #' \item{df.null}{the residual degrees of freedom for the null model} #' \item{logLik}{the data's log-likelihood under the model} #' \item{AIC}{the Akaike Information Criterion} #' \item{BIC}{the Bayesian Information Criterion} #' \item{deviance}{deviance} #' \item{df.residual}{residual degrees of freedom} glance.glm <- function(x, ...) { s <- summary(x) ret <- unrowname(as.data.frame(s[c("null.deviance", "df.null")])) finish_glance(ret, x) } broom/R/geeglm_tidiers.R0000644000177700017770000001217113204276216016236 0ustar herbrandtherbrandt#' Tidying methods for generalized estimating equations models #' #' These methods tidy the coefficients of generalized estimating #' equations models of the \code{geeglm} class from functions of the #' \code{geepack} package. #' #' #' @param x An object of class \code{geeglm}, such as from \code{geeglm} #' @param conf.int whether to include a confidence interval #' @param conf.level confidence level of the interval, used only if #' \code{conf.int=TRUE} #' @param exponentiate whether to exponentiate the coefficient estimates #' and confidence intervals (typical for log distributions) #' @param quick whether to compute a smaller and faster version, containing #' only the \code{term} and \code{estimate} columns. #' @param ... Additional arguments to be passed to other methods. Currently #' not used. 
#' #' @details If \code{conf.int=TRUE}, the confidence interval is computed with #' the \code{\link{confint.geeglm}} function. #' #' While \code{tidy} is supported for "geeglm" objects, \code{augment} and #' \code{glance} are not. #' #' If you have missing values in your model data, you may need to #' refit the model with \code{na.action = na.exclude} or deal with the #' missingness in the data beforehand. #' #' @return All tidying methods return a \code{data.frame} without rownames. #' The structure depends on the method chosen. #' #' @name geeglm_tidiers #' #' @examples #' #' if (require('geepack')) { #' data(state) #' ds <- data.frame(state.region, state.x77) #' #' geefit <- geeglm(Income ~ Frost + Murder, id = state.region, #' data = ds, family = gaussian, #' corstr = 'exchangeable') #' #' tidy(geefit) #' tidy(geefit, quick = TRUE) #' tidy(geefit, conf.int = TRUE) #' } #' #' @rdname geeglm_tidiers #' @return \code{tidy.geeglm} returns one row for each coefficient, with five columns: #' \item{term}{The term in the linear model being estimated and tested} #' \item{estimate}{The estimated coefficient} #' \item{std.error}{The standard error from the GEE model} #' \item{statistic}{Wald statistic} #' \item{p.value}{two-sided p-value} #' #' If \code{conf.int=TRUE}, it also includes columns for #' \code{conf.low} and \code{conf.high}, computed with #' \code{\link{confint.geeglm}} (included as part of broom). #' #' @export #' #' @import dplyr #' #' @export tidy.geeglm <- function(x, conf.int = FALSE, conf.level = .95, exponentiate = FALSE, quick = FALSE, ...) 
{ if (quick) { co <- stats::coef(x) ret <- data.frame(term = names(co), estimate = unname(co)) return(ret) } co <- stats::coef(summary(x)) nn <- c("estimate", "std.error", "statistic", "p.value") ret <- fix_data_frame(co, nn[1:ncol(co)]) process_geeglm(ret, x, conf.int = conf.int, conf.level = conf.level, exponentiate = exponentiate) } #' helper function to process a tidied geeglm object #' #' Adds a confidence interval, and possibly exponentiates, a tidied #' object. #' #' @param ret data frame with a tidied version of a coefficient matrix #' @param x a "geeglm" object #' @param conf.int whether to include a confidence interval #' @param conf.level confidence level of the interval, used only if #' \code{conf.int=TRUE} #' @param exponentiate whether to exponentiate the coefficient estimates #' and confidence intervals (typical for log distributions) process_geeglm <- function(ret, x, conf.int = FALSE, conf.level = .95, exponentiate = FALSE) { if (exponentiate) { # save transformation function for use on confidence interval if (is.null(x$family) || (x$family$link != "logit" && x$family$link != "log")) { warning(paste("Exponentiating coefficients, but model did not use", "a log or logit link function")) } trans <- exp } else { trans <- identity } if (conf.int) { # avoid "Waiting for profiling to be done..." message CI <- suppressMessages(stats::confint(x, level = conf.level)) colnames(CI) = c("conf.low", "conf.high") ret <- cbind(ret, trans(unrowname(CI))) } ret$estimate <- trans(ret$estimate) ret } ##' Generate confidence intervals for GEE analyses ##' ##' @title Confidence interval for \code{geeglm} objects ##' @param object The 'geeglm' object ##' @param parm The parameter to calculate the confidence interval ##' for. If not specified, the default is to calculate a confidence ##' interval on all parameters (all variables in the model). #' @param level confidence level of the interval, used only if #' \code{conf.int=TRUE} ##' @param ... 
Additional parameters ##' @details This function was taken from ##' http://stackoverflow.com/a/21221995/2632184. ##' @return Returns the upper and lower confidence intervals confint.geeglm <- function(object, parm, level = 0.95, ...) { cc <- stats::coef(summary(object)) mult <- stats::qnorm((1+level)/2) citab <- with(as.data.frame(cc), cbind(lwr=Estimate-mult*Std.err, upr=Estimate+mult*Std.err)) rownames(citab) <- rownames(cc) citab[parm,] } broom/R/globals.R0000644000177700017770000000107413204366617014703 0ustar herbrandtherbrandtglobalVariables(c(".", "estimate", "std.error", "statistic", "term", "p.value", "effect", "se", "objs", "comp", "N", "lambda", "GCV", "obs", ".id", "level", "group1", "group2", "series", "value", "index", "df.residual", "stratum", "conf.low", "conf.high", "step", "fit", "type", "expCIWidth", "column1", "column2", "PC", "loading", "column", "comparison", "item1", "item2", "Method", "Intercept", "Slope", "method", "key", "Var1", "Var2", "variable", "data")) broom/R/augment.R0000644000177700017770000000172513204326630014712 0ustar herbrandtherbrandt#' Augment data according to a tidied model #' #' Given an R statistical model or other non-tidy object, add columns to the #' original dataset such as predictions, residuals and cluster assignments. #' #' Note that by convention the first argument is almost always \code{data}, #' which specifies the original data object. This is not part of the S3 #' signature, partly because it prevents \link{rowwise_df_tidiers} from #' taking a column name as the first argument. #' #' @details This generic originated in the ggplot2 package, where it was called #' "fortify." #' #' @seealso \code{\link{augment.lm}} #' @param x model or other R object to convert to data frame #' @param ... other arguments passed to methods #' @export augment <- function(x, ...) UseMethod("augment") #' @export augment.NULL <- function(x, ...) data.frame() #' @export augment.default <- function(x, ...) 
{ stop("augment doesn't know how to deal with data of class ", class(x), call. = FALSE) } broom/R/utilities.R0000644000177700017770000002425213204323441015262 0ustar herbrandtherbrandt#' Ensure an object is a data frame, with rownames moved into a column #' #' @param x a data.frame or matrix #' @param newnames new column names, not including the rownames #' @param newcol the name of the new rownames column #' #' @return a data.frame, with rownames moved into a column and new column #' names assigned #' #' @export fix_data_frame <- function(x, newnames = NULL, newcol = "term") { if (!is.null(newnames) && length(newnames) != ncol(x)) { stop("newnames must be NULL or have length equal to number of columns") } if (all(rownames(x) == seq_len(nrow(x)))) { # don't need to move rownames into a new column ret <- data.frame(x, stringsAsFactors = FALSE) if (!is.null(newnames)) { colnames(ret) <- newnames } } else { ret <- data.frame(...new.col... = rownames(x), unrowname(x), stringsAsFactors = FALSE) colnames(ret)[1] <- newcol if (!is.null(newnames)) { colnames(ret)[-1] <- newnames } } unrowname(ret) } #' strip rownames from an object #' #' @param x a data frame unrowname <- function(x) { rownames(x) <- NULL x } #' Remove NULL items in a vector or list #' #' @param x a vector or list compact <- function(x) Filter(Negate(is.null), x) #' insert a row of NAs into a data frame wherever another data frame has NAs #' #' @param x data frame that has one row for each non-NA row in original #' @param original data frame with NAs insert_NAs <- function(x, original) { indices <- rep(NA, nrow(original)) indices[which(stats::complete.cases(original))] = seq_len(nrow(x)) x[indices, ] } #' add fitted values, residuals, and other common outputs to #' an augment call #' #' Add fitted values, residuals, and other common outputs to #' the value returned from \code{augment}. #' #' In the case that a residuals or influence generic is not implemented for the #' model, fail quietly. 
#' #' @param x a model #' @param data original data onto which columns should be added #' @param newdata new data to predict on, optional #' @param type Type of prediction and residuals to compute #' @param type.predict Type of prediction to compute; by default #' same as \code{type} #' @param type.residuals Type of residuals to compute; by default #' same as \code{type} #' @param se.fit Value to pass to predict's \code{se.fit}, or NULL for #' no value #' @param ... extra arguments (not used) #' #' @export augment_columns <- function(x, data, newdata, type, type.predict = type, type.residuals = type, se.fit = TRUE, ...) { notNAs <- function(o) if (is.null(o) || all(is.na(o))) { NULL } else {o} residuals0 <- purrr::possibly(stats::residuals, NULL) influence0 <- purrr::possibly(stats::influence, NULL) cooks.distance0 <- purrr::possibly(stats::cooks.distance, NULL) rstandard0 <- purrr::possibly(stats::rstandard, NULL) predict0 <- purrr::possibly(stats::predict, NULL) # call predict with arguments args <- list(x) if (!missing(newdata)) { args$newdata <- newdata } if (!missing(type.predict)) { args$type <- type.predict } args$se.fit <- se.fit args <- c(args, list(...)) if ("panelmodel" %in% class(x)) { # work around for panel models (plm) # stat::predict() returns wrong fitted values when applied to random or fixed effect panel models [plm(..., model="random"), plm(, ..., model="pooling")] # It works only for pooled OLS models (plm( ..., model="pooling")) pred <- model.frame(x)[, 1] - residuals(x) } else { # suppress warning: geeglm objects complain about predict being used pred <- suppressWarnings(do.call(predict0, args)) } if (is.null(pred)) { # try "fitted" instead- some objects don't have "predict" method pred <- do.call(stats::fitted, args) } if (is.list(pred)) { ret <- data.frame(.fitted = pred$fit) ret$.se.fit <- pred$se.fit } else { ret <- data.frame(.fitted = as.numeric(pred)) } na_action <- if (isS4(x)) { attr(stats::model.frame(x), "na.action") } else { 
stats::na.action(x) } if (missing(newdata) || is.null(newdata)) { if (!missing(type.residuals)) { ret$.resid <- residuals0(x, type = type.residuals) } else { ret$.resid <- residuals0(x) } infl <- influence0(x, do.coef = FALSE) if (!is.null(infl)) { if(is_mgcv(x)){ ret$.hat <- infl ret$.sigma <- NA }else{ ret$.hat <- infl$hat ret$.sigma <- infl$sigma } } # if cooksd and rstandard can be computed and aren't all NA # (as they are in rlm), do so ret$.cooksd <- notNAs(cooks.distance0(x)) ret$.std.resid <- notNAs(rstandard0(x)) original <- data if (class(na_action) == "exclude") { # check if values are missing if (length(stats::residuals(x)) > nrow(data)) { warning("When fitting with na.exclude, rows with NA in ", "original data will be dropped unless those rows are provided ", "in 'data' argument") } } } else { original <- newdata } if (is.null(na_action) || nrow(original) == nrow(ret)) { # no NAs were left out; we can simply recombine original <- fix_data_frame(original, newcol = ".rownames") return(unrowname(cbind(original, ret))) } else if (class(na_action) == "omit") { # if the option is "omit", drop those rows from the data original <- fix_data_frame(original, newcol = ".rownames") original <- original[-na_action, ] return(unrowname(cbind(original, ret))) } # add .rownames column to merge the results with the original; resilent to NAs ret$.rownames <- rownames(ret) original$.rownames <- rownames(original) ret <- merge(original, ret, by = ".rownames") # reorder to line up with original ret <- ret[order(match(ret$.rownames, rownames(original))), ] rownames(ret) <- NULL # if rownames are just the original 1...n, they can be removed if (all(ret$.rownames == seq_along(ret$.rownames))) { ret$.rownames <- NULL } ret } #' Add logLik, AIC, BIC, and other common measurements to a glance of #' a prediction #' #' A helper function for several functions in the glance generic. 
Methods #' such as logLik, AIC, and BIC are defined for many prediction #' objects, such as lm, glm, and nls. This is a helper function that adds #' them to a glance data.frame can be performed. If any of them cannot be #' computed, it fails quietly. #' #' @details In one special case, deviance for objects of the #' \code{lmerMod} class from lme4 is computed with #' \code{deviance(x, REML=FALSE)}. #' #' @param ret a one-row data frame (a partially complete glance) #' @param x the prediction model #' #' @return a one-row data frame with additional columns added, such as #' \item{logLik}{log likelihoods} #' \item{AIC}{Akaike Information Criterion} #' \item{BIC}{Bayesian Information Criterion} #' \item{deviance}{deviance} #' \item{df.residual}{residual degrees of freedom} #' #' Each of these are produced by the corresponding generics #' #' @export finish_glance <- function(ret, x) { ret$logLik <- tryCatch(as.numeric(stats::logLik(x)), error = function(e) NULL) ret$AIC <- tryCatch(stats::AIC(x), error = function(e) NULL) ret$BIC <- tryCatch(stats::BIC(x), error = function(e) NULL) # special case for REML objects (better way?) if ("lmerMod" %in% class(x)) { ret$deviance <- tryCatch(stats::deviance(x, REML=FALSE), error = function(e) NULL) } else { ret$deviance <- tryCatch(stats::deviance(x), error = function(e) NULL) } ret$df.residual <- tryCatch(df.residual(x), error = function(e) NULL) return(unrowname(ret)) } #' Calculate confidence interval as a tidy data frame #' #' Return a confidence interval as a tidy data frame. This directly wraps the #' \code{\link{confint}} function, but ensures it folllows broom conventions: #' column names of \code{conf.low} and \code{conf.high}, and no row names #' #' @param x a model object for which \code{\link{confint}} can be calculated #' @param conf.level confidence level #' @param func Function to use for computing confint #' @param ... 
extra arguments passed on to \code{confint} #' #' @return A data frame with two columns: \code{conf.low} and \code{conf.high}. #' #' @seealso \link{confint} #' #' @export confint_tidy <- function(x, conf.level = .95, func = stats::confint, ...) { # avoid "Waiting for profiling to be done..." message for some models CI <- suppressMessages(func(x, level = conf.level, ...)) if (is.null(dim(CI))) { CI = matrix(CI, nrow=1) } colnames(CI) = c("conf.low", "conf.high") unrowname(as.data.frame(CI)) } #' Expand a dataset to include all factorial combinations of one or more #' variables #' #' This function is deprecated: use \code{tidyr::crossing} instead #' #' @param df a tbl #' @param ... arguments #' @param stringsAsFactors logical specifying if character vectors are #' converted to factors. #' #' @return A tbl #' #' @import dplyr #' @import tidyr #' #' @export inflate <- function(df, ..., stringsAsFactors = FALSE) { .Deprecated("tidyr::crossing") ret <- expand.grid(..., stringsAsFactors = stringsAsFactors) ret <- ret %>% group_by_all() %>% do(data = df) %>% ungroup() %>% tidyr::unnest(data) if (!is.null(groups(df))) { ret <- ret %>% group_by_all() } ret } # utility function from tidyr::col_name col_name <- function (x, default = stop("Please supply column name", call. = FALSE)) { if (is.character(x)) return(x) if (identical(x, quote(expr = ))) return(default) if (is.name(x)) return(as.character(x)) if (is.null(x)) return(x) stop("Invalid column specification", call. = FALSE) } broom/R/tidy.R0000644000177700017770000000275713204276216014235 0ustar herbrandtherbrandt#' Tidy the result of a test into a summary data.frame #' #' The output of tidy is always a data.frame with disposable row names. It is #' therefore suited for further manipulation by packages like dplyr, reshape2, #' ggplot2 and ggvis. #' #' @param x An object to be converted into a tidy data.frame #' @param ... extra arguments #' #' @return a data.frame #' #' @export tidy <- function(x, ...) 
UseMethod("tidy") #' tidy on a NULL input #' #' tidy on a NULL input returns an empty data frame, which means it can be #' combined with other data frames (treated as "empty") #' #' @param x A value NULL #' @param ... extra arguments (not used) #' #' @return An empty data.frame #' #' @export tidy.NULL <- function(x, ...) { data.frame() } #' Default tidying method #' #' By default, tidy uses \code{as.data.frame} to convert its output. This is #' dangerous, as it may fail with an uninformative error message. #' Generally tidy is intended to be used on structured model objects #' such as lm or htest for which a specific S3 object exists. #' #' If you know that you want to use \code{as.data.frame} on your untidy #' object, just use it directly. #' #' @param x an object to be tidied #' @param ... extra arguments (not used) #' #' @return A data frame, from \code{as.data.frame} applied to the input x. #' #' @export tidy.default <- function(x, ...) { warning(paste("No method for tidying an S3 object of class", class(x), ", using as.data.frame")) as.data.frame(x) } broom/R/emmeans_tidiers.R0000644000177700017770000001010713204316736016422 0ustar herbrandtherbrandt#' Tidy estimated marginal means (least-squares means) objects from the emmeans and lsmeans packages #' #' Tidiers for estimated marginal means objects, which report the predicted #' means for factors or factor combinations in a linear model. This #' covers three classes: #' \code{emmGrid}, \code{lsmobj}, and \code{ref.grid}. (The first class is from the \code{emmeans} #' package, and is the successor to the latter two classes, which have slightly different #' purposes within the \code{lsmeans} package but have similar output). #' #' @param x "emmGrid", lsmobj", or "ref.grid" object #' @param conf.level Level of confidence interval, used only for #' \code{emmGrid} and \code{lsmobj} objects #' @param ... 
Extra arguments, passed on to #' \link[emmeans]{summary.emmGrid} or \link[lsmeans]{summary.ref.grid} #' #' @return A data frame with one observation for each estimated #' mean, and one column for each combination of factors, along with #' the following variables: #' \item{estimate}{Estimated least-squares mean} #' \item{std.error}{Standard error of estimate} #' \item{df}{Degrees of freedom} #' \item{conf.low}{Lower bound of confidence interval} #' \item{conf.high}{Upper bound of confidence interval} #' #' When the input is a contrast, each row will contain one estimated #' contrast, along with some of the following columns: #' \item{level1}{One level of the factor being contrasted} #' \item{level2}{Second level} #' \item{contrast}{In cases where the contrast is not made up of #' two levels, describes each} #' \item{statistic}{T-ratio statistic} #' \item{p.value}{P-value} #' #' @details There are a large number of arguments that can be #' passed on to \link[emmeans]{summary.emmGrid} or \link[lsmeans]{summary.ref.grid}. #' By broom convention, we use \code{conf.level} to pass the \code{level} argument. 
#' #' @examples #' #' if (require("emmeans", quietly = TRUE)) { #' # linear model for sales of oranges per day #' oranges_lm1 <- lm(sales1 ~ price1 + price2 + day + store, data = oranges) #' #' # reference grid; see vignette("basics", package = "emmeans") #' oranges_rg1 <- ref_grid(oranges_lm1) #' td <- tidy(oranges_rg1) #' head(td) #' #' # marginal averages #' marginal <- emmeans(oranges_rg1, "day") #' tidy(marginal) #' #' # contrasts #' tidy(contrast(marginal)) #' tidy(contrast(marginal, method = "pairwise")) #' #' # plot confidence intervals #' library(ggplot2) #' ggplot(tidy(marginal), aes(day, estimate)) + #' geom_point() + #' geom_errorbar(aes(ymin = conf.low, ymax = conf.high)) #' #' # by multiple prices #' by_price <- emmeans(oranges_lm1, "day", by = "price2", #' at = list(price1 = 50, price2 = c(40, 60, 80), #' day = c("2", "3", "4")) ) #' by_price #' tidy(by_price) #' #' ggplot(tidy(by_price), aes(price2, estimate, color = day)) + #' geom_line() + #' geom_errorbar(aes(ymin = conf.low, ymax = conf.high)) #' } #' #' @name emmeans_tidiers #' @export tidy.lsmobj <- function(x, conf.level = .95, ...) { tidy_emmeans(x, level = conf.level, ...) } #' @rdname emmeans_tidiers #' @export tidy.ref.grid <- function(x, ...) { tidy_emmeans(x, ...) } #' @rdname emmeans_tidiers #' @export tidy.emmGrid <- function(x, ...) { tidy_emmeans(x, ...) } #' Tidy one of several object from the emmeans or lsmeans packages, which have #' a similar structure #' #' @noRd tidy_emmeans <- function(x, ...) { s <- summary(x, ...) 
ret <- as.data.frame(s) repl <- c(lsmean = "estimate", emmean = "estimate", pmmean = "estimate", prediction = "estimate", SE = "std.error", lower.CL = "conf.low", upper.CL = "conf.high", t.ratio = "statistic") if ("contrast" %in% colnames(ret) && all(stringr::str_detect(ret$contrast, " - "))) { ret <- tidyr::separate_(ret, "contrast", c("level1", "level2"), sep = "-") } colnames(ret) <- plyr::revalue(colnames(ret), repl, warn_missing = FALSE) ret } broom/R/lmodel2_tidiers.R0000644000177700017770000000604713204276216016341 0ustar herbrandtherbrandt#' Tidiers for linear model II objects from the lmodel2 package #' #' Tidy or glance an lmodel2 object. An lmodel2 represents model II simple #' linear regression, where both variables in the regression equation are #' random. #' #' @param x lmodel2 object #' @param ... Extra arguments, not used #' #' @details Note that unlike linear regression, there are always only two terms #' in an lmodel2: Intercept and Slope. Furthermore, these are computed by four #' methods: OLS (ordinary least squares), MA (major axis), SMA (standard major #' axis), and RMA (ranged major axis). See the lmodel2 documentation for more. #' #' Note that there is no \code{augment} method for lmodel2 objects because #' lmodel2 does not provide a \code{predict} or {\code{residuals}} method #' (and since when both observations are random, fitted values and residuals #' have a less clear meaning). #' #' @template boilerplate #' #' @return \code{tidy} returns a data frame with one row for each combination #' of method (OLS/MA/SMA/RMA) and term (always Intercept/Slope). 
Its columns #' are: #' \describe{ #' \item{method}{Either OLS/MA/SMA/RMA} #' \item{term}{Either "Intercept" or "Slope"} #' \item{estimate}{Estimated coefficient} #' \item{conf.low}{Lower bound of 95\% confidence interval} #' \item{conf.high}{Upper bound of 95\% confidence interval} #' } #' #' @examples #' #' if (require("lmodel2", quietly = TRUE)) { #' data(mod2ex2) #' Ex2.res <- lmodel2(Prey ~ Predators, data=mod2ex2, "relative", "relative", 99) #' Ex2.res #' #' tidy(Ex2.res) #' glance(Ex2.res) #' #' # this allows coefficient plots with ggplot2 #' library(ggplot2) #' ggplot(tidy(Ex2.res), aes(estimate, term, color = method)) + #' geom_point() + #' geom_errorbarh(aes(xmin = conf.low, xmax = conf.high)) + #' geom_errorbarh(aes(xmin = conf.low, xmax = conf.high)) #' } #' #' @name lmodel2_tidiers #' #' @export tidy.lmodel2 <- function(x, ...) { ret <- x$regression.results[1:3] %>% select(method = Method, Intercept, Slope) %>% tidyr::gather(term, estimate, -method) %>% arrange(method, term) # add confidence intervals confints <- x$confidence.intervals %>% tidyr::gather(key, value, -Method) %>% tidyr::separate(key, c("level", "term"), "-") %>% mutate(level = ifelse(level == "2.5%", "conf.low", "conf.high")) %>% tidyr::spread(level, value) %>% select(method = Method, term, conf.low, conf.high) ret %>% inner_join(confints, by = c("method", "term")) } #' @rdname lmodel2_tidiers #' #' @return \code{glance} returns a one-row data frame with columns #' \describe{ #' \item{r.squared}{OLS R-squared} #' \item{p.value}{OLS parametric p-value} #' \item{theta}{Angle between OLS lines \code{lm(y ~ x)} and \code{lm(x ~ y)}} #' \item{H}{H statistic for computing confidence interval of major axis slope} #' } #' #' @export glance.lmodel2 <- function(x, ...) 
{ data.frame(r.squared = x$rsquare, theta = x$theta, p.value = x$P.param, H = x$H) } broom/R/gam_tidiers.R0000644000177700017770000000610013204276216015535 0ustar herbrandtherbrandt#' Tidying methods for a generalized additive model (gam) #' #' These methods tidy the coefficients of a "gam" object (generalized additive #' model) into a summary, augment the original data with information on the #' fitted values and residuals, and construct a one-row glance of the model's #' statistics. #' #' The "augment" method is handled by \link{lm_tidiers}. #' #' @param x gam object #' @param parametric logical. Return parametric coefficients (\code{TRUE}) or #' information about smooth terms (\code{FALSE})? #' #' @template boilerplate #' #' @return \code{tidy.gam} called on an object from the gam package, #' or an object from the mgcv package with \code{parametric = FALSE}, returns the #' tidied output of the parametric ANOVA with one row for each term in the formula. #' The columns match those in \link{anova_tidiers}. #' \code{tidy.gam} called on a gam object from the mgcv package with #' \code{parametric = TRUE} returns the fixed coefficients. #' #' @name gam_tidiers #' #' @seealso \link{lm_tidiers}, \link{anova_tidiers} #' #' @examples #' #' if (require("gam", quietly = TRUE)) { #' data(kyphosis) #' g <- gam(Kyphosis ~ s(Age,4) + Number, family = binomial, data = kyphosis) #' tidy(g) #' augment(g) #' glance(g) #' } #' #' @export tidy.gam <- function(x, parametric = FALSE, ...) 
{ if(is_mgcv(x)){ tidy_mcgv(x, parametric) }else{ tidy_gam(x) } } is_mgcv <- function(x) { #figure out if gam is from package gam or mcgv mgcv_names <- c("linear.predictors", "converged", "sig2", "edf", "edf1", "hat", "boundary", "sp", "nsdf", "Ve", "Vp", "rV", "gcv.ubre", "scale.estimated", "var.summary", "cmX", "pred.formula", "pterms", "min.edf", "optimizer") all(mgcv_names %in% names(x)) } tidy_gam <- function(x) { # return the output of the parametric ANOVA tidy(summary(x)$parametric.anova) } tidy_mcgv <- function(x, parametric = FALSE) { if (parametric) { px <- summary(x)$p.table px <- as.data.frame(px) fix_data_frame(px, c("estimate", "std.error", "statistic", "p.value")) } else { sx <- summary(x)$s.table sx <- as.data.frame(sx) class(sx) <- c("anova", "data.frame") tidy(sx) } } #' @rdname gam_tidiers #' #' @param ... extra arguments (not used) #' #' @return \code{glance.gam} returns a one-row data.frame with the columns #' \item{df}{Degrees of freedom used by the coefficients} #' \item{logLik}{the data's log-likelihood under the model} #' \item{AIC}{the Akaike Information Criterion} #' \item{BIC}{the Bayesian Information Criterion} #' \item{deviance}{deviance} #' \item{df.residual}{residual degrees of freedom} #' #' @export glance.gam <- function(x, ...) { if(is_mgcv(x)){ glance_mcgv(x) }else{ glance_gam(x) } } glance_gam <- function(x) { s <- summary(x) ret <- data.frame(df = s$df[1]) finish_glance(ret, x) } glance_mcgv <- function(x) { ret <- data.frame(df = sum(x$edf)) finish_glance(ret, x) } broom/R/nlme_tidiers.R0000644000177700017770000001152413204300607015722 0ustar herbrandtherbrandt#' Tidying methods for mixed effects models #' #' These methods tidy the coefficients of mixed effects models #' of the \code{lme} class from functions of the \code{nlme} package. #' #' @param x An object of class \code{lme}, such as those from \code{lme} #' or \code{nlme} #' #' @return All tidying methods return a \code{data.frame} without rownames. 
#' The structure depends on the method chosen. #' #' @name nlme_tidiers #' #' @examples #' #' if (require("nlme") & require("lme4")) { #' # example regressions are from lme4 documentation, but used for nlme #' lmm1 <- lme(Reaction ~ Days, random=~ Days|Subject, sleepstudy) #' tidy(lmm1) #' tidy(lmm1, effects = "fixed") #' head(augment(lmm1, sleepstudy)) #' glance(lmm1) #' #' #' startvec <- c(Asym = 200, xmid = 725, scal = 350) #' nm1 <- nlme(circumference ~ SSlogis(age, Asym, xmid, scal), #' data = Orange, #' fixed = Asym + xmid + scal ~1, #' random = Asym ~1, #' start = startvec) #' tidy(nm1) #' tidy(nm1, effects = "fixed") #' head(augment(nm1, Orange)) #' glance(nm1) #' } #' #' @rdname nlme_tidiers #' #' @param effects Either "random" (default) or "fixed" #' #' @return \code{tidy} returns one row for each estimated effect, either #' random or fixed depending on the \code{effects} parameter. If #' \code{effects = "random"}, it contains the columns #' \item{group}{the group within which the random effect is being estimated} #' \item{level}{level within group} #' \item{term}{term being estimated} #' \item{estimate}{estimated coefficient} #' #' If \code{effects="fixed"}, \code{tidy} returns the columns #' \item{term}{fixed term being estimated} #' \item{estimate}{estimate of fixed effect} #' \item{std.error}{standard error} #' \item{statistic}{t-statistic} #' \item{p.value}{P-value computed from t-statistic} #' #' @importFrom plyr ldply #' @import dplyr #' #' @export tidy.lme <- function(x, effects = "random", ...) 
{ effects <- match.arg(effects, c("random", "fixed")) if (effects == "fixed") { # return tidied fixed effects rather than random ret <- summary(x)$tTable # p-values are always there in nlme before Douglas banned them in lme4 nn <- c("estimate", "std.error", "statistic", "p.value") # remove DF return(fix_data_frame(ret[,-3], newnames = nn, newcol = "term")) } # fix to be a tidy data frame fix <- function(g) { newg <- fix_data_frame(g, newnames = colnames(g), newcol = "level") # fix_data_frame doesn't create a new column if rownames are numeric, # which doesn't suit our purposes newg$level <- rownames(g) cbind(.id = attr(g,"grpNames"),newg ) } # combine them and gather terms ret <- fix(stats::coef(x)) %>% tidyr::gather(term, estimate, -.id, -level) colnames(ret)[1] <- "group" ret } #' @rdname nlme_tidiers #' #' @param data original data this was fitted on; if not given this will #' attempt to be reconstructed #' @param newdata new data to be used for prediction; optional #' #' @template augment_NAs #' #' @return \code{augment} returns one row for each original observation, #' with columns (each prepended by a .) added. Included are the columns #' \item{.fitted}{predicted values} #' \item{.resid}{residuals} #' \item{.fixed}{predicted values with no random effects} #' #' #' @export augment.lme <- function(x, data = x$data, newdata, ...) { if (is.null(data)){ stop("augment.lme must be called with an explicit 'data' argument for this (n)lme fit because of an inconsistency in nlme.") } # move rownames if necessary if (missing(newdata)) { newdata <- NULL } ret <- augment_columns(x, data, newdata, se.fit = NULL) # add predictions with no random effects (population means) predictions <- stats::predict(x, level=0) if (length(predictions) == nrow(ret)) { ret$.fixed <- predictions } unrowname(ret) } #' @rdname nlme_tidiers #' #' @param ... 
extra arguments (not used) #' #' @return \code{glance} returns one row with the columns #' \item{sigma}{the square root of the estimated residual variance} #' \item{logLik}{the data's log-likelihood under the model} #' \item{AIC}{the Akaike Information Criterion} #' \item{BIC}{the Bayesian Information Criterion} #' \item{deviance}{returned as NA. To quote Brian Ripley on R-help: #' McCullagh & Nelder (1989) would be the authoritative reference, but the 1982 #' first edition manages to use 'deviance' in three separate senses on one #' page. } #' #' @export glance.lme <- function(x, ...) { ret <- unrowname(data.frame(sigma = x$sigma)) ret = finish_glance(ret, x) ret$deviance = NA # ret$deviance = -2 * x$logLik # Or better leave this out totally? ret } broom/R/polca_tidiers.R0000644000177700017770000001331513204276216016075 0ustar herbrandtherbrandt#' Tidiers for poLCA objects #' #' Tidiers for poLCA latent class regression models. Summarize the #' probabilities of each outcome for each variable within each class #' with \code{tidy}, add predictions to the data with \code{augment}, #' or find the log-likelihood/AIC/BIC with \code{glance}. #' #' @param x A poLCA object #' @param data For \code{augment}, the original dataset used to fit #' the latent class model. If not given, uses manifest variables in #' \code{x$y} and, if applicable, covariates in \code{x$x} #' @param ... 
Extra arguments, not used #' #' @name poLCA_tidiers #' #' @template boilerplate #' #' @return \code{tidy} returns a data frame with one row per #' variable-class-outcome combination, with columns: #' \describe{ #' \item{variable}{Manifest variable} #' \item{class}{Latent class ID, an integer} #' \item{outcome}{Outcome of manifest variable} #' \item{estimate}{Estimated class-conditional response probability} #' \item{std.error}{Standard error of estimated probability} #' } #' #' @examples #' #' if (require("poLCA", quietly = TRUE)) { #' library(poLCA) #' library(dplyr) #' #' data(values) #' f <- cbind(A, B, C, D)~1 #' M1 <- poLCA(f, values, nclass = 2, verbose = FALSE) #' #' M1 #' tidy(M1) #' head(augment(M1)) #' glance(M1) #' #' library(ggplot2) #' #' ggplot(tidy(M1), aes(factor(class), estimate, fill = factor(outcome))) + #' geom_bar(stat = "identity", width = 1) + #' facet_wrap(~ variable) #' #' set.seed(2016) #' # compare multiple #' mods <- data_frame(nclass = 1:3) %>% #' group_by(nclass) %>% #' do(mod = poLCA(f, values, nclass = .$nclass, verbose = FALSE)) #' #' # compare log-likelihood and/or AIC, BIC #' mods %>% #' glance(mod) #' #' ## Three-class model with a single covariate. #' #' data(election) #' f2a <- cbind(MORALG,CARESG,KNOWG,LEADG,DISHONG,INTELG, #' MORALB,CARESB,KNOWB,LEADB,DISHONB,INTELB)~PARTY #' nes2a <- poLCA(f2a, election, nclass = 3, nrep = 5, verbose = FALSE) #' #' td <- tidy(nes2a) #' head(td) #' #' # show #' #' ggplot(td, aes(outcome, estimate, color = factor(class), group = class)) + #' geom_line() + #' facet_wrap(~ variable, nrow = 2) + #' theme(axis.text.x = element_text(angle = 90, hjust = 1)) #' #' au <- augment(nes2a) #' head(au) #' au %>% #' count(.class) #' #' # if the original data is provided, it leads to NAs in new columns #' # for rows that weren't predicted #' au2 <- augment(nes2a, data = election) #' head(au2) #' dim(au2) #' } #' #' @export tidy.poLCA <- function(x, ...) 
{ probs <- plyr::ldply(x$probs, reshape2::melt, .id = "variable") %>% transmute(variable, class = stringr::str_match(Var1, "class (.*):")[, 2], outcome = Var2, estimate = value) if (all(stringr::str_detect(probs$outcome, "^Pr\\(\\d*\\)$"))) { probs$outcome <- as.numeric(stringr::str_match(probs$outcome, "Pr\\((\\d*)\\)")[, 2]) } probs <- probs %>% mutate(class = utils::type.convert(class)) probs_se <- plyr::ldply(x$probs.se, reshape2::melt, .id = "variable") probs$std.error <- probs_se$value probs } #' @rdname poLCA_tidiers #' #' @return \code{augment} returns a data frame with one row #' for each original observation, augmented with the following #' columns: #' \describe{ #' \item{.class}{Predicted class, using modal assignment} #' \item{.probability}{Posterior probability of predicted class} #' } #' #' If the \code{data} argument is given, those columns are included in the output #' (only rows for which predictions could be made). #' Otherwise, the \code{y} element of the poLCA object, which contains the #' manifest variables used to fit the model, are used, along with any covariates, #' if present, in \code{x}. #' #' Note that while the probability of all the classes (not just the predicted #' modal class) can be found in the \code{posterior} element, these are not #' included in the augmented output, since it would result in potentially #' many additional columns, which augment tends to avoid. #' #' @export augment.poLCA <- function(x, data, ...) { indices <- cbind(seq_len(nrow(x$posterior)), x$predclass) ret <- data.frame(.class = x$predclass, .probability = x$posterior[indices], stringsAsFactors = FALSE) if (missing(data)) { data <- x$y if (!is.null(x$x)) { data <- cbind(data, x$x) } } else { if (nrow(data) != nrow(ret)) { # rows may have been removed for NAs. 
# For those rows, the new columns get NAs rownames(ret) <- rownames(x$y) ret <- ret[rownames(data), ] } } ret <- cbind(data, ret) unrowname(ret) } #' @rdname poLCA_tidiers #' #' @return \code{glance} returns a one-row data frame with the #' following columns: #' \describe{ #' \item{logLik}{the data's log-likelihood under the model} #' \item{AIC}{the Akaike Information Criterion} #' \item{BIC}{the Bayesian Information Criterion} #' \item{g.squared}{The likelihood ratio/deviance statistic} #' \item{chi.squared}{The Pearson Chi-Square goodness of fit statistic #' for multiway tables} #' \item{df}{Number of parameters estimated, and therefore degrees of #' freedom used} #' \item{df.residual}{Number of residual degrees of freedom left} #' } #' #' @export glance.poLCA <- function(x, ...) { data.frame(logLik = x$llik, AIC = x$aic, BIC = x$bic, g.squared = x$Gsq, chi.squared = x$Chisq, df = x$npar, df.residual = x$resid.df) } broom/R/optim_tidiers.R0000644000177700017770000000311613204276216016125 0ustar herbrandtherbrandt#' Tidiers for lists returned from optim #' #' Tidies objects returned by the \code{\link{optim}} function for #' general-purpose minimization and maximization. #' #' @param x list returned from \code{optim} #' @param ... extra arguments #' #' @template boilerplate #' #' @return \code{tidy} returns a data frame with one row per parameter that #' was estimated, with columns #' \item{parameter}{name of the parameter, or \code{parameter1}, #' \code{parameter2}... if the input vector is not named} #' \item{value}{parameter value that minimizes or maximizes the output} #' #' @examples #' #' func <- function(x) { #' (x[1] - 2)^2 + (x[2] - 3)^2 + (x[3] - 8)^2 #' } #' #' o <- optim(c(1, 1, 1), func) #' #' tidy(o) #' glance(o) #' #' @name optim_tidiers tidy_optim <- function(x, ...) 
{ if (is.null(names(x$par))) { names(x$par) <- paste0("parameter", seq_along(x$par)) } data.frame(parameter = names(x$par), value = unname(x$par)) } #' @rdname optim_tidiers #' #' @return \code{glance} returns a one-row data frame with the columns #' \item{value}{minimized or maximized output value} #' \item{function.count}{number of calls to \code{fn}} #' \item{gradient.count}{number of calls to \code{gr}} #' \item{convergence}{convergence code representing the error state} #' #' @seealso \code{\link{optim}} glance_optim <- function(x, ...) { unrowname(data.frame(value = x$value, function.count = x$counts["function"], gradient.count = x$counts["gradient"], convergence = x$convergence)) } broom/R/rowwise_df_tidiers.R0000644000177700017770000001075513204276216017154 0ustar herbrandtherbrandt## helper and setup functions tidy_ <- function(x, ...) UseMethod("tidy_") augment_ <- function(x, data, ...) UseMethod("augment_") glance_ <- function(x, ...) UseMethod("glance_") apply_rowwise_df <- function(x, object, func, data, ...) { # group by columns that are not lists groupers <- colnames(x)[sapply(x, function(e) class(e)[1]) != "list"] groupers <- setdiff(groupers, object) # suppress "group_by" warning x <- suppressWarnings(group_by_(x, .dots = as.list(groupers))) # let the "data" argument specify column (for augment) if (!missing(data)) { if (as.character(substitute(data)) %in% colnames(x)) { data_column <- col_name(substitute(data)) do(x, func(.[[object]][[1]], data = .[[data_column]][[1]], ...)) } else { do(x, func(.[[object]][[1]], data = data, ...)) } } else { do(x, func(.[[object]][[1]], ...)) } } wrap_rowwise_df_ <- function(func) { function(x, object, ...) apply_rowwise_df(x, object, func, ...) } wrap_rowwise_df <- function(func) { function(x, object, ...) { n <- col_name(substitute(object)) func(x, n, ...) 
} } #' Tidying methods for rowwise_dfs from dplyr, for tidying each row and #' recombining the results #' #' These \code{tidy}, \code{augment} and \code{glance} methods are for #' performing tidying on each row of a rowwise data frame created by dplyr's #' \code{group_by} and \code{do} operations. They first group a rowwise data #' frame based on all columns that are not lists, then perform the tidying #' operation on the specified column. This greatly shortens a common idiom #' of extracting tidy/augment/glance outputs after a do statement. #' #' @param x a rowwise_df #' @param object the column name of the column containing the models to #' be tidied. For tidy, augment, and glance it should be the bare name; for #' _ methods it should be quoted. #' @param ... additional arguments to pass on to the respective tidying method #' #' @return A \code{"grouped_df"}, where the non-list columns of the #' original are used as grouping columns alongside the tidied outputs. #' #' @details Note that this functionality is not currently implemented for #' data.tables, since the result of the do operation is difficult to #' distinguish from a regular data.table. 
#' #' @examples #' #' library(dplyr) #' regressions <- mtcars %>% #' group_by(cyl) %>% #' do(mod = lm(mpg ~ wt, .)) #' #' regressions #' #' regressions %>% tidy(mod) #' regressions %>% augment(mod) #' regressions %>% glance(mod) #' #' # we can provide additional arguments to the tidying function #' regressions %>% tidy(mod, conf.int = TRUE) #' #' # we can also include the original dataset as a "data" argument #' # to augment: #' regressions <- mtcars %>% #' group_by(cyl) %>% #' do(mod = lm(mpg ~ wt, .), original = (.)) #' #' # this allows all the original columns to be included: #' regressions %>% augment(mod) # doesn't include all original #' regressions %>% augment(mod, data = original) # includes all original #' #' @name rowwise_df_tidiers NULL #' @rdname rowwise_df_tidiers #' @export tidy.rowwise_df <- wrap_rowwise_df(tidy_.rowwise_df) #' @rdname rowwise_df_tidiers #' @export tidy_.rowwise_df <- wrap_rowwise_df_(tidy) #' @rdname rowwise_df_tidiers #' @export augment.rowwise_df <- wrap_rowwise_df(augment_.rowwise_df) #' @rdname rowwise_df_tidiers #' @export augment_.rowwise_df <- wrap_rowwise_df_(augment) #' @rdname rowwise_df_tidiers #' @export glance.rowwise_df <- wrap_rowwise_df(glance_.rowwise_df) #' @rdname rowwise_df_tidiers #' @export glance_.rowwise_df <- wrap_rowwise_df_(glance) # when dplyr::do is performed on an ungrouped tbl, it results in # a one-row tbl_df with list columns. Need a special case for this. wrap_tbl_df <- function(func, rowwise_func) { function(x, ...) { # use the rowwise function if it is a one-row tbl_df with all list # columns if (nrow(x) == 1 && all(sapply(x, function(col) inherits(col, "list")))) { return(rowwise_func(x, ...)) } else { # otherwise, do traditional tidy/augment/glancing # (generally goes to the data.frame method) func(x, ...) 
} } } #' @rdname rowwise_df_tidiers #' @export tidy.tbl_df <- wrap_tbl_df(tidy, tidy.rowwise_df) #' @rdname rowwise_df_tidiers #' @export augment.tbl_df <- wrap_tbl_df(augment, augment.rowwise_df) #' @rdname rowwise_df_tidiers #' @export glance.tbl_df <- wrap_tbl_df(glance, glance.rowwise_df) broom/R/rstanarm_tidiers.R0000644000177700017770000002231613204276216016627 0ustar herbrandtherbrandt#' Tidying methods for an rstanarm model #' #' These methods tidy the estimates from \code{\link[rstanarm]{stanreg-objects}} #' (fitted model objects from the \pkg{rstanarm} package) into a summary. #' #' #' @return All tidying methods return a \code{data.frame} without rownames. #' The structure depends on the method chosen. #' #' @seealso \code{\link[rstanarm]{summary.stanreg}} #' #' @name rstanarm_tidiers #' #' @param x Fitted model object from the \pkg{rstanarm} package. See #' \code{\link[rstanarm]{stanreg-objects}}. #' @examples #' #' \dontrun{ #' fit <- stan_glmer(mpg ~ wt + (1|cyl) + (1+wt|gear), data = mtcars, #' iter = 300, chains = 2) #' # non-varying ("population") parameters #' tidy(fit, intervals = TRUE, prob = 0.5) #' #' # hierarchical sd & correlation parameters #' tidy(fit, parameters = "hierarchical") #' #' # group-specific deviations from "population" parameters #' tidy(fit, parameters = "varying") #' #' # glance method #' glance(fit) #' glance(fit, looic = TRUE, cores = 1) #' } #' NULL #' @rdname rstanarm_tidiers #' @param parameters One or more of \code{"non-varying"}, \code{"varying"}, #' \code{"hierarchical"}, \code{"auxiliary"} (can be abbreviated). See the #' Value section for details. #' @param prob See \code{\link[rstanarm]{posterior_interval}}. #' @param intervals If \code{TRUE} columns for the lower and upper bounds of the #' \code{100*prob}\% posterior uncertainty intervals are included. See #' \code{\link[rstanarm]{posterior_interval}} for details. 
#'
#' @return
#' When \code{parameters="non-varying"} (the default), \code{tidy.stanreg} returns
#' one row for each coefficient, with three columns:
#' \item{term}{The name of the corresponding term in the model.}
#' \item{estimate}{A point estimate of the coefficient (posterior median).}
#' \item{std.error}{A standard error for the point estimate based on
#' \code{\link[stats]{mad}}. See the \emph{Uncertainty estimates} section in
#' \code{\link[rstanarm]{print.stanreg}} for more details.}
#'
#' For models with group-specific parameters (e.g., models fit with
#' \code{\link[rstanarm]{stan_glmer}}), setting \code{parameters="varying"}
#' selects the group-level parameters instead of the non-varying regression
#' coefficients. Additional columns are added indicating the \code{level} and
#' \code{group}. Specifying \code{parameters="hierarchical"} selects the
#' standard deviations and (for certain models) correlations of the group-level
#' parameters.
#'
#' Setting \code{parameters="auxiliary"} will select parameters other than those
#' included by the other options. The particular parameters depend on which
#' \pkg{rstanarm} modeling function was used to fit the model. For example, for
#' models fit using \code{\link[rstanarm]{stan_glm.nb}} the overdispersion
#' parameter is included if \code{parameters="aux"}, for
#' \code{\link[rstanarm]{stan_lm}} the auxiliary parameters include the residual
#' SD, R^2, and log(fit_ratio), etc.
#'
#' If \code{intervals=TRUE}, columns for the \code{lower} and \code{upper}
#' values of the posterior intervals computed with
#' \code{\link[rstanarm]{posterior_interval}} are also included.
#'
#' @export
tidy.stanreg <- function(x,
                         parameters = "non-varying",
                         intervals = FALSE,
                         prob = 0.9,
                         ...)
{ parameters <- match.arg(parameters, several.ok = TRUE, choices = c("non-varying", "varying", "hierarchical", "auxiliary")) if (any(parameters %in% c("varying", "hierarchical"))) { if (!inherits(x, "lmerMod")) stop("Model does not have 'varying' or 'hierarchical' parameters.") } nn <- c("estimate", "std.error") ret_list <- list() if ("non-varying" %in% parameters) { nv_pars <- names(rstanarm::fixef(x)) ret <- cbind(rstanarm::fixef(x), rstanarm::se(x)[nv_pars]) if (inherits(x, "polr")) { # also include cutpoints cp <- x$zeta se_cp <- apply(as.matrix(x, pars = names(cp)), 2, stats::mad) ret <- rbind(ret, cbind(cp, se_cp)) nv_pars <- c(nv_pars, names(cp)) } if (intervals) { cifix <- rstanarm::posterior_interval( object = x, pars = nv_pars, prob = prob ) ret <- data.frame(ret, cifix) nn <- c(nn, "lower", "upper") } ret_list$non_varying <- fix_data_frame(ret, newnames = nn) } if ("auxiliary" %in% parameters) { nn <- c("estimate", "std.error") parnames <- rownames(x$stan_summary) auxpars <- c("sigma", "shape", "overdispersion", "R2", "log-fit_ratio", grep("mean_PPD", parnames, value = TRUE)) auxpars <- auxpars[which(auxpars %in% parnames)] ret <- summary(x, pars = auxpars)[, c("50%", "sd"), drop = FALSE] if (intervals) { ints <- rstanarm::posterior_interval(x, pars = auxpars, prob = prob) ret <- data.frame(ret, ints) nn <- c(nn,"lower","upper") } ret_list$auxiliary <- fix_data_frame(ret, newnames = nn) } if ("hierarchical" %in% parameters) { ret <- as.data.frame(rstanarm::VarCorr(x)) ret[] <- lapply(ret, function(x) if (is.factor(x)) as.character(x) else x) rscale <- "sdcor" # FIXME ran_prefix <- c("sd", "cor") # FIXME pfun <- function(x) { v <- na.omit(unlist(x)) if (length(v)==0) v <- "Observation" p <- paste(v,collapse=".") if (!identical(ran_prefix,NA)) { p <- paste(ran_prefix[length(v)],p,sep="_") } return(p) } rownames(ret) <- paste(apply(ret[c("var1","var2")],1,pfun), ret[,"grp"], sep = ".") ret_list$hierarchical <- fix_data_frame(ret[c("grp", rscale)], newnames 
= c("group", "estimate")) } if ("varying" %in% parameters) { nn <- c("estimate", "std.error") s <- summary(x, pars = "varying") ret <- cbind(s[, "50%"], rstanarm::se(x)[rownames(s)]) if (intervals) { ciran <- rstanarm::posterior_interval(x, regex_pars = "^b\\[", prob = prob) ret <- data.frame(ret,ciran) nn <- c(nn,"lower","upper") } double_splitter <- function(x, split1, sel1, split2, sel2) { y <- unlist(lapply(strsplit(x, split = split1, fixed = TRUE), "[[", sel1)) unlist(lapply(strsplit(y, split = split2, fixed = TRUE), "[[", sel2)) } vv <- fix_data_frame(ret, newnames = nn) nn <- c("level", "group", "term", nn) nms <- vv$term vv$term <- NULL lev <- double_splitter(nms, ":", 2, "]", 1) grp <- double_splitter(nms, " ", 2, ":", 1) trm <- double_splitter(nms, " ", 1, "[", 2) vv <- data.frame(lev, grp, trm, vv) ret_list$varying <- fix_data_frame(vv, newnames = nn) } return(rbind.fill(ret_list)) } #' @rdname rstanarm_tidiers #' #' @param looic Should the LOO Information Criterion (and related info) be #' included? See \code{\link[rstanarm]{loo.stanreg}} for details. Note: for #' models fit to very large datasets this can be a slow computation. #' @param ... For \code{glance}, if \code{looic=TRUE}, optional arguments to #' \code{\link[rstanarm]{loo.stanreg}}. #' #' @return \code{glance} returns one row with the columns #' \item{algorithm}{The algorithm used to fit the model.} #' \item{pss}{The posterior sample size (except for models fit using #' optimization).} #' \item{nobs}{The number of observations used to fit the model.} #' \item{sigma}{The square root of the estimated residual variance, if #' applicable. 
If not applicable (e.g., for binomial GLMs), \code{sigma} will #' be given the value \code{1} in the returned object.} #' #' If \code{looic=TRUE}, then the following additional columns are also #' included: #' \item{looic}{The LOO Information Criterion.} #' \item{elpd_loo}{The expected log predictive density (\code{elpd_loo = -2 * #' looic}).} #' \item{p_loo}{The effective number of parameters.} #' #' @export glance.stanreg <- function(x, looic = FALSE, ...) { sigma <- if (getRversion() >= "3.3.0") { get("sigma", asNamespace("stats")) } else { get("sigma", asNamespace("rstanarm")) } ret <- data.frame(algorithm = x$algorithm) if (x$algorithm != "optimizing") { pss <- x$stanfit@sim$n_save if (x$algorithm == "sampling") pss <- sum(pss - x$stanfit@sim$warmup2) ret <- data.frame(ret, pss = pss) } ret <- data.frame(ret, nobs = stats::nobs(x), sigma = sigma(x)) if (looic) { if (x$algorithm == "sampling") { loo1 <- rstanarm::loo(x, ...) ret <- data.frame(ret, loo1[c("looic", "elpd_loo", "p_loo")]) } else { message("looic only available for models fit using MCMC") } } unrowname(ret) } broom/R/speedlm_tidiers.R0000644000177700017770000000441513204317475016434 0ustar herbrandtherbrandt#' Tidying methods for a speedlm model #' #' These methods tidy the coefficients of a "speedlm" object #' into a summary, augment the original data with information on the #' fitted values and residuals, and construct a one-row glance of the model's #' statistics. #' #' #' @param x speedlm object #' #' @template boilerplate #' #' @return \code{tidy.speedlm} returns the tidied output of the #' lm with one row for each term in the formula. #' The columns match those in \link{lm_tidiers}. 
#' #' @name speedlm_tidiers #' @inheritParams lm_tidiers #' @seealso \link{lm_tidiers}, \link{biglm_tidiers} #' #' @examples #' #' if (require("speedglm", quietly = TRUE)) { #' mod <- speedglm::speedlm(mpg ~ wt + qsec, data = mtcars) #' tidy(mod) #' glance(mod) #' augment(mod) #' } #' #' @export tidy.speedlm <- tidy.lm #' @rdname speedlm_tidiers #' #' @param ... extra arguments (not used) #' #' @return \code{glance.speedlm} returns a one-row data.frame with the columns #' \item{r.squared}{The percent of variance explained by the model} #' \item{adj.r.squared}{r.squared adjusted based on the degrees of freedom} #' \item{statistic}{F-statistic} #' \item{p.value}{p-value from the F test, describing whether the full #' regression is significant} #' \item{df}{Degrees of freedom used by the coefficients} #' \item{logLik}{the data's log-likelihood under the model} #' \item{AIC}{the Akaike Information Criterion} #' \item{BIC}{the Bayesian Information Criterion} #' \item{deviance}{deviance} #' \item{df.residual}{residual degrees of freedom} #' #' @export glance.speedlm <- function(x, ...) { s <- summary(x) ret <- data.frame(r.squared = s$r.squared, adj.r.squared = s$adj.r.squared, statistic = s$fstatistic[1], p.value = s$f.pvalue, df = x$nvar) ret <- finish_glance(ret, x) ret$deviance <- x$RSS # overwritten by finish_glance ret } #' @rdname speedlm_tidiers #' @param data data frame to augment #' @param newdata new data to use for predictions, optional #' @return \code{augment.speedlm} returns one row for each observation, with just one column added: #' \item{.fitted}{Fitted values of model} #' @export augment.speedlm <- function(x, data = stats::model.frame(x), newdata = data, ...) 
{ augment_columns(x, data, newdata) } broom/R/brms_tidiers.R0000644000177700017770000001122713204276216015742 0ustar herbrandtherbrandt#' Tidying methods for a brms model #' #' These methods tidy the estimates from #' \code{\link[brms:brmsfit-class]{brmsfit-objects}} #' (fitted model objects from the \pkg{brms} package) into a summary. #' #' @return All tidying methods return a \code{data.frame} without rownames. #' The structure depends on the method chosen. #' #' @seealso \code{\link[brms]{brms}}, \code{\link[brms]{brmsfit-class}} #' #' @name brms_tidiers #' #' @param x Fitted model object from the \pkg{brms} package. See #' \code{\link[brms]{brmsfit-class}}. #' @examples #' \dontrun{ #' library(brms) #' fit <- brm(mpg ~ wt + (1|cyl) + (1+wt|gear), data = mtcars, #' iter = 500, chains = 2) #' tidy(fit) #' tidy(fit, parameters = "^sd_", intervals = FALSE) #' tidy(fit, par_type = "non-varying") #' tidy(fit, par_type = "varying") #' tidy(fit, par_type = "hierarchical", robust = TRUE) #' } #' NULL #' @rdname brms_tidiers #' @param parameters Names of parameters for which a summary should be #' returned, as given by a character vector or regular expressions. #' If \code{NA} (the default) summarized parameters are specified #' by the \code{par_type} argument. #' @param par_type One of \code{"all"}, \code{"non-varying"}, #' \code{"varying"}, or \code{"hierarchical"} (can be abbreviated). #' See the Value section for details. #' @param robust Whether to use median and median absolute deviation rather #' than mean and standard deviation. #' @param intervals If \code{TRUE} columns for the lower and upper bounds of #' posterior uncertainty intervals are included. #' @param prob Defines the range of the posterior uncertainty intervals, #' such that \code{100 * prob}\% of the parameter's posterior distribution #' lies within the corresponding interval. #' Only used if \code{intervals = TRUE}. #' @param ... 
Extra arguments, not used #' #' @return #' When \code{parameters = NA}, the \code{par_type} argument is used #' to determine which parameters to summarize. #' #' Generally, \code{tidy.brmsfit} returns #' one row for each coefficient, with at least three columns: #' \item{term}{The name of the model parameter.} #' \item{estimate}{A point estimate of the coefficient (mean or median).} #' \item{std.error}{A standard error for the point estimate (sd or mad).} #' #' When \code{par_type = "non-varying"}, only population-level #' effects are returned. #' #' When \code{par_type = "varying"}, only group-level effects are returned. #' In this case, two additional columns are added: #' \item{group}{The name of the grouping factor.} #' \item{level}{The name of the level of the grouping factor.} #' #' Specifying \code{par_type = "hierarchical"} selects the #' standard deviations and correlations of the group-level parameters. #' #' If \code{intervals = TRUE}, columns for the \code{lower} and #' \code{upper} bounds of the posterior intervals computed. #' #' @export tidy.brmsfit <- function(x, parameters = NA, par_type = c("all", "non-varying", "varying", "hierarchical"), robust = FALSE, intervals = TRUE, prob = 0.9, ...) { use_par_type <- anyNA(parameters) if (use_par_type) { par_type <- match.arg(par_type) if (par_type == "all") { parameters <- NA } else if (par_type == "non-varying") { parameters <- "^b_" } else if (par_type == "varying") { parameters <- "^r_" } else if (par_type == "hierarchical") { parameters <- c("^sd_", "^cor_") } } samples <- brms::posterior_samples(x, parameters) if (is.null(samples)) { stop("No parameter name matches the specified pattern.", call. 
= FALSE) } out <- data.frame(term = names(samples), stringsAsFactors = FALSE) if (use_par_type) { if (par_type == "non-varying") { out$term <- gsub("^b_", "", out$term) } else if (par_type == "varying") { out$term <- gsub("^r_", "", out$term) out$group <- gsub("\\[.*", "", out$term) out$level <- gsub(".*\\[|,.*", "", out$term) out$term <- gsub(".*,|\\]", "", out$term) } # no renaming if par_type %in% c("all", "hierarchical") } if (robust) { out$estimate <- apply(samples, 2, stats::median) out$std.error <- apply(samples, 2, stats::mad) } else { out$estimate <- apply(samples, 2, base::mean) out$std.error <- apply(samples, 2, stats::sd) } if (intervals) { stopifnot(length(prob) == 1L) probs <- c((1 - prob) / 2, 1 - (1 - prob) / 2) out[, c("lower", "upper")] <- t(apply(samples, 2, stats::quantile, probs = probs)) } out } broom/R/data.frame_tidiers.R0000644000177700017770000000507413204276216017004 0ustar herbrandtherbrandt#' Tidiers for data.frame objects #' #' These perform tidy summaries of data.frame objects. \code{tidy} produces #' summary statistics about each column, while \code{glance} simply reports #' the number of rows and columns. Note that \code{augment.data.frame} will #' throw an error. #' #' @param x A data.frame #' @param data data, not used #' @param ... extra arguments: for \code{tidy}, these are passed on to #' \code{\link{describe}} from \code{psych} package #' #' @details The \code{tidy} method calls the psych method #' \code{\link{describe}} directly to produce its per-columns summary #' statistics. 
#' #' @examples #' #' td <- tidy(mtcars) #' td #' #' glance(mtcars) #' #' library(ggplot2) #' # compare mean and standard deviation #' ggplot(td, aes(mean, sd)) + geom_point() + #' geom_text(aes(label = column), hjust = 1, vjust = 1) + #' scale_x_log10() + scale_y_log10() + geom_abline() #' #' @name data.frame_tidiers #' @rdname data.frame_tidiers #' #' @return \code{tidy.data.frame} produces a data frame with one #' row per original column, containing summary statistics of each: #' \item{column}{name of original column} #' \item{n}{Number of valid (non-NA) values} #' \item{mean}{mean} #' \item{sd}{standard deviation} #' \item{median}{median} #' \item{trimmed}{trimmed mean, with trim defaulting to .1} #' \item{mad}{median absolute deviation (from the median)} #' \item{min}{minimum value} #' \item{max}{maximum value} #' \item{range}{range} #' \item{skew}{skew} #' \item{kurtosis}{kurtosis} #' \item{se}{standard error} #' #' @importFrom psych describe #' #' @seealso \code{\link{describe}} #' #' @export tidy.data.frame <- function(x, ...) { ret <- psych::describe(x, ...) ret <- fix_data_frame(ret, newcol = "column") # remove vars column, which contains an index (not useful here) ret$vars <- NULL ret } #' @rdname data.frame_tidiers #' #' @export augment.data.frame <- function(x, data, ...) { stop(paste("augment's first argument should be a model, not a data.frame")) } #' @rdname data.frame_tidiers #' #' @return \code{glance} returns a one-row data.frame with #' \item{nrow}{number of rows} #' \item{ncol}{number of columns} #' \item{complete.obs}{number of rows that have no missing values} #' \item{na.fraction}{fraction of values across all rows and columns that #' are missing} #' #' @export glance.data.frame <- function(x, ...) 
{ ret <- data.frame(nrow = nrow(x), ncol = ncol(x)) ret$complete.obs <- sum(stats::complete.cases(x)) ret$na.fraction <- mean(is.na(x)) return(ret) } broom/R/htest_tidiers.R0000644000177700017770000000613213204276216016125 0ustar herbrandtherbrandt#' Tidying methods for an htest object #' #' Tidies hypothesis test objects, such as those from \code{cor.test}, #' \code{t.test}, and \code{wilcox.test}, into a one-row data frame. #' #' @details No \code{augment} method is provided for \code{"htest"}, #' since there is no sense in which a hypothesis test generates one #' value for each observation. #' #' @param x An object of class \code{"htest"} #' @param ... extra arguments (not used) #' #' @return Both \code{tidy} and \code{glance} return the same output, #' a one-row data frame with one or more of the following columns: #' \item{estimate}{Estimate of the effect size} #' \item{statistic}{Test statistic used to compute the p-value} #' \item{p.value}{P-value} #' \item{parameter}{Parameter field in the htest, typically degrees of #' freedom} #' \item{conf.low}{Lower bound on a confidence interval} #' \item{conf.high}{Upper bound on a confidence interval} #' \item{estimate1}{Sometimes two estimates are computed, such as in a #' two-sample t-test} #' \item{estimate2}{Sometimes two estimates are computed, such as in a #' two-sample t-test} #' \item{method}{Method used to compute the statistic as a string} #' \item{alternative}{Alternative hypothesis as a string} #' #' Which columns are included depends on the hypothesis test used. #' #' @examples #' #' tt <- t.test(rnorm(10)) #' tidy(tt) #' glance(tt) # same output for all htests #' #' tt <- t.test(mpg ~ am, data = mtcars) #' tidy(tt) #' #' wt <- wilcox.test(mpg ~ am, data = mtcars) #' tidy(wt) #' #' ct <- cor.test(mtcars$wt, mtcars$mpg) #' tidy(ct) #' #' @name htest_tidiers NULL #' @rdname htest_tidiers #' @export tidy.htest <- function(x, ...) 
{ ret <- x[c("estimate", "statistic", "p.value", "parameter")] # estimate may have multiple values if (length(ret$estimate) > 1) { names(ret$estimate) <- paste0("estimate", seq_along(ret$estimate)) ret <- c(ret$estimate, ret) ret$estimate <- NULL # special case: in a t-test, estimate = estimate1 - estimate2 if (x$method == "Welch Two Sample t-test") { ret <- c(estimate=ret$estimate1 - ret$estimate2, ret) } } # parameter may have multiple values as well, such as oneway.test if (length(x$parameter) > 1) { ret$parameter <- NULL if (is.null(names(x$parameter))) { warning("Multiple unnamed parameters in hypothesis test; dropping them") } else { message("Multiple parameters; naming those columns ", paste(make.names(names(x$parameter)), collapse = ", ")) ret <- append(ret, x$parameter, after = 1) } } ret <- compact(ret) if (!is.null(x$conf.int)) { ret <- c(ret, conf.low=x$conf.int[1], conf.high=x$conf.int[2]) } if (!is.null(x$method)) { ret <- c(ret, method = as.character(x$method)) } if (!is.null(x$alternative)) { ret <- c(ret, alternative = as.character(x$alternative)) } unrowname(as.data.frame(ret)) } #' @rdname htest_tidiers #' @export glance.htest <- function(x, ...) tidy(x) broom/R/broom.R0000644000177700017770000000150713204276216014372 0ustar herbrandtherbrandt#' @title Convert Statistical Analysis Objects into Tidy Data Frames #' @name broom #' @description Convert statistical analysis objects from R into tidy data frames, #' so that they can more easily be combined, reshaped and otherwise processed #' with tools like dplyr, tidyr and ggplot2. The package provides three S3 #' generics: tidy, which summarizes a model's statistical findings such as #' coefficients of a regression; augment, which adds columns to the original #' data such as predictions, residuals and cluster assignments; and glance, #' which provides a one-row summary of model-level statistics. 
#' #' @importFrom stats AIC coef confint fitted logLik model.frame na.omit #' @importFrom stats predict qnorm qt residuals setNames var #' #' @importFrom utils head #' #' @docType package #' @aliases broom broom-package NULL broom/R/ivreg_tidiers.R0000644000177700017770000000746113204321535016113 0ustar herbrandtherbrandt#' Tidiers for ivreg models #' #' @param x An "ivreg" object #' @param data Original dataset #' @param conf.int Whether to include a confidence interval #' @param conf.level Confidence level of the interval, used only if #' \code{conf.int=TRUE} #' @param exponentiate Whether to exponentiate the coefficient estimates #' and confidence intervals #' #' @template boilerplate #' #' @return \code{tidy.ivreg} returns a data frame with one row per #' coefficient, of the same form as \code{\link{tidy.lm}}. #' #' @seealso \code{\link{lm_tidiers}} #' #' @name ivreg_tidiers #' #' @examples #' #' if (require("AER", quietly = TRUE)) { #' data("CigarettesSW", package = "AER") #' CigarettesSW$rprice <- with(CigarettesSW, price/cpi) #' CigarettesSW$rincome <- with(CigarettesSW, income/population/cpi) #' CigarettesSW$tdiff <- with(CigarettesSW, (taxs - tax)/cpi) #' ivr <- ivreg(log(packs) ~ log(rprice) + log(rincome) | log(rincome) + tdiff + I(tax/cpi), #' data = CigarettesSW, subset = year == "1995") #' #' summary(ivr) #' #' tidy(ivr) #' tidy(ivr, conf.int = TRUE) #' tidy(ivr, conf.int = TRUE, exponentiate = TRUE) #' #' head(augment(ivr)) #' #' glance(ivr) #' } #' #' @export tidy.ivreg <- function(x, conf.int = FALSE, conf.level = .95, exponentiate = FALSE, ...) 
{ co <- stats::coef(summary(x)) nn <- c("estimate", "std.error", "statistic", "p.value") ret <- fix_data_frame(co, nn[1:ncol(co)]) process_lm(ret, x, conf.int = conf.int, conf.level = conf.level, exponentiate = exponentiate) } #' @rdname ivreg_tidiers #' #' @param newdata New data to make predictions from (optional) #' @return \code{augment} returns a data frame with one row for each #' initial observation, adding the columns: #' \item{.fitted}{predicted (fitted) values} #' and if \code{newdata} is \code{NULL}: #' \item{.resid}{residuals} #' #' #' @export augment.ivreg <- function(x, data = as.data.frame(stats::model.frame(x)), newdata, ...) { augment_columns(x, data, newdata, ...) } #' @rdname ivreg_tidiers #' #' @param ... extra arguments, not used #' @param diagnostics Logical. Return results of diagnostic tests. #' #' @return \code{glance} returns a one-row data frame with columns #' \item{r.squared}{The percent of variance explained by the model} #' \item{adj.r.squared}{r.squared adjusted based on the degrees of freedom} #' \item{statistic}{Wald test statistic} #' \item{p.value}{p-value from the Wald test} #' \item{df}{Degrees of freedom used by the coefficients} #' \item{sigma}{The square root of the estimated residual variance} #' \item{df.residual}{residual degrees of freedom} #' If \code{diagnostics} is \code{TRUE}, \code{glance} also returns: #' \item{p.value.Sargan}{P value of Sargan test} #' \item{p.value.Wu.Hausman}{P value of Wu-Hausman test} #' \item{p.value.weakinst}{P value of weak instruments test} #' #' @export glance.ivreg <- function(x, diagnostics = FALSE, ...) 
{ s <- summary(x, diagnostics = diagnostics) ret <- with(s, data.frame( r.squared = r.squared, adj.r.squared = adj.r.squared, sigma = sigma, statistic = waldtest[1], p.value = waldtest[2], df = df[1] )) if (diagnostics) { ret <- cbind(ret, with(s, data.frame( statistic.Sargan = diagnostics["Sargan", "statistic"], p.value.Sargan = diagnostics["Sargan", "p-value"], statistic.Wu.Hausman = diagnostics["Wu-Hausman", "statistic"], p.value.Wu.Hausman = diagnostics["Wu-Hausman", "p-value"], statistic.weakinst = diagnostics["Weak instruments", "statistic"], p.value.weakinst = diagnostics["Weak instruments", "p-value"] ))) } finish_glance(ret, x) } broom/R/multcomp_tidiers.R0000644000177700017770000000462113204276216016637 0ustar herbrandtherbrandt#' tidying methods for objects produced by \pkg{multcomp} #' #' These methods originated in ggplot2, as "fortify." In broom, #' they were renamed "tidy" because they summarize terms and #' tests, rather than adding columns to a dataset. #' #' @param x an object of class \code{glht}, \code{confint.glht}, #' \code{summary.glht} or \code{\link[multcomp]{cld}} #' @param ... extra arguments (not used) #' #' @name multcomp_tidiers #' @examples #' #' if (require("multcomp") && require("ggplot2")) { #' amod <- aov(breaks ~ wool + tension, data = warpbreaks) #' wht <- glht(amod, linfct = mcp(tension = "Tukey")) #' #' tidy(wht) #' ggplot(wht, aes(lhs, estimate)) + geom_point() #' #' CI <- confint(wht) #' tidy(CI) #' ggplot(CI, aes(lhs, estimate, ymin = lwr, ymax = upr)) + #' geom_pointrange() #' #' tidy(summary(wht)) #' ggplot(mapping = aes(lhs, estimate)) + #' geom_linerange(aes(ymin = lwr, ymax = upr), data = CI) + #' geom_point(aes(size = p), data = summary(wht)) + #' scale_size(trans = "reverse") #' #' cld <- cld(wht) #' tidy(cld) #' } NULL #' @method tidy glht #' @rdname multcomp_tidiers #' @export tidy.glht <- function(x, ...) 
{ unrowname(data.frame( lhs = rownames(x$linfct), rhs = x$rhs, estimate = stats::coef(x), check.names = FALSE, stringsAsFactors = FALSE)) } #' @rdname multcomp_tidiers #' @method tidy confint.glht #' @export tidy.confint.glht <- function(x, ...) { coef <- x$confint colnames(coef) <- c("estimate", "conf.low", "conf.high") unrowname(data.frame( lhs = rownames(coef), rhs = x$rhs, coef, check.names = FALSE, stringsAsFactors = FALSE)) } #' @method tidy summary.glht #' @rdname multcomp_tidiers #' @export tidy.summary.glht <- function(x, ...) { coef <- as.data.frame( x$test[c("coefficients", "sigma", "tstat", "pvalues")]) names(coef) <- c("estimate", "std.error", "statistic", "p.value") unrowname(data.frame( lhs = rownames(coef), rhs = x$rhs, coef, check.names = FALSE, stringsAsFactors = FALSE)) } #' @method tidy cld #' @rdname multcomp_tidiers #' @export tidy.cld <- function(x, ...) { unrowname(data.frame( lhs = names(x$mcletters$Letters), letters = x$mcletters$Letters, check.names = FALSE, stringsAsFactors = FALSE)) } broom/R/rq_tidiers.R0000644000177700017770000002246013204276216015422 0ustar herbrandtherbrandt#' Tidying methods for quantile regression models #' #' These methods tidy the coefficients of a quantile regression #' model into a summary, augment the original data with information #' on the fitted values and residuals, and construct a glance of #' the model's statistics. 
#'
#' @template boilerplate
#'
#' @name rq_tidiers
#'
#' @param x model object returned by \code{rq} or \code{nlrq}
#' @param data Original data, defaults to extracting it from the model
#'
NULL

#' @rdname rq_tidiers
#'
#' @param se.type Type of standard errors to calculate; see \code{summary.rq}
#' @param conf.int boolean; should confidence intervals be calculated, ignored
#' if \code{se.type = "rank"}
#' @param conf.level confidence level for intervals
#' @param alpha confidence level when \code{se.type = "rank"}; defaults to the same
#' as \code{conf.level} although the specification is inverted
#' @param \dots other arguments passed on to \code{summary.rq}
#'
#' @details If \code{se.type != "rank"} and \code{conf.int = TRUE} confidence
#' intervals are calculated by \code{summary.rq}. Otherwise they are standard t
#' based intervals.
#'
#' @return \code{tidy.rq} returns a data frame with one row for each coefficient.
#' The columns depend upon the confidence interval method selected.
#'
#' @export
tidy.rq <- function(x,se.type = "rank",conf.int = TRUE,conf.level = 0.95,alpha = 1 - conf.level, ...){
    # summary.rq often issues warnings when computing standard errors
    rq_summary <- suppressWarnings(summary(x,se = se.type, alpha = alpha, ...))
    process_rq(rq_obj = rq_summary,se.type = se.type,conf.int = conf.int,conf.level = conf.level,...)
}

#' @rdname rq_tidiers
#'
#' @return \code{tidy.rqs} returns a data frame with one row for each coefficient at
#' each quantile that was estimated. The columns depend upon the confidence interval
#' method selected.
#'
#' @export
tidy.rqs <- function(x,se.type = "rank",conf.int = TRUE,conf.level = 0.95,alpha = 1 - conf.level, ...){
    # summary.rq often issues warnings when computing standard errors
    rq_summary <- suppressWarnings(summary(x,se = se.type,alpha = alpha, ...))
    # summary() on an "rqs" fit returns one summary per tau; process each
    # and stack the results into a single data frame.
    plyr::ldply(rq_summary,process_rq,se.type = se.type,conf.int = conf.int,conf.level = conf.level,...)
}

#' @rdname rq_tidiers
#'
#' @return \code{tidy.nlrq} returns one row for each coefficient in the model,
#' with five columns:
#' \item{term}{The term in the nonlinear model being estimated and tested}
#' \item{estimate}{The estimated coefficient}
#' \item{std.error}{The standard error from the linear model}
#' \item{statistic}{t-statistic}
#' \item{p.value}{two-sided p-value}
#'
#' @export
tidy.nlrq <- function(x, conf.int = FALSE, conf.level = 0.95, ...){
    nn <- c("estimate", "std.error", "statistic", "p.value")
    ret <- fix_data_frame(coef(summary(x)), nn)
    if (conf.int){
        x_summary <- summary(x)
        # t-based interval using the summary's residual degrees of freedom.
        a <- (1 - conf.level) / 2
        cv <- qt(c(a,1-a),df = x_summary[["rdf"]])
        ret[["conf.low"]] <- ret[["estimate"]] + (cv[1] * ret[["std.error"]])
        ret[["conf.high"]] <- ret[["estimate"]] + (cv[2] * ret[["std.error"]])
    }
    ret
}

#' @rdname rq_tidiers
#'
#' @return \code{glance.rq} returns one row for each quantile (tau)
#' with the columns:
#' \item{tau}{quantile estimated}
#' \item{logLik}{the data's log-likelihood under the model}
#' \item{AIC}{the Akaike Information Criterion}
#' \item{BIC}{the Bayesian Information Criterion}
#' \item{df.residual}{residual degrees of freedom}
#' @export
glance.rq <- function(x,...){
    n <- length(fitted(x))
    s <- summary(x)
    # BIC computed via AIC with a log(n) penalty.
    data.frame(tau = x[["tau"]],
               logLik = logLik(x),
               AIC = AIC(x),
               BIC = AIC(x,k = log(n)),
               df.residual = rep(s[["rdf"]],times = length(x[["tau"]])))
}

#' @export
glance.rqs <- glance.rq

#' @rdname rq_tidiers
#'
#' @return \code{glance.rq} returns one row for each quantile (tau)
#' with the columns:
#' \item{tau}{quantile estimated}
#' \item{logLik}{the data's log-likelihood under the model}
#' \item{AIC}{the Akaike Information Criterion}
#' \item{BIC}{the Bayesian Information Criterion}
#' \item{df.residual}{residual degrees of freedom}
#' @export
glance.nlrq <- function(x,...){
    # nlrq stores the fitted model in the "m" component.
    n <- length(x[["m"]]$fitted())
    s <- summary(x)
    data.frame(tau = x[["m"]]$tau(),
               logLik = logLik(x),
               AIC = AIC(x),
               BIC = AIC(x,k = log(n)),
               df.residual =
s[["rdf"]])
}

#' @rdname rq_tidiers
#'
#' @param newdata If provided, new data frame to use for predictions
#'
#' @return \code{augment.rq} returns a row for each original observation
#' with the following columns added:
#' \item{.resid}{Residuals}
#' \item{.fitted}{Fitted quantiles of the model}
#' \item{.tau}{Quantile estimated}
#'
#' Depending on the arguments passed on to \code{predict.rq} via \code{\dots}
#' a confidence interval is also calculated on the fitted values resulting in
#' columns:
#' \item{.conf.low}{Lower confidence interval value}
#' \item{.conf.high}{Upper confidence interval value}
#'
#' See \code{predict.rq} for details on additional arguments to specify
#' confidence intervals. \code{predict.rq} does not provide confidence intervals
#' when \code{newdata} is provided.
#'
#' @export
augment.rq <- function(x, data = model.frame(x), newdata, ...){
    args <- list(...)
    # predict.rq only returns interval columns when called with newdata,
    # so force a newdata-style call when an interval was requested.
    force_newdata <- FALSE
    if ("interval" %in% names(args) && args[["interval"]] != "none"){
        force_newdata <- TRUE
    }
    if (missing(newdata) || is.null(newdata)){
        original <- data
        original[[".resid"]] <- residuals(x)
        if (force_newdata){
            pred <- predict(x, newdata = data, ...)
        } else{
            pred <- predict(x, ...)
        }
    } else{
        # Residuals are not available for new observations.
        original <- newdata
        pred <- predict(x, newdata = newdata, ...)
    }
    if (NCOL(pred) == 1){
        # No interval requested: a single vector of fitted values.
        original[[".fitted"]] <- pred
        original[[".tau"]] <- x[["tau"]]
        return(unrowname(original))
    } else{
        # BUG FIX: the upper-bound column was previously mislabelled
        # ".conf.low", producing two identically named columns instead of
        # the documented .conf.low / .conf.high pair.
        colnames(pred) <- c(".fitted", ".conf.low", ".conf.high")
        original[[".tau"]] <- x[["tau"]]
        return(unrowname(cbind(original, pred)))
    }
}

#' @rdname rq_tidiers
#'
#' @return \code{augment.rqs} returns a row for each original observation
#' and each estimated quantile (\code{tau}) with the following columns added:
#' \item{.resid}{Residuals}
#' \item{.fitted}{Fitted quantiles of the model}
#' \item{.tau}{Quantile estimated}
#'
#' \code{predict.rqs} does not return confidence interval estimates.
#'
#' @export
augment.rqs <- function(x,data = model.frame(x), newdata, ...){
    n_tau <- length(x[["tau"]])
    if (missing(newdata) || is.null(newdata)){
        # Replicate the data once per tau so every fitted quantile gets
        # its own copy of the observations.
        original <- data[rep(seq_len(nrow(data)), n_tau),]
        pred <- predict(x,stepfun = FALSE,...)
        resid <- residuals(x)
        # residuals(x) is a matrix with one column per tau; name the
        # columns by tau and stack them into long format.
        resid <- setNames(as.data.frame(resid),x[["tau"]])
        #resid <- reshape2::melt(resid,measure.vars = 1:ncol(resid),variable.name = ".tau",value.name = ".resid")
        resid <- tidyr::gather(data = resid,key = ".tau",value = ".resid")
        original <- cbind(original,resid)
        pred <- setNames(as.data.frame(pred),x[["tau"]])
        #pred <- reshape2::melt(pred,measure.vars = 1:ncol(pred),variable.name = ".tau",value.name = ".fitted")
        pred <- tidyr::gather(data = pred,key = ".tau",value = ".fitted")
        # Drop the duplicated .tau column carried by the fitted values.
        return(unrowname(cbind(original,pred[,-1,drop = FALSE])))
    } else{
        # With newdata only fitted values (no residuals) are available.
        original <- newdata[rep(seq_len(nrow(newdata)), n_tau),]
        pred <- predict(x, newdata = newdata, stepfun = FALSE,...)
        pred <- setNames(as.data.frame(pred),x[["tau"]])
        #pred <- reshape2::melt(pred,measure.vars = 1:ncol(pred),variable.name = ".tau",value.name = ".fitted")
        pred <- tidyr::gather(data = pred,key = ".tau",value = ".fitted")
        return(unrowname(cbind(original,pred)))
    }
}

#' @rdname rq_tidiers
#'
#' @details This simply calls \code{augment.nls} on the "nlrq" object.
#'
#' @return \code{augment.rqs} returns a row for each original observation
#' with the following columns added:
#' \item{.resid}{Residuals}
#' \item{.fitted}{Fitted quantiles of the model}
#'
#'
#' @export
augment.nlrq <- augment.nls

#' Helper function for tidy.rq and tidy.rqs
#'
#' See documentation for \code{summary.rq} for complete description
#' of the options for \code{se.type}, \code{conf.int}, etc.
#'
#' @param rq_obj an object returned by \code{summary.rq} or \code{summary.rqs}
#' @param se.type type of standard errors used in \code{summary.rq} or \code{summary.rqs}
#' @param conf.int whether to include a confidence interval
#' @param conf.level confidence level for confidence interval
#' @param \dots currently unused
process_rq <- function(rq_obj, se.type = "rank", conf.int = TRUE,
                       conf.level = 0.95, ...){
    nn <- c("estimate", "std.error", "statistic", "p.value")
    co <- as.data.frame(rq_obj[["coefficients"]])
    if (se.type == "rank") {
        # Rank-based summaries already contain interval bounds instead of
        # standard errors, so no separate t-based interval is computed.
        co <- setNames(co,c("estimate","conf.low","conf.high"))
        conf.int <- FALSE
    }else{
        co <- setNames(co,nn)
    }
    if (conf.int) {
        # Standard t-based interval from the residual degrees of freedom.
        a <- (1 - conf.level) / 2
        cv <- qt(c(a, 1 - a), df = rq_obj[["rdf"]])
        co[["conf.low"]] <- co[["estimate"]] + (cv[1] * co[["std.error"]])
        co[["conf.high"]] <- co[["estimate"]] + (cv[2] * co[["std.error"]])
    }
    co[["tau"]] <- rq_obj[["tau"]]
    fix_data_frame(co,colnames(co))
}
broom/R/biglm_tidiers.R0000644000177700017770000000622213204276216016070 0ustar herbrandtherbrandt#' Tidiers for biglm and bigglm object
#'
#' Tidiers for biglm object from the "biglm" package, which contains a linear model
#' object that is limited in memory usage. Generally the behavior is as similar
#' to the \code{\link{lm_tidiers}} as is possible. Currently no \code{augment}
#' is defined.
#'
#' @param x a "biglm" object
#' @param conf.int whether to include a confidence interval
#' @param conf.level confidence level of the interval, used only if
#' \code{conf.int=TRUE}
#' @param exponentiate whether to exponentiate the coefficient estimates
#' and confidence intervals (typical for logistic regression)
#' @param quick whether to compute a smaller and faster version, containing
#' only the \code{term} and \code{estimate} columns.
#' @param ... extra arguments (not used)
#'
#' @template boilerplate
#'
#' @return \code{tidy.biglm} returns one row for each coefficient, with columns
#' \item{term}{The term in the linear model being estimated and tested}
#' \item{estimate}{The estimated coefficient}
#' \item{std.error}{The standard error from the linear model}
#' \item{p.value}{two-sided p-value}
#'
#' If \code{conf.int=TRUE}, it also includes columns for \code{conf.low} and
#' \code{conf.high}, computed with \code{\link{confint}}.
#'
#' @name biglm_tidiers
#'
#' @examples
#'
#' if (require("biglm", quietly = TRUE)) {
#'     bfit <- biglm(mpg ~ wt + disp, mtcars)
#'     tidy(bfit)
#'     tidy(bfit, conf.int = TRUE)
#'     tidy(bfit, conf.int = TRUE, conf.level = .9)
#'
#'     glance(bfit)
#'
#'     # bigglm: logistic regression
#'     bgfit <- bigglm(am ~ mpg, mtcars, family = binomial())
#'     tidy(bgfit)
#'     tidy(bgfit, exponentiate = TRUE)
#'     tidy(bgfit, conf.int = TRUE)
#'     tidy(bgfit, conf.int = TRUE, conf.level = .9)
#'     tidy(bgfit, conf.int = TRUE, conf.level = .9, exponentiate = TRUE)
#'
#'     glance(bgfit)
#' }
#'
#' @import dplyr
#'
#' @export
tidy.biglm <- function(x, conf.int = FALSE, conf.level = .95,
                       exponentiate = FALSE, quick = FALSE, ...) {
    if (quick) {
        # Fast path: only terms and estimates, no inference columns.
        co <- stats::coef(x)
        ret <- data.frame(term = names(co), estimate = unname(co))
        return(ret)
    }
    mat <- summary(x)$mat
    nn <- c("estimate", "conf.low", "conf.high", "std.error", "p.value")
    ret <- fix_data_frame(mat, nn)

    # remove the 95% confidence interval and replace:
    # it isn't exactly 95% (uses 2 rather than 1.96), and doesn't allow
    # specification of confidence level in any case
    ret <- ret %>% dplyr::select(-conf.low, -conf.high)

    process_lm(ret, x, conf.int = conf.int, conf.level = conf.level,
               exponentiate = exponentiate)
}

#' @rdname biglm_tidiers
#'
#' @return \code{glance.biglm} returns a one-row data frame, with columns
#' \item{r.squared}{The percent of variance explained by the model}
#' \item{AIC}{the Akaike Information Criterion}
#' \item{deviance}{deviance}
#' \item{df.residual}{residual degrees of freedom}
#'
#' @export
glance.biglm <- function(x, ...) {
    s <- summary(x)
    ret <- data.frame(r.squared = s$rsq)
    # finish_glance adds AIC/deviance but not biglm's df.resid field.
    ret <- finish_glance(ret, x)
    ret$df.residual <- x$df.resid # add afterwards
    ret
}
broom/R/decompose_tidiers.R0000644000177700017770000000714713204327720016760 0ustar herbrandtherbrandt#' Tidying methods for seasonal decompositions
#'
#' These tidiers provide an \code{augment} method for the results of a seasonal
#' decomposition with \code{\link[stats]{decompose}} or
#' \code{\link[stats]{stl}}.
#'
#' The \code{augment} method returns the computed seasonal and trend components,
#' as well as the "remainder" term and the seasonally adjusted (or
#' "deseasonalised") series.
#'
#' @param x An object of class \code{"stl"} or \code{"decomposed.ts"},
#' resulting from a call to \code{\link[stats]{decompose}} or
#' \code{\link[stats]{stl}}.
#' @param ... Extra arguments. Unused.
#'
#' @name decompose_tidiers
#' @author Aaron Jacobs
#'
#' @seealso \code{\link[stats]{decompose}}, \code{\link[stats]{stl}}
#'
#' @return
#'
#' The \code{augment} method returns a tidy data frame with the following
#' columns:
#'
#' \describe{
#'   \item{\code{.seasonal}}{The seasonal component of the decomposition.}
#'   \item{\code{.trend}}{The trend component of the decomposition.}
#'   \item{\code{.remainder}}{The remainder, or "random" component of the
#'   decomposition.}
#'   \item{\code{.weight}}{The final robust weights (\code{stl} only).}
#'   \item{\code{.seasadj}}{The seasonally adjusted (or "deseasonalised")
#'   series.}
#' }
#'
#' @examples
#'
#' # Time series of temperatures in Nottingham, 1920-1939:
#' nottem
#'
#' # Perform seasonal decomposition on the data with both decompose
#' # and stl:
#' d1 <- stats::decompose(nottem)
#' d2 <- stats::stl(nottem, s.window = "periodic", robust = TRUE)
#'
#' # Compare the original series to its decompositions.
#'
#' cbind(broom::tidy(nottem), broom::augment(d1),
#'       broom::augment(d2))
#'
#' # Visually compare seasonal decompositions in tidy data frames.
#'
#' library(tibble)
#' library(dplyr)
#' library(tidyr)
#' library(ggplot2)
#'
#' decomps <- tibble(
#'     # Turn the ts objects into data frames.
#'     series = list(broom::tidy(nottem), broom::tidy(nottem)),
#'     # Add the models in, one for each row.
#'     decomp = c("decompose", "stl"),
#'     model = list(d1, d2)
#' ) %>%
#'     rowwise() %>%
#'     # Pull out the fitted data using broom::augment.
#'     mutate(augment = list(broom::augment(model))) %>%
#'     ungroup() %>%
#'     # Unnest the data frames into a tidy arrangement of
#'     # the series next to its seasonal decomposition, grouped
#'     # by the method (stl or decompose).
#'     group_by(decomp) %>%
#'     unnest(series, augment) %>%
#'     mutate(index = 1:n()) %>%
#'     ungroup() %>%
#'     select(decomp, index, x, adjusted = .seasadj)
#'
#' ggplot(decomps) +
#'     geom_line(aes(x = index, y = x), colour = "black") +
#'     geom_line(aes(x = index, y = adjusted, colour = decomp,
#'                   group = decomp))
#'
NULL

#' @rdname decompose_tidiers
#' @export
augment.decomposed.ts <- function(x, ...) {
    ret <- data.frame(seasonal = as.numeric(x$seasonal),
                      trend = as.numeric(x$trend),
                      remainder = as.numeric(x$random))

    # Inspired by forecast::seasadj, this is the "deseasonalised" data:
    # subtract (additive) or divide out (multiplicative) the seasonal term.
    ret$seasadj <- if (x$type == "additive") {
        as.numeric(x$x) - ret$seasonal
    } else {
        as.numeric(x$x) / ret$seasonal
    }

    colnames(ret) <- paste0(".", colnames(ret))
    ret
}

#' @rdname decompose_tidiers
#'
#' @param weights Whether to include the robust weights in the output.
#'
#' @export
augment.stl <- function(x, weights = TRUE, ...) {
    ret <- as.data.frame(x$time.series)

    # BUG FIX: the documented `weights` argument was previously ignored and
    # the robust weights were appended unconditionally; honour it now.
    # The default of TRUE preserves the old output.
    if (weights) {
        ret$weight <- x$weights
    }

    # Inspired by forecast::seasadj, this is the "deseasonalised" data:
    ret$seasadj <- ret$trend + ret$remainder

    colnames(ret) <- paste0(".", colnames(ret))
    ret
}
broom/R/kde_tidiers.R0000644000177700017770000000256513204276216015547 0ustar herbrandtherbrandt#' Tidy a kernel density estimate object from the ks package
#'
#' Tidy a kernel density estimate object, into a table with
#' one row for each point in the estimated grid, and one column
#' for each dimension (along with an \code{estimate} column with
#' the estimated density).
#'
#' @param x A "ks" object from the kde package
#' @param ... Extra arguments, not used
#'
#' @return A data frame with one row for each point in the
#' estimated grid. The result contains one column (named \code{x1},
#' \code{x2}, etc) for each dimension, and an \code{estimate} column
#' containing the estimated density.
#'
#' @name kde_tidiers
#'
#' @examples
#'
#' if (require("ks", quietly = TRUE)) {
#'     dat <- replicate(2, rnorm(100))
#'     k <- kde(dat)
#'
#'     td <- tidy(k)
#'     head(td)
#'
#'     library(ggplot2)
#'     ggplot(td, aes(x1, x2, fill = estimate)) +
#'         geom_tile() +
#'         theme_void()
#'
#'     # also works with 3 dimensions
#'     dat3 <- replicate(3, rnorm(100))
#'     k3 <- kde(dat3)
#'
#'     td3 <- tidy(k3)
#'     head(td3)
#' }
#'
#' @export
tidy.kde <- function(x, ...) {
    # Melt the density array: the first columns of `melted` are the grid
    # indices along each dimension, and `value` holds the density.
    melted <- reshape2::melt(x$estimate)
    dims <- seq_len(length(x$eval.points))
    # Translate each grid index into the actual evaluation coordinate.
    coords <- purrr::map2(x$eval.points, melted[dims],
                          function(points, idx) points[idx])
    out <- setNames(as.data.frame(coords), paste0("x", dims))
    mutate(out, estimate = melted$value)
}
broom/R/boot_tidiers.R0000644000177700017770000001041013204301355015726 0ustar herbrandtherbrandt#' Tidying methods for bootstrap computations
#'
#' Tidying methods for "boot" objects from the "boot" package.
#'
#' @param x \code{\link{boot}} object
#' @param conf.int whether to include a confidence interval
#' @param conf.level confidence level for CI
#' @param conf.method method for computing confidence intervals (see \code{\link{boot.ci}})
#' @param \dots extra arguments (not used)
#'
#' @return The \code{tidy} method returns a data frame with one row per
#' bootstrapped statistic that was calculated, and the
#' following columns:
#' \item{term}{Name of the computed statistic, if present}
#' \item{statistic}{The original values of the statistic}
#' \item{bias}{The bias of the original statistic value}
#' \item{std.error}{Standard error of the statistic}
#'
#' If weights were provided to the \code{boot} function, an \code{estimate}
#' column is included showing the weighted bootstrap estimate, and the
#' standard error is of that estimate.
#'
#' If there are no original statistics in the "boot" object, such as with a
#' call to \code{tsboot} with \code{orig.t = FALSE}, the \code{original}
#' and \code{statistic} columns are omitted, and only \code{estimate} and
#' \code{std.error} columns shown.
#'
#' @name boot_tidiers
#'
#' @examples
#' if (require("boot")) {
#'    clotting <- data.frame(
#'           u = c(5,10,15,20,30,40,60,80,100),
#'           lot1 = c(118,58,42,35,27,25,21,19,18),
#'           lot2 = c(69,35,26,21,18,16,13,12,12))
#'
#'    g1 <- glm(lot2 ~ log(u), data = clotting, family = Gamma)
#'
#'    bootfun <- function(d, i) {
#'       coef(update(g1, data= d[i,]))
#'    }
#'    bootres <- boot(clotting, bootfun, R = 999)
#'    tidy(g1, conf.int=TRUE)
#'    tidy(bootres, conf.int=TRUE)
#' }
#'
#' @export
tidy.boot <- function(x,
                      ## is there a convention for the default value of
                      ## conf.int?
                      conf.int = FALSE,
                      conf.level = 0.95,
                      conf.method = "perc",
                      ...) {
    # calculate the bias and standard error
    # this is an adapted version of the code in print.boot, where the bias
    # and standard error are calculated
    boot.out <- x
    index <- 1:ncol(boot.out$t)
    sim <- boot.out$sim
    cl <- boot.out$call
    t <- matrix(boot.out$t[, index], nrow = nrow(boot.out$t))
    # drop replicate columns that are entirely NA
    allNA <- apply(t, 2L, function(t) all(is.na(t)))
    index <- index[!allNA]
    t <- matrix(t[, !allNA], nrow = nrow(t))
    rn <- paste("t", index, "*", sep = "")
    if (is.null(t0 <- boot.out$t0)) {
        # no original statistics (e.g. tsboot with orig.t = FALSE):
        # report only the bootstrap estimate and its standard error
        if (is.null(boot.out$call$weights))
            op <- cbind(apply(t, 2L, mean, na.rm = TRUE),
                        sqrt(apply(t, 2L, function(t.st) var(t.st[!is.na(t.st)]))))
        else {
            # weighted bootstrap: moments come from boot::imp.moments
            op <- NULL
            for (i in index) op <- rbind(op, boot::imp.moments(boot.out,
                index = i)$rat)
            op[, 2L] <- sqrt(op[, 2])
        }
        colnames(op) <- c("estimate", "std.error")
    }
    else {
        t0 <- boot.out$t0[index]
        if (is.null(boot.out$call$weights)) {
            # unweighted: statistic, bias (bootstrap mean - original), SE
            op <- cbind(t0, apply(t, 2L, mean, na.rm = TRUE) -
                t0, sqrt(apply(t, 2L, function(t.st) var(t.st[!is.na(t.st)]))))
            colnames(op) <- c("statistic", "bias", "std.error")
        }
        else {
            # weighted: also include the weighted bootstrap estimate
            op <- NULL
            for (i in index) op <- rbind(op, boot::imp.moments(boot.out,
                index = i)$rat)
            op <- cbind(t0, op[, 1L] - t0, sqrt(op[, 2L]),
                        apply(t, 2L, mean, na.rm = TRUE))
            colnames(op) <- c("statistic", "bias", "std.error", "estimate")
        }
    }

    # bring in rownames as "term" column, and turn into a data.frame
    ret <- fix_data_frame(op)

    if (conf.int) {
        ci.list <- lapply(seq_along(x$t0),
                          boot::boot.ci,
                          boot.out = x,
                          conf = conf.level, type = conf.method)

        ## boot.ci uses c("norm", "basic", "perc", "stud") for types
        ## stores them with longer names
        ci.pos <- pmatch(conf.method, names(ci.list[[1]]))
        ci.tab <- t(sapply(ci.list, function(x) x[[ci.pos]][4:5]))

        colnames(ci.tab) <- c("conf.low", "conf.high")
        ret <- cbind(ret, ci.tab)
    }
    return(ret)
}
broom/R/ridgelm_tidiers.R0000644000177700017770000000521613204276216016423 0ustar herbrandtherbrandt#' Tidying methods for ridgelm objects from the MASS package
#'
#' These methods tidies the coefficients of a ridge regression model
#' chosen at each value of lambda into a data frame, or constructs
#' a one-row glance of the model's choices of lambda (the ridge
#' constant)
#'
#' @param x An object of class "ridgelm"
#' @param ... extra arguments (not used)
#'
#' @template boilerplate
#'
#' @name ridgelm_tidiers
#'
#' @examples
#'
#' names(longley)[1] <- "y"
#' fit1 <- MASS::lm.ridge(y ~ ., longley)
#' tidy(fit1)
#'
#' fit2 <- MASS::lm.ridge(y ~ ., longley, lambda = seq(0.001, .05, .001))
#' td2 <- tidy(fit2)
#' g2 <- glance(fit2)
#'
#' # coefficient plot
#' library(ggplot2)
#' ggplot(td2, aes(lambda, estimate, color = term)) + geom_line()
#'
#' # GCV plot
#' ggplot(td2, aes(lambda, GCV)) + geom_line()
#'
#' # add line for the GCV minimizing estimate
#' ggplot(td2, aes(lambda, GCV)) + geom_line() +
#'     geom_vline(xintercept = g2$lambdaGCV, col = "red", lty = 2)
NULL

#' @rdname ridgelm_tidiers
#'
#' @return \code{tidy.ridgelm} returns one row for each combination of
#' choice of lambda and term in the formula, with columns:
#' \item{lambda}{choice of lambda}
#' \item{GCV}{generalized cross validation value for this lambda}
#' \item{term}{the term in the ridge regression model being estimated}
#' \item{estimate}{estimate of scaled coefficient using this lambda}
#' \item{scale}{Scaling factor of estimated coefficient}
#'
#' @export
tidy.ridgelm <- function(x, ...)
{
    if (length(x$lambda) == 1) {
        # only one choice of lambda
        ret <- data.frame(lambda = x$lambda,
                          term = names(x$coef),
                          estimate = x$coef,
                          scale = x$scales,
                          xm = x$xm)
        return(unrowname(ret))
    }

    # otherwise, multiple lambdas/coefs/etc, have to tidy
    # x$coef has one column per lambda; transpose and gather into
    # (lambda, GCV, term, estimate) long format.
    cotidy <- data.frame(plyr::unrowname(t(x$coef)),
                         lambda = x$lambda,
                         GCV = unname(x$GCV)) %>%
        tidyr::gather(term, estimate, -lambda, -GCV) %>%
        mutate(term = as.character(term)) %>%
        mutate(scale = x$scales[term])

    cotidy
}

#' @rdname ridgelm_tidiers
#'
#' @return \code{glance.ridgelm} returns a one-row data.frame with the columns
#' \item{kHKB}{modified HKB estimate of the ridge constant}
#' \item{kLW}{modified L-W estimate of the ridge constant}
#' \item{lambdaGCV}{choice of lambda that minimizes GCV}
#'
#' This is similar to the output of \code{select.ridgelm}, but it is returned
#' rather than printed.
#'
#' @export
glance.ridgelm <- function(x, ...) {
    ret <- data.frame(kHKB = x$kHKB,
                      kLW = x$kLW,
                      lambdaGCV = x$lambda[which.min(x$GCV)])
    ret
}
broom/R/survival_tidiers.R0000644000177700017770000005770713204276216016663 0ustar herbrandtherbrandt# tidying functions for the survival package
# http://cran.r-project.org/web/packages/survival/index.html
# In particular, tidies objects of the following classes:
# - aareg
# - cch
# - coxph
# - pyears
# - survexp
# - survfit
# - survreg
# - survdiff

#' Tidiers for aareg objects
#'
#' These tidy the coefficients of Aalen additive regression objects.
#'
#' @param x an "aareg" object
#' @param ... extra arguments (not used)
#'
#' @template boilerplate
#'
#' @examples
#'
#' if (require("survival", quietly = TRUE)) {
#'     afit <- aareg(Surv(time, status) ~ age + sex + ph.ecog, data=lung,
#'                   dfbeta=TRUE)
#'     summary(afit)
#'     tidy(afit)
#' }
#'
#' @name aareg_tidiers
#' @name aareg_tidiers
#'
#' @return \code{tidy.aareg} returns one row for each coefficient, with
#' the columns
#' \item{term}{name of coefficient}
#' \item{estimate}{estimate of the slope}
#' \item{statistic}{test statistic for coefficient}
#' \item{std.error}{standard error of statistic}
#' \item{robust.se}{robust version of standard error estimate}
#' \item{z}{z score}
#' \item{p.value}{p-value}
#'
#' @export
tidy.aareg <- function(x, ...) {
    nn <- c("estimate", "statistic", "std.error", "robust.se",
            "statistic.z", "p.value")
    fix_data_frame(summary(x)$table, nn)
}

#' @name aareg_tidiers
#'
#' @return \code{glance} returns a one-row data frame containing
#' \item{statistic}{chi-squared statistic}
#' \item{p.value}{p-value based on chi-squared statistic}
#' \item{df}{degrees of freedom used by coefficients}
#'
#' @export
glance.aareg <- function(x, ...) {
    s <- summary(x)
    chi <- s$chisq
    # one test statistic per term, minus one for the intercept
    df <- length(s$test.statistic) - 1

    data.frame(statistic = chi,
               p.value = 1 - stats::pchisq(chi, df),
               df = df)
}

#' tidiers for case-cohort data
#'
#' Tidiers for case-cohort analyses: summarize each estimated coefficient,
#' or test the overall model.
#'
#' @param x a "cch" object
#' @param conf.level confidence level for CI
#' @param ... extra arguments (not used)
#'
#' @details It is not clear what an \code{augment} method would look like,
#' so none is provided. Nor is there currently any way to extract the
#' covariance or the residuals.
#'
#' @template boilerplate
#'
#' @examples
#'
#' if (require("survival", quietly = TRUE)) {
#' # examples come from cch documentation
#' subcoh <- nwtco$in.subcohort
#' selccoh <- with(nwtco, rel==1|subcoh==1)
#' ccoh.data <- nwtco[selccoh,]
#' ccoh.data$subcohort <- subcoh[selccoh]
#' ## central-lab histology
#' ccoh.data$histol <- factor(ccoh.data$histol,labels=c("FH","UH"))
#' ## tumour stage
#' ccoh.data$stage <- factor(ccoh.data$stage,labels=c("I","II","III" ,"IV"))
#' ccoh.data$age <- ccoh.data$age/12 # Age in years
#'
#' fit.ccP <- cch(Surv(edrel, rel) ~ stage + histol + age, data = ccoh.data,
#'    subcoh = ~subcohort, id= ~seqno, cohort.size = 4028)
#'
#' tidy(fit.ccP)
#'
#' # coefficient plot
#' library(ggplot2)
#' ggplot(tidy(fit.ccP), aes(x = estimate, y = term)) + geom_point() +
#'     geom_errorbarh(aes(xmin = conf.low, xmax = conf.high), height = 0) +
#'     geom_vline(xintercept = 0)
#'
#' # compare between methods
#' library(dplyr)
#' fits <- data_frame(method = c("Prentice", "SelfPrentice", "LinYing")) %>%
#'     group_by(method) %>%
#'     do(tidy(cch(Surv(edrel, rel) ~ stage + histol + age, data = ccoh.data,
#'                 subcoh = ~subcohort, id= ~seqno, cohort.size = 4028,
#'                 method = .$method)))
#'
#' # coefficient plots comparing methods
#' ggplot(fits, aes(x = estimate, y = term, color = method)) + geom_point() +
#'     geom_errorbarh(aes(xmin = conf.low, xmax = conf.high)) +
#'     geom_vline(xintercept = 0)
#' }
#'
#' @seealso \link{cch}
#'
#' @name cch_tidiers
#' @rdname cch_tidiers
#'
#' @template coefficients
#'
#' @export
tidy.cch <- function(x, conf.level = .95, ...) {
    s <- summary(x)
    co <- stats::coefficients(s)
    ret <- fix_data_frame(co, newnames = c("estimate", "std.error",
                                           "statistic", "p.value"))

    # add confidence interval
    CI <- unrowname(stats::confint(x, level = conf.level))
    colnames(CI) <- c("conf.low", "conf.high")
    cbind(ret, CI)
}

#' @rdname cch_tidiers
#'
#' @return \code{glance} returns a one-row data.frame with the following
#' columns:
#' \item{score}{score}
#' \item{rscore}{rscore}
#' \item{p.value}{p-value from Wald test}
#' \item{iter}{number of iterations}
#' \item{n}{number of predictions}
#' \item{nevent}{number of events}
#'
#' @export
glance.cch <- function(x, ...) {
    # keep only the scalar fit statistics that are present on the object
    ret <- compact(unclass(x)[c("score", "rscore", "wald.test", "iter",
                                "n", "nevent")])
    ret <- as.data.frame(ret)
    plyr::rename(ret, c("wald.test" = "p.value"))
}

#' Tidiers for coxph object
#'
#' Tidy the coefficients of a Cox proportional hazards regression model,
#' construct predictions, or summarize the entire model into a single row.
#'
#' @param x "coxph" object
#' @param data original data for \code{augment}
#' @param exponentiate whether to report the estimate and confidence intervals
#' on an exponential scale
#' @param conf.int confidence level to be used for CI
#' @param newdata new data on which to do predictions
#' @param type.predict type of predicted value (see \code{\link{predict.coxph}})
#' @param type.residuals type of residuals (see \code{\link{residuals.coxph}})
#' @param ... Extra arguments, not used
#'
#' @name coxph_tidiers
#'
#' @examples
#'
#' if (require("survival", quietly = TRUE)) {
#'     cfit <- coxph(Surv(time, status) ~ age + sex, lung)
#'
#'     tidy(cfit)
#'     tidy(cfit, exponentiate = TRUE)
#'
#'     lp <- augment(cfit, lung)
#'     risks <- augment(cfit, lung, type.predict = "risk")
#'     expected <- augment(cfit, lung, type.predict = "expected")
#'
#'     glance(cfit)
#'
#'     # also works on clogit models
#'     resp <- levels(logan$occupation)
#'     n <- nrow(logan)
#'     indx <- rep(1:n, length(resp))
#'     logan2 <- data.frame(logan[indx,],
#'                          id = indx,
#'                          tocc = factor(rep(resp, each=n)))
#'     logan2$case <- (logan2$occupation == logan2$tocc)
#'
#'     cl <- clogit(case ~ tocc + tocc:education + strata(id), logan2)
#'     tidy(cl)
#'     glance(cl)
#'
#'     library(ggplot2)
#'     ggplot(lp, aes(age, .fitted, color = sex)) + geom_point()
#'     ggplot(risks, aes(age, .fitted, color = sex)) + geom_point()
#'     ggplot(expected, aes(time, .fitted, color = sex)) + geom_point()
#' }

#' @rdname coxph_tidiers
#'
#' @return \code{tidy} returns a data.frame with one row for each term,
#' with columns
#' \item{estimate}{estimate of slope}
#' \item{std.error}{standard error of estimate}
#' \item{statistic}{test statistic}
#' \item{p.value}{p-value}
#'
#' @export
tidy.coxph <- function(x, exponentiate = FALSE, conf.int = .95, ...)
{
    s <- summary(x, conf.int = conf.int)
    co <- stats::coef(s)

    # robust fits carry an extra robust.se column in the summary table
    if (s$used.robust)
        nn <- c("estimate", "std.error", "robust.se", "statistic", "p.value")
    else
        nn <- c("estimate", "std.error", "statistic", "p.value")

    # drop the exp(coef) column; exponentiation is applied below instead
    ret <- fix_data_frame(co[, -2, drop=FALSE], nn)

    if (exponentiate) {
        ret$estimate <- exp(ret$estimate)
    }

    if (!is.null(s$conf.int)) {
        # summary.coxph reports the interval on the hazard-ratio scale,
        # so take logs when the estimates stay on the log scale
        CI <- as.matrix(unrowname(s$conf.int[, 3:4, drop=FALSE]))
        colnames(CI) <- c("conf.low", "conf.high")
        if (!exponentiate) {
            CI <- log(CI)
        }
        ret <- cbind(ret, CI)
    }

    ret
}

#' @rdname coxph_tidiers
#'
#' @template augment_NAs
#'
#' @return \code{augment} returns the original data.frame with additional
#' columns added:
#' \item{.fitted}{predicted values}
#' \item{.se.fit}{standard errors }
#' \item{.resid}{residuals (not present if \code{newdata} is provided)}
#'
#' @export
augment.coxph <- function(x, data = stats::model.frame(x), newdata,
                          type.predict = "lp", type.residuals = "martingale",
                          ...) {
    # NOTE(review): a previous version assigned
    #   ret <- fix_data_frame(data, newcol = ".rownames")
    # and never used the result; that dead computation has been removed.
    augment_columns(x, data, newdata, type.predict = type.predict,
                    type.residuals = type.residuals)
}

#' @rdname coxph_tidiers
#'
#' @return \code{glance} returns a one-row data.frame with statistics
#' calculated on the cox regression.
#'
#' @export
glance.coxph <- function(x, ...) {
    s <- summary(x)

    # including all the test statistics and p-values as separate
    # columns. Admittedly not perfect but does capture most use cases.
    ret <- list(n = s$n,
                nevent = s$nevent,
                statistic.log = s$logtest[1],
                p.value.log = s$logtest[3],
                statistic.sc = s$sctest[1],
                p.value.sc = s$sctest[3],
                statistic.wald = s$waldtest[1],
                p.value.wald = s$waldtest[3],
                statistic.robust = s$robscore[1],
                p.value.robust = s$robscore[3],
                r.squared = s$rsq[1],
                r.squared.max = s$rsq[2],
                concordance = s$concordance[1],
                std.error.concordance = s$concordance[2])
    # drop components that are absent for this particular fit
    ret <- as.data.frame(compact(ret))
    finish_glance(ret, x)
}

#' tidy survival curve fits
#'
#' Construct tidied data frames showing survival curves over time.
#' #' @param x "survfit" object #' @param ... extra arguments, not used #' #' @details \code{glance} does not work on multi-state survival curves, #' since the values \code{glance} outputs would be calculated for each state. #' \code{tidy} does work for multi-state survival objects, and includes a #' \code{state} column to distinguish between them. #' #' @template boilerplate #' #' @examples #' #' if (require("survival", quietly = TRUE)) { #' cfit <- coxph(Surv(time, status) ~ age + sex, lung) #' sfit <- survfit(cfit) #' #' head(tidy(sfit)) #' glance(sfit) #' #' library(ggplot2) #' ggplot(tidy(sfit), aes(time, estimate)) + geom_line() + #' geom_ribbon(aes(ymin=conf.low, ymax=conf.high), alpha=.25) #' #' # multi-state #' fitCI <- survfit(Surv(stop, status * as.numeric(event), type = "mstate") ~ 1, #' data = mgus1, subset = (start == 0)) #' td_multi <- tidy(fitCI) #' head(td_multi) #' tail(td_multi) #' ggplot(td_multi, aes(time, estimate, group = state)) + #' geom_line(aes(color = state)) + #' geom_ribbon(aes(ymin = conf.low, ymax = conf.high), alpha = .25) #' #' # perform simple bootstrapping #' library(dplyr) #' bootstraps <- lung %>% bootstrap(100) %>% #' do(tidy(survfit(coxph(Surv(time, status) ~ age + sex, .)))) #' #' ggplot(bootstraps, aes(time, estimate, group = replicate)) + #' geom_line(alpha = .25) #' #' bootstraps_bytime <- bootstraps %>% group_by(time) %>% #' summarize(median = median(estimate), #' low = quantile(estimate, .025), #' high = quantile(estimate, .975)) #' #' ggplot(bootstraps_bytime, aes(x = time, y = median)) + geom_line() + #' geom_ribbon(aes(ymin = low, ymax = high), alpha = .25) #' #' # bootstrap for median survival #' glances <- lung %>% #' bootstrap(100) %>% #' do(glance(survfit(coxph(Surv(time, status) ~ age + sex, .)))) #' #' glances #' #' qplot(glances$median, binwidth = 15) #' quantile(glances$median, c(.025, .975)) #' } #' #' @name survfit_tidiers #' @rdname survfit_tidiers #' #' @return \code{tidy} returns a row for each time 
point, with columns
#' \item{time}{timepoint}
#' \item{n.risk}{number of subjects at risk at time t0}
#' \item{n.event}{number of events at time t}
#' \item{n.censor}{number of censored events}
#' \item{estimate}{estimate of survival or cumulative incidence rate when multistate}
#' \item{std.error}{standard error of estimate}
#' \item{conf.high}{upper end of confidence interval}
#' \item{conf.low}{lower end of confidence interval}
#' \item{state}{state if multistate survfit object inputted}
#' \item{strata}{strata if stratified survfit object inputted}
#' @export
tidy.survfit <- function(x, ...) {
    if (inherits(x, "survfitms")) {
        # multi-state fit: matrix-valued components (n.risk, pstate, ...)
        # are flattened column-wise via c(), one row per time/state pair
        cols <- list(
            time = x$time,
            n.risk = c(x$n.risk),
            n.event = c(x$n.event),
            n.censor = c(x$n.censor),
            estimate = c(x$pstate),
            std.error = c(x$std.err),
            conf.high = c(x$upper),
            conf.low = c(x$lower),
            state = rep(x$states, each = nrow(x$pstate))
        )
        ret <- do.call(data.frame, cols)
        # drop rows belonging to the unnamed state
        ret <- ret[ret$state != "", ]
    } else {
        cols <- list(
            time = x$time,
            n.risk = x$n.risk,
            n.event = x$n.event,
            n.censor = x$n.censor,
            estimate = x$surv,
            std.error = x$std.err,
            conf.high = x$upper,
            conf.low = x$lower
        )
        ret <- do.call(data.frame, cols)
    }

    # strata are automatically recycled if there are multiple states
    if (!is.null(x$strata)) {
        ret$strata <- rep(names(x$strata), x$strata)
    }
    ret
}


#' @rdname survfit_tidiers
#'
#' @return \code{glance} returns one-row data.frame with the columns
#' displayed by \code{\link{print.survfit}}
#'   \item{records}{number of observations}
#'   \item{n.max}{n.max}
#'   \item{n.start}{n.start}
#'   \item{events}{number of events}
#'   \item{rmean}{Restricted mean (see \link[survival]{print.survfit})}
#'   \item{rmean.std.error}{Restricted mean standard error}
#'   \item{median}{median survival}
#'   \item{conf.low}{lower end of confidence interval on median}
#'   \item{conf.high}{upper end of confidence interval on median}
#'
#' @export
glance.survfit <- function(x, ...)
{ if (inherits(x, "survfitms")) { stop("Cannot construct a glance of a multi-state survfit object") } if (!is.null(x$strata)) { stop("Cannot construct a glance of a multi-strata survfit object") } s <- summary(x) ret <- unrowname(as.data.frame(t(s$table))) colnames(ret) <- plyr::revalue(colnames(ret), c("*rmean" = "rmean", "*se(rmean)" = "rmean.std.error"), warn_missing = FALSE) colnames(ret)[utils::tail(seq_along(ret), 2)] <- c("conf.low", "conf.high") ret } #' Tidy an expected survival curve #' #' This constructs a summary across time points or overall of an expected survival #' curve. Note that this contains less information than most survfit objects. #' #' @param x "survexp" object #' @param ... extra arguments (not used) #' #' @template boilerplate #' #' @examples #' #' if (require("survival", quietly = TRUE)) { #' sexpfit <- survexp(futime ~ 1, rmap=list(sex="male", year=accept.dt, #' age=(accept.dt-birth.dt)), #' method='conditional', data=jasa) #' #' tidy(sexpfit) #' glance(sexpfit) #' } #' #' @name sexpfit_tidiers #' @rdname sexpfit_tidiers #' #' @return \code{tidy} returns a one row for each time point, with columns #' \item{time}{time point} #' \item{estimate}{estimated survival} #' \item{n.risk}{number of individuals at risk} #' #' @export tidy.survexp <- function(x, ...) { ret <- as.data.frame(summary(x)[c("time", "surv", "n.risk")]) plyr::rename(ret, c(surv = "estimate")) } #' @rdname sexpfit_tidiers #' #' @return \code{glance} returns a one-row data.frame with the columns: #' \item{n.max}{maximum number of subjects at risk} #' \item{n.start}{starting number of subjects at risk} #' \item{timepoints}{number of timepoints} #' #' @export glance.survexp <- function(x, ...) { data.frame(n.max = max(x$n.risk), n.start = x$n.risk[1], timepoints = length(x$n.risk)) } #' Tidy person-year summaries #' #' These tidy the output of \code{pyears}, a calculation of the person-years #' of follow-up time contributed by a cohort of subject. 
Since the output of #' \code{pyears$data} is already tidy (if the \code{data.frame = TRUE} argument #' is given), this does only a little work and should rarely be necessary. #' #' @param x a "pyears" object #' @param ... extra arguments (not used) #' #' @examples #' #' if (require("survival", quietly = TRUE)) { #' temp.yr <- tcut(mgus$dxyr, 55:92, labels=as.character(55:91)) #' temp.age <- tcut(mgus$age, 34:101, labels=as.character(34:100)) #' ptime <- ifelse(is.na(mgus$pctime), mgus$futime, mgus$pctime) #' pstat <- ifelse(is.na(mgus$pctime), 0, 1) #' pfit <- pyears(Surv(ptime/365.25, pstat) ~ temp.yr + temp.age + sex, mgus, #' data.frame=TRUE) #' head(tidy(pfit)) #' glance(pfit) #' #' # if data.frame argument is not given, different information is present in #' # output #' pfit2 <- pyears(Surv(ptime/365.25, pstat) ~ temp.yr + temp.age + sex, mgus) #' head(tidy(pfit2)) #' glance(pfit2) #' } #' #' @seealso \link{pyears} #' #' @name pyears_tidiers #' @rdname pyears_tidiers #' #' @return \code{tidy} returns a data.frame with the columns #' \item{pyears}{person-years of exposure} #' \item{n}{number of subjects contributing time} #' \item{event}{observed number of events} #' \item{expected}{expected number of events (present only if a #' \code{ratetable} term is present)} #' #' If the \code{data.frame = TRUE} argument is supplied to \code{pyears}, #' this is simply the contents of \code{x$data}. #' #' @export tidy.pyears <- function(x, ...) { if (is.null(x$data)) { ret <- compact(unclass(x)[c("pyears", "n", "event", "expected")]) as.data.frame(ret) } else { x$data } } #' @rdname pyears_tidiers #' #' @return \code{glance} returns a one-row data frame with #' \item{total}{total number of person-years tabulated} #' \item{offtable}{total number of person-years off table} #' #' This contains the values printed by \code{summary.pyears}. #' #' @export glance.pyears <- function(x, ...) 
{ if (is.null(x$data)) { data.frame(total = sum(x$pyears), offtable = x$offtable) } else { data.frame(total = sum(x$data$pyears), offtable = x$offtable) } } #' Tidiers for a parametric regression survival model #' #' Tidies the coefficients of a parametric survival regression model, #' from the "survreg" function, adds fitted values and residuals, or #' summarizes the model statistics. #' #' @param x a "survreg" model #' @param conf.level confidence level for CI #' @param ... extra arguments (not used) #' #' @template boilerplate #' #' @examples #' #' if (require("survival", quietly = TRUE)) { #' sr <- survreg(Surv(futime, fustat) ~ ecog.ps + rx, ovarian, #' dist="exponential") #' #' td <- tidy(sr) #' augment(sr, ovarian) #' augment(sr) #' glance(sr) #' #' # coefficient plot #' library(ggplot2) #' ggplot(td, aes(estimate, term)) + geom_point() + #' geom_errorbarh(aes(xmin = conf.low, xmax = conf.high), height = 0) + #' geom_vline(xintercept = 0) #' } #' #' @name survreg_tidiers #' @rdname survreg_tidiers #' #' @template coefficients #' #' @export tidy.survreg <- function(x, conf.level = .95, ...) 
{
    s <- summary(x)
    nn <- c("estimate", "std.error", "statistic", "p.value")
    ret <- fix_data_frame(s$table, newnames = nn)
    # (a stray bare `ret` statement, which evaluated the data frame and
    # discarded the result, was removed here)

    # add confidence interval, merged in by term
    CI <- stats::confint(x, level = conf.level)
    colnames(CI) <- c("conf.low", "conf.high")
    CI <- fix_data_frame(CI)
    merge(ret, CI, all.x = TRUE, sort = FALSE)
}


#' @name survreg_tidiers
#'
#' @param data original data; if it is not provided, it is reconstructed
#' as best as possible with \code{\link{model.frame}}
#' @param newdata New data to use for prediction; optional
#' @param type.predict type of prediction, default "response"
#' @param type.residuals type of residuals to calculate, default "response"
#'
#' @template augment_NAs
#'
#' @return \code{augment} returns the original data.frame with the following
#' additional columns:
#'   \item{.fitted}{Fitted values of model}
#'   \item{.se.fit}{Standard errors of fitted values}
#'   \item{.resid}{Residuals}
#'
#' @export
augment.survreg <- function(x, data = stats::model.frame(x), newdata,
                            type.predict = "response",
                            type.residuals = "response", ...) {
    # augment_columns does all the work (including rowname handling); a
    # previously computed fix_data_frame() result was unused and is removed
    augment_columns(x, data, newdata, type.predict = type.predict,
                    type.residuals = type.residuals)
}


#' @rdname survreg_tidiers
#'
#' @return \code{glance} returns a one-row data.frame with the columns:
#'   \item{iter}{number of iterations}
#'   \item{df}{degrees of freedom}
#'   \item{statistic}{chi-squared statistic}
#'   \item{p.value}{p-value from chi-squared test}
#'   \item{logLik}{log likelihood}
#'   \item{AIC}{Akaike information criterion}
#'   \item{BIC}{Bayesian information criterion}
#'   \item{df.residual}{residual degrees of freedom}
#'
#' (\code{conf.level} is accepted for backward compatibility but is
#' currently unused)
#'
#' @export
glance.survreg <- function(x, conf.level = .95, ...)
{ ret <- data.frame(iter = x$iter, df = sum(x$df)) ret$chi <- 2 * diff(x$loglik) ret$p.value <- 1 - stats::pchisq(ret$chi, sum(x$df) - x$idf) finish_glance(ret, x) } #' Tidiers for Tests of Differences between Survival Curves #' #' @param x a "survdiff" object #' @param strata logical, whether to include strata in the output #' @param ... other arguments passed to/from other methods, currently ignored #' #' @seealso \code{\link[survival]{survdiff}} #' #' @template boilerplate #' #' @name survdiff_tidiers #' #' @examples #' if( require("survival") ) { #' s <- survdiff( Surv(time, status) ~ pat.karno + strata(inst), data=lung) #' tidy(s) #' glance(s) #' } NULL #' @rdname survdiff_tidiers #' #' @return #' \code{tidy} on "survdiff" objects returns a data frame with the following columns: #' \item{...}{initial column(s) correspond to grouping factors (right-hand side of the formula)} #' \item{obs}{weighted observed number of events in each group} #' \item{exp}{weighted expected number of events in each group} #' \item{N}{number of subjects in each group} #' #' @export tidy.survdiff <- function(x, strata=FALSE, ...) 
{ # if one-sample test if( length(x$obs) == 1 ) { return( data.frame( N = x$n, obs = x$obs, exp = x$exp ) ) } # grouping variables (unless one-sample test) l <- lapply(strsplit(rownames(x$n), ", "), strsplit, "=") row_list <- lapply(l, function(x) structure( as.data.frame(lapply(x, "[", 2), stringsAsFactors = FALSE), names = sapply(x, "[", 1) ) ) gvars <- do.call("rbind", row_list) has_strata <- "strata" %in% names(x) if(strata && has_strata) { .NotYetUsed(strata) d_obs <- cbind(gvars, as.data.frame(x$obs)) %>% tidyr::gather(strata, obs, dplyr::matches("V[0-9]+") ) %>% tidyr::extract(strata, "strata", "([0-9]+)") d_exp <- cbind(gvars, as.data.frame(x$exp)) %>% tidyr::gather(strata, exp, dplyr::matches("V[0-9]+") ) %>% tidyr::extract(strata, "strata", "([0-9]+)") z <- d_obs %>% dplyr::left_join(d_exp) } else { rval <- data.frame( N = as.numeric(x$n), obs = if(has_strata) apply(x$obs, 1, sum) else x$obs, exp = if(has_strata) apply(x$exp, 1, sum) else x$exp ) } cbind( gvars, rval ) } #' @rdname survdiff_tidiers #' #' @return #' \code{glance} on "survdiff" objects returns a data frame with the following columns: #' \item{statistic}{value of the test statistic} #' \item{df}{degrees of freedom} #' \item{p.value}{p-value} #' #' #' @export glance.survdiff <- function(x, ...) { e <- x$exp if(is.matrix(e)) { tmp <- apply(e, 1, sum) } else { tmp <- e } rval <- data.frame( statistic = x$chisq, df = (sum(1 * (tmp > 0))) - 1 ) rval$p.value <- 1 - stats::pchisq(rval$statistic, rval$df) rval } broom/R/nls_tidiers.R0000644000177700017770000001067213204276216015576 0ustar herbrandtherbrandt#' Tidying methods for a nonlinear model #' #' These methods tidy the coefficients of a nonlinear model into a summary, #' augment the original data with information on the fitted values and residuals, #' and construct a one-row glance of the model's statistics. 
#' #' @param x An object of class "nls" #' @param data original data this was fitted on; if not given this will #' attempt to be reconstructed from nls (may not be successful) #' #' @return All tidying methods return a \code{data.frame} without rownames. #' The structure depends on the method chosen. #' #' @template augment_NAs #' #' @seealso \code{\link{nls}} and \code{\link{summary.nls}} #' #' @examples #' #' n <- nls(mpg ~ k * e ^ wt, data = mtcars, start = list(k = 1, e = 2)) #' #' tidy(n) #' augment(n) #' glance(n) #' #' library(ggplot2) #' ggplot(augment(n), aes(wt, mpg)) + geom_point() + geom_line(aes(y = .fitted)) #' #' # augment on new data #' newdata <- head(mtcars) #' newdata$wt <- newdata$wt + 1 #' augment(n, newdata = newdata) #' #' @name nls_tidiers NULL #' @rdname nls_tidiers #' #' @param conf.int whether to include a confidence interval #' @param conf.level confidence level of the interval, used only if #' \code{conf.int=TRUE} #' @param quick whether to compute a smaller and faster version, containing #' only the \code{term} and \code{estimate} columns. #' #' @return \code{tidy} returns one row for each coefficient in the model, #' with five columns: #' \item{term}{The term in the nonlinear model being estimated and tested} #' \item{estimate}{The estimated coefficient} #' \item{std.error}{The standard error from the linear model} #' \item{statistic}{t-statistic} #' \item{p.value}{two-sided p-value} #' #' @export tidy.nls <- function(x, conf.int = FALSE, conf.level = .95, quick = FALSE, ...) { if (quick) { co <- stats::coef(x) ret <- data.frame(term = names(co), estimate = unname(co)) return(ret) } nn <- c("estimate", "std.error", "statistic", "p.value") ret <- fix_data_frame(stats::coef(summary(x)), nn) if (conf.int) { # avoid "Waiting for profiling to be done..." 
message CI <- suppressMessages(stats::confint(x, level = conf.level)) if (is.null(dim(CI))) { CI = matrix(CI, nrow=1) } colnames(CI) = c("conf.low", "conf.high") ret <- cbind(ret, unrowname(CI)) } ret } #' @rdname nls_tidiers #' #' @param newdata new data frame to use for predictions #' #' @return \code{augment} returns one row for each original observation, #' with two columns added: #' \item{.fitted}{Fitted values of model} #' \item{.resid}{Residuals} #' #' If \code{newdata} is provided, these are computed on based on predictions #' of the new data. #' #' @export augment.nls <- function(x, data = NULL, newdata = NULL, ...) { if (!is.null(newdata)) { # use predictions on new data newdata <- fix_data_frame(newdata, newcol = ".rownames") newdata$.fitted <- stats::predict(x, newdata = newdata) return(newdata) } if (is.null(data)) { pars <- names(x$m$getPars()) env <- as.list(x$m$getEnv()) data <- as.data.frame(env[!(names(env) %in% pars)]) } else { # preprocess data to fit NAs #if (!is.null(x$na.action) && class(x$na.action) == "omit") { # data <- data[-x$na.action, ] # # get rid of rownames # rownames(data) <- NULL #} } return(augment_columns(x, data)) # move rownames if necessary data <- fix_data_frame(data, newcol = ".rownames") data$.fitted <- stats::predict(x) data$.resid <- stats::resid(x) data } #' @rdname nls_tidiers #' #' @param ... extra arguments (not used) #' #' @return \code{glance} returns one row with the columns #' \item{sigma}{the square root of the estimated residual variance} #' \item{isConv}{whether the fit successfully converged} #' \item{finTol}{the achieved convergence tolerance} #' \item{logLik}{the data's log-likelihood under the model} #' \item{AIC}{the Akaike Information Criterion} #' \item{BIC}{the Bayesian Information Criterion} #' \item{deviance}{deviance} #' \item{df.residual}{residual degrees of freedom} #' #' @export glance.nls <- function(x, ...) 
{ s <- summary(x) ret <- unrowname(data.frame(sigma=s$sigma, isConv=s$convInfo$isConv, finTol=s$convInfo$finTol)) finish_glance(ret, x) } broom/R/kmeans_tidiers.R0000644000177700017770000000554713204276216016265 0ustar herbrandtherbrandt#' Tidying methods for kmeans objects #' #' These methods summarize the results of k-means clustering into three #' tidy forms. \code{tidy} describes the center and size of each cluster, #' \code{augment} adds the cluster assignments to the original data, and #' \code{glance} summarizes the total within and between sum of squares #' of the clustering. #' #' @param x kmeans object #' @param data Original data (required for \code{augment}) #' @param col.names The names to call each dimension of the data in \code{tidy}. #' Defaults to \code{x1, x2...} #' @param ... extra arguments, not used #' #' @return All tidying methods return a \code{data.frame} without rownames. #' The structure depends on the method chosen. #' #' @seealso \code{\link{kmeans}} #' #' @examples #' #' library(dplyr) #' library(ggplot2) #' #' set.seed(2014) #' centers <- data.frame(cluster=factor(1:3), size=c(100, 150, 50), #' x1=c(5, 0, -3), x2=c(-1, 1, -2)) #' points <- centers %>% group_by(cluster) %>% #' do(data.frame(x1=rnorm(.$size[1], .$x1[1]), #' x2=rnorm(.$size[1], .$x2[1]))) #' #' k <- kmeans(points %>% dplyr::select(x1, x2), 3) #' tidy(k) #' head(augment(k, points)) #' glance(k) #' #' ggplot(augment(k, points), aes(x1, x2)) + #' geom_point(aes(color = .cluster)) + #' geom_text(aes(label = cluster), data = tidy(k), size = 10) #' #' @name kmeans_tidiers #' NULL #' @rdname kmeans_tidiers #' #' @return \code{tidy} returns one row per cluster, with one column for each #' dimension in the data describing the center, followed by #' \item{size}{The size of each cluster} #' \item{withinss}{The within-cluster sum of squares} #' \item{cluster}{A factor describing the cluster from 1:k} #' #' @export tidy.kmeans <- function(x, col.names=paste0("x", 1:ncol(x$centers)), 
...) { ret <- data.frame(x$centers) colnames(ret) <- col.names ret$size <- x$size ret$withinss <- x$withinss ret$cluster <- factor(seq_len(nrow(ret))) ret } #' @rdname kmeans_tidiers #' #' @return \code{augment} returns the original data with one extra column: #' \item{.cluster}{The cluster assigned by the k-means algorithm} #' #' @export augment.kmeans <- function(x, data, ...) { # move rownames if necessary data <- fix_data_frame(data, newcol = ".rownames") # show cluster assignment as a factor (it's not numeric) cbind(as.data.frame(data), .cluster = factor(x$cluster)) } #' @rdname kmeans_tidiers #' #' @return \code{glance} returns a one-row data.frame with the columns #' \item{totss}{The total sum of squares} #' \item{tot.withinss}{The total within-cluster sum of squares} #' \item{betweenss}{The total between-cluster sum of squares} #' \item{iter}{The numbr of (outer) iterations} #' #' @export glance.kmeans <- function(x, ...) { ret <- as.data.frame(x[c("totss", "tot.withinss", "betweenss", "iter")]) ret } broom/R/betareg_tidiers.R0000644000177700017770000000577613204276216016424 0ustar herbrandtherbrandt#' Tidy betareg objects from the betareg package #' #' Tidy beta regression objects into summarized coefficients, add their fitted values #' and residuals, or find their model parameters. #' #' @param x A "betareg" object #' @param conf.int whether to include a confidence interval #' @param conf.level confidence level of the interval, used only if #' \code{conf.int=TRUE} #' #' @name betareg_tidiers #' #' @template boilerplate #' #' @return tidy returns a data.frame with one row for each term used to predict #' the mean, along with at least one term used to predict phi (the inverse of #' the variance). It starts with the column \code{component} containing either #' "mean" or "precision" to describe which is being modeled, then has the same #' columns as tidied linear models or glm's (see \code{\link{lm_tidiers}}). 
#' #' @examples #' #' if (require("betareg", quietly = TRUE)) { #' data("GasolineYield", package = "betareg") #' #' mod <- betareg(yield ~ batch + temp, data = GasolineYield) #' #' mod #' tidy(mod) #' tidy(mod, conf.int = TRUE) #' tidy(mod, conf.int = TRUE, conf.level = .99) #' #' head(augment(mod)) #' #' glance(mod) #' } #' #' @export tidy.betareg <- function(x, conf.int = FALSE, conf.level = .95, ...) { nn <- c("estimate", "std.error", "statistic", "p.value") ret <- plyr::ldply(coef(summary(x)), fix_data_frame, .id = "component", newnames = nn) if (conf.int) { conf <- unrowname(confint(x, level = conf.level)) colnames(conf) <- c("conf.low", "conf.high") ret <- cbind(ret, conf) } ret } #' @rdname betareg_tidiers #' #' @param data Original data frame the regression was fit on #' @param newdata New data frame to use for prediction #' @param type.predict Type of predictions to calculate #' @param type.residuals Type of residuals to calculate #' #' @return augment returns the original data, along with new columns describing #' each observation: #' \item{.fitted}{Fitted values of model} #' \item{.resid}{Residuals} #' \item{.cooksd}{Cooks distance, \code{\link{cooks.distance}}} #' #' @export augment.betareg <- function(x, data = stats::model.frame(x), newdata, type.predict, type.residuals, ...) { augment_columns(x, data, newdata, type.predict = type.predict, type.residuals = type.residuals) } #' @rdname betareg_tidiers #' #' @param ... Extra arguments, not used #' #' @return \code{glance} returns a one-row data.frame with the columns #' \item{pseudo.r.squared}{the deviance of the null model} #' \item{logLik}{the data's log-likelihood under the model} #' \item{AIC}{the Akaike Information Criterion} #' \item{BIC}{the Bayesian Information Criterion} #' \item{df.residual}{residual degrees of freedom} #' \item{df.null}{degrees of freedom under the null} #' #' @export glance.betareg <- function(x, ...) 
{ s <- summary(x) ret <- unrowname(as.data.frame(s[c("pseudo.r.squared")])) ret <- finish_glance(ret, x) ret$df.null <- s$df.null ret } broom/R/gmm_tidiers.R0000644000177700017770000001252313204276216015557 0ustar herbrandtherbrandt#' Tidying methods for generalized method of moments "gmm" objects #' #' These methods tidy the coefficients of "gmm" objects from the gmm package, #' or glance at the model-wide statistics (especially the J-test). #' #' @param x gmm object #' @param conf.int whether to include a confidence interval #' @param conf.level confidence level of the interval, used only if #' \code{conf.int=TRUE} #' @param exponentiate whether to exponentiate the coefficient estimates #' and confidence intervals (typical for logistic regression) #' @param quick whether to compute a smaller and faster version, containing #' only the \code{term} and \code{estimate} columns (and confidence interval #' if requested, which may be slower) #' #' @details If \code{conf.int=TRUE}, the confidence interval is computed with #' the \code{\link{confint}} function. #' #' Note that though the "gmm" object contains residuals and fitted values, #' there is not yet an \code{augment} method implemented. This is because #' the input to gmm is not tidy (it's a "wide" matrix), so it is not immediately #' clear what the augmented results should look like. #' #' @return All tidying methods return a \code{data.frame} without rownames. #' The structure depends on the method chosen. #' #' \code{tidy.gmm} returns one row for each coefficient, with six columns: #' \item{term}{The term in the model being estimated} #' \item{estimate}{The estimated coefficient} #' \item{std.error}{The standard error from the linear model} #' \item{statistic}{t-statistic} #' \item{p.value}{two-sided p-value} #' #' If all the the terms have _ in them (e.g. \code{WMK_(Intercept)}), #' they are split into \code{variable} and \code{term}. 
#' #' If \code{conf.int=TRUE}, it also includes columns for \code{conf.low} and #' \code{conf.high}, computed with \code{\link{confint}}. #' #' @name gmm_tidiers #' #' @examples #' #' if (require("gmm", quietly = TRUE)) { #' # examples come from the "gmm" package #' ## CAPM test with GMM #' data(Finance) #' r <- Finance[1:300, 1:10] #' rm <- Finance[1:300, "rm"] #' rf <- Finance[1:300, "rf"] #' #' z <- as.matrix(r-rf) #' t <- nrow(z) #' zm <- rm-rf #' h <- matrix(zm, t, 1) #' res <- gmm(z ~ zm, x = h) #' #' # tidy result #' tidy(res) #' tidy(res, conf.int = TRUE) #' tidy(res, conf.int = TRUE, conf.level = .99) #' #' # coefficient plot #' library(ggplot2) #' library(dplyr) #' tidy(res, conf.int = TRUE) %>% #' mutate(variable = reorder(variable, estimate)) %>% #' ggplot(aes(estimate, variable)) + #' geom_point() + #' geom_errorbarh(aes(xmin = conf.low, xmax = conf.high)) + #' facet_wrap(~ term) + #' geom_vline(xintercept = 0, color = "red", lty = 2) #' #' # from a function instead of a matrix #' g <- function(theta, x) { #' e <- x[,2:11] - theta[1] - (x[,1] - theta[1]) %*% matrix(theta[2:11], 1, 10) #' gmat <- cbind(e, e*c(x[,1])) #' return(gmat) } #' #' x <- as.matrix(cbind(rm, r)) #' res_black <- gmm(g, x = x, t0 = rep(0, 11)) #' #' tidy(res_black) #' tidy(res_black, conf.int = TRUE) #' #' ## APT test with Fama-French factors and GMM #' #' f1 <- zm #' f2 <- Finance[1:300, "hml"] - rf #' f3 <- Finance[1:300, "smb"] - rf #' h <- cbind(f1, f2, f3) #' res2 <- gmm(z ~ f1 + f2 + f3, x = h) #' #' td2 <- tidy(res2, conf.int = TRUE) #' td2 #' #' # coefficient plot #' td2 %>% #' mutate(variable = reorder(variable, estimate)) %>% #' ggplot(aes(estimate, variable)) + #' geom_point() + #' geom_errorbarh(aes(xmin = conf.low, xmax = conf.high)) + #' facet_wrap(~ term) + #' geom_vline(xintercept = 0, color = "red", lty = 2) #' } #' #' @export tidy.gmm <- function(x, conf.int = FALSE, conf.level = .95, exponentiate = FALSE, quick = FALSE, ...) 
{ if (quick) { co <- stats::coef(x) ret <- data.frame(term = names(co), estimate = unname(co)) } else { co <- stats::coef(summary(x)) nn <- c("estimate", "std.error", "statistic", "p.value") ret <- fix_data_frame(co, nn[1:ncol(co)]) } # newer versions of GMM create a 'confint' object, so we can't use process_lm ret <- process_lm(ret, x, conf.int = FALSE, conf.level = conf.level, exponentiate = exponentiate) if (conf.int) { CI <- suppressMessages(stats::confint(x, level = conf.level)) if (! is.matrix(CI)) CI <- CI$test colnames(CI) = c("conf.low", "conf.high") trans <- if (exponentiate) exp else identity ret <- cbind(ret, trans(unrowname(CI))) } if (all(grepl("_", ret$term))) { # separate the variable and term ret <- tidyr::separate(ret, term, c("variable", "term"), sep = "_", extra = "merge") } ret } #' @rdname gmm_tidiers #' #' @param ... extra arguments (not used) #' #' @return \code{glance.gmm} returns a one-row data.frame with the columns #' \item{df}{Degrees of freedom} #' \item{statistic}{Statistic from J-test for E(g)=0} #' \item{p.value}{P-value from J-test} #' \item{df.residual}{Residual degrees of freedom, if included in "gmm" object} #' #' @export glance.gmm <- function(x, ...) { s <- gmm::summary.gmm(x) st <- suppressWarnings(as.numeric(s$stest$test)) ret <- data.frame(df = x$df, statistic = st[1], p.value = st[2]) ret <- finish_glance(unrowname(ret), x) ret } broom/R/rcorr_tidiers.R0000644000177700017770000000363513204276216016132 0ustar herbrandtherbrandt#' Tidying methods for rcorr objects #' #' Tidies a correlation matrix from the \code{rcorr} function in the #' "Hmisc" package, including correlation estimates, p-values, #' and the number of observations in each pairwise correlation. #' Note that it returns these in "long", or "melted", format, #' with one row for each pair of columns being compared. 
#' #' @param x An object of class "rcorr" #' @param diagonal Whether to include diagonal elements (where #' \code{estimate} is 1 and \code{p.value} is NA), default FALSE #' @param ... extra arguments (not used) #' #' @return A data.frame with one row for each pairing #' in the correlation matrix. Columns are: #' \item{column1}{Name or index of the first column being described} #' \item{column2}{Name or index of the second column being described} #' \item{estimate}{Estimate of Pearson's r or Spearman's rho} #' \item{n}{Number of observations used to compute the correlation} #' \item{p.value}{P-value of correlation} #' #' @details Only half the symmetric matrix is shown. #' #' @examples #' #' if (require("Hmisc", quietly = TRUE)) { #' mat <- replicate(52, rnorm(100)) #' # add some NAs #' mat[sample(length(mat), 2000)] <- NA #' # also column names #' colnames(mat) <- c(LETTERS, letters) #' #' rc <- rcorr(mat) #' #' td <- tidy(rc) #' head(td) #' #' library(ggplot2) #' ggplot(td, aes(p.value)) + #' geom_histogram(binwidth = .1) #' #' ggplot(td, aes(estimate, p.value)) + #' geom_point() + #' scale_y_log10() #' } #' #' @name rcorr_tidiers #' #' @export tidy.rcorr <- function(x, diagonal = FALSE, ...) { ret <- reshape2::melt(x$r, varnames = c("column1", "column2"), value.name = "estimate") ret$n <- c(x$n) ret$p.value <- c(x$P) # include only half the symmetric matrix. ret <- ret[upper.tri(x$r, diag = diagonal), ] ret } broom/R/gamlss_tidiers.R0000644000177700017770000000350213204276216016262 0ustar herbrandtherbrandt#' Tidying methods for gamlss objects #' #' Tidying methods for "gamlss" objects from the gamlss package. #' #' @param x A "gamlss" object #' @param quick Whether to perform a fast version, and return only the coefficients #' @param ... 
Extra arguments (not used) #' #' @name gamlss_tidiers #' #' @template boilerplate #' #' @return A data.frame with one row for each coefficient, containing columns #' \item{parameter}{Type of coefficient being estimated: \code{mu}, \code{sigma}, #' \code{nu}, or \code{tau}} #' \item{term}{The term in the model being estimated and tested} #' \item{estimate}{The estimated coefficient} #' \item{std.error}{The standard error from the linear model} #' \item{statistic}{t-statistic} #' \item{p.value}{two-sided p-value} #' #' if (requireNamespace("gamlss", quietly = TRUE)) { #' data(abdom) #' mod<-gamlss(y~pb(x),sigma.fo=~pb(x),family=BCT, data=abdom, method=mixed(1,20)) #' #' tidy(mod) #' } #' #' @export tidy.gamlss <- function(x, quick = FALSE, ...){ if (quick) { co <- stats::coef(x) return(data.frame(term = names(co), estimate = unname(co))) } # need gamlss for summary to work if (!requireNamespace("gamlss", quietly = TRUE)) { stop("gamlss package not installed, cannot tidy gamlss") } # use capture.output to prevent summary from being printed to screen utils::capture.output(s <- summary(x, type = "qr")) # tidy the coefficients much as would be done for a linear model nn <- c("estimate", "std.error", "statistic", "p.value") ret <- fix_data_frame(s, nn) # add parameter types. This assumes each coefficient table starts # with "(Intercept)": unclear if this is guaranteed parameters <- x$parameters[cumsum(ret$term == "(Intercept)")] cbind(parameter = parameters, ret) } broom/R/matrix_tidiers.R0000644000177700017770000000240013204276216016274 0ustar herbrandtherbrandt#' Tidiers for matrix objects #' #' These perform tidying operations on matrix objects. \code{tidy} turns the #' matrix into a data.frame while bringing rownames, if they exist, in as #' a column called \code{.rownames} (since results of tidying operations never #' contain rownames). \code{glance} simply reports the number of rows and #' columns. Note that no augment method exists for matrices. 
#' #' @param x A matrix #' @param ... extra arguments, not used #' #' @examples #' #' mat <- as.matrix(mtcars) #' tidy(mat) #' glance(mat) #' #' @name matrix_tidiers #' @rdname matrix_tidiers #' #' @return \code{tidy.matrix} returns the original matrix converted into #' a data.frame, except that it incorporates rownames (if they exist) #' into a column called \code{.rownames}. #' #' @export tidy.matrix <- function(x, ...) { fix_data_frame(x, newcol = ".rownames") } #' @rdname matrix_tidiers #' #' @return \code{glance} returns a one-row data.frame with #' \item{nrow}{number of rows} #' \item{ncol}{number of columns} #' \item{complete.obs}{number of rows that have no missing values} #' \item{na.fraction}{fraction of values across all rows and columns that #' are missing} #' #' @export glance.matrix <- function(x, ...) { glance.data.frame(x) } broom/R/muhaz_tidiers.R0000644000177700017770000000301213204320671016107 0ustar herbrandtherbrandt#' Tidying methods for kernel based hazard rate estimates #' #' These methods tidy the output of \code{muhaz} objects as returned by the #' \code{\link[muhaz]{muhaz}} function, which provides kernel based #' non-parametric hazard rate estimators. #' #' The "augment" method is not useful and therefore not #' available for \code{muhaz} objects. #' #' @param x \code{muhaz} object #' #' @template boilerplate #' #' @return \code{tidy.muhaz} returns a tibble containing two columns: #' \code{time} at which the hazard rate was estimated and \code{estimate}. #' #' @name muhaz_tidiers #' #' @examples #' if (require("muhaz", quietly = TRUE)) { #' data(ovarian, package="survival") #' x <- muhaz(ovarian$futime, ovarian$fustat) #' tidy(x) #' glance(x) #' } #' #' @export tidy.muhaz <- function(x, ...) { bind_cols(x[c("est.grid", "haz.est")]) %>% rename("time"="est.grid", "estimate"="haz.est") } #' @rdname muhaz_tidiers #' #' @param ... 
extra arguments (not used) #' #' @return \code{glance.muhaz} returns a one-row data.frame with the columns #' \item{nobs}{Number of observations used for estimation} #' \item{min.time}{The minimum observed event or censoring time} #' \item{max.time}{The maximum observed event or censoring time} #' \item{min.harzard}{Minimal estimated hazard} #' \item{max.hazard}{Maximal estimated hazard} #' #' @export glance.muhaz <- function(x, ...) { bind_cols(x$pin[c("nobs", "min.time", "max.time")]) %>% mutate( min.hazard = min(x$haz.est), max.hazard = max(x$haz.est)) } broom/R/prcomp_tidiers.R0000644000177700017770000001302413204276216016274 0ustar herbrandtherbrandt#' Tidying methods for principal components analysis via \code{\link{prcomp}} #' #' These tidiers operate on the results of a principal components analysis #' computed using \code{prcomp}. The \code{tidy} method returns a data frame #' with either the eigenvectors representing each row or each column. #' #' @param x an object of class \code{"prcomp"} resulting from a call to #' \code{\link[stats]{prcomp}} #' @param matrix character; Indicates which sets of eigenvectors are returned #' in tidy form. "v", "rotation", or "variables" will return information about #' each variable, while "u", "x", or "samples" (default) returns the loadings #' for each original row. "d" or "pcs" returns information about each #' principal component. 
#' #' @name prcomp_tidiers #' #' @seealso \code{\link{prcomp}}, \link{svd_tidiers} #' #' @template boilerplate #' #' @return If \code{matrix} is "u", "samples", or "x", the \code{tidy} method #' returns #' \describe{ #' \item{\code{row}}{The sample labels (rownames) of the data set on #' which PCA was performed} #' \item{\code{PC}}{An integer vector indicating the principal component} #' \item{\code{value}}{The value of the eigenvector (axis score) on the #' indicated principal component} #' } #' #' If \code{matrix} is "v", "variables", or "rotation", the \code{tidy} method #' returns #' \describe{ #' \item{\code{row}}{The variable labels (colnames) of the data set on #' which PCA was performed} #' \item{\code{PC}}{An integer vector indicating the principal component} #' \item{\code{value}}{The value of the eigenvector (axis score) on the #' indicated principal component} #' } #' #' If \code{matrix} is "d" or "pcs", the \code{tidy} method returns #' \describe{ #' \item{\code{PC}}{An integer vector indicating the principal component} #' \item{\code{std.dev}}{Standard deviation explained by this PC} #' \item{\code{percent}}{Percentage of variation explained} #' \item{\code{cumulative}}{Cumulative percentage of variation explained} #' } #' #' @author Gavin L. 
Simpson #' #' @examples #' #' pc <- prcomp(USArrests, scale = TRUE) #' #' # information about rotation #' head(tidy(pc)) #' #' # information about samples (states) #' head(tidy(pc, "samples")) #' #' # information about PCs #' tidy(pc, "pcs") #' #' # state map #' library(dplyr) #' library(ggplot2) #' #' pc %>% #' tidy(matrix = "samples") %>% #' mutate(region = tolower(row)) %>% #' inner_join(map_data("state"), by = "region") %>% #' ggplot(aes(long, lat, group = group, fill = value)) + #' geom_polygon() + #' facet_wrap(~ PC) + #' theme_void() + #' ggtitle("Principal components of arrest data") #' #' au <- augment(pc, data = USArrests) #' head(au) #' #' ggplot(au, aes(.fittedPC1, .fittedPC2)) + #' geom_point() + #' geom_text(aes(label = .rownames), vjust = 1, hjust = 1) #' #' @export tidy.prcomp <- function(x, matrix = "u", ...) { if (length(matrix) > 1) { stop("Tidying multiple matrices not supported") } MATRIX <- c("rotation", "x", "variables", "samples", "v", "u", "pcs", "d") matrix <- match.arg(matrix, MATRIX) ncomp <- NCOL(x$rotation) if (matrix %in% c("pcs", "d")) { nn <- c("std.dev", "percent", "cumulative") ret <- fix_data_frame(t(summary(x)$importance), newnames = nn, newcol = "PC") } else if (matrix %in% c("rotation", "variables", "v")) { labels <- rownames(x$rotation) variables <- tidyr::gather(as.data.frame(x$rotation)) ret <- data.frame(label = rep(labels, times = ncomp), variables, stringsAsFactors = FALSE) names(ret) <- c("column", "PC", "value") } else if (matrix %in% c("x", "samples", "u")) { labels <- rownames(x$x) samples <- tidyr::gather(as.data.frame(x$x)) ret <- data.frame(label = rep(labels, times = ncomp), samples) names(ret) <- c("row", "PC", "value") } ## change the PC to a numeric ret <- mutate(ret, PC = as.numeric(stringr::str_replace(PC, "PC", ""))) ret } #' @rdname prcomp_tidiers #' #' @param data the original data on which principal components analysis #' was performed. This cannot be recovered from \code{x}. 
If \code{newdata} #' is supplied, \code{data} is ignored. If both \code{data} and \code{newdata} #' are missing, only the fitted locations on the principal components are #' returned. #' @param newdata data frame; new observations for which locations on principal #' components are sought. #' @param ... Extra arguments, not used #' #' @return The \code{augment.prcomp} method returns a data frame containing #' fitted locations on the principal components for the observed data plus #' either the original data or the new data if supplied via \code{data} or #' \code{newdata} respectively. #' #' @export augment.prcomp <- function(x, data = NULL, newdata, ...) { ret <- if (!missing(newdata)) { ret <- data.frame(.rownames = rownames(newdata)) pred <- as.data.frame(predict(x, newdata = newdata)) names(pred) <- paste0(".fitted", names(pred)) cbind(ret, newdata, pred) } else { pred <- as.data.frame(predict(x)) names(pred) <- paste0(".fitted", names(pred)) if (!missing(data) && !is.null(data)) { cbind(.rownames = rownames(data), data, pred) } else { data.frame(.rownames = rownames(x$x), pred) } } ret <- unrowname(ret) ret } broom/R/btergm_tidiers.R0000644000177700017770000000627413204276216016265 0ustar herbrandtherbrandt#' Tidying method for a bootstrapped temporal exponential random graph model #' #' This method tidies the coefficients of a bootstrapped temporal exponential #' random graph model estimated with the \pkg{xergm}. It simply returns the #' coefficients and their confidence intervals. #' #' @return A \code{data.frame} without rownames. 
#' #' @seealso \code{\link[btergm]{btergm}} #' #' @name btergm_tidiers #' #' @param x a \code{\link[btergm]{btergm}} object #' @examples #' #' if (require("xergm")) { #' # Using the same simulated example as the xergm package #' # Create 10 random networks with 10 actors #' networks <- list() #' for(i in 1:10){ #' mat <- matrix(rbinom(100, 1, .25), nrow = 10, ncol = 10) #' diag(mat) <- 0 #' nw <- network::network(mat) #' networks[[i]] <- nw #' } #' # Create 10 matrices as covariates #' covariates <- list() #' for (i in 1:10) { #' mat <- matrix(rnorm(100), nrow = 10, ncol = 10) #' covariates[[i]] <- mat #' } #' # Fit a model where the propensity to form ties depends #' # on the edge covariates, controlling for the number of #' # in-stars #' btfit <- btergm(networks ~ edges + istar(2) + #' edgecov(covariates), R = 100) #' #' # Show terms, coefficient estimates and errors #' tidy(btfit) #' #' # Show coefficients as odds ratios with a 99% CI #' tidy(btfit, exponentiate = TRUE, conf.level = 0.99) #' } NULL #' @rdname btergm_tidiers #' #' @param conf.level confidence level of the bootstrapped interval #' @param exponentiate whether to exponentiate the coefficient estimates #' and confidence intervals #' @param quick whether to compute a smaller and faster version, containing #' only the \code{term} and \code{estimate} columns. #' @param ... extra arguments (currently not used) #' #' @details There is no \code{augment} or \code{glance} method #' for \pkg{ergm} objects. #' #' @return \code{tidy.btergm} returns one row for each coefficient, #' with four columns: #' \item{term}{The term in the model being estimated and tested} #' \item{estimate}{The estimated coefficient} #' \item{conf.low}{The lower bound of the confidence interval} #' \item{conf.high}{The lower bound of the confidence interval} #' #' @export tidy.btergm <- function(x, conf.level = .95, exponentiate = FALSE, quick = FALSE, ...) 
{ if (exponentiate) { trans <- exp } else { trans <- identity } if (quick) { co <- fit@coef ret <- data.frame(term = names(co), estimate = trans(unname(co))) return(ret) } co <- btergm::confint(x, level = conf.level) nn <- c("estimate", "conf.low", "conf.high") if (inherits(co, "listof")) { # multiple response variables ret <- plyr::ldply(co, fix_data_frame, nn[1:ncol(co[[1]])], .id = "response") ret$response <- stringr::str_replace(ret$response, "Response ", "") } else { ret <- fix_data_frame(co, nn[1:ncol(co)]) } ret$conf.low <- trans(ret$conf.low) ret$conf.high <- trans(ret$conf.high) ret$estimate <- trans(ret$estimate) ret } broom/R/smooth.spline_tidiers.R0000644000177700017770000000317413204276216017603 0ustar herbrandtherbrandt#' tidying methods for smooth.spline objects #' #' This combines the original data given to smooth.spline with the #' fit and residuals #' #' @details No \code{tidy} method is provided for smooth.spline objects. #' #' @param x a smooth.spline object #' @param data defaults to data used to fit model #' @param ... not used in this method #' #' @examples #' #' spl <- smooth.spline(mtcars$wt, mtcars$mpg, df = 4) #' head(augment(spl, mtcars)) #' head(augment(spl)) # calls original columns x and y #' #' library(ggplot2) #' ggplot(augment(spl, mtcars), aes(wt, mpg)) + #' geom_point() + geom_line(aes(y = .fitted)) #' #' @name smooth.spline_tidiers #' @rdname smooth.spline_tidiers #' #' @return \code{augment} returns the original data with extra columns: #' \item{.fitted}{Fitted values of model} #' \item{.resid}{Residuals} #' #' @export augment.smooth.spline <- function(x, data = x$data, ...) 
{ # move rownames if necessary data <- unrowname(as.data.frame(data)) data <- as.data.frame(data) data$.fitted <- stats::fitted(x) data$.resid <- stats::resid(x) data } #' @rdname smooth.spline_tidiers #' #' @return \code{glance} returns one row with columns #' \item{spar}{smoothing parameter} #' \item{lambda}{choice of lambda corresponding to \code{spar}} #' \item{df}{equivalent degrees of freedom} #' \item{crit}{minimized criterion} #' \item{pen.crit}{penalized criterion} #' \item{cv.crit}{cross-validation score} #' #' @export glance.smooth.spline <- function(x, ...) { unrowname(as.data.frame( x[c("df", "lambda", "cv.crit", "pen.crit", "crit", "spar", "lambda")] )) } broom/R/auc_tidiers.R0000644000177700017770000000254113204276216015546 0ustar herbrandtherbrandt#' Tidiers for objects from the AUC package #' #' Tidy "roc" objects from the "auc" package. This can be used to, #' for example, draw ROC curves in ggplot2. #' #' @param x an "roc" object #' @param ... Additional arguments, not used #' #' @return A data frame with three columns: #' \item{cutoff}{The cutoff of the prediction scores used #' for classification} #' \item{tpr}{The resulting true positive rate at that cutoff} #' \item{fpr}{The resulting false positive rate at that cutoff} #' #' If the labels had names, those are added as an "instance" column. #' #' @examples #' #' if (require("AUC", quietly = TRUE)) { #' data(churn) #' r <- roc(churn$predictions,churn$labels) #' #' td <- tidy(r) #' head(td) #' #' library(ggplot2) #' ggplot(td, aes(fpr, tpr)) + #' geom_line() #' #' # compare the ROC curves for two prediction algorithms #' library(dplyr) #' library(tidyr) #' #' rocs <- churn %>% #' tidyr::gather(algorithm, value, -labels) %>% #' group_by(algorithm) %>% #' do(tidy(roc(.$value, .$labels))) #' #' ggplot(rocs, aes(fpr, tpr, color = algorithm)) + #' geom_line() #' } #' #' @name auc_tidiers #' #' @export tidy.roc <- function(x, ...) 
{ fix_data_frame(as.data.frame(unclass(x)), newnames = c("cutoff", "fpr", "tpr"), newcol = "instance") } broom/R/xyz_tidiers.R0000644000177700017770000000161413204276216015630 0ustar herbrandtherbrandt#' Tidiers for x, y, z lists suitable for persp, image, etc. #' #' Tidies lists with components x, y (vector of coordinates) and z (matrix of #' values) which are typically used by functions such as #' \code{\link[graphics]{persp}} or \code{\link[graphics]{image}} and returned #' by interpolation functions such as \code{\link[akima]{interp}}. #' #' @param x list with components x, y and z #' @param ... extra arguments #' #' @template boilerplate #' #' @return \code{tidy} returns a data frame with columns x, y and z and one row #' per value in matrix z. #' #' @examples #' #' A <- list(x=1:5, y=1:3, z=matrix(runif(5*3), nrow=5)) #' image(A) #' tidy(A) #' #' @name xyz_tidiers #' @importFrom reshape2 melt tidy_xyz <- function(x, ...) { # convert to data.frame d <- melt(x$z) names(d) <- c("x", "y", "z") # get coordinates d$x <- x$x[d$x] d$y <- x$y[d$y] return(d) } broom/R/mclust_tidiers.R0000644000177700017770000000732013204276216016305 0ustar herbrandtherbrandt#' Tidying methods for Mclust objects #' #' These methods summarize the results of Mclust clustering into three #' tidy forms. \code{tidy} describes the size, mixing probability, mean #' and variabilty of each class, \code{augment} adds the class assignments and #' their probabilities to the original data, and #' \code{glance} summarizes the model parameters of the clustering. #' #' @param x Mclust object #' @param data Original data (required for \code{augment}) #' @param ... 
extra arguments, not used #' #' @template boilerplate #' #' @seealso \code{\link[mclust]{Mclust}} #' #' @examples #' #' library(dplyr) #' library(ggplot2) #' library(mclust) #' #' set.seed(2016) #' centers <- data.frame(cluster=factor(1:3), size=c(100, 150, 50), #' x1=c(5, 0, -3), x2=c(-1, 1, -2)) #' points <- centers %>% group_by(cluster) %>% #' do(data.frame(x1=rnorm(.$size[1], .$x1[1]), #' x2=rnorm(.$size[1], .$x2[1]))) %>% #' ungroup() #' #' m = Mclust(points %>% dplyr::select(x1, x2)) #' #' tidy(m) #' head(augment(m, points)) #' glance(m) #' #' @name mclust_tidiers #' NULL #' @rdname mclust_tidiers #' #' @return \code{tidy} returns one row per component, with #' \item{component}{A factor describing the cluster from 1:k #' (or 0:k in presence of a noise term in x)} #' \item{size}{The size of each component} #' \item{proportion}{The mixing proportion of each component} #' \item{variance}{In case of one-dimensional and spherical models, #' the variance for each component, omitted otherwise. NA for noise component} #' \item{mean}{The mean for each component. In case of two- or more dimensional models, #' a column with the mean is added for each dimension. NA for noise component} #' #' @export tidy.Mclust <- function(x, ...) 
{ np = max(x$G,length(table(x$classification))) ret = data.frame(seq_len(np)) colnames(ret) = c("component") if(x$G < np) ret$component = ret$component-1 ret$size = sapply(seq(1,np), function(c) { sum(x$classification == c)}) ret$proportion = x$parameters$pro if(x$modelName %in% c("E","V","EII","VII")) { ret$variance = rep_len(x$parameters$variance$sigmasq, length.out = x$G) } if(dim(as.matrix(x$parameters$mean))[2] > 1) { mean = t(x$parameters$mean) }else{ mean = as.matrix(x$parameters$mean) } cbind(ret, mean = rbind(matrix(, np-nrow(mean), ncol(mean)), mean)) } #' @rdname mclust_tidiers #' #' @return \code{augment} returns the original data with two extra columns: #' \item{.class}{The class assigned by the Mclust algorithm} #' \item{.uncertainty}{The uncertainty associated with the classification} #' #' @export augment.Mclust <- function(x, data, ...) { # move rownames if necessary data <- fix_data_frame(data, newcol = ".rownames") # show cluster assignment as a factor (it's not numeric) cbind(as.data.frame(data), .class = factor(x$classification), .uncertainty = x$uncertainty) } #' @rdname mclust_tidiers #' #' @return \code{glance} returns a one-row data.frame with the columns #' \item{model}{A character string denoting the model at which the optimal BIC occurs} #' \item{n}{The number of observations in the data} #' \item{G}{The optimal number of mixture components} #' \item{BIC}{The optimal BIC value} #' \item{logLik}{The log-likelihood corresponding to the optimal BIC} #' \item{df}{The number of estimated parameters} #' \item{hypvol}{The hypervolume parameter for the noise component if required, #' otherwise set to NA} #' #' @export glance.Mclust <- function(x, ...) 
{ ret <- with(x, data.frame(model = modelName, n, G, BIC = bic, logLik = loglik, df, hypvol)) ret } broom/R/psych_tidiers.R0000644000177700017770000000264513204276216016131 0ustar herbrandtherbrandt#' Tidy a kappa object from a Cohen's kappa calculation #' #' Tidy a "kappa" object, from the \code{\link{cohen.kappa}} function #' in the psych package. This represents the agreement of two raters #' when using nominal scores. #' #' @param x An object of class "kappa" #' @param ... extra arguments (not used) #' #' @return A data.frame with columns #' \item{type}{Either "weighted" or "unweighted"} #' \item{estimate}{The estimated value of kappa with this method} #' \item{conf.low}{Lower bound of confidence interval} #' \item{conf.high}{Upper bound of confidence interval} #' #' @details Note that the alpha of the confidence interval is determined #' when the \code{cohen.kappa} function is originally run. #' #' @seealso \code{\link{cohen.kappa}} #' #' @name kappa_tidiers #' #' @examples #' #' library(psych) #' #' rater1 = 1:9 #' rater2 = c(1, 3, 1, 6, 1, 5, 5, 6, 7) #' ck <- cohen.kappa(cbind(rater1, rater2)) #' #' tidy(ck) #' #' # graph the confidence intervals #' library(ggplot2) #' ggplot(tidy(ck), aes(estimate, type)) + #' geom_point() + #' geom_errorbarh(aes(xmin = conf.low, xmax = conf.high)) #' #' @name anova_tidiers #' #' @export tidy.kappa <- function(x, ...) { nn <- c("conf.low", "estimate", "conf.high") ret <- fix_data_frame(x$confid, nn, newcol = "type") %>% select(type, estimate, conf.low, conf.high) %>% mutate(type = gsub(" kappa", "", type)) ret } broom/R/glmnet_tidiers.R0000644000177700017770000001465613204276216016276 0ustar herbrandtherbrandt# tidiers from the glmnet package, specifically glmnet and cv.glmnet objects #' Tidiers for LASSO or elasticnet regularized fits #' #' Tidying methods for regularized fits produced by \code{glmnet}, summarizing #' the estimates across values of the penalty parameter lambda. 
#' #' @template boilerplate #' #' @details Note that while this representation of GLMs is much easier #' to plot and combine than the default structure, it is also much #' more memory-intensive. Do not use for extremely large, sparse matrices. #' #' No \code{augment} method is yet provided even though the model produces #' predictions, because the input data is not tidy (it is a matrix that #' may be very wide) and therefore combining predictions with it is not #' logical. Furthermore, predictions make sense only with a specific #' choice of lambda. #' #' @examples #' #' if (require("glmnet", quietly = TRUE)) { #' set.seed(2014) #' x <- matrix(rnorm(100*20),100,20) #' y <- rnorm(100) #' fit1 <- glmnet(x,y) #' #' head(tidy(fit1)) #' glance(fit1) #' #' library(dplyr) #' library(ggplot2) #' #' tidied <- tidy(fit1) %>% filter(term != "(Intercept)") #' #' ggplot(tidied, aes(step, estimate, group = term)) + geom_line() #' ggplot(tidied, aes(lambda, estimate, group = term)) + #' geom_line() + scale_x_log10() #' #' ggplot(tidied, aes(lambda, dev.ratio)) + geom_line() #' #' # works for other types of regressions as well, such as logistic #' g2 <- sample(1:2, 100, replace=TRUE) #' fit2 <- glmnet(x, g2, family="binomial") #' head(tidy(fit2)) #' } #' #' @rdname glmnet_tidiers #' @name glmnet_tidiers #' #' @param x a "glmnet" object #' @param ... extra arguments (not used) #' #' @return \code{tidy} produces a data.frame with one row per combination of #' coefficient (including the intercept) and value of lambda for which the estimate #' is nonzero, with the columns: #' \item{term}{coefficient name (V1...VN by default, along with #' "(Intercept)")} #' \item{step}{which step of lambda choices was used} #' \item{estimate}{estimate of coefficient} #' \item{lambda}{value of penalty parameter lambda} #' \item{dev.ratio}{fraction of null deviance explained at each #' value of lambda} #' #' @import dplyr #' @importFrom tidyr gather #' #' @export tidy.glmnet <- function(x, ...) 
{ beta <- glmnet::coef.glmnet(x) if (inherits(x, "multnet")) { beta_d <- plyr::ldply(beta, function(b) { fix_data_frame(as.matrix(b), newcol = "term") }, .id = "class") ret <- beta_d %>% tidyr::gather(step, estimate, -term, -class) } else { beta_d <- fix_data_frame(as.matrix(beta), newnames = 1:ncol(beta), newcol = "term") ret <- tidyr::gather(beta_d, step, estimate, -term) } # add values specific to each step ret %>% mutate(step = as.numeric(step), lambda = x$lambda[step], dev.ratio = x$dev.ratio[step]) %>% filter(estimate != 0) } #' @rdname glmnet_tidiers #' #' @return \code{glance} returns a one-row data.frame with the values #' \item{nulldev}{null deviance} #' \item{npasses}{total passes over the data across all lambda values} #' #' @export glance.glmnet <- function(x, ...) { data.frame(nulldev = x$nulldev, npasses = x$npasses) } #' Tidiers for glmnet cross-validation objects #' #' Tidying methods for cross-validation performed by \code{glmnet.cv}, #' summarizing the mean-squared-error across choices of the penalty parameter #' lambda. #' #' @details No \code{augment} method exists for this class. 
#' #' @template boilerplate #' #' @examples #' #' if (require("glmnet", quietly = TRUE)) { #' set.seed(2014) #' #' nobs <- 100 #' nvar <- 50 #' real <- 5 #' #' x <- matrix(rnorm(nobs * nvar), nobs, nvar) #' beta <- c(rnorm(real, 0, 1), rep(0, nvar - real)) #' y <- c(t(beta) %*% t(x)) + rnorm(nvar, sd = 3) #' #' cvfit1 <- cv.glmnet(x,y) #' #' head(tidy(cvfit1)) #' glance(cvfit1) #' #' library(ggplot2) #' tidied_cv <- tidy(cvfit1) #' glance_cv <- glance(cvfit1) #' #' # plot of MSE as a function of lambda #' g <- ggplot(tidied_cv, aes(lambda, estimate)) + geom_line() + scale_x_log10() #' g #' #' # plot of MSE as a function of lambda with confidence ribbon #' g <- g + geom_ribbon(aes(ymin = conf.low, ymax = conf.high), alpha = .25) #' g #' #' # plot of MSE as a function of lambda with confidence ribbon and choices #' # of minimum lambda marked #' g <- g + geom_vline(xintercept = glance_cv$lambda.min) + #' geom_vline(xintercept = glance_cv$lambda.1se, lty = 2) #' g #' #' # plot of number of zeros for each choice of lambda #' ggplot(tidied_cv, aes(lambda, nzero)) + geom_line() + scale_x_log10() #' #' # coefficient plot with min lambda shown #' tidied <- tidy(cvfit1$glmnet.fit) #' ggplot(tidied, aes(lambda, estimate, group = term)) + scale_x_log10() + #' geom_line() + #' geom_vline(xintercept = glance_cv$lambda.min) + #' geom_vline(xintercept = glance_cv$lambda.1se, lty = 2) #' } #' #' @rdname cv.glmnet_tidiers #' @name cv.glmnet_tidiers #' #' @param x a "cv.glmnet" object #' @param ... 
extra arguments (not used) #' #' @return \code{tidy} produces a data.frame with one row per choice of lambda, #' with columns #' \item{lambda}{penalty parameter lambda} #' \item{estimate}{estimate (median) of mean-squared error or other #' criterion} #' \item{std.error}{standard error of criterion} #' \item{conf.high}{high end of confidence interval on criterion} #' \item{conf.low}{low end of confidence interval on criterion} #' \item{nzero}{number of parameters that are zero at this choice of lambda} #' #' @import dplyr #' @importFrom tidyr gather #' #' @export tidy.cv.glmnet <- function(x, ...) { ret <- as.data.frame(x[c("lambda", "cvm", "cvsd", "cvup", "cvlo", "nzero")]) colnames(ret) <- c("lambda", "estimate", "std.error", "conf.high", "conf.low", "nzero") return(unrowname(ret)) } #' @rdname cv.glmnet_tidiers #' #' @return \code{glance} returns a one-row data.frame with the values #' \item{nulldev}{null deviance} #' \item{npasses}{total passes over the data across all lambda values} #' #' @export glance.cv.glmnet <- function(x, ...) { data.frame(lambda.min = x$lambda.min, lambda.1se = x$lambda.1se) } broom/R/robust_tidiers.R0000644000177700017770000000413613204276216016316 0ustar herbrandtherbrandt#' Tidiers for lmRob and glmRob objects #' #' Tidying robust regression objects from the robust package. The tidy and augment #' methods simply pass it on to the linear model tidiers. #' #' @param x An lmRob or glmRob object with a robust regression #' @param ... Extra arguments, not used #' #' @template boilerplate #' #' @return \code{tidy} and \code{augment} return the same results as \code{\link{lm_tidiers}}. 
#' #' On an \code{lmRob} \code{glance} returns a one-row data frame with the following columns: #' \item{r.squared}{R-squared} #' \item{deviance}{Robust deviance} #' \item{sigma}{Residual scale estimate} #' \item{df.residual}{Number of residual degrees of freedom} #' #' On an \code{lmRob} \code{glance} returns a one-row data frame with the following columns: #' \item{deviance}{Robust deviance} #' \item{null.deviance}{Deviance under the null model} #' \item{df.residual}{Number of residual degrees of freedom} #' #' @examples #' #' if (require("robust", quietly = TRUE)) { #' m <- lmRob(mpg ~ wt, data = mtcars) #' #' tidy(m) #' augment(m) #' glance(m) #' #' gm <- glmRob(am ~ wt, data = mtcars, family = "binomial") #' glance(gm) #' } #' #' @name robust_tidiers #' #' @seealso \code{\link{lm_tidiers}}, \code{\link[robust]{lmRob}}, \code{\link[robust]{glmRob}} #' #' @export tidy.lmRob <- function(x, ...) { tidy.lm(x) } #' @rdname robust_tidiers #' @export augment.lmRob <- function(x, ...) { augment.lm(x) } #' @rdname robust_tidiers #' @export glance.lmRob <- function(x, ...) { s <- robust::summary.lmRob(x) ret <- data.frame(r.squared = x$r.squared, sigma = s$sigma) if (!is.null(ret$dev)) { ret$deviance <- ret$dev } finish_glance(ret, x) } #' @name robust_tidiers #' #' @export tidy.glmRob <- function(x, ...) { tidy.lm(x) } #' @rdname robust_tidiers #' @export augment.glmRob <- function(x, ...) { augment.lm(x) } #' @rdname robust_tidiers #' @export glance.glmRob <- function(x, ...) { ret <- data.frame(deviance = x$deviance, null.deviance = x$null.deviance) finish_glance(ret, x) } broom/R/extras.R0000644000177700017770000000313413204276216014560 0ustar herbrandtherbrandt# this contains functions that are useful to use with broom, but are not # actually involved in tidying objects. 
#' Set up bootstrap replicates of a dplyr operation #' #' @param df a data frame #' @param m number of bootstrap replicates to perform #' @param by_group If \code{TRUE}, then bootstrap within each group if \code{df} is #' a grouped tbl. #' #' @details This code originates from Hadley Wickham (with a few small #' corrections) here: #' #' https://github.com/hadley/dplyr/issues/269 #' #' Some examples can be found at #' #' https://github.com/dgrtwo/broom/blob/master/vignettes/bootstrapping.Rmd #' #' @examples #' #' library(dplyr) #' mtcars %>% bootstrap(10) %>% do(tidy(lm(mpg ~ wt, .))) #' #' @export bootstrap <- function (df, m, by_group = FALSE) { n <- nrow(df) attr(df, "indices") <- if (by_group && !is.null(groups(df))) { replicate(m, unlist(lapply(attr(df, "indices"), function(x) { sample(x, replace = TRUE) }), recursive = FALSE, use.names = FALSE), simplify = FALSE) } else { replicate(m, sample(n, replace = TRUE) - 1, simplify = FALSE) } attr(df, "drop") <- TRUE attr(df, "group_sizes") <- rep(n, m) attr(df, "biggest_group_size") <- n attr(df, "labels") <- data.frame(replicate = 1:m) attr(df, "vars") <- list(quote(replicate)) class(df) <- c("grouped_df", "tbl_df", "tbl", "data.frame") df } broom/R/rlm_tidiers.R0000644000177700017770000000212213204276216015563 0ustar herbrandtherbrandt#' Tidying methods for an rlm (robust linear model) object #' #' This method provides a glance of an "rlm" object. The \code{tidy} and #' \code{augment} methods are handled by \link{lm_tidiers}. #' #' @param x rlm object #' @param ... 
extra arguments (not used) #' #' @return \code{glance.rlm} returns a one-row data.frame with the columns #' \item{sigma}{The square root of the estimated residual variance} #' \item{converged}{whether the IWLS converged} #' \item{logLik}{the data's log-likelihood under the model} #' \item{AIC}{the Akaike Information Criterion} #' \item{BIC}{the Bayesian Information Criterion} #' \item{deviance}{deviance} #' #' @name rlm_tidiers #' #' @examples #' #' library(MASS) #' #' r <- rlm(stack.loss ~ ., stackloss) #' tidy(r) #' augment(r) #' glance(r) #' #' @seealso \link{lm_tidiers} #' #' @export glance.rlm <- function(x, ...) { s <- summary(x) ret <- data.frame(sigma = s$sigma, converged = x$converged) ret <- finish_glance(ret, x) # remove df.residual, which is always set to NA in rlm objects dplyr::select(ret, -df.residual) } broom/R/sp_tidiers.R0000644000177700017770000000552213204276216015422 0ustar herbrandtherbrandt#' tidying methods for classes from the sp package. #' #' Tidy classes from the sp package to allow them to be plotted using ggplot2. #' To figure out the correct variable name for region, inspect #' \code{as.data.frame(x)}. #' #' These functions originated in the ggplot2 package as "fortify" functions. #' #' @param x \code{SpatialPolygonsDataFrame} to convert into a dataframe. #' @param region name of variable used to split up regions #' @param ... not used by this method #' #' @name sp_tidiers #' #' @examples #' if (require("maptools")) { #' sids <- system.file("shapes/sids.shp", package="maptools") #' nc1 <- readShapePoly(sids, #' proj4string = CRS("+proj=longlat +datum=NAD27")) #' nc1_df <- tidy(nc1) #' } #' #' @importFrom plyr ldply NULL #' @rdname sp_tidiers #' @export #' @method tidy SpatialPolygonsDataFrame tidy.SpatialPolygonsDataFrame <- function(x, region = NULL, ...) 
{ attr <- as.data.frame(x) # If not specified, split into regions based on polygons if (is.null(region)) { coords <- ldply(x@polygons,tidy) message("Regions defined for each Polygons") } else { cp <- sp::polygons(x) # Union together all polygons that make up a region unioned <- maptools::unionSpatialPolygons(cp, attr[, region]) coords <- tidy(unioned) coords$order <- 1:nrow(coords) } coords } #' @rdname sp_tidiers #' @export #' @method tidy SpatialPolygons tidy.SpatialPolygons <- function(x, ...) { ldply(x@polygons, tidy) } #' @rdname sp_tidiers #' @export #' @method tidy Polygons tidy.Polygons <- function(x, ...) { subpolys <- x@Polygons pieces <- ldply(seq_along(subpolys), function(i) { df <- tidy(subpolys[[x@plotOrder[i]]]) df$piece <- i df }) within(pieces,{ order <- 1:nrow(pieces) id <- x@ID piece <- factor(piece) group <- interaction(id, piece) }) } #' @rdname sp_tidiers #' @export #' @method tidy Polygon tidy.Polygon <- function(x, ...) { df <- as.data.frame(x@coords) names(df) <- c("long", "lat") df$order <- 1:nrow(df) df$hole <- x@hole df } #' @rdname sp_tidiers #' @export #' @method tidy SpatialLinesDataFrame tidy.SpatialLinesDataFrame <- function(x, ...) { ldply(x@lines, tidy) } #' @rdname sp_tidiers #' @export #' @method tidy Lines tidy.Lines <- function(x, ...) { lines <- x@Lines pieces <- ldply(seq_along(lines), function(i) { df <- tidy(lines[[i]]) df$piece <- i df }) within(pieces,{ order <- 1:nrow(pieces) id <- x@ID piece <- factor(piece) group <- interaction(id, piece) }) } #' @rdname sp_tidiers #' @export #' @method tidy Line tidy.Line <- function(x, ...) { df <- as.data.frame(x@coords) names(df) <- c("long", "lat") df$order <- 1:nrow(df) df } broom/R/multinom_tidiers.R0000644000177700017770000001030513204276216016637 0ustar herbrandtherbrandt#' Tidying methods for multinomial logistic regression models #' #' These methods tidy the coefficients of multinomial logistic regression #' models generated by \code{multinom} of the \code{nnet} package. 
#' #' @param x A model object of class \code{multinom} #' #' @return All tidying methods return a \code{data.frame} without rownames. #' The structure depends on the method chosen. #' #' @name multinom_tidiers #' #' @examples #' #' if (require(nnet) & require(MASS)){ #' example(birthwt) #' bwt.mu <- multinom(low ~ ., bwt) #' tidy(bwt.mu) #' glance(bwt.mu) #' #' #* This model is a truly terrible model #' #* but it should show you what the output looks #' #* like in a multinomial logistic regression #' #' fit.gear <- multinom(gear ~ mpg + factor(am), data=mtcars) #' tidy(fit.gear) #' glance(fit.gear) #' } #' NULL #' @rdname multinom_tidiers #' #' @param conf.int whether to include a confidence interval #' @param conf.level confidence level of the interval, used only if #' \code{conf.int=TRUE} #' @param exponentiate whether to exponentiate the coefficient estimates #' and confidence intervals (typical for multinomial logistic regression) #' @param ... extra arguments, not used #' #' @details If \code{conf.int=TRUE}, the confidence interval is computed with #' the \code{\link{confint}} function. #' #' While \code{tidy} and \code{glance} are supported for "multinom" objects, #' \code{augment} is not. #' #' @return \code{tidy.multinom} returns one row for each coefficient at each #' level of the response variable, with six columns: #' \item{y.value}{The response level} #' \item{term}{The term in the model being estimated and tested} #' \item{estimate}{The estimated coefficient} #' \item{std.error}{The standard error from the linear model} #' \item{statistic}{Wald z-statistic} #' \item{p.value}{two-sided p-value} #' #' If \code{conf.int=TRUE}, it also includes columns for \code{conf.low} and #' \code{conf.high}, computed with \code{\link{confint}}. #' #' \code{glance.multinom} returns a #' #' @export tidy.multinom <- function(x, conf.int=FALSE, conf.level=.95, exponentiate=TRUE, ...) 
{ col_names <- if (length(x$lev) > 2) colnames(coef(x)) else names(coef(x)) s <- summary(x) coef <- matrix(coef(s), byrow=FALSE, nrow=length(x$lev)-1, dimnames=list(x$lev[-1], col_names)) se <- matrix(s$standard.errors, byrow=FALSE, nrow=length(x$lev)-1, dimnames=list(x$lev[-1], col_names)) #* Quick utility to convert each row of coef to a data frame multinomRowToDf <- function(r, coef, se, col_names){ unrowname(data.frame(y.level = rep(r, length(col_names)), term = colnames(coef), estimate = coef[r, ], std.error = se[r, ], stringsAsFactors=FALSE)) } #* Convert to coefficients data frame ret <- lapply(rownames(coef), multinomRowToDf, coef, se, col_names) ret <- do.call("rbind", ret) #* Calculate Wald-type Z and p-value ret$statistic <- ret$estimate / ret$std.error ret$p.value <- stats::pnorm(abs(ret$statistic), 0, 1, lower.tail=FALSE) * 2 #* Confidence Interval if (conf.int){ ci <- apply(stats::confint(x), 2, function(a) unlist(as.data.frame(a))) ci <- as.data.frame(ci) names(ci) <- c("conf.low", "conf.high") ret <- cbind(ret, ci) } #* Exponentiate (for Odds Ratios) if (exponentiate){ exp.col <- if(conf.int) c("estimate", "conf.low", "conf.high") else "estimate" ret[, exp.col] <- lapply(ret[, exp.col, drop=FALSE], exp) } ret } #' @rdname multinom_tidiers #' @return \code{glance.multinom} returns a one-row data.frame with the columns #' \item{edf}{The effective degrees of freedom} #' \item{deviance}{deviance} #' \item{AIC}{the Akaike Information Criterion} #' #' @export glance.multinom <- function(x, ...) 
{ ret <- with(x, data.frame(edf=edf, deviance = deviance, AIC = AIC)) ret } broom/vignettes/0000755000177700017770000000000013204542661014735 5ustar herbrandtherbrandtbroom/vignettes/broom.Rmd0000644000177700017770000002304613204276216016524 0ustar herbrandtherbrandt ```{r setup, echo=FALSE} library(knitr) opts_chunk$set(warning=FALSE, message=FALSE) ``` broom: let's tidy up a bit ===================== The broom package takes the messy output of built-in functions in R, such as `lm`, `nls`, or `t.test`, and turns them into tidy data frames. The concept of "tidy data", [as introduced by Hadley Wickham](http://www.jstatsoft.org/v59/i10), offers a powerful framework for data manipulation and analysis. That paper makes a convincing statement of the problem this package tries to solve (emphasis mine): > **While model inputs usually require tidy inputs, such attention to detail doesn't carry over to model outputs. Outputs such as predictions and estimated coefficients aren't always tidy. This makes it more difficult to combine results from multiple models.** For example, in R, the default representation of model coefficients is not tidy because it does not have an explicit variable that records the variable name for each estimate, they are instead recorded as row names. In R, row names must be unique, so combining coefficients from many models (e.g., from bootstrap resamples, or subgroups) requires workarounds to avoid losing important information. **This knocks you out of the flow of analysis and makes it harder to combine the results from multiple models. I'm not currently aware of any packages that resolve this problem.** broom is an attempt to bridge the gap from untidy outputs of predictions and estimations to the tidy data we want to work with. It centers around three S3 methods, each of which take common objects produced by R statistical functions (`lm`, `t.test`, `nls`, etc) and convert them into a data frame. 
broom is particularly designed to work with Hadley's [dplyr](https://github.com/hadley/dplyr) package (see the [broom+dplyr](broom_and_dplyr.html) vignette for more). broom should be distinguished from packages like [reshape2](https://CRAN.R-project.org/package=reshape2) and [tidyr](https://github.com/hadley/tidyr), which rearrange and reshape data frames into different forms. Those packages perform critical tasks in tidy data analysis but focus on manipulating data frames in one specific format into another. In contrast, broom is designed to take format that is *not* in a data frame (sometimes not anywhere close) and convert it to a tidy data frame. Tidying model outputs is not an exact science, and it's based on a judgment of the kinds of values a data scientist typically wants out of a tidy analysis (for instance, estimates, test statistics, and p-values). You may lose some of the information in the original object that you wanted, or keep more information than you need. If you think the tidy output for a model should be changed, or if you're missing a tidying function for an S3 class that you'd like, I strongly encourage you to [open an issue](http://github.com/dgrtwo/broom/issues) or a pull request. Tidying functions ----------------- This package provides three S3 methods that do three distinct kinds of tidying. * `tidy`: constructs a data frame that summarizes the model's statistical findings. This includes coefficients and p-values for each term in a regression, per-cluster information in clustering applications, or per-test information for `multtest` functions. * `augment`: add columns to the original data that was modeled. This includes predictions, residuals, and cluster assignments. * `glance`: construct a concise *one-row* summary of the model. This typically contains values such as R^2, adjusted R^2, and residual standard error that are computed once for the entire model. Note that some classes may have only one or two of these methods defined. 
Consider as an illustrative example a linear fit on the built-in `mtcars` dataset. ```{r lmfit} lmfit <- lm(mpg ~ wt, mtcars) lmfit summary(lmfit) ``` This summary output is useful enough if you just want to read it. However, converting it to a data frame that contains all the same information, so that you can combine it with other models or do further analysis, is not trivial. You have to do `coef(summary(lmfit))` to get a matrix of coefficients, the terms are still stored in row names, and the column names are inconsistent with other packages (e.g. `Pr(>|t|)` compared to `p.value`). Instead, you can use the `tidy` function, from the broom package, on the fit: ```{r} library(broom) tidy(lmfit) ``` This gives you a data.frame representation. Note that the row names have been moved into a column called `term`, and the column names are simple and consistent (and can be accessed using `$`). Instead of viewing the coefficients, you might be interested in the fitted values and residuals for each of the original points in the regression. For this, use `augment`, which augments the original data with information from the model: ```{r} head(augment(lmfit)) ``` Note that each of the new columns begins with a `.` (to avoid overwriting any of the original columns). Finally, several summary statistics are computed for the entire regression, such as R^2 and the F-statistic. These can be accessed with the `glance` function: ```{r} glance(lmfit) ``` This distinction between the `tidy`, `augment` and `glance` functions is explored in a different context in the [k-means vignette](kmeans.html). Other Examples -------------- ### Generalized linear and non-linear models These functions apply equally well to the output from `glm`: ```{r glmfit} glmfit <- glm(am ~ wt, mtcars, family="binomial") tidy(glmfit) head(augment(glmfit)) glance(glmfit) ``` Note that the statistics computed by `glance` are different for `glm` objects than for `lm` (e.g. 
deviance rather than R^2): These functions also work on other fits, such as nonlinear models (`nls`): ```{r} nlsfit <- nls(mpg ~ k / wt + b, mtcars, start=list(k=1, b=0)) tidy(nlsfit) head(augment(nlsfit, mtcars)) glance(nlsfit) ``` ### Hypothesis testing The `tidy` function can also be applied to `htest` objects, such as those output by popular built-in functions like `t.test`, `cor.test`, and `wilcox.test`. ```{r ttest} tt <- t.test(wt ~ am, mtcars) tidy(tt) ``` Some cases might have fewer columns (for example, no confidence interval): ```{r} wt <- wilcox.test(wt ~ am, mtcars) tidy(wt) ``` Since the `tidy` output is already only one row, `glance` returns the same output: ```{r} glance(tt) glance(wt) ``` There is no `augment` function for `htest` objects, since there is no meaningful sense in which a hypothesis test produces output about each initial data point. Conventions ------------ In order to maintain consistency, we attempt to follow some conventions regarding the structure of returned data. ### All functions * The output of the `tidy`, `augment` and `glance` functions is *always* a data frame. * The output never has rownames. This ensures that you can combine it with other tidy outputs without fear of losing information (since rownames in R cannot contain duplicates). * Some column names are kept consistent, so that they can be combined across different models and so that you know what to expect (in contrast to asking "is it `pval` or `PValue`?" every time). The examples below are not all the possible column names, nor will all tidy output contain all or even any of these columns. ### tidy functions * Each row in a `tidy` output typically represents some well-defined concept, such as one term in a regression, one test, or one cluster/class. This meaning varies across models but is usually self-evident. The one thing each row cannot represent is a point in the initial data (for that, use the `augment` method). 
* Common column names include: * `term`"" the term in a regression or model that is being estimated. * `p.value`: this spelling was chosen (over common alternatives such as `pvalue`, `PValue`, or `pval`) to be consistent with functions in R's built-in `stats` package * `statistic` a test statistic, usually the one used to compute the p-value. Combining these across many sub-groups is a reliable way to perform (e.g.) bootstrap hypothesis testing * `estimate` * `conf.low` the low end of a confidence interval on the `estimate` * `conf.high` the high end of a confidence interval on the `estimate` * `df` degrees of freedom ### augment functions * `augment(model, data)` adds columns to the original data. * If the `data` argument is missing, `augment` attempts to reconstruct the data from the model (note that this may not always be possible, and usually won't contain columns not used in the model). * Each row in an `augment` output matches the corresponding row in the original data. * If the original data contained rownames, `augment` turns them into a column called `.rownames`. * Newly added column names begin with `.` to avoid overwriting columns in the original data. * Common column names include: * `.fitted`: the predicted values, on the same scale as the data. * `.resid`: residuals: the actual y values minus the fitted values * `.cluster`: cluster assignments ### glance functions * `glance` always returns a one-row data frame. * The only exception is that `glance(NULL)` returns an empty data frame. * We avoid including arguments that were *given* to the modeling function. For example, a `glm` glance output does not need to contain a field for `family`, since that is decided by the user calling `glm` rather than the modeling function itself. 
* Common column names include: * `r.squared` the fraction of variance explained by the model * `adj.r.squared` R^2 adjusted based on the degrees of freedom * `sigma` the square root of the estimated variance of the residuals broom/vignettes/kmeans.Rmd0000644000177700017770000001124613204276216016663 0ustar herbrandtherbrandt Tidying k-means clustering =================================== ```{r, echo=FALSE} library(knitr) opts_chunk$set(message=FALSE, warning=FALSE) ``` K-means clustering serves as a very useful example of tidy data, and especially the distinction between the three tidying functions: `tidy`, `augment`, and `glance`. Let's start by generating some random 2d data with three clusters, within which points are distributed according to a multivariate gaussian: ```{r} library(dplyr) set.seed(2014) centers <- data.frame(cluster=factor(1:3), size=c(100, 150, 50), x1=c(5, 0, -3), x2=c(-1, 1, -2)) points <- centers %>% group_by(cluster) %>% do(data.frame(x1=rnorm(.$size[1], .$x1[1]), x2=rnorm(.$size[1], .$x2[1]))) library(ggplot2) ggplot(points, aes(x1, x2, color=cluster)) + geom_point() ``` This is an ideal case for k-means clustering. Let's examine what the built-in `kmeans` function returns. ```{r} points.matrix <- cbind(x1 = points$x1, x2 = points$x2) kclust <- kmeans(points.matrix, 3) kclust summary(kclust) ``` The output is a list of vectors, where each component has a different length. There's one of length `r nrow(points)`: the same as our original dataset. There are a number of elements of length 3: `withinss`, `tot.withinss`, and `betweenss`- and `centers` is a matrix with 3 rows. And then there are the elements of length 1: `totss`, `tot.withinss`, `betweenss`, and `iter`. These differing lengths have a deeper meaning when we want to tidy our dataset: they signify that each type of component communicates a *different kind* of information. 
* `cluster` (`r nrow(points.matrix)` values) contains information about each *point* * `centers`, `withinss` and `size` (3 values) contain information about each *cluster* * `totss`, `tot.withinss`, `betweenss`, and `iter` (1 value) contain information about the *full clustering* Which of these do we want to extract? There is no right answer: each of them may be interesting to an analyst. Because they communicate entirely different information (not to mention there's no straightforward way to combine them), they are extracted by separate functions. `augment` adds the point classifications to the original dataset: ```{r} library(broom) head(augment(kclust, points.matrix)) ``` The `tidy` function summarizes on a per-cluster level: ```{r} tidy(kclust) ``` And as it always does, the `glance` function extracts a single-row summary: ```{r} glance(kclust) ``` broom and dplyr for exploratory clustering --------------------------------------- While these summaries are useful, they would not have been too difficult to extract out from the dataset yourself. The real power comes from combining their analyses with dplyr. Let's say we want to explore the effect of different choices of `k`, from 1 to 9, on this clustering. First cluster the data 9 times, each using a different value of k: ```{r} kclusts <- data.frame(k=1:9) %>% group_by(k) %>% do(kclust=kmeans(points.matrix, .$k)) ``` Then tidy the clusterings three ways: using `tidy`, using `augment`, and using `glance`. Each of these goes into a separate dataset as they represent different types of data. 
```{r} clusters <- kclusts %>% group_by(k) %>% do(tidy(.$kclust[[1]])) assignments <- kclusts %>% group_by(k) %>% do(augment(.$kclust[[1]], points.matrix)) clusterings <- kclusts %>% group_by(k) %>% do(glance(.$kclust[[1]])) ``` Now we can plot the original points, with each point colored according to the original cluster: ```{r} p1 <- ggplot(assignments, aes(x1, x2)) + geom_point(aes(color=.cluster)) + facet_wrap(~ k) p1 ``` Already we get a good sense of the proper number of clusters (3), and how the k-means algorithm functions when k is too high or too low. We can then add the centers of the cluster using the data from `tidy`: ```{r} p2 <- p1 + geom_point(data=clusters, size=10, shape="x") p2 ``` The data from `glance` fits a different but equally important purpose: it lets you view trends of some summary statistics across values of k. Of particular interest is the total within sum of squares, saved in the `tot.withinss` column. ```{r} ggplot(clusterings, aes(k, tot.withinss)) + geom_line() ``` This represents the variance within the clusters. It decreases as k increases, but one can notice a bend (or "elbow") right at k=3. This bend indicates that additional clusters beyond the third have little value. (See [here](http://web.stanford.edu/~hastie/Papers/gap.pdf) for a more mathematically rigorous interpretation and implementation of this method). Thus, all three methods of tidying data provided by broom are useful for summarizing clustering output. broom/vignettes/broom_and_dplyr.Rmd0000644000177700017770000000723413204276216020561 0ustar herbrandtherbrandt ```{r opts_chunk, echo=FALSE} library(knitr) opts_chunk$set(message=FALSE, warning=FALSE) ``` broom and dplyr =============== While broom is useful for summarizing the result of a single analysis in a consistent format, it is really designed for high-throughput applications, where you must combine results from multiple analyses. 
These could be subgroups of data, analyses using different models, bootstrap replicates, permutations, and so on. In particular, it plays well with the `group_by` and `do` functions in `dplyr`. Let's try this on a simple dataset, the built-in `Orange` data.frame. ```{r setup} library(broom) library(dplyr) data(Orange) dim(Orange) head(Orange) ``` This contains 35 observations of three variables: `Tree`, `age`, and `circumference`. `Tree` is a factor with five levels describing five trees. As might be expected, age and circumference are correlated: ```{r} cor(Orange$age, Orange$circumference) library(ggplot2) ggplot(Orange, aes(age, circumference, color = Tree)) + geom_line() ``` Suppose you want to test for correlations individually *within* each tree. You can do this with dplyr's `group_by`: ```{r} Orange %>% group_by(Tree) %>% summarize(correlation = cor(age, circumference)) ``` (Note that the correlations are much higher than the aggregated one, and furthermore we can now see it is similar across trees). Suppose that instead of simply estimating a correlation, we want to perform a hypothesis test with `cor.test`: ```{r} cor.test(Orange$age, Orange$circumference) ``` This contains multiple values we could want in our output. Some are vectors of length 1, such as the p-value and the estimate, and some are longer, such as the confidence interval. broom's `tidy` S3 method, combined with dplyr's `do`, makes it easy to summarize the information about each test: ```{r} Orange %>% group_by(Tree) %>% do(tidy(cor.test(.$age, .$circumference))) ``` This becomes even more useful when applied to regressions, which give more than one row of output within each model: ```{r} Orange %>% group_by(Tree) %>% do(tidy(lm(age ~ circumference, data=.))) ``` You can just as easily perform multiple regressions within each group, as shown here on the `mtcars` dataset. We group the data into automatic and manual cars (the `am` column), then perform the regression within each. 
```{r} data(mtcars) head(mtcars) mtcars %>% group_by(am) %>% do(tidy(lm(wt ~ mpg + qsec + gear, .))) ``` What if you want not just the `tidy` output, but the `augment` and `glance` outputs as well, while still performing each regression only once? First, save the modeling result into a column `fit`. ```{r} regressions <- mtcars %>% group_by(cyl) %>% do(fit = lm(wt ~ mpg + qsec + gear, .)) regressions ``` This creates a rowwise data frame. Tidying methods are designed to work seamlessly with rowwise data frames, grouping them and performing tidying on each row: ```{r} regressions %>% tidy(fit) regressions %>% augment(fit) regressions %>% glance(fit) ``` By combining the estimates and p-values across all groups into the same tidy data frame (instead of, for example, a list of output model objects), a new class of analyses and visualizations becomes straightforward. This includes * Sorting by p-value or estimate to find the most significant terms across all tests * P-value histograms * Volcano plots comparing p-values to effect size estimates In each of these cases, we can easily filter, facet, or distinguish based on the `term` column. In short, this makes the tools of tidy data analysis available for the *results* of data analysis and models, not just the inputs.broom/vignettes/bootstrapping.Rmd0000644000177700017770000000720113204276216020274 0ustar herbrandtherbrandt ```{r setup, echo=FALSE} library(knitr) opts_chunk$set(message=FALSE) ``` Tidy bootstrapping with dplyr+broom =================================== Another place where combining model fits in a tidy way becomes useful is when performing bootstrapping or permutation tests. These approaches have been explored before, for instance by [Andrew MacDonald here](http://rstudio-pubs-static.s3.amazonaws.com/19698_a4c472606e3c43e4b94720506e49bb7b.html), and [Hadley has explored efficient support for bootstrapping](https://github.com/hadley/dplyr/issues/269) as a potential enhancement to dplyr. 
broom fits naturally with dplyr in performing these analyses. Bootstrapping consists of randomly sampling a dataset with replacement, then performing the analysis individually on each bootstrapped replicate. The variation in the resulting estimate is then a reasonable approximation of the variance in your estimate. Let's say you want to fit a nonlinear model to the weight/mileage relationship in the `mtcars` dataset. ```{r} library(ggplot2) data(mtcars) ggplot(mtcars, aes(mpg, wt)) + geom_point() ``` You might use the method of nonlinear least squares (`nls` function) to fit a model. ```{r} nlsfit <- nls(mpg ~ k / wt + b, mtcars, start=list(k=1, b=0)) summary(nlsfit) ggplot(mtcars, aes(wt, mpg)) + geom_point() + geom_line(aes(y=predict(nlsfit))) ``` While this does provide a p-value and confidence intervals for the parameters, these are based on model assumptions that may not hold in real data. Bootstrapping is a popular method for providing confidence intervals and predictions that are more robust to the nature of the data. The function `bootstrap` in **broom** can be used to sample bootstrap replications. First, we construct 100 bootstrap replications of the data, each of which has been randomly sampled with replacement. 
We use `do` to perform an `nls` fit on each replication, using `tidy` to recombine: ```{r} library(dplyr) library(broom) set.seed(2014) bootnls <- mtcars %>% bootstrap(100) %>% do(tidy(nls(mpg ~ k / wt + b, ., start=list(k=1, b=0)))) ``` This produces a summary of each replication, combined into one data.frame: ```{r} bootnls ``` You can then calculate confidence intervals (using what is called the [percentile method](https://www.uvm.edu/~dhowell/StatPages/Randomization%20Tests/ResamplingWithR/BootstMeans/bootstrapping_means.html)): ```{r} alpha = .05 bootnls %>% group_by(term) %>% summarize(low=quantile(estimate, alpha / 2), high=quantile(estimate, 1 - alpha / 2)) ``` Or you can use histograms to give you a more detailed idea of the uncertainty in each estimate: ```{r} library(ggplot2) ggplot(bootnls, aes(estimate)) + geom_histogram(binwidth=2) + facet_wrap(~ term, scales="free") ``` Or you can use `augment` to visualize the uncertainty in the curve: ```{r} bootnls_aug <- mtcars %>% bootstrap(100) %>% do(augment(nls(mpg ~ k / wt + b, ., start=list(k=1, b=0)), .)) ggplot(bootnls_aug, aes(wt, mpg)) + geom_point() + geom_line(aes(y=.fitted, group=replicate), alpha=.2) ``` With only a few small changes, one could easily perform bootstrapping with other kinds of predictive or hypothesis testing models, since the `tidy` and `augment` functions works for many statistical outputs. 
As another example, you could use `smooth.spline`: ```{r} smoothspline_aug <- mtcars %>% bootstrap(100) %>% do(augment(smooth.spline(.$wt, .$mpg, df=4), .)) ggplot(smoothspline_aug, aes(wt, mpg)) + geom_point() + geom_line(aes(y=.fitted, group=replicate), alpha=.2) ``` broom/README.md0000644000177700017770000005272413204276216014216 0ustar herbrandtherbrandtbroom: let's tidy up a bit ===================== [![CRAN_Status_Badge](http://www.r-pkg.org/badges/version/broom)](https://CRAN.R-project.org/package=broom) [![Travis-CI Build Status](https://travis-ci.org/tidyverse/broom.svg?branch=master)](https://travis-ci.org/tidyverse/broom) [![AppVeyor Build Status](https://ci.appveyor.com/api/projects/status/github/tidyverse/broom?branch=master&svg=true)](https://ci.appveyor.com/project/tidyverse/broom) [![Coverage Status](https://img.shields.io/codecov/c/github/tidyverse/broom/master.svg)](https://codecov.io/github/tidyverse/broom?branch=master) The broom package takes the messy output of built-in functions in R, such as `lm`, `nls`, or `t.test`, and turns them into tidy data frames. The concept of "tidy data", [as introduced by Hadley Wickham](http://www.jstatsoft.org/v59/i10), offers a powerful framework for data manipulation and analysis. That paper makes a convincing statement of the problem this package tries to solve (emphasis mine): > **While model inputs usually require tidy inputs, such attention to detail doesn't carry over to model outputs. Outputs such as predictions and estimated coefficients aren't always tidy. This makes it more difficult to combine results from multiple models.** For example, in R, the default representation of model coefficients is not tidy because it does not have an explicit variable that records the variable name for each estimate, they are instead recorded as row names. 
In R, row names must be unique, so combining coefficients from many models (e.g., from bootstrap resamples, or subgroups) requires workarounds to avoid losing important information. **This knocks you out of the flow of analysis and makes it harder to combine the results from multiple models. I'm not currently aware of any packages that resolve this problem.** broom is an attempt to bridge the gap from untidy outputs of predictions and estimations to the tidy data we want to work with. It centers around three S3 methods, each of which take common objects produced by R statistical functions (`lm`, `t.test`, `nls`, etc) and convert them into a data frame. broom is particularly designed to work with Hadley's [dplyr](https://github.com/hadley/dplyr) package (see the "broom and dplyr" vignette for more). broom should be distinguished from packages like [reshape2](https://CRAN.R-project.org/package=reshape2) and [tidyr](https://CRAN.R-project.org/package=tidyr), which rearrange and reshape data frames into different forms. Those packages perform critical tasks in tidy data analysis but focus on manipulating data frames in one specific format into another. In contrast, broom is designed to take format that is *not* in a data frame (sometimes not anywhere close) and convert it to a tidy data frame. Tidying model outputs is not an exact science, and it's based on a judgment of the kinds of values a data scientist typically wants out of a tidy analysis (for instance, estimates, test statistics, and p-values). You may lose some of the information in the original object that you wanted, or keep more information than you need. If you think the tidy output for a model should be changed, or if you're missing a tidying function for an S3 class that you'd like, I strongly encourage you to [open an issue](http://github.com/tidyverse/broom/issues) or a pull request. 
Installation and Documentation ------------ The broom package is available on CRAN: install.packages("broom") You can also install the development version of the broom package using [devtools](https://github.com/hadley/devtools): ``` library(devtools) install_github("tidyverse/broom") ``` For additional documentation, please browse the vignettes: ``` browseVignettes(package="broom") ``` Tidying functions ----------------- This package provides three S3 methods that do three distinct kinds of tidying. * `tidy`: constructs a data frame that summarizes the model's statistical findings. This includes coefficients and p-values for each term in a regression, per-cluster information in clustering applications, or per-test information for `multtest` functions. * `augment`: add columns to the original data that was modeled. This includes predictions, residuals, and cluster assignments. * `glance`: construct a concise *one-row* summary of the model. This typically contains values such as R^2, adjusted R^2, and residual standard error that are computed once for the entire model. Note that some classes may have only one or two of these methods defined. Consider as an illustrative example a linear fit on the built-in `mtcars` dataset. ```r lmfit <- lm(mpg ~ wt, mtcars) lmfit ``` ``` ## ## Call: ## lm(formula = mpg ~ wt, data = mtcars) ## ## Coefficients: ## (Intercept) wt ## 37.285 -5.344 ``` ```r summary(lmfit) ``` ``` ## ## Call: ## lm(formula = mpg ~ wt, data = mtcars) ## ## Residuals: ## Min 1Q Median 3Q Max ## -4.5432 -2.3647 -0.1252 1.4096 6.8727 ## ## Coefficients: ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 37.2851 1.8776 19.858 < 2e-16 *** ## wt -5.3445 0.5591 -9.559 1.29e-10 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 
0.1 ' ' 1 ## ## Residual standard error: 3.046 on 30 degrees of freedom ## Multiple R-squared: 0.7528, Adjusted R-squared: 0.7446 ## F-statistic: 91.38 on 1 and 30 DF, p-value: 1.294e-10 ``` This summary output is useful enough if you just want to read it. However, converting it to a data frame that contains all the same information, so that you can combine it with other models or do further analysis, is not trivial. You have to do `coef(summary(lmfit))` to get a matrix of coefficients, the terms are still stored in row names, and the column names are inconsistent with other packages (e.g. `Pr(>|t|)` compared to `p.value`). Instead, you can use the `tidy` function, from the broom package, on the fit: ```r library(broom) tidy(lmfit) ``` ``` ## term estimate std.error statistic p.value ## 1 (Intercept) 37.285126 1.877627 19.857575 8.241799e-19 ## 2 wt -5.344472 0.559101 -9.559044 1.293959e-10 ``` This gives you a data.frame representation. Note that the row names have been moved into a column called `term`, and the column names are simple and consistent (and can be accessed using `$`). Instead of viewing the coefficients, you might be interested in the fitted values and residuals for each of the original points in the regression. 
For this, use `augment`, which augments the original data with information from the model: ```r head(augment(lmfit)) ``` ``` ## .rownames mpg wt .fitted .se.fit .resid .hat ## 1 Mazda RX4 21.0 2.620 23.28261 0.6335798 -2.2826106 0.04326896 ## 2 Mazda RX4 Wag 21.0 2.875 21.91977 0.5714319 -0.9197704 0.03519677 ## 3 Datsun 710 22.8 2.320 24.88595 0.7359177 -2.0859521 0.05837573 ## 4 Hornet 4 Drive 21.4 3.215 20.10265 0.5384424 1.2973499 0.03125017 ## 5 Hornet Sportabout 18.7 3.440 18.90014 0.5526562 -0.2001440 0.03292182 ## 6 Valiant 18.1 3.460 18.79325 0.5552829 -0.6932545 0.03323551 ## .sigma .cooksd .std.resid ## 1 3.067494 1.327407e-02 -0.76616765 ## 2 3.093068 1.723963e-03 -0.30743051 ## 3 3.072127 1.543937e-02 -0.70575249 ## 4 3.088268 3.020558e-03 0.43275114 ## 5 3.097722 7.599578e-05 -0.06681879 ## 6 3.095184 9.210650e-04 -0.23148309 ``` Note that each of the new columns begins with a `.` (to avoid overwriting any of the original columns). Finally, several summary statistics are computed for the entire regression, such as R^2 and the F-statistic. These can be accessed with the `glance` function: ```r glance(lmfit) ``` ``` ## r.squared adj.r.squared sigma statistic p.value df logLik ## 1 0.7528328 0.7445939 3.045882 91.37533 1.293959e-10 2 -80.01471 ## AIC BIC deviance df.residual ## 1 166.0294 170.4266 278.3219 30 ``` This distinction between the `tidy`, `augment` and `glance` functions is explored in a different context in the k-means vignette. 
Other Examples -------------- ### Generalized linear and non-linear models These functions apply equally well to the output from `glm`: ```r glmfit <- glm(am ~ wt, mtcars, family="binomial") tidy(glmfit) ``` ``` ## term estimate std.error statistic p.value ## 1 (Intercept) 12.04037 4.509706 2.669879 0.007587858 ## 2 wt -4.02397 1.436416 -2.801396 0.005088198 ``` ```r head(augment(glmfit)) ``` ``` ## .rownames am wt .fitted .se.fit .resid .hat ## 1 Mazda RX4 1 2.620 1.4975684 0.9175750 0.6353854 0.12577908 ## 2 Mazda RX4 Wag 1 2.875 0.4714561 0.6761141 0.9848344 0.10816226 ## 3 Datsun 710 1 2.320 2.7047594 1.2799233 0.3598458 0.09628500 ## 4 Hornet 4 Drive 0 3.215 -0.8966937 0.6012064 -0.8271767 0.07438175 ## 5 Hornet Sportabout 0 3.440 -1.8020869 0.7486164 -0.5525972 0.06812194 ## 6 Valiant 0 3.460 -1.8825663 0.7669573 -0.5323012 0.06744101 ## .sigma .cooksd .std.resid ## 1 0.8033182 0.018405616 0.6795582 ## 2 0.7897742 0.042434911 1.0428463 ## 3 0.8101256 0.003942789 0.3785304 ## 4 0.7973421 0.017706938 -0.8597702 ## 5 0.8061915 0.006469973 -0.5724389 ## 6 0.8067014 0.005901376 -0.5512128 ``` ```r glance(glmfit) ``` ``` ## null.deviance df.null logLik AIC BIC deviance df.residual ## 1 43.22973 31 -9.588042 23.17608 26.10756 19.17608 30 ``` Note that the statistics computed by `glance` are different for `glm` objects than for `lm` (e.g. 
deviance rather than R^2): These functions also work on other fits, such as nonlinear models (`nls`): ```r nlsfit <- nls(mpg ~ k / wt + b, mtcars, start=list(k=1, b=0)) tidy(nlsfit) ``` ``` ## term estimate std.error statistic p.value ## 1 k 45.829488 4.249155 10.785554 7.639162e-12 ## 2 b 4.386254 1.536418 2.854858 7.737378e-03 ``` ```r head(augment(nlsfit, mtcars)) ``` ``` ## .rownames mpg cyl disp hp drat wt qsec vs am gear carb ## 1 Mazda RX4 21.0 6 160 110 3.90 2.620 16.46 0 1 4 4 ## 2 Mazda RX4 Wag 21.0 6 160 110 3.90 2.875 17.02 0 1 4 4 ## 3 Datsun 710 22.8 4 108 93 3.85 2.320 18.61 1 1 4 1 ## 4 Hornet 4 Drive 21.4 6 258 110 3.08 3.215 19.44 1 0 3 1 ## 5 Hornet Sportabout 18.7 8 360 175 3.15 3.440 17.02 0 0 3 2 ## 6 Valiant 18.1 6 225 105 2.76 3.460 20.22 1 0 3 1 ## .fitted .resid ## 1 21.87843 -0.8784251 ## 2 20.32695 0.6730544 ## 3 24.14034 -1.3403437 ## 4 18.64115 2.7588507 ## 5 17.70878 0.9912203 ## 6 17.63177 0.4682291 ``` ```r glance(nlsfit) ``` ``` ## sigma isConv finTol logLik AIC BIC deviance ## 1 2.77405 TRUE 2.87694e-08 -77.02329 160.0466 164.4438 230.8606 ## df.residual ## 1 30 ``` ### Hypothesis testing The `tidy` function can also be applied to `htest` objects, such as those output by popular built-in functions like `t.test`, `cor.test`, and `wilcox.test`. 
```r tt <- t.test(wt ~ am, mtcars) tidy(tt) ``` ``` ## estimate estimate1 estimate2 statistic p.value parameter conf.low ## 1 1.357895 3.768895 2.411 5.493905 6.27202e-06 29.23352 0.8525632 ## conf.high method alternative ## 1 1.863226 Welch Two Sample t-test two.sided ``` Some cases might have fewer columns (for example, no confidence interval): ```r wt <- wilcox.test(wt ~ am, mtcars) tidy(wt) ``` ``` ## statistic p.value method ## 1 230.5 4.347026e-05 Wilcoxon rank sum test with continuity correction ## alternative ## 1 two.sided ``` Since the `tidy` output is already only one row, `glance` returns the same output: ```r glance(tt) ``` ``` ## estimate estimate1 estimate2 statistic p.value parameter conf.low ## 1 1.357895 3.768895 2.411 5.493905 6.27202e-06 29.23352 0.8525632 ## conf.high method alternative ## 1 1.863226 Welch Two Sample t-test two.sided ``` ```r glance(wt) ``` ``` ## statistic p.value method ## 1 230.5 4.347026e-05 Wilcoxon rank sum test with continuity correction ## alternative ## 1 two.sided ``` There is no `augment` function for `htest` objects, since there is no meaningful sense in which a hypothesis test produces output about each initial data point. 
### Available Tidiers Currently broom provides tidying methods for many S3 objects from the built-in stats package, including * `lm` * `glm` * `htest` * `anova` * `nls` * `kmeans` * `manova` * `TukeyHSD` * `arima` It also provides methods for S3 objects in popular third-party packages, including * `lme4` * `glmnet` * `boot` * `gam` * `survival` * `lfe` * `zoo` * `multcomp` * `sp` * `maps` A full list of the `tidy`, `augment` and `glance` methods available for each class is as follows: |Class |`tidy` |`glance` |`augment` | |:------------------------|:------|:--------|:---------| |aareg |x |x | | |acf |x | | | |anova |x | | | |aov |x | | | |aovlist |x | | | |Arima |x |x | | |betareg |x |x |x | |biglm |x |x | | |binDesign |x |x | | |binWidth |x | | | |boot |x | | | |brmsfit |x | | | |btergm |x | | | |cch |x |x | | |character |x | | | |cld |x | | | |coeftest |x | | | |confint.glht |x | | | |coxph |x |x |x | |cv.glmnet |x |x | | |data.frame |x |x |x | |default |x |x |x | |density |x | | | |dgCMatrix |x | | | |dgTMatrix |x | | | |dist |x | | | |ergm |x |x | | |felm |x |x |x | |fitdistr |x |x | | |ftable |x | | | |gam |x |x | | |gamlss |x | | | |geeglm |x | | | |glht |x | | | |glmnet |x |x | | |glmRob |x |x |x | |gmm |x |x | | |htest |x |x | | |kappa |x | | | |kde |x | | | |kmeans |x |x |x | |Line |x | | | |Lines |x | | | |list |x |x | | |lm |x |x |x | |lme |x |x |x | |lmodel2 |x |x | | |lmRob |x |x |x | |logical |x | | | |lsmobj |x | | | |manova |x | | | |map |x | | | |matrix |x |x | | |Mclust |x |x |x | |merMod |x |x |x | |mle2 |x | | | |multinom |x |x | | |nlrq |x |x |x | |nls |x |x |x | |NULL |x |x |x | |numeric |x | | | |pairwise.htest |x | | | |plm |x |x |x | |poLCA |x |x |x | |Polygon |x | | | |Polygons |x | | | |power.htest |x | | | |prcomp |x | |x | |pyears |x |x | | |rcorr |x | | | |ref.grid |x | | | |ridgelm |x |x | | |rjags |x | | | |roc |x | | | |rowwise_df |x |x |x | |rq |x |x |x | |rqs |x |x |x | |sparseMatrix |x | | | |SpatialLinesDataFrame |x | | | 
|SpatialPolygons |x | | | |SpatialPolygonsDataFrame |x | | | |spec |x | | | |stanfit |x | | | |stanreg |x |x | | |summary.glht |x | | | |summary.lm |x |x | | |summaryDefault |x |x | | |survexp |x |x | | |survfit |x |x | | |survreg |x |x |x | |table |x | | | |tbl_df |x |x |x | |ts |x | | | |TukeyHSD |x | | | |zoo |x | | | Conventions ------------ In order to maintain consistency, we attempt to follow some conventions regarding the structure of returned data. ### All functions * The output of the `tidy`, `augment` and `glance` functions is *always* a data frame. * The output never has rownames. This ensures that you can combine it with other tidy outputs without fear of losing information (since rownames in R cannot contain duplicates). * Some column names are kept consistent, so that they can be combined across different models and so that you know what to expect (in contrast to asking "is it `pval` or `PValue`?" every time). The examples below are not all the possible column names, nor will all tidy output contain all or even any of these columns. ### tidy functions * Each row in a `tidy` output typically represents some well-defined concept, such as one term in a regression, one test, or one cluster/class. This meaning varies across models but is usually self-evident. The one thing each row cannot represent is a point in the initial data (for that, use the `augment` method). * Common column names include: * `term`: the term in a regression or model that is being estimated. * `p.value`: this spelling was chosen (over common alternatives such as `pvalue`, `PValue`, or `pval`) to be consistent with functions in R's built-in `stats` package * `statistic` a test statistic, usually the one used to compute the p-value. Combining these across many sub-groups is a reliable way to perform (e.g.) 
bootstrap hypothesis testing * `estimate` estimate of an effect size, slope, or other value * `std.error` standard error * `conf.low` the low end of a confidence interval on the `estimate` * `conf.high` the high end of a confidence interval on the `estimate` * `df` degrees of freedom ### augment functions * `augment(model, data)` adds columns to the original data. * If the `data` argument is missing, `augment` attempts to reconstruct the data from the model (note that this may not always be possible, and usually won't contain columns not used in the model). * Each row in an `augment` output matches the corresponding row in the original data. * If the original data contained rownames, `augment` turns them into a column called `.rownames`. * Newly added column names begin with `.` to avoid overwriting columns in the original data. * Common column names include: * `.fitted`: the predicted values, on the same scale as the data. * `.resid`: residuals: the actual y values minus the fitted values * `.cluster`: cluster assignments ### glance functions * `glance` always returns a one-row data frame. * The only exception is that `glance(NULL)` returns an empty data frame. * We avoid including arguments that were *given* to the modeling function. For example, a `glm` glance output does not need to contain a field for `family`, since that is decided by the user calling `glm` rather than the modeling function itself. * Common column names include: * `r.squared` the fraction of variance explained by the model * `adj.r.squared` R^2 adjusted based on the degrees of freedom * `sigma` the square root of the estimated variance of the residuals ### Code of Conduct Please note that this project is released with a [Contributor Code of Conduct](CONDUCT.md). By participating in this project you agree to abide by its terms. 
broom/MD50000644000177700017770000002723013204551155013237 0ustar herbrandtherbrandta7a953b68b555c5887b866ec633afc56 *DESCRIPTION 10390b90ea27c186e1fe37138085bd25 *LICENSE 337c6a21b4ce65af23f9c770f27b1b87 *NAMESPACE 769228bc990f368b29a6a42f068942f3 *R/anova_tidiers.R 78eb5d289a545cf6bcc54bcc91a0c8a7 *R/arima_tidiers.R 503850b19c9a4d626e009ba5478c36a9 *R/auc_tidiers.R 967d414a1d169d417a252d45a0cf5fa3 *R/augment.R baeb554046fbf807b163bbcb76b30aff *R/betareg_tidiers.R c4c4a1f8fe51359bcde7698bcf974e86 *R/biglm_tidiers.R 60a00fd3a15f1f1f5a352e617241f954 *R/bingroup_tidiers.R cf28348f3872a2bc5c66959ff99b2e48 *R/boot_tidiers.R 6728e816b36747ac0d1153735e77e236 *R/brms_tidiers.R 6997dcfe3bdab42cc13835d7d758b3c1 *R/broom.R 304bb6777a4002c4eac537834d812c88 *R/btergm_tidiers.R 0f44254985370a3af7d0b6a93ab9bc68 *R/data.frame_tidiers.R c5c3cc36c72d7bcd625d256c71e6bb0e *R/decompose_tidiers.R 2f1220ecb91852576e037ae63ec45a45 *R/emmeans_tidiers.R ac14deee7c0889c4a5adbc4560adb1de *R/ergm_tidiers.R 535a3bfc81f9bb5c87261bf7dca29615 *R/extras.R 1d5d16ae3d1692ba977bd1c81148c730 *R/felm_tidiers.R 993fc96c77e94b0d99fa9fdb75a4c1eb *R/fitdistr_tidiers.R d05df1d65dba16f40054de9e45cae22f *R/gam_tidiers.R f1a1ace1b0b495e6896497b16fc36157 *R/gamlss_tidiers.R 022c948bd4c13a35df632c6b7bf05e3a *R/geeglm_tidiers.R b6ead7d5b43fbfa44896a9358d371152 *R/glance.R f5b2129a7dc5beb06a579a1048cf9748 *R/glm_tidiers.R a810002bb5ef2cb1e33983a62a91c050 *R/glmnet_tidiers.R eac1c4a2c3f0da1fa091ec12c4af289d *R/globals.R b3937d6f57e2fa011df530a52b650662 *R/gmm_tidiers.R 5a17f10a0dc9451dfb268169686ace25 *R/htest_tidiers.R 53b553d1264a457af2edef0645ffc4ed *R/ivreg_tidiers.R ee215b11a2a3f360492e8d0c2885ed0f *R/kde_tidiers.R 952df987609b96fe298989a09167337c *R/kmeans_tidiers.R 09f8bbc7ce9866595f2936e6c5db1da9 *R/list_tidiers.R 7a85fea942b7189d00ff20e96222d50a *R/lm_tidiers.R e7eaaf6ec2bb2c6a471ccb172b709e89 *R/lme4_tidiers.R e0981841a661fb7fb0b8fcfccd7ecf09 *R/lmodel2_tidiers.R c7f2e3d6057303f71023ece9f37c08d9 
*R/lmtest_tidiers.R 9ba2e6f56315a34dc98ef018ac5ffab7 *R/loess_tidiers.R 53b3b8ff9d6829b90332c77fe10fbb72 *R/map_tidiers.R 95ecefe97c2a8f5dff6e9ba00d2958db *R/matrix_tidiers.R b0ba27ade9ec4e22207ec4c75f94ac02 *R/mclust_tidiers.R 8a35d8e0cef3e4246f1b37e057027efd *R/mcmc_tidiers.R b22609ab578d0007627eb2a01b7977d6 *R/mle2_tidiers.R 27b11582899593a08c8ff23886c0a8f0 *R/muhaz_tidiers.R 6066394c5efd8fb6f3e966ca8a06316c *R/multcomp_tidiers.R 1d867622fa28c8b3198ba93c064e9715 *R/multinom_tidiers.R 0a846d9fa5217ad8c093953ffff6a1e0 *R/nlme_tidiers.R fc06751515c4fc1ff784c8bc194a69b7 *R/nls_tidiers.R 76e1f7761d20ce4444f8118f2005bed2 *R/optim_tidiers.R b3e0a209e87273b9a224c7d293c290fe *R/orcutt_tidiers.R 3f2a0102ac987195d7e271a964ce03c2 *R/plm_tidiers.R 7a1a9a7eca25f7d96b2c06755783bb9f *R/polca_tidiers.R 04a34ee13839374162a573b05150c22a *R/prcomp_tidiers.R 797f9e080581ad337367fbf8a322586e *R/psych_tidiers.R dffb6cfeb3b9702f83e4e767e2db4def *R/rcorr_tidiers.R dcbea9b584be0d27cdf13fe6ecce50c2 *R/ridgelm_tidiers.R d452da783939a87403df5fb75c6c7137 *R/rlm_tidiers.R ffc5f1edb7d2d7714fb901bf500a07eb *R/robust_tidiers.R 70d0063dc32dfb934b9de6b86aa03e92 *R/rowwise_df_tidiers.R ec1d216d0bfdb8574bad307550da295b *R/rq_tidiers.R 7ccc6ca2a4f976afe6e62d5b418ec98d *R/rstanarm_tidiers.R cf4d2a5e264805b1ca136bc98a25f8c7 *R/smooth.spline_tidiers.R 777a418cd23ca0212d1d74900e6bc712 *R/sp_tidiers.R 7994b96aef0597b4ade92ff40650f88d *R/sparse_tidiers.R a471c3761e893780842d4d337ef5f34e *R/speedlm_tidiers.R 35d00828a8bd20c4965789d5a7fec955 *R/stats_tidiers.R 7730aa14038f34f6e6ecbdf3ce8c6319 *R/summary_tidiers.R 813589bddc1f459b285c76d496c2347f *R/survival_tidiers.R f1dbdee8b1fa2afa2b1dccc3697f5325 *R/svd_tidiers.R 9da65b9bfc73d81a8b0c2dfee1e673e0 *R/tidy.R 12bb8db663b8519f07c59ca63ad252d3 *R/utilities.R 8608d0be681928a7b795fc6970282898 *R/vector_tidiers.R 6b2cf12d19b5c6c594653b6448821f8d *R/xyz_tidiers.R d41df2e39d8c565ffbc1f4b6a280bfff *R/zoo_tidiers.R 9d5cd78e85f2712c7929c583a1cb2105 *README.md 
ee38e68583d4bb80653e877249f6d32f *build/vignette.rds b7aabcde1fba1a4770dcb8a33238d12b *inst/doc/bootstrapping.R 24751ba832bf32d9ac6b4591c594a24d *inst/doc/bootstrapping.Rmd dc870cccd9a240079ac2402cc56ee0bd *inst/doc/bootstrapping.html c643ec18bcfcea852d98df4a1da67087 *inst/doc/broom.R b2d6b1ac3d6da101c21c4ee08ad5a68a *inst/doc/broom.Rmd d88c708a5f85863384967b2eb6da9c29 *inst/doc/broom.html 337b4c545d6c4e4090ae66e26df2edaf *inst/doc/broom_and_dplyr.R 7b35b0e8c5f11ceb37e00bc4832bc582 *inst/doc/broom_and_dplyr.Rmd dc045ee71fbfca05ec4be4261d792c0e *inst/doc/broom_and_dplyr.html e0969ba7973248ede50023e240008244 *inst/doc/kmeans.R e6fba67e5e017ba314855b2fd604d71c *inst/doc/kmeans.Rmd e913cac1ee48d501cd0dd4180907f68f *inst/doc/kmeans.html 286b3180dfa752c4cfedaf0241add0e4 *inst/extdata/8schools.stan 27a1b875a755f57d51f90cc3f45e8249 *inst/extdata/rstan_example.rda 202a4236257431e2ef4b7a2552f6e66c *man/Arima_tidiers.Rd f94c63faff203204855ad3465da608d7 *man/aareg_tidiers.Rd c53289a498fa529027d23631faa13e12 *man/acf_tidiers.Rd e6401842d3d36b626dd4a529f724fff8 *man/anova_tidiers.Rd e2c64038c1256451bad730e3de080296 *man/auc_tidiers.Rd 143b61e53adcb69b9631d2f92ec8b33c *man/augment.Rd 5ae014bae3ed38b71789768483bcfc16 *man/augment_columns.Rd e2a0982d19fabee538c9d528afde87ec *man/betareg_tidiers.Rd 243d5b8afe50207466d0b6a8574c0e8e *man/biglm_tidiers.Rd a1a54cd7b16946adbb828df83c773bac *man/binDesign_tidiers.Rd 6f83f3a2f44f573cb8b59ee1bf84837c *man/binWidth_tidiers.Rd 89b4b71f1b3fa844c415dd18668a3373 *man/boot_tidiers.Rd edf5f40dfe3561384acc1cc05bad35aa *man/bootstrap.Rd 4b91f7a5ed280b6db534e836d5d61db8 *man/brms_tidiers.Rd 6bd9b5cfd50cb08417c711319271cf96 *man/broom.Rd aa13b34d8a8b8528af93743617464453 *man/btergm_tidiers.Rd 280e83f5fee8b58e64ea47a25ea3e802 *man/cch_tidiers.Rd 095b35b60ab46923e8ed4b25dd6bce41 *man/compact.Rd c094e65509465b2b0f0140fa6e7f3b64 *man/confint.geeglm.Rd d7307d6ca29d5c6c9724ee8e988bcffb *man/confint_tidy.Rd 5302735d28781e8f010e409bd7450de7 
*man/coxph_tidiers.Rd 5257ca51aea3a37211315ed3cf1b4b1f *man/cv.glmnet_tidiers.Rd 74c3433f5c90e75c317e5c2967c7f801 *man/data.frame_tidiers.Rd b1cfccfb1a0a5fa269d822d795ddd1ff *man/decompose_tidiers.Rd f50241f9bfbdecd101f9561a6ebc8a4b *man/emmeans_tidiers.Rd da338a1a1c50e70280b60d6c3ccaef87 *man/ergm_tidiers.Rd c9a177b23014a1073833a24f73e27773 *man/felm_tidiers.Rd 2d081b5cb5583f6eccd683e1e18e4f13 *man/finish_glance.Rd 3289155123633d51138a111120dc133c *man/fitdistr_tidiers.Rd a60e37d1c53f2e4c5f4c5905a9bdd18e *man/fix_data_frame.Rd 59dfa106dcfe5ece5716ec6fff45a831 *man/gam_tidiers.Rd 4222676e394c0945ca9f5a0800920833 *man/gamlss_tidiers.Rd 0a4a48d88be44226e03cab5bc814cc34 *man/geeglm_tidiers.Rd 2ee00c82d2eae93f1bb1316c2e1ee6e4 *man/glance.Rd ad3bb1b91c7c8c01c207c819c6489621 *man/glm_tidiers.Rd 34a75cc50a9d6067e241dca8f895764f *man/glmnet_tidiers.Rd eff8e6baff40879e27276e6960a84e88 *man/gmm_tidiers.Rd cef6c3358931ab8f022bb1de42a13d1c *man/htest_tidiers.Rd 8f57117ccd6685d35fe430b54dfa4dd9 *man/inflate.Rd 0f563e9b860556abfa22a4ac69e8bad1 *man/insert_NAs.Rd 92c5e72d5f554059f5fc043f994787d6 *man/ivreg_tidiers.Rd 510d9bb9a903f30af0e504bb7784d89d *man/kappa_tidiers.Rd a255f0a7c226ff037b5900a4b47184d4 *man/kde_tidiers.Rd a085ec4d7ba57d193604d7f10bb75608 *man/kmeans_tidiers.Rd 9c5a3bf53618e4ed3d6d0ccfbd78fbb3 *man/list_tidiers.Rd 6363cbc08c8cf3440079d5cdd7b6ee43 *man/lm_tidiers.Rd ff3f0990f91f5989e97130b61e776f86 *man/lme4_tidiers.Rd 54fd1499fca3ac8ad3ea8d0505636e7b *man/lmodel2_tidiers.Rd 7ae616d9d45a0b1e0591bd960e0d1c5f *man/loess_tidiers.Rd 70606909a34d87dc6ff103a8a2e8d88e *man/matrix_tidiers.Rd 80cdf74212b9f63c06d822f15f50872f *man/mclust_tidiers.Rd 0f4bd89599c47b95d8fba64b15857967 *man/mcmc_tidiers.Rd 2126a84677126e207a28a46975ee3434 *man/mle2_tidiers.Rd f188b6064b586cdd5837403ba4dc6c30 *man/muhaz_tidiers.Rd 880fb96b6c776e94b52e5ba5422cf9f4 *man/multcomp_tidiers.Rd 1c918c80fa9be07606eeef7ef3281d13 *man/multinom_tidiers.Rd b177b87ecd1f4d91acbe05db943de24e 
*man/nlme_tidiers.Rd cec78148429e5ca6a4845c1a3ec7b105 *man/nls_tidiers.Rd 4a4cde54928f41725a131b6ea174693c *man/optim_tidiers.Rd 7f27dd62240b83f17b4de23dc99976fd *man/orcutt_tidiers.Rd 905804e45cb6ed92c547a886fb2e30e6 *man/plm_tidiers.Rd 73558cc060070d2f34aba56874be81dd *man/poLCA_tidiers.Rd fc7289a1ea5a976fb38cc8cf1fa27dda *man/prcomp_tidiers.Rd 42bac381d896462ac807e136ac1ca092 *man/process_ergm.Rd 3ca9df727b52c37ef14baff701c93e90 *man/process_geeglm.Rd 1452b543c7da39fa88fd1f67f331f2c7 *man/process_lm.Rd 23436d24e3856815bb45c972378d3415 *man/process_rq.Rd bcab21bc26cbffd7a3eafccc50a94e4b *man/pyears_tidiers.Rd 0e1d62572279b8dfbfa1e2e1836fdb54 *man/rcorr_tidiers.Rd 3237d9ac138236f4319cddaa10ff38a7 *man/ridgelm_tidiers.Rd b1266d097f8207f76537c84325a0d4d0 *man/rlm_tidiers.Rd 352d344561055caa50b108070172df0a *man/robust_tidiers.Rd e9fe9e634ce5bf372f0574dcd54ca3a2 *man/rowwise_df_tidiers.Rd 254a6bdd9abb0c7e7e90cb13ce0421d6 *man/rq_tidiers.Rd e07a9a1312fb30c04f7e73daacb2bbd2 *man/rstanarm_tidiers.Rd ba408910e48c3f5162af12873ff26388 *man/sexpfit_tidiers.Rd 1645437c9ea5b694ad258da3287a4349 *man/smooth.spline_tidiers.Rd 0e168e99e9b2b6caaef95d3f29aba2ee *man/sp_tidiers.Rd 2acbf327eb124eee7f4e550c0f6da57b *man/sparse_tidiers.Rd 17fee7da17da9f29b83dc00804f68caf *man/speedlm_tidiers.Rd 08fe477e62d7863be9f520398fbe961e *man/summary_tidiers.Rd 44aef84beded81326d28a9547aac2c54 *man/survdiff_tidiers.Rd 5b7266e3e54c48012448d92199920a7f *man/survfit_tidiers.Rd 1575e67a5480436a13bd84fddfd912b6 *man/survreg_tidiers.Rd e4e605eb23165c69bd2c0976f666dddc *man/svd_tidiers.Rd d73ae8d6c1555caac9cab6eb2c6e0eae *man/tidy.NULL.Rd 70e1553e4598765eef03965e6017d469 *man/tidy.Rd c1c97eb1eb89a88a99056156168f1f73 *man/tidy.TukeyHSD.Rd 43e1a6a7f97f026c06bf689dad2b69db *man/tidy.coeftest.Rd dde36e9636f96e878ccca78d5448b5ed *man/tidy.default.Rd 5e84408a7587b344e5ee4874a09d0b03 *man/tidy.density.Rd ec200b7681c0cbadb498095f5e88ae74 *man/tidy.dist.Rd 6fcf6128390590108f3b6f48f2bfde6a *man/tidy.ftable.Rd 
55a11854230e0aad9681d9f1af1400bb *man/tidy.manova.Rd a9fea2cf4f76569de17a1508c507378d *man/tidy.map.Rd 01e107f1a323c802455b475f791551ef *man/tidy.pairwise.htest.Rd f12cd63fd9c9f7363b5e3ee8cc98d66f *man/tidy.power.htest.Rd c9be9c1ad7e4b2e600d827d7d7d8d2c7 *man/tidy.spec.Rd 4b8f246fa6ebb78a0730dc5f6085f1b5 *man/tidy.table.Rd c279b8ce8d8c7f50240e378ec7d5c732 *man/tidy.ts.Rd 93faaa2df78b1826e471c9a13dde816e *man/unrowname.Rd b3c6f6a41ce0d774eb796bf71b2ef810 *man/vector_tidiers.Rd 2a98bc814f3c2bef04ccba0657c8a534 *man/xyz_tidiers.Rd 0a4a837bc87974cf81e221ba4695cbea *man/zoo_tidiers.Rd ee8c1b68c4f50d216c1e59dad52e8070 *tests/test-all.R 06715d848758822c57dfea21e32f62c8 *tests/testthat/helper-checkers.R ed268d58507375792dd966efc827d5bc *tests/testthat/test-augment.R 95424d89ff1c81123a774ba7b1ec906a *tests/testthat/test-bootstrap.R 1e9bb68088f10b3969b30aa95e92f640 *tests/testthat/test-data.frame.R 0ad11f3a3323ab16a1ea7fabcdaa35ca *tests/testthat/test-dplyr.R ddeb61c943f968535f55780078c16541 *tests/testthat/test-gam.R 59e1610fb8d5befc08b6e7c961c03488 *tests/testthat/test-glmnet.R a7229a11210aeaf6a7a65f59b3d552dd *tests/testthat/test-ivreg.R 291df18d3c57241efcd5bfb2761e82af *tests/testthat/test-lm.R 002e49e5af36cbe73fe9f29edad6776a *tests/testthat/test-lme4.R 065fa82f6838bbc9a0efb4fcb27297ce *tests/testthat/test-muhaz.R 722f7966b68a005203b78f29534e90da *tests/testthat/test-nlme.R ba7a24f5332a0971d82a176f6b5fe859 *tests/testthat/test-rowwise.R 581d75c7a288b4fe5ecf0f983641aeac *tests/testthat/test-rstanarm.R b104454dd516ee8293e8dea05693ed4e *tests/testthat/test-speedlm.R e4df39d28c351588800b35bf4dbdd5f9 *tests/testthat/test-survdiff.R 160b6c32a590f43e80009d19ea2bd695 *tests/testthat/test-survival.R dcb573b5bb314d28302a0d3b0134c3b1 *tests/testthat/test-tidy.R 5945ef0e31faa8e9367367b42f8e84d3 *tests/testthat/test-vectors.R 24751ba832bf32d9ac6b4591c594a24d *vignettes/bootstrapping.Rmd b2d6b1ac3d6da101c21c4ee08ad5a68a *vignettes/broom.Rmd 7b35b0e8c5f11ceb37e00bc4832bc582 
*vignettes/broom_and_dplyr.Rmd e6fba67e5e017ba314855b2fd604d71c *vignettes/kmeans.Rmd broom/build/0000755000177700017770000000000013204542661014024 5ustar herbrandtherbrandtbroom/build/vignette.rds0000644000177700017770000000046113204542661016364 0ustar herbrandtherbrandtR]K0:mEN5_16%`4R]zS "%ܛs=tB"bsئ.a%p^m5z_Jtk.WEkL W4Р{(%>*k4Y3P^"| ۲wS9Pb6Stw{!&μGmGO8YDMPDRԘ",e!|J>KN׾'6@ F78qGվ#fYxUbroom/DESCRIPTION0000644000177700017770000000625513204551155014441 0ustar herbrandtherbrandtPackage: broom Type: Package Title: Convert Statistical Analysis Objects into Tidy Data Frames Version: 0.4.3 Date: 2017-11-20 Authors@R: c( person("David", "Robinson", email = "admiral.david@gmail.com", role = c("aut", "cre")), person("Matthieu", "Gomez", email = "mattg@princeton.edu", role = "ctb"), person("Boris", "Demeshev", email = "boris.demeshev@gmail.com", role = "ctb"), person("Dieter", "Menne", email = "dieter.menne@menne-biomed.de", role = "ctb"), person("Benjamin", "Nutter", email = "nutter@battelle.org", role = "ctb"), person("Luke", "Johnston", email = "luke.johnston@mail.utoronto.ca", role = "ctb"), person("Ben", "Bolker", email = "bolker@mcmaster.ca", role = "ctb"), person("Francois", "Briatte", email = "f.briatte@gmail.com", role = "ctb"), person("Jeffrey", "Arnold", email = "jeffrey.arnold@gmail.com", role = "ctb"), person("Jonah", "Gabry", email = "jsg2201@columbia.edu", role = "ctb"), person("Luciano", "Selzer", email = "luciano.selzer@gmail.com", role = "ctb"), person("Gavin", "Simpson", email = "ucfagls@gmail.com", role = "ctb"), person("Jens", "Preussner", email = " jens.preussner@mpi-bn.mpg.de", role = "ctb"), person("Jay", "Hesselberth", email = "jay.hesselberth@gmail.com", role = "ctb"), person("Hadley", "Wickham", email = "hadley@rstudio.com", role = "ctb"), person("Matthew", "Lincoln", email = "matthew.d.lincoln@gmail.com", role = "ctb")) Maintainer: David Robinson Description: Convert statistical analysis objects from R into tidy data frames, so that they can more 
easily be combined, reshaped and otherwise processed with tools like 'dplyr', 'tidyr' and 'ggplot2'. The package provides three S3 generics: tidy, which summarizes a model's statistical findings such as coefficients of a regression; augment, which adds columns to the original data such as predictions, residuals and cluster assignments; and glance, which provides a one-row summary of model-level statistics. Imports: plyr, dplyr, tidyr, psych, stringr, reshape2, nlme, methods Suggests: knitr, boot, survival, gam, glmnet, lfe, Lahman, MASS, sp, maps, maptools, multcomp, testthat, lme4, zoo, lmtest, plm, biglm, ggplot2, nnet, geepack, AUC, ergm, network, statnet.common, xergm, btergm, binGroup, Hmisc, bbmle, gamlss, rstan, rstanarm, brms, coda, gmm, Matrix, ks, purrr, orcutt, mgcv, lmodel2, poLCA, mclust, covr, lsmeans, emmeans, betareg, robust, akima, AER, muhaz, speedglm, tibble URL: http://github.com/tidyverse/broom BugReports: http://github.com/tidyverse/broom/issues VignetteBuilder: knitr License: MIT + file LICENSE RoxygenNote: 6.0.1 NeedsCompilation: no Packaged: 2017-11-20 12:08:18 UTC; drobinson Author: David Robinson [aut, cre], Matthieu Gomez [ctb], Boris Demeshev [ctb], Dieter Menne [ctb], Benjamin Nutter [ctb], Luke Johnston [ctb], Ben Bolker [ctb], Francois Briatte [ctb], Jeffrey Arnold [ctb], Jonah Gabry [ctb], Luciano Selzer [ctb], Gavin Simpson [ctb], Jens Preussner [ctb], Jay Hesselberth [ctb], Hadley Wickham [ctb], Matthew Lincoln [ctb] Repository: CRAN Date/Publication: 2017-11-20 13:02:37 UTC broom/man/0000755000177700017770000000000013204526663013504 5ustar herbrandtherbrandtbroom/man/rstanarm_tidiers.Rd0000644000177700017770000001043213204276216017341 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/rstanarm_tidiers.R \name{rstanarm_tidiers} \alias{rstanarm_tidiers} \alias{tidy.stanreg} \alias{glance.stanreg} \title{Tidying methods for an rstanarm model} \usage{ \method{tidy}{stanreg}(x, 
parameters = "non-varying", intervals = FALSE, prob = 0.9, ...) \method{glance}{stanreg}(x, looic = FALSE, ...) } \arguments{ \item{x}{Fitted model object from the \pkg{rstanarm} package. See \code{\link[rstanarm]{stanreg-objects}}.} \item{parameters}{One or more of \code{"non-varying"}, \code{"varying"}, \code{"hierarchical"}, \code{"auxiliary"} (can be abbreviated). See the Value section for details.} \item{intervals}{If \code{TRUE} columns for the lower and upper bounds of the \code{100*prob}\% posterior uncertainty intervals are included. See \code{\link[rstanarm]{posterior_interval}} for details.} \item{prob}{See \code{\link[rstanarm]{posterior_interval}}.} \item{...}{For \code{glance}, if \code{looic=TRUE}, optional arguments to \code{\link[rstanarm]{loo.stanreg}}.} \item{looic}{Should the LOO Information Criterion (and related info) be included? See \code{\link[rstanarm]{loo.stanreg}} for details. Note: for models fit to very large datasets this can be a slow computation.} } \value{ All tidying methods return a \code{data.frame} without rownames. The structure depends on the method chosen. When \code{parameters="non-varying"} (the default), \code{tidy.stanreg} returns one row for each coefficient, with three columns: \item{term}{The name of the corresponding term in the model.} \item{estimate}{A point estimate of the coefficient (posterior median).} \item{std.error}{A standard error for the point estimate based on \code{\link[stats]{mad}}. See the \emph{Uncertainty estimates} section in \code{\link[rstanarm]{print.stanreg}} for more details.} For models with group-specific parameters (e.g., models fit with \code{\link[rstanarm]{stan_glmer}}), setting \code{parameters="varying"} selects the group-level parameters instead of the non-varying regression coefficients. Additional columns are added indicating the \code{level} and \code{group}. 
Specifying \code{parameters="hierarchical"} selects the standard deviations and (for certain models) correlations of the group-level parameters. Setting \code{parameters="auxiliary"} will select parameters other than those included by the other options. The particular parameters depend on which \pkg{rstanarm} modeling function was used to fit the model. For example, for models fit using \code{\link[rstanarm]{stan_glm.nb}} the overdispersion parameter is included if \code{parameters="aux"}, for \code{\link[rstanarm]{stan_lm}} the auxiliary parameters include the residual SD, R^2, and log(fit_ratio), etc. If \code{intervals=TRUE}, columns for the \code{lower} and \code{upper} values of the posterior intervals computed with \code{\link[rstanarm]{posterior_interval}} are also included. \code{glance} returns one row with the columns \item{algorithm}{The algorithm used to fit the model.} \item{pss}{The posterior sample size (except for models fit using optimization).} \item{nobs}{The number of observations used to fit the model.} \item{sigma}{The square root of the estimated residual variance, if applicable. If not applicable (e.g., for binomial GLMs), \code{sigma} will be given the value \code{1} in the returned object.} If \code{looic=TRUE}, then the following additional columns are also included: \item{looic}{The LOO Information Criterion.} \item{elpd_loo}{The expected log predictive density (\code{looic = -2 * elpd_loo}).} \item{p_loo}{The effective number of parameters.} } \description{ These methods tidy the estimates from \code{\link[rstanarm]{stanreg-objects}} (fitted model objects from the \pkg{rstanarm} package) into a summary. 
} \examples{ \dontrun{ fit <- stan_glmer(mpg ~ wt + (1|cyl) + (1+wt|gear), data = mtcars, iter = 300, chains = 2) # non-varying ("population") parameters tidy(fit, intervals = TRUE, prob = 0.5) # hierarchical sd & correlation parameters tidy(fit, parameters = "hierarchical") # group-specific deviations from "population" parameters tidy(fit, parameters = "varying") # glance method glance(fit) glance(fit, looic = TRUE, cores = 1) } } \seealso{ \code{\link[rstanarm]{summary.stanreg}} } broom/man/htest_tidiers.Rd0000644000177700017770000000335713204276216016651 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/htest_tidiers.R \name{htest_tidiers} \alias{htest_tidiers} \alias{tidy.htest} \alias{glance.htest} \title{Tidying methods for an htest object} \usage{ \method{tidy}{htest}(x, ...) \method{glance}{htest}(x, ...) } \arguments{ \item{x}{An object of class \code{"htest"}} \item{...}{extra arguments (not used)} } \value{ Both \code{tidy} and \code{glance} return the same output, a one-row data frame with one or more of the following columns: \item{estimate}{Estimate of the effect size} \item{statistic}{Test statistic used to compute the p-value} \item{p.value}{P-value} \item{parameter}{Parameter field in the htest, typically degrees of freedom} \item{conf.low}{Lower bound on a confidence interval} \item{conf.high}{Upper bound on a confidence interval} \item{estimate1}{Sometimes two estimates are computed, such as in a two-sample t-test} \item{estimate2}{Sometimes two estimates are computed, such as in a two-sample t-test} \item{method}{Method used to compute the statistic as a string} \item{alternative}{Alternative hypothesis as a string} Which columns are included depends on the hypothesis test used. } \description{ Tidies hypothesis test objects, such as those from \code{cor.test}, \code{t.test}, and \code{wilcox.test}, into a one-row data frame. 
} \details{ No \code{augment} method is provided for \code{"htest"}, since there is no sense in which a hypothesis test generates one value for each observation. } \examples{ tt <- t.test(rnorm(10)) tidy(tt) glance(tt) # same output for all htests tt <- t.test(mpg ~ am, data = mtcars) tidy(tt) wt <- wilcox.test(mpg ~ am, data = mtcars) tidy(wt) ct <- cor.test(mtcars$wt, mtcars$mpg) tidy(ct) } broom/man/unrowname.Rd0000644000177700017770000000041513204276216016002 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/utilities.R \name{unrowname} \alias{unrowname} \title{strip rownames from an object} \usage{ unrowname(x) } \arguments{ \item{x}{a data frame} } \description{ strip rownames from an object } broom/man/svd_tidiers.Rd0000644000177700017770000000430713204276216016312 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/svd_tidiers.R \name{svd_tidiers} \alias{svd_tidiers} \alias{tidy_svd} \title{Tidying methods for singular value decomposition} \usage{ tidy_svd(x, matrix = "u", ...) } \arguments{ \item{x}{list containing d, u, v components, returned from \code{svd}} \item{matrix}{which of the u, d or v matrix to tidy} \item{...}{Extra arguments (not used)} } \value{ An SVD object contains a decomposition into u, d, and v matrices, such that \code{u \%\*\% diag(d) \%\*\% t(v)} gives the original matrix. This tidier gives a choice of which matrix to tidy. 
When \code{matrix = "u"}, each observation represents one pair of row and principal component, with variables: \item{row}{Number of the row in the original data being described} \item{PC}{Principal component} \item{loading}{Loading of this principal component for this row} When \code{matrix = "d"}, each observation represents one principal component, with variables: \item{PC}{Principal component} \item{d}{Value in the \code{d} vector} \item{percent}{Percent of variance explained by this PC, which is proportional to $d^2$} When \code{matrix = "v"}, each observation represents a pair of a principal component and a column of the original matrix, with variables: \item{column}{Column of original matrix described} \item{PC}{Principal component} \item{value}{Value of this PC for this column} } \description{ These methods tidy the U, D, and V matrices returned by the \code{\link{svd}} function into a tidy format. Because \code{svd} returns a list without a class, this function has to be called by \code{\link{tidy.list}} when it recognizes a list as an SVD object. } \examples{ mat <- as.matrix(iris[, 1:4]) s <- svd(mat) tidy_u <- tidy(s, matrix = "u") head(tidy_u) tidy_d <- tidy(s, matrix = "d") tidy_d tidy_v <- tidy(s, matrix = "v") head(tidy_v) library(ggplot2) library(dplyr) ggplot(tidy_d, aes(PC, percent)) + geom_point() + ylab("\% of variance explained") tidy_u \%>\% mutate(Species = iris$Species[row]) \%>\% ggplot(aes(Species, loading)) + geom_boxplot() + facet_wrap(~ PC, scale = "free_y") } \seealso{ \code{\link{svd}}, \code{\link{tidy.list}} } broom/man/cv.glmnet_tidiers.Rd0000644000177700017770000000535213204276216017414 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/glmnet_tidiers.R \name{cv.glmnet_tidiers} \alias{cv.glmnet_tidiers} \alias{tidy.cv.glmnet} \alias{glance.cv.glmnet} \title{Tidiers for glmnet cross-validation objects} \usage{ \method{tidy}{cv.glmnet}(x, ...) \method{glance}{cv.glmnet}(x, ...) 
} \arguments{ \item{x}{a "cv.glmnet" object} \item{...}{extra arguments (not used)} } \value{ All tidying methods return a data.frame without rownames, whose structure depends on the method chosen. \code{tidy} produces a data.frame with one row per choice of lambda, with columns \item{lambda}{penalty parameter lambda} \item{estimate}{estimate (median) of mean-squared error or other criterion} \item{std.error}{standard error of criterion} \item{conf.high}{high end of confidence interval on criterion} \item{conf.low}{low end of confidence interval on criterion} \item{nzero}{number of parameters that are zero at this choice of lambda} \code{glance} returns a one-row data.frame with the values \item{nulldev}{null deviance} \item{npasses}{total passes over the data across all lambda values} } \description{ Tidying methods for cross-validation performed by \code{glmnet.cv}, summarizing the mean-squared-error across choices of the penalty parameter lambda. } \details{ No \code{augment} method exists for this class. 
} \examples{ if (require("glmnet", quietly = TRUE)) { set.seed(2014) nobs <- 100 nvar <- 50 real <- 5 x <- matrix(rnorm(nobs * nvar), nobs, nvar) beta <- c(rnorm(real, 0, 1), rep(0, nvar - real)) y <- c(t(beta) \%*\% t(x)) + rnorm(nvar, sd = 3) cvfit1 <- cv.glmnet(x,y) head(tidy(cvfit1)) glance(cvfit1) library(ggplot2) tidied_cv <- tidy(cvfit1) glance_cv <- glance(cvfit1) # plot of MSE as a function of lambda g <- ggplot(tidied_cv, aes(lambda, estimate)) + geom_line() + scale_x_log10() g # plot of MSE as a function of lambda with confidence ribbon g <- g + geom_ribbon(aes(ymin = conf.low, ymax = conf.high), alpha = .25) g # plot of MSE as a function of lambda with confidence ribbon and choices # of minimum lambda marked g <- g + geom_vline(xintercept = glance_cv$lambda.min) + geom_vline(xintercept = glance_cv$lambda.1se, lty = 2) g # plot of number of zeros for each choice of lambda ggplot(tidied_cv, aes(lambda, nzero)) + geom_line() + scale_x_log10() # coefficient plot with min lambda shown tidied <- tidy(cvfit1$glmnet.fit) ggplot(tidied, aes(lambda, estimate, group = term)) + scale_x_log10() + geom_line() + geom_vline(xintercept = glance_cv$lambda.min) + geom_vline(xintercept = glance_cv$lambda.1se, lty = 2) } } broom/man/tidy.NULL.Rd0000644000177700017770000000066313204276216015516 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/tidy.R \name{tidy.NULL} \alias{tidy.NULL} \title{tidy on a NULL input} \usage{ \method{tidy}{NULL}(x, ...) 
} \arguments{ \item{x}{A value NULL} \item{...}{extra arguments (not used)} } \value{ An empty data.frame } \description{ tidy on a NULL input returns an empty data frame, which means it can be combined with other data frames (treated as "empty") } broom/man/geeglm_tidiers.Rd0000644000177700017770000000457213204276216016762 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/geeglm_tidiers.R \name{geeglm_tidiers} \alias{geeglm_tidiers} \alias{tidy.geeglm} \title{Tidying methods for generalized estimating equations models} \usage{ \method{tidy}{geeglm}(x, conf.int = FALSE, conf.level = 0.95, exponentiate = FALSE, quick = FALSE, ...) } \arguments{ \item{x}{An object of class \code{geeglm}, such as from \code{geeglm}} \item{conf.int}{whether to include a confidence interval} \item{conf.level}{confidence level of the interval, used only if \code{conf.int=TRUE}} \item{exponentiate}{whether to exponentiate the coefficient estimates and confidence intervals (typical for log distributions)} \item{quick}{whether to compute a smaller and faster version, containing only the \code{term} and \code{estimate} columns.} \item{...}{Additional arguments to be passed to other methods. Currently not used.} } \value{ All tidying methods return a \code{data.frame} without rownames. The structure depends on the method chosen. \code{tidy.geeglm} returns one row for each coefficient, with five columns: \item{term}{The term in the linear model being estimated and tested} \item{estimate}{The estimated coefficient} \item{std.error}{The standard error from the GEE model} \item{statistic}{Wald statistic} \item{p.value}{two-sided p-value} If \code{conf.int=TRUE}, it also includes columns for \code{conf.low} and \code{conf.high}, computed with \code{\link{confint.geeglm}} (included as part of broom). 
} \description{ These methods tidy the coefficients of generalized estimating equations models of the \code{geeglm} class from functions of the \code{geepack} package. } \details{ If \code{conf.int=TRUE}, the confidence interval is computed with the \code{\link{confint.geeglm}} function. While \code{tidy} is supported for "geeglm" objects, \code{augment} and \code{glance} are not. If you have missing values in your model data, you may need to refit the model with \code{na.action = na.exclude} or deal with the missingness in the data beforehand. } \examples{ if (require('geepack')) { data(state) ds <- data.frame(state.region, state.x77) geefit <- geeglm(Income ~ Frost + Murder, id = state.region, data = ds, family = gaussian, corstr = 'exchangeable') tidy(geefit) tidy(geefit, quick = TRUE) tidy(geefit, conf.int = TRUE) } } broom/man/pyears_tidiers.Rd0000644000177700017770000000374713204276216017030 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/survival_tidiers.R \name{pyears_tidiers} \alias{pyears_tidiers} \alias{tidy.pyears} \alias{glance.pyears} \title{Tidy person-year summaries} \usage{ \method{tidy}{pyears}(x, ...) \method{glance}{pyears}(x, ...) } \arguments{ \item{x}{a "pyears" object} \item{...}{extra arguments (not used)} } \value{ \code{tidy} returns a data.frame with the columns \item{pyears}{person-years of exposure} \item{n}{number of subjects contributing time} \item{event}{observed number of events} \item{expected}{expected number of events (present only if a \code{ratetable} term is present)} If the \code{data.frame = TRUE} argument is supplied to \code{pyears}, this is simply the contents of \code{x$data}. \code{glance} returns a one-row data frame with \item{total}{total number of person-years tabulated} \item{offtable}{total number of person-years off table} This contains the values printed by \code{summary.pyears}. 
} \description{ These tidy the output of \code{pyears}, a calculation of the person-years of follow-up time contributed by a cohort of subject. Since the output of \code{pyears$data} is already tidy (if the \code{data.frame = TRUE} argument is given), this does only a little work and should rarely be necessary. } \examples{ if (require("survival", quietly = TRUE)) { temp.yr <- tcut(mgus$dxyr, 55:92, labels=as.character(55:91)) temp.age <- tcut(mgus$age, 34:101, labels=as.character(34:100)) ptime <- ifelse(is.na(mgus$pctime), mgus$futime, mgus$pctime) pstat <- ifelse(is.na(mgus$pctime), 0, 1) pfit <- pyears(Surv(ptime/365.25, pstat) ~ temp.yr + temp.age + sex, mgus, data.frame=TRUE) head(tidy(pfit)) glance(pfit) # if data.frame argument is not given, different information is present in # output pfit2 <- pyears(Surv(ptime/365.25, pstat) ~ temp.yr + temp.age + sex, mgus) head(tidy(pfit2)) glance(pfit2) } } \seealso{ \link{pyears} } broom/man/list_tidiers.Rd0000644000177700017770000000154213204276216016467 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/list_tidiers.R \name{list_tidiers} \alias{list_tidiers} \alias{tidy.list} \alias{glance.list} \title{Tidiers for return values from functions that aren't S3 objects} \usage{ \method{tidy}{list}(x, ...) \method{glance}{list}(x, ...) } \arguments{ \item{x}{list object} \item{...}{extra arguments, passed to the tidying function} } \description{ This method handles the return values of functions that return lists rather than S3 objects, such as \code{optim}, \code{svd}, or \code{\link[akima]{interp}}, and therefore cannot be handled by S3 dispatch. } \details{ Those tiders themselves are implemented as functions of the form tidy_ or glance_ that are not exported. 
} \seealso{ \link{optim_tidiers}, \link{xyz_tidiers}, \link{svd_tidiers}, \link{orcutt_tidiers} } broom/man/rlm_tidiers.Rd0000644000177700017770000000170713204276216016311 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/rlm_tidiers.R \name{rlm_tidiers} \alias{rlm_tidiers} \alias{glance.rlm} \title{Tidying methods for an rlm (robust linear model) object} \usage{ \method{glance}{rlm}(x, ...) } \arguments{ \item{x}{rlm object} \item{...}{extra arguments (not used)} } \value{ \code{glance.rlm} returns a one-row data.frame with the columns \item{sigma}{The square root of the estimated residual variance} \item{converged}{whether the IWLS converged} \item{logLik}{the data's log-likelihood under the model} \item{AIC}{the Akaike Information Criterion} \item{BIC}{the Bayesian Information Criterion} \item{deviance}{deviance} } \description{ This method provides a glance of an "rlm" object. The \code{tidy} and \code{augment} methods are handled by \link{lm_tidiers}. } \examples{ library(MASS) r <- rlm(stack.loss ~ ., stackloss) tidy(r) augment(r) glance(r) } \seealso{ \link{lm_tidiers} } broom/man/auc_tidiers.Rd0000644000177700017770000000237013204276216016264 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/auc_tidiers.R \name{auc_tidiers} \alias{auc_tidiers} \alias{tidy.roc} \title{Tidiers for objects from the AUC package} \usage{ \method{tidy}{roc}(x, ...) } \arguments{ \item{x}{an "roc" object} \item{...}{Additional arguments, not used} } \value{ A data frame with three columns: \item{cutoff}{The cutoff of the prediction scores used for classification} \item{tpr}{The resulting true positive rate at that cutoff} \item{fpr}{The resulting false positive rate at that cutoff} If the labels had names, those are added as an "instance" column. } \description{ Tidy "roc" objects from the "auc" package. This can be used to, for example, draw ROC curves in ggplot2. 
} \examples{ if (require("AUC", quietly = TRUE)) { data(churn) r <- roc(churn$predictions,churn$labels) td <- tidy(r) head(td) library(ggplot2) ggplot(td, aes(fpr, tpr)) + geom_line() # compare the ROC curves for two prediction algorithms library(dplyr) library(tidyr) rocs <- churn \%>\% tidyr::gather(algorithm, value, -labels) \%>\% group_by(algorithm) \%>\% do(tidy(roc(.$value, .$labels))) ggplot(rocs, aes(fpr, tpr, color = algorithm)) + geom_line() } } broom/man/bootstrap.Rd0000644000177700017770000000144513204276216016010 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/extras.R \name{bootstrap} \alias{bootstrap} \title{Set up bootstrap replicates of a dplyr operation} \usage{ bootstrap(df, m, by_group = FALSE) } \arguments{ \item{df}{a data frame} \item{m}{number of bootstrap replicates to perform} \item{by_group}{If \code{TRUE}, then bootstrap within each group if \code{df} is a grouped tbl.} } \description{ Set up bootstrap replicates of a dplyr operation } \details{ This code originates from Hadley Wickham (with a few small corrections) here: https://github.com/hadley/dplyr/issues/269 Some examples can be found at https://github.com/dgrtwo/broom/blob/master/vignettes/bootstrapping.Rmd } \examples{ library(dplyr) mtcars \%>\% bootstrap(10) \%>\% do(tidy(lm(mpg ~ wt, .))) } broom/man/decompose_tidiers.Rd0000644000177700017770000000556613204330346017477 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/decompose_tidiers.R \name{decompose_tidiers} \alias{decompose_tidiers} \alias{augment.decomposed.ts} \alias{augment.stl} \title{Tidying methods for seasonal decompositions} \usage{ \method{augment}{decomposed.ts}(x, ...) \method{augment}{stl}(x, weights = TRUE, ...) 
} \arguments{ \item{x}{An object of class \code{"stl"} or \code{"decomposed.ts"}, resulting from a call to \code{\link[stats]{decompose}} or \code{\link[stats]{stl}}.} \item{...}{Extra arguments. Unused.} \item{weights}{Whether to include the robust weights in the output.} } \value{ The \code{augment} method returns a tidy data frame with the following columns: \describe{ \item{\code{.seasonal}}{The seasonal component of the decomposition.} \item{\code{.trend}}{The trend component of the decomposition.} \item{\code{.remainder}}{The remainder, or "random" component of the decomposition.} \item{\code{.weight}}{The final robust weights (\code{stl} only).} \item{\code{.seasadj}}{The seasonally adjusted (or "deseasonalised") series.} } } \description{ These tidiers provide an \code{augment} method for the results of a seasonal decomposition with \code{\link[stats]{decompose}} or \code{\link[stats]{stl}}. } \details{ The \code{augment} method returns the computed seasonal and trend components, as well as the "remainder" term and the seasonally adjusted (or "deseasonalised") series. } \examples{ # Time series of temperatures in Nottingham, 1920-1939: nottem # Perform seasonal decomposition on the data with both decompose # and stl: d1 <- stats::decompose(nottem) d2 <- stats::stl(nottem, s.window = "periodic", robust = TRUE) # Compare the original series to its decompositions. cbind(broom::tidy(nottem), broom::augment(d1), broom::augment(d2)) # Visually compare seasonal decompositions in tidy data frames. library(tibble) library(dplyr) library(tidyr) library(ggplot2) decomps <- tibble( # Turn the ts objects into data frames. series = list(broom::tidy(nottem), broom::tidy(nottem)), # Add the models in, one for each row. decomp = c("decompose", "stl"), model = list(d1, d2) ) \%>\% rowwise() \%>\% # Pull out the fitted data using broom::augment. 
mutate(augment = list(broom::augment(model))) \%>\% ungroup() \%>\% # Unnest the data frames into a tidy arrangement of # the series next to its seasonal decomposition, grouped # by the method (stl or decompose). group_by(decomp) \%>\% unnest(series, augment) \%>\% mutate(index = 1:n()) \%>\% ungroup() \%>\% select(decomp, index, x, adjusted = .seasadj) ggplot(decomps) + geom_line(aes(x = index, y = x), colour = "black") + geom_line(aes(x = index, y = adjusted, colour = decomp, group = decomp)) } \seealso{ \code{\link[stats]{decompose}}, \code{\link[stats]{stl}} } \author{ Aaron Jacobs } broom/man/btergm_tidiers.Rd0000644000177700017770000000451613204276216017000 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/btergm_tidiers.R \name{btergm_tidiers} \alias{btergm_tidiers} \alias{tidy.btergm} \title{Tidying method for a bootstrapped temporal exponential random graph model} \usage{ \method{tidy}{btergm}(x, conf.level = 0.95, exponentiate = FALSE, quick = FALSE, ...) } \arguments{ \item{x}{a \code{\link[btergm]{btergm}} object} \item{conf.level}{confidence level of the bootstrapped interval} \item{exponentiate}{whether to exponentiate the coefficient estimates and confidence intervals} \item{quick}{whether to compute a smaller and faster version, containing only the \code{term} and \code{estimate} columns.} \item{...}{extra arguments (currently not used)} } \value{ A \code{data.frame} without rownames. \code{tidy.btergm} returns one row for each coefficient, with four columns: \item{term}{The term in the model being estimated and tested} \item{estimate}{The estimated coefficient} \item{conf.low}{The lower bound of the confidence interval} \item{conf.high}{The lower bound of the confidence interval} } \description{ This method tidies the coefficients of a bootstrapped temporal exponential random graph model estimated with the \pkg{xergm}. It simply returns the coefficients and their confidence intervals. 
} \details{ There is no \code{augment} or \code{glance} method for \pkg{ergm} objects. } \examples{ if (require("xergm")) { # Using the same simulated example as the xergm package # Create 10 random networks with 10 actors networks <- list() for(i in 1:10){ mat <- matrix(rbinom(100, 1, .25), nrow = 10, ncol = 10) diag(mat) <- 0 nw <- network::network(mat) networks[[i]] <- nw } # Create 10 matrices as covariates covariates <- list() for (i in 1:10) { mat <- matrix(rnorm(100), nrow = 10, ncol = 10) covariates[[i]] <- mat } # Fit a model where the propensity to form ties depends # on the edge covariates, controlling for the number of # in-stars btfit <- btergm(networks ~ edges + istar(2) + edgecov(covariates), R = 100) # Show terms, coefficient estimates and errors tidy(btfit) # Show coefficients as odds ratios with a 99\% CI tidy(btfit, exponentiate = TRUE, conf.level = 0.99) } } \seealso{ \code{\link[btergm]{btergm}} } broom/man/tidy.Rd0000644000177700017770000000077013204276216014744 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/tidy.R \name{tidy} \alias{tidy} \title{Tidy the result of a test into a summary data.frame} \usage{ tidy(x, ...) } \arguments{ \item{x}{An object to be converted into a tidy data.frame} \item{...}{extra arguments} } \value{ a data.frame } \description{ The output of tidy is always a data.frame with disposable row names. It is therefore suited for further manipulation by packages like dplyr, reshape2, ggplot2 and ggvis. } broom/man/fitdistr_tidiers.Rd0000644000177700017770000000241613204276216017345 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/fitdistr_tidiers.R \name{fitdistr_tidiers} \alias{fitdistr_tidiers} \alias{tidy.fitdistr} \alias{glance.fitdistr} \title{Tidying methods for fitdistr objects from the MASS package} \usage{ \method{tidy}{fitdistr}(x, ...) \method{glance}{fitdistr}(x, ...) 
} \arguments{ \item{x}{An object of class "fitdistr"} \item{...}{extra arguments (not used)} } \value{ All tidying methods return a data.frame without rownames, whose structure depends on the method chosen. \code{tidy.fitdistr} returns one row for each parameter that was estimated, with columns: \item{term}{The term that was estimated} \item{estimate}{Estimated value} \item{std.error}{Standard error of estimate} \code{glance.fitdistr} returns a one-row data.frame with the columns \item{n}{Number of observations used in estimation} \item{logLik}{log-likelihood of estimated data} \item{AIC}{Akaike Information Criterion} \item{BIC}{Bayesian Information Criterion} } \description{ These methods tidies the parameter estimates resulting from an estimation of a univariate distribution's parameters. } \examples{ set.seed(2015) x <- rnorm(100, 5, 2) library(MASS) fit <- fitdistr(x, dnorm, list(mean = 3, sd = 1)) tidy(fit) glance(fit) } broom/man/binWidth_tidiers.Rd0000644000177700017770000000236413204276216017267 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/bingroup_tidiers.R \name{binWidth_tidiers} \alias{binWidth_tidiers} \alias{tidy.binWidth} \title{Tidy a binWidth object} \usage{ \method{tidy}{binWidth}(x, ...) } \arguments{ \item{x}{A "binWidth" object} \item{...}{Extra arguments (not used)} } \value{ A one-row data.frame with columns: \item{ci.width}{Expected width of confidence interval} \item{alternative}{Alternative hypothesis} \item{p}{True proportion} \item{n}{Total sample size} } \description{ Tidy a binWidth object from the "binGroup" package, which calculates the expected width of a confidence interval from a binomial test. 
} \examples{ if (require("binGroup", quietly = TRUE)) { bw <- binWidth(100, .1) bw tidy(bw) library(dplyr) d <- expand.grid(n = seq(100, 800, 100), p = .5, method = c("CP", "Blaker", "Score", "Wald"), stringsAsFactors = FALSE) \%>\% group_by(n, p, method) \%>\% do(tidy(binWidth(.$n, .$p, method = .$method))) library(ggplot2) ggplot(d, aes(n, ci.width, color = method)) + geom_line() + xlab("Total Observations") + ylab("Expected CI Width") } } broom/man/tidy.pairwise.htest.Rd0000644000177700017770000000253513204276216017715 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/stats_tidiers.R \name{tidy.pairwise.htest} \alias{tidy.pairwise.htest} \title{tidy a pairwise hypothesis test} \usage{ \method{tidy}{pairwise.htest}(x, ...) } \arguments{ \item{x}{a "pairwise.htest" object} \item{...}{extra arguments (not used)} } \value{ A data frame with one row per group/group comparison, with columns \item{group1}{First group being compared} \item{group2}{Second group being compared} \item{p.value}{(Adjusted) p-value of comparison} } \description{ Tidy a pairwise.htest object, containing (adjusted) p-values for multiple pairwise hypothesis tests. } \details{ Note that in one-sided tests, the alternative hypothesis of each test can be stated as "group1 is greater/less than group2". Note also that the columns of group1 and group2 will always be a factor, even if the original input is (e.g.) numeric. 
} \examples{ attach(airquality) Month <- factor(Month, labels = month.abb[5:9]) ptt <- pairwise.t.test(Ozone, Month) tidy(ptt) attach(iris) ptt2 <- pairwise.t.test(Petal.Length, Species) tidy(ptt2) tidy(pairwise.t.test(Petal.Length, Species, alternative = "greater")) tidy(pairwise.t.test(Petal.Length, Species, alternative = "less")) tidy(pairwise.wilcox.test(Petal.Length, Species)) } \seealso{ \link{pairwise.t.test}, \link{pairwise.wilcox.test} } broom/man/aareg_tidiers.Rd0000644000177700017770000000250013204276216016566 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/survival_tidiers.R \name{aareg_tidiers} \alias{aareg_tidiers} \alias{tidy.aareg} \alias{aareg_tidiers} \alias{glance.aareg} \title{Tidiers for aareg objects} \usage{ \method{tidy}{aareg}(x, ...) \method{glance}{aareg}(x, ...) } \arguments{ \item{x}{an "aareg" object} \item{...}{extra arguments (not used)} } \value{ All tidying methods return a data.frame without rownames, whose structure depends on the method chosen. \code{tidy.aareg} returns one row for each coefficient, with the columns \item{term}{name of coefficient} \item{estimate}{estimate of the slope} \item{statistic}{test statistic for coefficient} \item{std.error}{standard error of statistic} \item{robust.se}{robust version of standard error estimate} \item{z}{z score} \item{p.value}{p-value} \code{glance} returns a one-row data frame containing \item{statistic}{chi-squared statistic} \item{p.value}{p-value based on chi-squared statistic} \item{df}{degrees of freedom used by coefficients} } \description{ These tidy the coefficients of Aalen additive regression objects. 
} \examples{ if (require("survival", quietly = TRUE)) { afit <- aareg(Surv(time, status) ~ age + sex + ph.ecog, data=lung, dfbeta=TRUE) summary(afit) tidy(afit) } } broom/man/survdiff_tidiers.Rd0000644000177700017770000000264313204276216017347 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/survival_tidiers.R \name{survdiff_tidiers} \alias{survdiff_tidiers} \alias{tidy.survdiff} \alias{glance.survdiff} \title{Tidiers for Tests of Differences between Survival Curves} \usage{ \method{tidy}{survdiff}(x, strata = FALSE, ...) \method{glance}{survdiff}(x, ...) } \arguments{ \item{x}{a "survdiff" object} \item{strata}{logical, whether to include strata in the output} \item{...}{other arguments passed to/from other methods, currently ignored} } \value{ All tidying methods return a data.frame without rownames, whose structure depends on the method chosen. \code{tidy} on "survdiff" objects returns a data frame with the following columns: \item{...}{initial column(s) correspond to grouping factors (right-hand side of the formula)} \item{obs}{weighted observed number of events in each group} \item{exp}{weighted expected number of events in each group} \item{N}{number of subjects in each group} \code{glance} on "survdiff" objects returns a data frame with the following columns: \item{statistic}{value of the test statistic} \item{df}{degrees of freedom} \item{p.value}{p-value} } \description{ Tidiers for Tests of Differences between Survival Curves } \examples{ if( require("survival") ) { s <- survdiff( Surv(time, status) ~ pat.karno + strata(inst), data=lung) tidy(s) glance(s) } } \seealso{ \code{\link[survival]{survdiff}} } broom/man/nls_tidiers.Rd0000644000177700017770000000636213204276216016315 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/nls_tidiers.R \name{nls_tidiers} \alias{nls_tidiers} \alias{tidy.nls} \alias{augment.nls} \alias{glance.nls} \title{Tidying 
methods for a nonlinear model} \usage{ \method{tidy}{nls}(x, conf.int = FALSE, conf.level = 0.95, quick = FALSE, ...) \method{augment}{nls}(x, data = NULL, newdata = NULL, ...) \method{glance}{nls}(x, ...) } \arguments{ \item{x}{An object of class "nls"} \item{conf.int}{whether to include a confidence interval} \item{conf.level}{confidence level of the interval, used only if \code{conf.int=TRUE}} \item{quick}{whether to compute a smaller and faster version, containing only the \code{term} and \code{estimate} columns.} \item{...}{extra arguments (not used)} \item{data}{original data this was fitted on; if not given this will attempt to be reconstructed from nls (may not be successful)} \item{newdata}{new data frame to use for predictions} } \value{ All tidying methods return a \code{data.frame} without rownames. The structure depends on the method chosen. \code{tidy} returns one row for each coefficient in the model, with five columns: \item{term}{The term in the nonlinear model being estimated and tested} \item{estimate}{The estimated coefficient} \item{std.error}{The standard error from the linear model} \item{statistic}{t-statistic} \item{p.value}{two-sided p-value} \code{augment} returns one row for each original observation, with two columns added: \item{.fitted}{Fitted values of model} \item{.resid}{Residuals} If \code{newdata} is provided, these are computed on based on predictions of the new data. 
\code{glance} returns one row with the columns \item{sigma}{the square root of the estimated residual variance} \item{isConv}{whether the fit successfully converged} \item{finTol}{the achieved convergence tolerance} \item{logLik}{the data's log-likelihood under the model} \item{AIC}{the Akaike Information Criterion} \item{BIC}{the Bayesian Information Criterion} \item{deviance}{deviance} \item{df.residual}{residual degrees of freedom} } \description{ These methods tidy the coefficients of a nonlinear model into a summary, augment the original data with information on the fitted values and residuals, and construct a one-row glance of the model's statistics. } \details{ When the modeling was performed with \code{na.action = "na.omit"} (as is the typical default), rows with NA in the initial data are omitted entirely from the augmented data frame. When the modeling was performed with \code{na.action = "na.exclude"}, one should provide the original data as a second argument, at which point the augmented data will contain those rows (typically with NAs in place of the new columns). If the original data is not provided to \code{augment} and \code{na.action = "na.exclude"}, a warning is raised and the incomplete rows are dropped. 
} \examples{ n <- nls(mpg ~ k * e ^ wt, data = mtcars, start = list(k = 1, e = 2)) tidy(n) augment(n) glance(n) library(ggplot2) ggplot(augment(n), aes(wt, mpg)) + geom_point() + geom_line(aes(y = .fitted)) # augment on new data newdata <- head(mtcars) newdata$wt <- newdata$wt + 1 augment(n, newdata = newdata) } \seealso{ \link{na.action} \code{\link{nls}} and \code{\link{summary.nls}} } broom/man/prcomp_tidiers.Rd0000644000177700017770000000716513204276216017023 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/prcomp_tidiers.R \name{prcomp_tidiers} \alias{prcomp_tidiers} \alias{tidy.prcomp} \alias{augment.prcomp} \title{Tidying methods for principal components analysis via \code{\link{prcomp}}} \usage{ \method{tidy}{prcomp}(x, matrix = "u", ...) \method{augment}{prcomp}(x, data = NULL, newdata, ...) } \arguments{ \item{x}{an object of class \code{"prcomp"} resulting from a call to \code{\link[stats]{prcomp}}} \item{matrix}{character; Indicates which sets of eigenvectors are returned in tidy form. "v", "rotation", or "variables" will return information about each variable, while "u", "x", or "samples" (default) returns the loadings for each original row. "d" or "pcs" returns information about each principal component.} \item{...}{Extra arguments, not used} \item{data}{the original data on which principal components analysis was performed. This cannot be recovered from \code{x}. If \code{newdata} is supplied, \code{data} is ignored. If both \code{data} and \code{newdata} are missing, only the fitted locations on the principal components are returned.} \item{newdata}{data frame; new observations for which locations on principal components are sought.} } \value{ All tidying methods return a data.frame without rownames, whose structure depends on the method chosen. 
If \code{matrix} is "u", "samples", or "x", the \code{tidy} method returns \describe{ \item{\code{row}}{The sample labels (rownames) of the data set on which PCA was performed} \item{\code{PC}}{An integer vector indicating the principal component} \item{\code{value}}{The value of the eigenvector (axis score) on the indicated principal component} } If \code{matrix} is "v", "variables", or "rotation", the \code{tidy} method returns \describe{ \item{\code{row}}{The variable labels (colnames) of the data set on which PCA was performed} \item{\code{PC}}{An integer vector indicating the principal component} \item{\code{value}}{The value of the eigenvector (axis score) on the indicated principal component} } If \code{matrix} is "d" or "pcs", the \code{tidy} method returns \describe{ \item{\code{PC}}{An integer vector indicating the principal component} \item{\code{std.dev}}{Standard deviation explained by this PC} \item{\code{percent}}{Percentage of variation explained} \item{\code{cumulative}}{Cumulative percentage of variation explained} } The \code{augment.prcomp} method returns a data frame containing fitted locations on the principal components for the observed data plus either the original data or the new data if supplied via \code{data} or \code{newdata} respectively. } \description{ These tidiers operate on the results of a principal components analysis computed using \code{prcomp}. The \code{tidy} method returns a data frame with either the eigenvectors representing each row or each column. 
} \examples{ pc <- prcomp(USArrests, scale = TRUE) # information about rotation head(tidy(pc)) # information about samples (states) head(tidy(pc, "samples")) # information about PCs tidy(pc, "pcs") # state map library(dplyr) library(ggplot2) pc \%>\% tidy(matrix = "samples") \%>\% mutate(region = tolower(row)) \%>\% inner_join(map_data("state"), by = "region") \%>\% ggplot(aes(long, lat, group = group, fill = value)) + geom_polygon() + facet_wrap(~ PC) + theme_void() + ggtitle("Principal components of arrest data") au <- augment(pc, data = USArrests) head(au) ggplot(au, aes(.fittedPC1, .fittedPC2)) + geom_point() + geom_text(aes(label = .rownames), vjust = 1, hjust = 1) } \seealso{ \code{\link{prcomp}}, \link{svd_tidiers} } \author{ Gavin L. Simpson } broom/man/lme4_tidiers.Rd0000644000177700017770000001171513204276216016360 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/lme4_tidiers.R \name{lme4_tidiers} \alias{lme4_tidiers} \alias{tidy.merMod} \alias{augment.merMod} \alias{glance.merMod} \title{Tidying methods for mixed effects models} \usage{ \method{tidy}{merMod}(x, effects = c("ran_pars", "fixed"), scales = NULL, ran_prefix = NULL, conf.int = FALSE, conf.level = 0.95, conf.method = "Wald", ...) \method{augment}{merMod}(x, data = stats::model.frame(x), newdata, ...) \method{glance}{merMod}(x, ...) 
} \arguments{ \item{x}{An object of class \code{merMod}, such as those from \code{lmer}, \code{glmer}, or \code{nlmer}} \item{effects}{A character vector including one or more of "fixed" (fixed-effect parameters), "ran_pars" (variances and covariances or standard deviations and correlations of random effect terms) or "ran_modes" (conditional modes/BLUPs/latent variable estimates)} \item{scales}{scales on which to report the variables: for random effects, the choices are \sQuote{"sdcor"} (standard deviations and correlations: the default if \code{scales} is \code{NULL}) or \sQuote{"vcov"} (variances and covariances). \code{NA} means no transformation, appropriate e.g. for fixed effects; inverse-link transformations (exponentiation or logistic) are not yet implemented, but may be in the future.} \item{ran_prefix}{a length-2 character vector specifying the strings to use as prefixes for self- (variance/standard deviation) and cross- (covariance/correlation) random effects terms} \item{conf.int}{whether to include a confidence interval} \item{conf.level}{confidence level for CI} \item{conf.method}{method for computing confidence intervals (see \code{lme4::confint.merMod})} \item{...}{extra arguments (not used)} \item{data}{original data this was fitted on; if not given this will attempt to be reconstructed} \item{newdata}{new data to be used for prediction; optional} } \value{ All tidying methods return a \code{data.frame} without rownames. The structure depends on the method chosen. \code{tidy} returns one row for each estimated effect, either with groups depending on the \code{effects} parameter. 
It contains the columns \item{group}{the group within which the random effect is being estimated: \code{"fixed"} for fixed effects} \item{level}{level within group (\code{NA} except for modes)} \item{term}{term being estimated} \item{estimate}{estimated coefficient} \item{std.error}{standard error} \item{statistic}{t- or Z-statistic (\code{NA} for modes)} \item{p.value}{P-value computed from t-statistic (may be missing/NA)} \code{augment} returns one row for each original observation, with columns (each prepended by a .) added. Included are the columns \item{.fitted}{predicted values} \item{.resid}{residuals} \item{.fixed}{predicted values with no random effects} Also added for "merMod" objects, but not for "mer" objects, are values from the response object within the model (of type \code{lmResp}, \code{glmResp}, \code{nlsResp}, etc). These include \code{".mu", ".offset", ".sqrtXwt", ".sqrtrwt", ".eta"}. \code{glance} returns one row with the columns \item{sigma}{the square root of the estimated residual variance} \item{logLik}{the data's log-likelihood under the model} \item{AIC}{the Akaike Information Criterion} \item{BIC}{the Bayesian Information Criterion} \item{deviance}{deviance} } \description{ These methods tidy the coefficients of mixed effects models, particularly responses of the \code{merMod} class } \details{ When the modeling was performed with \code{na.action = "na.omit"} (as is the typical default), rows with NA in the initial data are omitted entirely from the augmented data frame. When the modeling was performed with \code{na.action = "na.exclude"}, one should provide the original data as a second argument, at which point the augmented data will contain those rows (typically with NAs in place of the new columns). If the original data is not provided to \code{augment} and \code{na.action = "na.exclude"}, a warning is raised and the incomplete rows are dropped. 
} \examples{ if (require("lme4")) { # example regressions are from lme4 documentation lmm1 <- lmer(Reaction ~ Days + (Days | Subject), sleepstudy) tidy(lmm1) tidy(lmm1, effects = "fixed") tidy(lmm1, effects = "fixed", conf.int=TRUE) tidy(lmm1, effects = "fixed", conf.int=TRUE, conf.method="profile") tidy(lmm1, effects = "ran_modes", conf.int=TRUE) head(augment(lmm1, sleepstudy)) glance(lmm1) glmm1 <- glmer(cbind(incidence, size - incidence) ~ period + (1 | herd), data = cbpp, family = binomial) tidy(glmm1) tidy(glmm1, effects = "fixed") head(augment(glmm1, cbpp)) glance(glmm1) startvec <- c(Asym = 200, xmid = 725, scal = 350) nm1 <- nlmer(circumference ~ SSlogis(age, Asym, xmid, scal) ~ Asym|Tree, Orange, start = startvec) tidy(nm1) tidy(nm1, effects = "fixed") head(augment(nm1, Orange)) glance(nm1) } } \seealso{ \link{na.action} } broom/man/fix_data_frame.Rd0000644000177700017770000000113713204276216016722 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/utilities.R \name{fix_data_frame} \alias{fix_data_frame} \title{Ensure an object is a data frame, with rownames moved into a column} \usage{ fix_data_frame(x, newnames = NULL, newcol = "term") } \arguments{ \item{x}{a data.frame or matrix} \item{newnames}{new column names, not including the rownames} \item{newcol}{the name of the new rownames column} } \value{ a data.frame, with rownames moved into a column and new column names assigned } \description{ Ensure an object is a data frame, with rownames moved into a column } broom/man/confint.geeglm.Rd0000644000177700017770000000147013204276216016670 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/geeglm_tidiers.R \name{confint.geeglm} \alias{confint.geeglm} \title{Confidence interval for \code{geeglm} objects} \usage{ \method{confint}{geeglm}(object, parm, level = 0.95, ...) 
} \arguments{ \item{object}{The 'geeglm' object} \item{parm}{The parameter to calculate the confidence interval for. If not specified, the default is to calculate a confidence interval on all parameters (all variables in the model).} \item{level}{confidence level of the interval, used only if \code{conf.int=TRUE}} \item{...}{Additional parameters} } \value{ Returns the upper and lower confidence intervals } \description{ Generate confidence intervals for GEE analyses } \details{ This function was taken from http://stackoverflow.com/a/21221995/2632184. } broom/man/tidy.TukeyHSD.Rd0000644000177700017770000000256313204276216016405 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/stats_tidiers.R \name{tidy.TukeyHSD} \alias{tidy.TukeyHSD} \title{tidy a TukeyHSD object} \usage{ \method{tidy}{TukeyHSD}(x, separate.levels = FALSE, ...) } \arguments{ \item{x}{object of class "TukeyHSD"} \item{separate.levels}{Whether to separate comparison into \code{level1} and \code{level2} columns} \item{...}{additional arguments (not used)} } \value{ A data.frame with one row per comparison, containing columns \item{term}{Term for which levels are being compared} \item{comparison}{Levels being compared, separated by -} \item{estimate}{Estimate of difference} \item{conf.low}{Low end of confidence interval of difference} \item{conf.high}{High end of confidence interval of difference} \item{adj.p.value}{P-value adjusted for multiple comparisons} If \code{separate.levels = TRUE}, the \code{comparison} column will be split up into \code{level1} and \code{level2}. 
} \description{ Returns a data.frame with one row for each pairwise comparison } \examples{ fm1 <- aov(breaks ~ wool + tension, data = warpbreaks) thsd <- TukeyHSD(fm1, "tension", ordered = TRUE) tidy(thsd) tidy(thsd, separate.levels = TRUE) # may include comparisons on multiple terms fm2 <- aov(mpg ~ as.factor(gear) * as.factor(cyl), data = mtcars) tidy(TukeyHSD(fm2)) } \seealso{ \code{\link{TukeyHSD}} } broom/man/betareg_tidiers.Rd0000644000177700017770000000464113204276216017130 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/betareg_tidiers.R \name{betareg_tidiers} \alias{betareg_tidiers} \alias{tidy.betareg} \alias{augment.betareg} \alias{glance.betareg} \title{Tidy betareg objects from the betareg package} \usage{ \method{tidy}{betareg}(x, conf.int = FALSE, conf.level = 0.95, ...) \method{augment}{betareg}(x, data = stats::model.frame(x), newdata, type.predict, type.residuals, ...) \method{glance}{betareg}(x, ...) } \arguments{ \item{x}{A "betareg" object} \item{conf.int}{whether to include a confidence interval} \item{conf.level}{confidence level of the interval, used only if \code{conf.int=TRUE}} \item{...}{Extra arguments, not used} \item{data}{Original data frame the regression was fit on} \item{newdata}{New data frame to use for prediction} \item{type.predict}{Type of predictions to calculate} \item{type.residuals}{Type of residuals to calculate} } \value{ All tidying methods return a data.frame without rownames, whose structure depends on the method chosen. tidy returns a data.frame with one row for each term used to predict the mean, along with at least one term used to predict phi (the inverse of the variance). It starts with the column \code{component} containing either "mean" or "precision" to describe which is being modeled, then has the same columns as tidied linear models or glm's (see \code{\link{lm_tidiers}}). 
augment returns the original data, along with new columns describing each observation: \item{.fitted}{Fitted values of model} \item{.resid}{Residuals} \item{.cooksd}{Cooks distance, \code{\link{cooks.distance}}} \code{glance} returns a one-row data.frame with the columns \item{pseudo.r.squared}{the deviance of the null model} \item{logLik}{the data's log-likelihood under the model} \item{AIC}{the Akaike Information Criterion} \item{BIC}{the Bayesian Information Criterion} \item{df.residual}{residual degrees of freedom} \item{df.null}{degrees of freedom under the null} } \description{ Tidy beta regression objects into summarized coefficients, add their fitted values and residuals, or find their model parameters. } \examples{ if (require("betareg", quietly = TRUE)) { data("GasolineYield", package = "betareg") mod <- betareg(yield ~ batch + temp, data = GasolineYield) mod tidy(mod) tidy(mod, conf.int = TRUE) tidy(mod, conf.int = TRUE, conf.level = .99) head(augment(mod)) glance(mod) } } broom/man/process_lm.Rd0000644000177700017770000000147413204276216016143 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/lm_tidiers.R \name{process_lm} \alias{process_lm} \title{helper function to process a tidied lm object} \usage{ process_lm(ret, x, conf.int = FALSE, conf.level = 0.95, exponentiate = FALSE) } \arguments{ \item{ret}{data frame with a tidied version of a coefficient matrix} \item{x}{an "lm", "glm", "biglm", or "bigglm" object} \item{conf.int}{whether to include a confidence interval} \item{conf.level}{confidence level of the interval, used only if \code{conf.int=TRUE}} \item{exponentiate}{whether to exponentiate the coefficient estimates and confidence intervals (typical for logistic regression)} } \description{ Adds a confidence interval, and possibly exponentiates, a tidied object. Useful for operations shared between lm and biglm. 
} broom/man/tidy.density.Rd0000644000177700017770000000121313204276216016413 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/stats_tidiers.R \name{tidy.density} \alias{tidy.density} \title{tidy a density objet} \usage{ \method{tidy}{density}(x, ...) } \arguments{ \item{x}{an object of class "density"} \item{...}{extra arguments (not used)} } \value{ a data frame with "x" and "y" columns d <- density(faithful$eruptions, bw = "sj") head(tidy(d)) library(ggplot2) ggplot(tidy(d), aes(x, y)) + geom_line() } \description{ Given a "density" object, returns a tidy data frame with two columns: points x where the density is estimated, points y for the estimate } \seealso{ \code{\link{density}} } broom/man/glm_tidiers.Rd0000644000177700017770000000224413204276216016273 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/glm_tidiers.R \name{glm_tidiers} \alias{glm_tidiers} \alias{glance.glm} \title{Tidying methods for a glm object} \usage{ \method{glance}{glm}(x, ...) } \arguments{ \item{x}{glm object} \item{...}{extra arguments, not used} } \value{ \code{tidy} and \code{augment} return the same values as do \code{\link{tidy.lm}} and \code{\link{augment.lm}}. \code{glance} returns a one-row data.frame with the columns \item{null.deviance}{the deviance of the null model} \item{df.null}{the residual degrees of freedom for the null model} \item{logLik}{the data's log-likelihood under the model} \item{AIC}{the Akaike Information Criterion} \item{BIC}{the Bayesian Information Criterion} \item{deviance}{deviance} \item{df.residual}{residual degrees of freedom} } \description{ Tidy a \code{glm} object. The \code{tidy} and \code{augment} methods are handled by \link{lm_tidiers}. } \examples{ g <- glm(am ~ mpg, mtcars, family = "binomial") glance(g) } \seealso{ \code{\link{tidy.lm}} and \code{\link{augment.lm}}. 
Also \code{\link{glm}}, which computes the values reported by the \code{glance} method. } broom/man/multcomp_tidiers.Rd0000644000177700017770000000260713204276216017357 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/multcomp_tidiers.R \name{multcomp_tidiers} \alias{multcomp_tidiers} \alias{tidy.glht} \alias{tidy.confint.glht} \alias{tidy.summary.glht} \alias{tidy.cld} \title{tidying methods for objects produced by \pkg{multcomp}} \usage{ \method{tidy}{glht}(x, ...) \method{tidy}{confint.glht}(x, ...) \method{tidy}{summary.glht}(x, ...) \method{tidy}{cld}(x, ...) } \arguments{ \item{x}{an object of class \code{glht}, \code{confint.glht}, \code{summary.glht} or \code{\link[multcomp]{cld}}} \item{...}{extra arguments (not used)} } \description{ These methods originated in ggplot2, as "fortify." In broom, they were renamed "tidy" because they summarize terms and tests, rather than adding columns to a dataset. } \examples{ if (require("multcomp") && require("ggplot2")) { amod <- aov(breaks ~ wool + tension, data = warpbreaks) wht <- glht(amod, linfct = mcp(tension = "Tukey")) tidy(wht) ggplot(wht, aes(lhs, estimate)) + geom_point() CI <- confint(wht) tidy(CI) ggplot(CI, aes(lhs, estimate, ymin = lwr, ymax = upr)) + geom_pointrange() tidy(summary(wht)) ggplot(mapping = aes(lhs, estimate)) + geom_linerange(aes(ymin = lwr, ymax = upr), data = CI) + geom_point(aes(size = p), data = summary(wht)) + scale_size(trans = "reverse") cld <- cld(wht) tidy(cld) } } broom/man/optim_tidiers.Rd0000644000177700017770000000247613204276216016653 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/optim_tidiers.R \name{optim_tidiers} \alias{optim_tidiers} \alias{tidy_optim} \alias{glance_optim} \title{Tidiers for lists returned from optim} \usage{ tidy_optim(x, ...) glance_optim(x, ...) 
} \arguments{ \item{x}{list returned from \code{optim}} \item{...}{extra arguments} } \value{ All tidying methods return a data.frame without rownames, whose structure depends on the method chosen. \code{tidy} returns a data frame with one row per parameter that was estimated, with columns \item{parameter}{name of the parameter, or \code{parameter1}, \code{parameter2}... if the input vector is not named} \item{value}{parameter value that minimizes or maximizes the output} \code{glance} returns a one-row data frame with the columns \item{value}{minimized or maximized output value} \item{function.count}{number of calls to \code{fn}} \item{gradient.count}{number of calls to \code{gr}} \item{convergence}{convergence code representing the error state} } \description{ Tidies objects returned by the \code{\link{optim}} function for general-purpose minimization and maximization. } \examples{ func <- function(x) { (x[1] - 2)^2 + (x[2] - 3)^2 + (x[3] - 8)^2 } o <- optim(c(1, 1, 1), func) tidy(o) glance(o) } \seealso{ \code{\link{optim}} } broom/man/orcutt_tidiers.Rd0000644000177700017770000000273013204276216017034 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/orcutt_tidiers.R \name{orcutt_tidiers} \alias{orcutt_tidiers} \alias{tidy.orcutt} \alias{glance.orcutt} \title{Tidiers for Cochrane Orcutt object} \usage{ \method{tidy}{orcutt}(x, ...) \method{glance}{orcutt}(x, ...) } \arguments{ \item{x}{An "orcutt" object returned by \code{cochrane.orcutt}} \item{...}{Extra arguments passed on to \code{\link{tidy.lm}}} } \value{ All tidying methods return a data.frame without rownames, whose structure depends on the method chosen. \code{tidy} returns the same information as \code{\link{tidy.lm}}, though without confidence interval options. 
\code{glance}{} \code{glance} returns a one-row data frame with the following columns: \item{r.squared}{R-squared} \item{adj.r.squared}{Adjusted R-squared} \item{rho}{Spearman's rho autocorrelation} \item{number.interaction}{Number of interactions} \item{dw.original}{Durbin-Watson statistic of original fit} \item{p.value.original}{P-value of original Durbin-Watson statistic} \item{dw.transformed}{Durbin-Watson statistic of transformed fit} \item{p.value.transformed}{P-value of autocorrelation after transformation} } \description{ Tidies a Cochrane Orcutt object, which estimates autocorrelation and beta coefficients in a linear fit. } \examples{ reg <- lm(mpg ~ wt + qsec + disp, mtcars) tidy(reg) if (require("orcutt", quietly = TRUE)) { co <- cochrane.orcutt(reg) co tidy(co) glance(co) } } broom/man/tidy.spec.Rd0000644000177700017770000000113413204276216015670 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/stats_tidiers.R \name{tidy.spec} \alias{tidy.spec} \title{tidy a spec objet} \usage{ \method{tidy}{spec}(x, ...) 
} \arguments{ \item{x}{an object of class "spec"} \item{...}{extra arguments (not used)} } \value{ a data frame with "freq" and "spec" columns } \description{ Given a "spec" object, which shows a spectrum across a range of frequencies, returns a tidy data frame with two columns: "freq" and "spec" } \examples{ spc <- spectrum(lh) tidy(spc) library(ggplot2) ggplot(tidy(spc), aes(freq, spec)) + geom_line() } broom/man/compact.Rd0000644000177700017770000000043313204276216015415 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/utilities.R \name{compact} \alias{compact} \title{Remove NULL items in a vector or list} \usage{ compact(x) } \arguments{ \item{x}{a vector or list} } \description{ Remove NULL items in a vector or list } broom/man/rq_tidiers.Rd0000644000177700017770000001063613204276216016142 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/rq_tidiers.R \name{rq_tidiers} \alias{rq_tidiers} \alias{tidy.rq} \alias{tidy.rqs} \alias{tidy.nlrq} \alias{glance.rq} \alias{glance.nlrq} \alias{augment.rq} \alias{augment.rqs} \alias{augment.nlrq} \title{Tidying methods for quantile regression models} \usage{ \method{tidy}{rq}(x, se.type = "rank", conf.int = TRUE, conf.level = 0.95, alpha = 1 - conf.level, ...) \method{tidy}{rqs}(x, se.type = "rank", conf.int = TRUE, conf.level = 0.95, alpha = 1 - conf.level, ...) \method{tidy}{nlrq}(x, conf.int = FALSE, conf.level = 0.95, ...) \method{glance}{rq}(x, ...) \method{glance}{nlrq}(x, ...) \method{augment}{rq}(x, data = model.frame(x), newdata, ...) \method{augment}{rqs}(x, data = model.frame(x), newdata, ...) \method{augment}{nlrq}(x, data = NULL, newdata = NULL, ...) 
} \arguments{ \item{x}{model object returned by \code{rq} or \code{nlrq}} \item{se.type}{Type of standard errors to calculate; see \code{summary.rq}} \item{conf.int}{boolean; should confidence intervals be calculated, ignored if \code{se.type = "rank"}} \item{conf.level}{confidence level for intervals} \item{alpha}{confidence level when \code{se.type = "rank"}; defaults to the same as \code{conf.level} although the specification is inverted} \item{\dots}{other arguments passed on to \code{summary.rq}} \item{data}{Original data, defaults to extracting it from the model} \item{newdata}{If provided, new data frame to use for predictions} } \value{ All tidying methods return a data.frame without rownames, whose structure depends on the method chosen. \code{tidy.rq} returns a data frame with one row for each coefficient. The columns depend upon the confidence interval method selected. \code{tidy.rqs} returns a data frame with one row for each coefficient at each quantile that was estimated. The columns depend upon the confidence interval method selected. 
\code{tidy.nlrq} returns one row for each coefficient in the model, with five columns: \item{term}{The term in the nonlinear model being estimated and tested} \item{estimate}{The estimated coefficient} \item{std.error}{The standard error from the linear model} \item{statistic}{t-statistic} \item{p.value}{two-sided p-value} \code{glance.rq} returns one row for each quantile (tau) with the columns: \item{tau}{quantile estimated} \item{logLik}{the data's log-likelihood under the model} \item{AIC}{the Akaike Information Criterion} \item{BIC}{the Bayesian Information Criterion} \item{df.residual}{residual degrees of freedom} \code{glance.rq} returns one row for each quantile (tau) with the columns: \item{tau}{quantile estimated} \item{logLik}{the data's log-likelihood under the model} \item{AIC}{the Akaike Information Criterion} \item{BIC}{the Bayesian Information Criterion} \item{df.residual}{residual degrees of freedom} \code{augment.rq} returns a row for each original observation with the following columns added: \item{.resid}{Residuals} \item{.fitted}{Fitted quantiles of the model} \item{.tau}{Quantile estimated} Depending on the arguments passed on to \code{predict.rq} via \code{\dots} a confidence interval is also calculated on the fitted values resulting in columns: \item{.conf.low}{Lower confidence interval value} \item{.conf.high}{Upper confidence interval value} See \code{predict.rq} for details on additional arguments to specify confidence intervals. \code{predict.rq} does not provide confidence intervals when \code{newdata} is provided. \code{augment.rqs} returns a row for each original observation and each estimated quantile (\code{tau}) with the following columns added: \item{.resid}{Residuals} \item{.fitted}{Fitted quantiles of the model} \item{.tau}{Quantile estimated} \code{predict.rqs} does not return confidence interval estimates. 
\code{augment.rqs} returns a row for each original observation with the following columns added: \item{.resid}{Residuals} \item{.fitted}{Fitted quantiles of the model} } \description{ These methods tidy the coefficients of a quantile regression model into a summary, augment the original data with information on the fitted values and residuals, and construct a glance of the model's statistics. } \details{ If \code{se.type != "rank"} and \code{conf.int = TRUE} confidence intervals are calculated by \code{summary.rq}. Otherwise they are standard t based intervals. This simply calls \code{augment.nls} on the "nlrq" object. } broom/man/acf_tidiers.Rd0000644000177700017770000000220013204276216016235 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/stats_tidiers.R \name{acf_tidiers} \alias{acf_tidiers} \alias{tidy.acf} \title{Tidying method for the acf function} \usage{ \method{tidy}{acf}(x, ...) } \arguments{ \item{x}{acf object} \item{...}{(not used)} } \value{ \code{data.frame} with columns \item{lag}{lag values} \item{acf}{calucated correlation} } \description{ Tidy an "acf" object, which is the output of \code{acf} and the related \code{pcf} and \code{ccf} functions. 
} \examples{ # acf result <- acf(lh, plot=FALSE) tidy(result) # ccf result <- ccf(mdeaths, fdeaths, plot=FALSE) tidy(result) # pcf result <- pacf(lh, plot=FALSE) tidy(result) # lag plot library(ggplot2) result <- tidy(acf(lh, plot=FALSE)) p <- ggplot(result, aes(x=lag, y=acf)) + geom_bar(stat='identity', width=0.1) + theme_bw() p # with confidence intervals conf.level <- 0.95 # from \\code{plot.acf} method len.data <- length(lh) # same as acf$n.used conf.int <- qnorm((1 + conf.level) / 2) / sqrt(len.data) p + geom_hline(yintercept = c(-conf.int, conf.int), color='blue', linetype='dashed') } broom/man/boot_tidiers.Rd0000644000177700017770000000356213204276216016463 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/boot_tidiers.R \name{boot_tidiers} \alias{boot_tidiers} \alias{tidy.boot} \title{Tidying methods for bootstrap computations} \usage{ \method{tidy}{boot}(x, conf.int = FALSE, conf.level = 0.95, conf.method = "perc", ...) } \arguments{ \item{x}{\code{\link{boot}} object} \item{conf.int}{whether to include a confidence interval} \item{conf.level}{confidence level for CI} \item{conf.method}{method for computing confidence intervals (see \code{\link{boot.ci}})} \item{\dots}{extra arguments (not used)} } \value{ The \code{tidy} method returns a data frame with one row per bootstrapped statistic that was calculated, and the following columns: \item{term}{Name of the computed statistic, if present} \item{statistic}{The original values of the statistic} \item{bias}{The bias of the original statistic value} \item{std.error}{Standard error of the statistic} If weights were provided to the \code{boot} function, an \code{estimate} column is included showing the weighted bootstrap estimate, and the standard error is of that estimate. 
If there are no original statistics in the "boot" object, such as with a call to \code{tsboot} with \code{orig.t = FALSE}, the \code{original} and \code{statistic} columns are omitted, and only \code{estimate} and \code{std.error} columns shown. } \description{ Tidying methods for "boot" objects from the "boot" package. } \examples{ if (require("boot")) { clotting <- data.frame( u = c(5,10,15,20,30,40,60,80,100), lot1 = c(118,58,42,35,27,25,21,19,18), lot2 = c(69,35,26,21,18,16,13,12,12)) g1 <- glm(lot2 ~ log(u), data = clotting, family = Gamma) bootfun <- function(d, i) { coef(update(g1, data= d[i,])) } bootres <- boot(clotting, bootfun, R = 999) tidy(g1, conf.int=TRUE) tidy(bootres, conf.int=TRUE) } } broom/man/finish_glance.Rd0000644000177700017770000000217613204276216016566 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/utilities.R \name{finish_glance} \alias{finish_glance} \title{Add logLik, AIC, BIC, and other common measurements to a glance of a prediction} \usage{ finish_glance(ret, x) } \arguments{ \item{ret}{a one-row data frame (a partially complete glance)} \item{x}{the prediction model} } \value{ a one-row data frame with additional columns added, such as \item{logLik}{log likelihoods} \item{AIC}{Akaike Information Criterion} \item{BIC}{Bayesian Information Criterion} \item{deviance}{deviance} \item{df.residual}{residual degrees of freedom} Each of these are produced by the corresponding generics } \description{ A helper function for several functions in the glance generic. Methods such as logLik, AIC, and BIC are defined for many prediction objects, such as lm, glm, and nls. This is a helper function that adds them to a glance data.frame can be performed. If any of them cannot be computed, it fails quietly. } \details{ In one special case, deviance for objects of the \code{lmerMod} class from lme4 is computed with \code{deviance(x, REML=FALSE)}. 
} broom/man/poLCA_tidiers.Rd0000644000177700017770000001006313204276216016450 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/polca_tidiers.R \name{poLCA_tidiers} \alias{poLCA_tidiers} \alias{tidy.poLCA} \alias{augment.poLCA} \alias{glance.poLCA} \title{Tidiers for poLCA objects} \usage{ \method{tidy}{poLCA}(x, ...) \method{augment}{poLCA}(x, data, ...) \method{glance}{poLCA}(x, ...) } \arguments{ \item{x}{A poLCA object} \item{...}{Extra arguments, not used} \item{data}{For \code{augment}, the original dataset used to fit the latent class model. If not given, uses manifest variables in \code{x$y} and, if applicable, covariates in \code{x$x}} } \value{ All tidying methods return a data.frame without rownames, whose structure depends on the method chosen. \code{tidy} returns a data frame with one row per variable-class-outcome combination, with columns: \describe{ \item{variable}{Manifest variable} \item{class}{Latent class ID, an integer} \item{outcome}{Outcome of manifest variable} \item{estimate}{Estimated class-conditional response probability} \item{std.error}{Standard error of estimated probability} } \code{augment} returns a data frame with one row for each original observation, augmented with the following columns: \describe{ \item{.class}{Predicted class, using modal assignment} \item{.probability}{Posterior probability of predicted class} } If the \code{data} argument is given, those columns are included in the output (only rows for which predictions could be made). Otherwise, the \code{y} element of the poLCA object, which contains the manifest variables used to fit the model, are used, along with any covariates, if present, in \code{x}. 
Note that while the probability of all the classes (not just the predicted modal class) can be found in the \code{posterior} element, these are not included in the augmented output, since it would result in potentially many additional columns, which augment tends to avoid. \code{glance} returns a one-row data frame with the following columns: \describe{ \item{logLik}{the data's log-likelihood under the model} \item{AIC}{the Akaike Information Criterion} \item{BIC}{the Bayesian Information Criterion} \item{g.squared}{The likelihood ratio/deviance statistic} \item{chi.squared}{The Pearson Chi-Square goodness of fit statistic for multiway tables} \item{df}{Number of parameters estimated, and therefore degrees of freedom used} \item{df.residual}{Number of residual degrees of freedom left} } } \description{ Tidiers for poLCA latent class regression models. Summarize the probabilities of each outcome for each variable within each class with \code{tidy}, add predictions to the data with \code{augment}, or find the log-likelihood/AIC/BIC with \code{glance}. } \examples{ if (require("poLCA", quietly = TRUE)) { library(poLCA) library(dplyr) data(values) f <- cbind(A, B, C, D)~1 M1 <- poLCA(f, values, nclass = 2, verbose = FALSE) M1 tidy(M1) head(augment(M1)) glance(M1) library(ggplot2) ggplot(tidy(M1), aes(factor(class), estimate, fill = factor(outcome))) + geom_bar(stat = "identity", width = 1) + facet_wrap(~ variable) set.seed(2016) # compare multiple mods <- data_frame(nclass = 1:3) \%>\% group_by(nclass) \%>\% do(mod = poLCA(f, values, nclass = .$nclass, verbose = FALSE)) # compare log-likelihood and/or AIC, BIC mods \%>\% glance(mod) ## Three-class model with a single covariate. 
data(election) f2a <- cbind(MORALG,CARESG,KNOWG,LEADG,DISHONG,INTELG, MORALB,CARESB,KNOWB,LEADB,DISHONB,INTELB)~PARTY nes2a <- poLCA(f2a, election, nclass = 3, nrep = 5, verbose = FALSE) td <- tidy(nes2a) head(td) # show ggplot(td, aes(outcome, estimate, color = factor(class), group = class)) + geom_line() + facet_wrap(~ variable, nrow = 2) + theme(axis.text.x = element_text(angle = 90, hjust = 1)) au <- augment(nes2a) head(au) au \%>\% count(.class) # if the original data is provided, it leads to NAs in new columns # for rows that weren't predicted au2 <- augment(nes2a, data = election) head(au2) dim(au2) } } broom/man/glmnet_tidiers.Rd0000644000177700017770000000474713204276216017014 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/glmnet_tidiers.R \name{glmnet_tidiers} \alias{glmnet_tidiers} \alias{tidy.glmnet} \alias{glance.glmnet} \title{Tidiers for LASSO or elasticnet regularized fits} \usage{ \method{tidy}{glmnet}(x, ...) \method{glance}{glmnet}(x, ...) } \arguments{ \item{x}{a "glmnet" object} \item{...}{extra arguments (not used)} } \value{ All tidying methods return a data.frame without rownames, whose structure depends on the method chosen. 
\code{tidy} produces a data.frame with one row per combination of coefficient (including the intercept) and value of lambda for which the estimate is nonzero, with the columns: \item{term}{coefficient name (V1...VN by default, along with "(Intercept)")} \item{step}{which step of lambda choices was used} \item{estimate}{estimate of coefficient} \item{lambda}{value of penalty parameter lambda} \item{dev.ratio}{fraction of null deviance explained at each value of lambda} \code{glance} returns a one-row data.frame with the values \item{nulldev}{null deviance} \item{npasses}{total passes over the data across all lambda values} } \description{ Tidying methods for regularized fits produced by \code{glmnet}, summarizing the estimates across values of the penalty parameter lambda. } \details{ Note that while this representation of GLMs is much easier to plot and combine than the default structure, it is also much more memory-intensive. Do not use for extremely large, sparse matrices. No \code{augment} method is yet provided even though the model produces predictions, because the input data is not tidy (it is a matrix that may be very wide) and therefore combining predictions with it is not logical. Furthermore, predictions make sense only with a specific choice of lambda. 
} \examples{ if (require("glmnet", quietly = TRUE)) { set.seed(2014) x <- matrix(rnorm(100*20),100,20) y <- rnorm(100) fit1 <- glmnet(x,y) head(tidy(fit1)) glance(fit1) library(dplyr) library(ggplot2) tidied <- tidy(fit1) \%>\% filter(term != "(Intercept)") ggplot(tidied, aes(step, estimate, group = term)) + geom_line() ggplot(tidied, aes(lambda, estimate, group = term)) + geom_line() + scale_x_log10() ggplot(tidied, aes(lambda, dev.ratio)) + geom_line() # works for other types of regressions as well, such as logistic g2 <- sample(1:2, 100, replace=TRUE) fit2 <- glmnet(x, g2, family="binomial") head(tidy(fit2)) } } broom/man/kde_tidiers.Rd0000644000177700017770000000217613204276216016263 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/kde_tidiers.R \name{kde_tidiers} \alias{kde_tidiers} \alias{tidy.kde} \title{Tidy a kernel density estimate object from the ks package} \usage{ \method{tidy}{kde}(x, ...) } \arguments{ \item{x}{A "ks" object from the kde package} \item{...}{Extra arguments, not used} } \value{ A data frame with one row for each point in the estimated grid. The result contains one column (named \code{x1}, \code{x2}, etc) for each dimension, and an \code{estimate} column containing the estimated density. } \description{ Tidy a kernel density estimate object, into a table with one row for each point in the estimated grid, and one column for each dimension (along with an \code{estimate} column with the estimated density). 
} \examples{ if (require("ks", quietly = TRUE)) { dat <- replicate(2, rnorm(100)) k <- kde(dat) td <- tidy(k) head(td) library(ggplot2) ggplot(td, aes(x1, x2, fill = estimate)) + geom_tile() + theme_void() # also works with 3 dimensions dat3 <- replicate(3, rnorm(100)) k3 <- kde(dat3) td3 <- tidy(k3) head(td3) } } broom/man/vector_tidiers.Rd0000644000177700017770000000125613204276216017020 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/vector_tidiers.R \name{tidy.numeric} \alias{tidy.numeric} \alias{tidy.character} \alias{tidy.logical} \title{Tidy atomic vectors} \usage{ \method{tidy}{numeric}(x, ...) \method{tidy}{character}(x, ...) \method{tidy}{logical}(x, ...) } \arguments{ \item{x}{An object of class "numeric", "integer", "character", or "logical". Most likely a named vector} \item{...}{Extra arguments (not used)} } \description{ Turn atomic vectors into data frames, where the names of the vector (if they exist) are a column and the values of the vector are a column. } \examples{ x <- 1:5 names(x) <- letters[1:5] tidy(x) } broom/man/ridgelm_tidiers.Rd0000644000177700017770000000373113204276216017141 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ridgelm_tidiers.R \name{ridgelm_tidiers} \alias{ridgelm_tidiers} \alias{tidy.ridgelm} \alias{glance.ridgelm} \title{Tidying methods for ridgelm objects from the MASS package} \usage{ \method{tidy}{ridgelm}(x, ...) \method{glance}{ridgelm}(x, ...) } \arguments{ \item{x}{An object of class "ridgelm"} \item{...}{extra arguments (not used)} } \value{ All tidying methods return a data.frame without rownames, whose structure depends on the method chosen. 
\code{tidy.ridgelm} returns one row for each combination of choice of lambda and term in the formula, with columns: \item{lambda}{choice of lambda} \item{GCV}{generalized cross validation value for this lambda} \item{term}{the term in the ridge regression model being estimated} \item{estimate}{estimate of scaled coefficient using this lambda} \item{scale}{Scaling factor of estimated coefficient} \code{glance.ridgelm} returns a one-row data.frame with the columns \item{kHKB}{modified HKB estimate of the ridge constant} \item{kLW}{modified L-W estimate of the ridge constant} \item{lambdaGCV}{choice of lambda that minimizes GCV} This is similar to the output of \code{select.ridgelm}, but it is returned rather than printed. } \description{ These methods tidies the coefficients of a ridge regression model chosen at each value of lambda into a data frame, or constructs a one-row glance of the model's choices of lambda (the ridge constant) } \examples{ names(longley)[1] <- "y" fit1 <- MASS::lm.ridge(y ~ ., longley) tidy(fit1) fit2 <- MASS::lm.ridge(y ~ ., longley, lambda = seq(0.001, .05, .001)) td2 <- tidy(fit2) g2 <- glance(fit2) # coefficient plot library(ggplot2) ggplot(td2, aes(lambda, estimate, color = term)) + geom_line() # GCV plot ggplot(td2, aes(lambda, GCV)) + geom_line() # add line for the GCV minimizing estimate ggplot(td2, aes(lambda, GCV)) + geom_line() + geom_vline(xintercept = g2$lambdaGCV, col = "red", lty = 2) } broom/man/brms_tidiers.Rd0000644000177700017770000000570413204276216016463 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/brms_tidiers.R \name{brms_tidiers} \alias{brms_tidiers} \alias{tidy.brmsfit} \title{Tidying methods for a brms model} \usage{ \method{tidy}{brmsfit}(x, parameters = NA, par_type = c("all", "non-varying", "varying", "hierarchical"), robust = FALSE, intervals = TRUE, prob = 0.9, ...) } \arguments{ \item{x}{Fitted model object from the \pkg{brms} package. 
See \code{\link[brms]{brmsfit-class}}.} \item{parameters}{Names of parameters for which a summary should be returned, as given by a character vector or regular expressions. If \code{NA} (the default) summarized parameters are specified by the \code{par_type} argument.} \item{par_type}{One of \code{"all"}, \code{"non-varying"}, \code{"varying"}, or \code{"hierarchical"} (can be abbreviated). See the Value section for details.} \item{robust}{Whether to use median and median absolute deviation rather than mean and standard deviation.} \item{intervals}{If \code{TRUE} columns for the lower and upper bounds of posterior uncertainty intervals are included.} \item{prob}{Defines the range of the posterior uncertainty intervals, such that \code{100 * prob}\% of the parameter's posterior distribution lies within the corresponding interval. Only used if \code{intervals = TRUE}.} \item{...}{Extra arguments, not used} } \value{ All tidying methods return a \code{data.frame} without rownames. The structure depends on the method chosen. When \code{parameters = NA}, the \code{par_type} argument is used to determine which parameters to summarize. Generally, \code{tidy.brmsfit} returns one row for each coefficient, with at least three columns: \item{term}{The name of the model parameter.} \item{estimate}{A point estimate of the coefficient (mean or median).} \item{std.error}{A standard error for the point estimate (sd or mad).} When \code{par_type = "non-varying"}, only population-level effects are returned. When \code{par_type = "varying"}, only group-level effects are returned. In this case, two additional columns are added: \item{group}{The name of the grouping factor.} \item{level}{The name of the level of the grouping factor.} Specifying \code{par_type = "hierarchical"} selects the standard deviations and correlations of the group-level parameters. If \code{intervals = TRUE}, columns for the \code{lower} and \code{upper} bounds of the posterior intervals computed. 
} \description{ These methods tidy the estimates from \code{\link[brms:brmsfit-class]{brmsfit-objects}} (fitted model objects from the \pkg{brms} package) into a summary. } \examples{ \dontrun{ library(brms) fit <- brm(mpg ~ wt + (1|cyl) + (1+wt|gear), data = mtcars, iter = 500, chains = 2) tidy(fit) tidy(fit, parameters = "^sd_", intervals = FALSE) tidy(fit, par_type = "non-varying") tidy(fit, par_type = "varying") tidy(fit, par_type = "hierarchical", robust = TRUE) } } \seealso{ \code{\link[brms]{brms}}, \code{\link[brms]{brmsfit-class}} } broom/man/data.frame_tidiers.Rd0000644000177700017770000000405613204276216017521 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.frame_tidiers.R \name{data.frame_tidiers} \alias{data.frame_tidiers} \alias{tidy.data.frame} \alias{augment.data.frame} \alias{glance.data.frame} \title{Tidiers for data.frame objects} \usage{ \method{tidy}{data.frame}(x, ...) \method{augment}{data.frame}(x, data, ...) \method{glance}{data.frame}(x, ...) 
} \arguments{ \item{x}{A data.frame} \item{...}{extra arguments: for \code{tidy}, these are passed on to \code{\link{describe}} from \code{psych} package} \item{data}{data, not used} } \value{ \code{tidy.data.frame} produces a data frame with one row per original column, containing summary statistics of each: \item{column}{name of original column} \item{n}{Number of valid (non-NA) values} \item{mean}{mean} \item{sd}{standard deviation} \item{median}{median} \item{trimmed}{trimmed mean, with trim defaulting to .1} \item{mad}{median absolute deviation (from the median)} \item{min}{minimum value} \item{max}{maximum value} \item{range}{range} \item{skew}{skew} \item{kurtosis}{kurtosis} \item{se}{standard error} \code{glance} returns a one-row data.frame with \item{nrow}{number of rows} \item{ncol}{number of columns} \item{complete.obs}{number of rows that have no missing values} \item{na.fraction}{fraction of values across all rows and columns that are missing} } \description{ These perform tidy summaries of data.frame objects. \code{tidy} produces summary statistics about each column, while \code{glance} simply reports the number of rows and columns. Note that \code{augment.data.frame} will throw an error. } \details{ The \code{tidy} method calls the psych method \code{\link{describe}} directly to produce its per-columns summary statistics. 
} \examples{ td <- tidy(mtcars) td glance(mtcars) library(ggplot2) # compare mean and standard deviation ggplot(td, aes(mean, sd)) + geom_point() + geom_text(aes(label = column), hjust = 1, vjust = 1) + scale_x_log10() + scale_y_log10() + geom_abline() } \seealso{ \code{\link{describe}} } broom/man/binDesign_tidiers.Rd0000644000177700017770000000265513204276216017424 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/bingroup_tidiers.R \name{binDesign_tidiers} \alias{binDesign_tidiers} \alias{tidy.binDesign} \alias{glance.binDesign} \title{Tidy a binDesign object} \usage{ \method{tidy}{binDesign}(x, ...) \method{glance}{binDesign}(x, ...) } \arguments{ \item{x}{A "binDesign" object} \item{...}{Extra arguments (not used)} } \value{ All tidying methods return a data.frame without rownames, whose structure depends on the method chosen. The \code{tidy} method returns a data.frame with one row for each iteration that was performed, with columns \item{n}{Number of trials in this iteration} \item{power}{The power achieved for this n} The \code{glance} method returns a one-row data.frame with columns \item{power}{The power achieved by the analysis} \item{n}{The sample size used to achieve this power} \item{power.reached}{Whether the desired power was reached} \item{maxit}{Number of iterations performed} } \description{ Tidy a binDesign object from the "binGroup" package, which determines the sample size needed for a particular power. 
} \examples{ if (require("binGroup", quietly = TRUE)) { des <- binDesign(nmax = 300, delta = 0.06, p.hyp = 0.1, power = .8) glance(des) head(tidy(des)) # the ggplot2 equivalent of plot(des) library(ggplot2) ggplot(tidy(des), aes(n, power)) + geom_line() } } broom/man/summary_tidiers.Rd0000644000177700017770000000176313204276216017216 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/summary_tidiers.R \name{summary_tidiers} \alias{summary_tidiers} \alias{tidy.summaryDefault} \alias{glance.summaryDefault} \title{Tidiers for summaryDefault objects} \usage{ \method{tidy}{summaryDefault}(x, ...) \method{glance}{summaryDefault}(x, ...) } \arguments{ \item{x}{summaryDefault object} \item{...}{extra arguments, not used} } \value{ Both \code{tidy} and \code{glance} return the same object: a one-row data frame with columns \item{minimum}{smallest value in original vector} \item{q1}{value at the first quartile} \item{median}{median of original vector} \item{mean}{mean of original vector} \item{q3}{value at the third quartile} \item{maximum}{largest value in original vector} \item{NAs}{number of NA values (if any)} } \description{ Tidy a summary of a vector. } \examples{ v <- rnorm(1000) s <- summary(v) s tidy(s) glance(s) v2 <- c(v,NA) tidy(summary(v2)) } \seealso{ \code{\link{summary}} } broom/man/tidy.map.Rd0000644000177700017770000000173013204276216015515 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/map_tidiers.R \name{tidy.map} \alias{tidy.map} \title{Tidy method for map objects.} \usage{ \method{tidy}{map}(x, ...) } \arguments{ \item{x}{map object} \item{...}{not used by this method} } \description{ This function turns a map into a data frame. } \details{ This code and documentation originated in ggplot2, but was called "fortify." 
In broom, "fortify" became "augment", which is reserved for functions that *add* columns to existing data (based on a model fit, for example) so these functions were renamed as "tidy." } \examples{ if (require("maps") && require("ggplot2")) { ca <- map("county", "ca", plot = FALSE, fill = TRUE) head(tidy(ca)) qplot(long, lat, data = ca, geom = "polygon", group = group) tx <- map("county", "texas", plot = FALSE, fill = TRUE) head(tidy(tx)) qplot(long, lat, data = tx, geom = "polygon", group = group, colour = I("white")) } } broom/man/process_rq.Rd0000644000177700017770000000134113204276216016146 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/rq_tidiers.R \name{process_rq} \alias{process_rq} \title{Helper function for tidy.rq and tidy.rqs} \usage{ process_rq(rq_obj, se.type = "rank", conf.int = TRUE, conf.level = 0.95, ...) } \arguments{ \item{rq_obj}{an object returned by \code{summary.rq} or \code{summary.rqs}} \item{se.type}{type of standard errors used in \code{summary.rq} or \code{summary.rqs}} \item{conf.int}{whether to include a confidence interval} \item{conf.level}{confidence level for confidence interval} \item{\dots}{currently unused} } \description{ See documentation for \code{summary.rq} for complete description of the options for \code{se.type}, \code{conf.int}, etc. } broom/man/sexpfit_tidiers.Rd0000644000177700017770000000251213204276216017174 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/survival_tidiers.R \name{sexpfit_tidiers} \alias{sexpfit_tidiers} \alias{tidy.survexp} \alias{glance.survexp} \title{Tidy an expected survival curve} \usage{ \method{tidy}{survexp}(x, ...) \method{glance}{survexp}(x, ...) } \arguments{ \item{x}{"survexp" object} \item{...}{extra arguments (not used)} } \value{ All tidying methods return a data.frame without rownames, whose structure depends on the method chosen. 
\code{tidy} returns a one row for each time point, with columns \item{time}{time point} \item{estimate}{estimated survival} \item{n.risk}{number of individuals at risk} \code{glance} returns a one-row data.frame with the columns: \item{n.max}{maximum number of subjects at risk} \item{n.start}{starting number of subjects at risk} \item{timepoints}{number of timepoints} } \description{ This constructs a summary across time points or overall of an expected survival curve. Note that this contains less information than most survfit objects. } \examples{ if (require("survival", quietly = TRUE)) { sexpfit <- survexp(futime ~ 1, rmap=list(sex="male", year=accept.dt, age=(accept.dt-birth.dt)), method='conditional', data=jasa) tidy(sexpfit) glance(sexpfit) } } broom/man/emmeans_tidiers.Rd0000755000177700017770000000614213204321110017125 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/emmeans_tidiers.R \name{emmeans_tidiers} \alias{emmeans_tidiers} \alias{tidy.lsmobj} \alias{tidy.ref.grid} \alias{tidy.emmGrid} \title{Tidy estimated marginal means (least-squares means) objects from the emmeans and lsmeans packages} \usage{ \method{tidy}{lsmobj}(x, conf.level = 0.95, ...) \method{tidy}{ref.grid}(x, ...) \method{tidy}{emmGrid}(x, ...) 
} \arguments{ \item{x}{"emmGrid", lsmobj", or "ref.grid" object} \item{conf.level}{Level of confidence interval, used only for \code{emmGrid} and \code{lsmobj} objects} \item{...}{Extra arguments, passed on to \link[emmeans]{summary.emmGrid} or \link[lsmeans]{summary.ref.grid}} } \value{ A data frame with one observation for each estimated mean, and one column for each combination of factors, along with the following variables: \item{estimate}{Estimated least-squares mean} \item{std.error}{Standard error of estimate} \item{df}{Degrees of freedom} \item{conf.low}{Lower bound of confidence interval} \item{conf.high}{Upper bound of confidence interval} When the input is a contrast, each row will contain one estimated contrast, along with some of the following columns: \item{level1}{One level of the factor being contrasted} \item{level2}{Second level} \item{contrast}{In cases where the contrast is not made up of two levels, describes each} \item{statistic}{T-ratio statistic} \item{p.value}{P-value} } \description{ Tidiers for estimated marginal means objects, which report the predicted means for factors or factor combinations in a linear model. This covers three classes: \code{emmGrid}, \code{lsmobj}, and \code{ref.grid}. (The first class is from the \code{emmeans} package, and is the successor to the latter two classes, which have slightly different purposes within the \code{lsmeans} package but have similar output). } \details{ There are a large number of arguments that can be passed on to \link[emmeans]{summary.emmGrid} or \link[lsmeans]{summary.ref.grid}. By broom convention, we use \code{conf.level} to pass the \code{level} argument. 
} \examples{ if (require("emmeans", quietly = TRUE)) { # linear model for sales of oranges per day oranges_lm1 <- lm(sales1 ~ price1 + price2 + day + store, data = oranges) # reference grid; see vignette("basics", package = "emmeans") oranges_rg1 <- ref_grid(oranges_lm1) td <- tidy(oranges_rg1) head(td) # marginal averages marginal <- emmeans(oranges_rg1, "day") tidy(marginal) # contrasts tidy(contrast(marginal)) tidy(contrast(marginal, method = "pairwise")) # plot confidence intervals library(ggplot2) ggplot(tidy(marginal), aes(day, estimate)) + geom_point() + geom_errorbar(aes(ymin = conf.low, ymax = conf.high)) # by multiple prices by_price <- emmeans(oranges_lm1, "day", by = "price2", at = list(price1 = 50, price2 = c(40, 60, 80), day = c("2", "3", "4")) ) by_price tidy(by_price) ggplot(tidy(by_price), aes(price2, estimate, color = day)) + geom_line() + geom_errorbar(aes(ymin = conf.low, ymax = conf.high)) } } broom/man/plm_tidiers.Rd0000644000177700017770000000406113204276216016303 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plm_tidiers.R \name{plm_tidiers} \alias{plm_tidiers} \alias{tidy.plm} \alias{augment.plm} \alias{glance.plm} \title{Tidiers for panel regression linear models} \usage{ \method{tidy}{plm}(x, conf.int = FALSE, conf.level = 0.95, exponentiate = FALSE, ...) \method{augment}{plm}(x, data = as.data.frame(stats::model.frame(x)), ...) \method{glance}{plm}(x, ...) } \arguments{ \item{x}{a "plm" object representing a panel object} \item{conf.int}{whether to include a confidence interval} \item{conf.level}{confidence level of the interval, used only if \code{conf.int=TRUE}} \item{exponentiate}{whether to exponentiate the coefficient estimates and confidence intervals} \item{...}{extra arguments, not used} \item{data}{original dataset} } \value{ All tidying methods return a data.frame without rownames, whose structure depends on the method chosen. 
\code{tidy.plm} returns a data frame with one row per coefficient, of the same form as \code{\link{tidy.lm}}. \code{augment} returns a data frame with one row for each initial observation, adding the columns \item{.fitted}{predicted (fitted) values} \item{.resid}{residuals} \code{glance} returns a one-row data frame with columns \item{r.squared}{The percent of variance explained by the model} \item{adj.r.squared}{r.squared adjusted based on the degrees of freedom} \item{statistic}{F-statistic} \item{p.value}{p-value from the F test, describing whether the full regression is significant} \item{deviance}{deviance} \item{df.residual}{residual degrees of freedom} } \description{ Tidiers for panel regression linear models } \examples{ if (require("plm", quietly = TRUE)) { data("Produc", package = "plm") zz <- plm(log(gsp) ~ log(pcap) + log(pc) + log(emp) + unemp, data = Produc, index = c("state","year")) summary(zz) tidy(zz) tidy(zz, conf.int = TRUE) tidy(zz, conf.int = TRUE, conf.level = .9) head(augment(zz)) glance(zz) } } \seealso{ \code{\link{lm_tidiers}} } broom/man/lmodel2_tidiers.Rd0000644000177700017770000000456313204276216017060 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/lmodel2_tidiers.R \name{lmodel2_tidiers} \alias{lmodel2_tidiers} \alias{tidy.lmodel2} \alias{glance.lmodel2} \title{Tidiers for linear model II objects from the lmodel2 package} \usage{ \method{tidy}{lmodel2}(x, ...) \method{glance}{lmodel2}(x, ...) } \arguments{ \item{x}{lmodel2 object} \item{...}{Extra arguments, not used} } \value{ All tidying methods return a data.frame without rownames, whose structure depends on the method chosen. \code{tidy} returns a data frame with one row for each combination of method (OLS/MA/SMA/RMA) and term (always Intercept/Slope). 
Its columns are: \describe{ \item{method}{Either OLS/MA/SMA/RMA} \item{term}{Either "Intercept" or "Slope"} \item{estimate}{Estimated coefficient} \item{conf.low}{Lower bound of 95\% confidence interval} \item{conf.high}{Upper bound of 95\% confidence interval} } \code{glance} returns a one-row data frame with columns \describe{ \item{r.squared}{OLS R-squared} \item{p.value}{OLS parametric p-value} \item{theta}{Angle between OLS lines \code{lm(y ~ x)} and \code{lm(x ~ y)}} \item{H}{H statistic for computing confidence interval of major axis slope} } } \description{ Tidy or glance an lmodel2 object. An lmodel2 represents model II simple linear regression, where both variables in the regression equation are random. } \details{ Note that unlike linear regression, there are always only two terms in an lmodel2: Intercept and Slope. Furthermore, these are computed by four methods: OLS (ordinary least squares), MA (major axis), SMA (standard major axis), and RMA (ranged major axis). See the lmodel2 documentation for more. Note that there is no \code{augment} method for lmodel2 objects because lmodel2 does not provide a \code{predict} or {\code{residuals}} method (and since when both observations are random, fitted values and residuals have a less clear meaning). 
} \examples{ if (require("lmodel2", quietly = TRUE)) { data(mod2ex2) Ex2.res <- lmodel2(Prey ~ Predators, data=mod2ex2, "relative", "relative", 99) Ex2.res tidy(Ex2.res) glance(Ex2.res) # this allows coefficient plots with ggplot2 library(ggplot2) ggplot(tidy(Ex2.res), aes(estimate, term, color = method)) + geom_point() + geom_errorbarh(aes(xmin = conf.low, xmax = conf.high)) + geom_errorbarh(aes(xmin = conf.low, xmax = conf.high)) } } broom/man/ivreg_tidiers.Rd0000644000177700017770000000532213204321575016627 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ivreg_tidiers.R \name{ivreg_tidiers} \alias{ivreg_tidiers} \alias{tidy.ivreg} \alias{augment.ivreg} \alias{glance.ivreg} \title{Tidiers for ivreg models} \usage{ \method{tidy}{ivreg}(x, conf.int = FALSE, conf.level = 0.95, exponentiate = FALSE, ...) \method{augment}{ivreg}(x, data = as.data.frame(stats::model.frame(x)), newdata, ...) \method{glance}{ivreg}(x, diagnostics = FALSE, ...) } \arguments{ \item{x}{An "ivreg" object} \item{conf.int}{Whether to include a confidence interval} \item{conf.level}{Confidence level of the interval, used only if \code{conf.int=TRUE}} \item{exponentiate}{Whether to exponentiate the coefficient estimates and confidence intervals} \item{...}{extra arguments, not used} \item{data}{Original dataset} \item{newdata}{New data to make predictions from (optional)} \item{diagnostics}{Logical. Return results of diagnostic tests.} } \value{ All tidying methods return a data.frame without rownames, whose structure depends on the method chosen. \code{tidy.ivreg} returns a data frame with one row per coefficient, of the same form as \code{\link{tidy.lm}}. 
\code{augment} returns a data frame with one row for each initial observation, adding the columns: \item{.fitted}{predicted (fitted) values} and if \code{newdata} is \code{NULL}: \item{.resid}{residuals} \code{glance} returns a one-row data frame with columns \item{r.squared}{The percent of variance explained by the model} \item{adj.r.squared}{r.squared adjusted based on the degrees of freedom} \item{statistic}{Wald test statistic} \item{p.value}{p-value from the Wald test} \item{df}{Degrees of freedom used by the coefficients} \item{sigma}{The square root of the estimated residual variance} \item{df.residual}{residual degrees of freedom} If \code{diagnostics} is \code{TRUE}, \code{glance} also returns: \item{p.value.Sargan}{P value of Sargan test} \item{p.value.Wu.Hausman}{P value of Wu-Hausman test} \item{p.value.weakinst}{P value of weak instruments test} } \description{ Tidiers for ivreg models } \examples{ if (require("AER", quietly = TRUE)) { data("CigarettesSW", package = "AER") CigarettesSW$rprice <- with(CigarettesSW, price/cpi) CigarettesSW$rincome <- with(CigarettesSW, income/population/cpi) CigarettesSW$tdiff <- with(CigarettesSW, (taxs - tax)/cpi) ivr <- ivreg(log(packs) ~ log(rprice) + log(rincome) | log(rincome) + tdiff + I(tax/cpi), data = CigarettesSW, subset = year == "1995") summary(ivr) tidy(ivr) tidy(ivr, conf.int = TRUE) tidy(ivr, conf.int = TRUE, exponentiate = TRUE) head(augment(ivr)) glance(ivr) } } \seealso{ \code{\link{lm_tidiers}} } broom/man/rowwise_df_tidiers.Rd0000644000177700017770000000503613204276216017666 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/rowwise_df_tidiers.R \name{rowwise_df_tidiers} \alias{rowwise_df_tidiers} \alias{tidy.rowwise_df} \alias{tidy_.rowwise_df} \alias{augment.rowwise_df} \alias{augment_.rowwise_df} \alias{glance.rowwise_df} \alias{glance_.rowwise_df} \alias{tidy.tbl_df} \alias{augment.tbl_df} \alias{glance.tbl_df} \title{Tidying methods for 
rowwise_dfs from dplyr, for tidying each row and recombining the results} \usage{ \method{tidy}{rowwise_df}(x, object, ...) \method{tidy_}{rowwise_df}(x, object, ...) \method{augment}{rowwise_df}(x, object, ...) \method{augment_}{rowwise_df}(x, object, ...) \method{glance}{rowwise_df}(x, object, ...) \method{glance_}{rowwise_df}(x, object, ...) \method{tidy}{tbl_df}(x, ...) \method{augment}{tbl_df}(x, ...) \method{glance}{tbl_df}(x, ...) } \arguments{ \item{x}{a rowwise_df} \item{object}{the column name of the column containing the models to be tidied. For tidy, augment, and glance it should be the bare name; for _ methods it should be quoted.} \item{...}{additional arguments to pass on to the respective tidying method} } \value{ A \code{"grouped_df"}, where the non-list columns of the original are used as grouping columns alongside the tidied outputs. } \description{ These \code{tidy}, \code{augment} and \code{glance} methods are for performing tidying on each row of a rowwise data frame created by dplyr's \code{group_by} and \code{do} operations. They first group a rowwise data frame based on all columns that are not lists, then perform the tidying operation on the specified column. This greatly shortens a common idiom of extracting tidy/augment/glance outputs after a do statement. } \details{ Note that this functionality is not currently implemented for data.tables, since the result of the do operation is difficult to distinguish from a regular data.table. 
} \examples{ library(dplyr) regressions <- mtcars \%>\% group_by(cyl) \%>\% do(mod = lm(mpg ~ wt, .)) regressions regressions \%>\% tidy(mod) regressions \%>\% augment(mod) regressions \%>\% glance(mod) # we can provide additional arguments to the tidying function regressions \%>\% tidy(mod, conf.int = TRUE) # we can also include the original dataset as a "data" argument # to augment: regressions <- mtcars \%>\% group_by(cyl) \%>\% do(mod = lm(mpg ~ wt, .), original = (.)) # this allows all the original columns to be included: regressions \%>\% augment(mod) # doesn't include all original regressions \%>\% augment(mod, data = original) # includes all original } broom/man/kappa_tidiers.Rd0000644000177700017770000000234313204276216016610 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/psych_tidiers.R \name{kappa_tidiers} \alias{kappa_tidiers} \alias{tidy.kappa} \title{Tidy a kappa object from a Cohen's kappa calculation} \usage{ \method{tidy}{kappa}(x, ...) } \arguments{ \item{x}{An object of class "kappa"} \item{...}{extra arguments (not used)} } \value{ A data.frame with columns \item{type}{Either "weighted" or "unweighted"} \item{estimate}{The estimated value of kappa with this method} \item{conf.low}{Lower bound of confidence interval} \item{conf.high}{Upper bound of confidence interval} } \description{ Tidy a "kappa" object, from the \code{\link{cohen.kappa}} function in the psych package. This represents the agreement of two raters when using nominal scores. } \details{ Note that the alpha of the confidence interval is determined when the \code{cohen.kappa} function is originally run. 
} \examples{ library(psych) rater1 = 1:9 rater2 = c(1, 3, 1, 6, 1, 5, 5, 6, 7) ck <- cohen.kappa(cbind(rater1, rater2)) tidy(ck) # graph the confidence intervals library(ggplot2) ggplot(tidy(ck), aes(estimate, type)) + geom_point() + geom_errorbarh(aes(xmin = conf.low, xmax = conf.high)) } \seealso{ \code{\link{cohen.kappa}} } broom/man/ergm_tidiers.Rd0000644000177700017770000001013113204276216016440 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ergm_tidiers.R \name{ergm_tidiers} \alias{ergm_tidiers} \alias{tidy.ergm} \alias{glance.ergm} \title{Tidying methods for an exponential random graph model} \usage{ \method{tidy}{ergm}(x, conf.int = FALSE, conf.level = 0.95, exponentiate = FALSE, quick = FALSE, ...) \method{glance}{ergm}(x, deviance = FALSE, mcmc = FALSE, ...) } \arguments{ \item{x}{an \pkg{ergm} object} \item{conf.int}{whether to include a confidence interval} \item{conf.level}{confidence level of the interval, used only if \code{conf.int=TRUE}} \item{exponentiate}{whether to exponentiate the coefficient estimates and confidence intervals} \item{quick}{whether to compute a smaller and faster version, containing only the \code{term} and \code{estimate} columns.} \item{...}{extra arguments passed to \code{\link[ergm]{summary.ergm}}} \item{deviance}{whether to report null and residual deviance for the model, along with degrees of freedom; defaults to \code{FALSE}} \item{mcmc}{whether to report MCMC interval, burn-in and sample size used to estimate the model; defaults to \code{FALSE}} } \value{ All tidying methods return a \code{data.frame} without rownames. The structure depends on the method chosen. 
\code{tidy.ergm} returns one row for each coefficient, with five columns: \item{term}{The term in the model being estimated and tested} \item{estimate}{The estimated coefficient} \item{std.error}{The standard error} \item{mcmc.error}{The MCMC error} \item{p.value}{The two-sided p-value} If \code{conf.int=TRUE}, it also includes columns for \code{conf.low} and \code{conf.high}. \code{glance.ergm} returns a one-row data.frame with the columns \item{independence}{Whether the model assumed dyadic independence} \item{iterations}{The number of iterations performed before convergence} \item{logLik}{If applicable, the log-likelihood associated with the model} \item{AIC}{The Akaike Information Criterion} \item{BIC}{The Bayesian Information Criterion} If \code{deviance=TRUE}, and if the model supports it, the data frame will also contain the columns \item{null.deviance}{The null deviance of the model} \item{df.null}{The degrees of freedom of the null deviance} \item{residual.deviance}{The residual deviance of the model} \item{df.residual}{The degrees of freedom of the residual deviance} Last, if \code{mcmc=TRUE}, the data frame will also contain the columns \item{MCMC.interval}{The interval used during MCMC estimation} \item{MCMC.burnin}{The burn-in period of the MCMC estimation} \item{MCMC.samplesize}{The sample size used during MCMC estimation} } \description{ These methods tidy the coefficients of an exponential random graph model estimated with the \pkg{ergm} package into a summary, and construct a one-row glance of the model's statistics. The methods should work with any model that conforms to the \pkg{ergm} class, such as those produced from weighted networks by the \pkg{ergm.count} package. } \details{ There is no \code{augment} method for \pkg{ergm} objects. 
} \examples{ if (require("ergm")) { # Using the same example as the ergm package # Load the Florentine marriage network data data(florentine) # Fit a model where the propensity to form ties between # families depends on the absolute difference in wealth gest <- ergm(flomarriage ~ edges + absdiff("wealth")) # Show terms, coefficient estimates and errors tidy(gest) # Show coefficients as odds ratios with a 99\% CI tidy(gest, exponentiate = TRUE, conf.int = TRUE, conf.level = 0.99) # Take a look at likelihood measures and other # control parameters used during MCMC estimation glance(gest) glance(gest, deviance = TRUE) glance(gest, mcmc = TRUE) } } \references{ Hunter DR, Handcock MS, Butts CT, Goodreau SM, Morris M (2008b). \pkg{ergm}: A Package to Fit, Simulate and Diagnose Exponential-Family Models for Networks. \emph{Journal of Statistical Software}, 24(3). \url{http://www.jstatsoft.org/v24/i03/}. } \seealso{ \code{\link[ergm]{ergm}}, \code{\link[ergm]{control.ergm}}, \code{\link[ergm]{summary.ergm}} } broom/man/tidy.dist.Rd0000644000177700017770000000215513204276216015705 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/stats_tidiers.R \name{tidy.dist} \alias{tidy.dist} \title{Tidy a distance matrix} \usage{ \method{tidy}{dist}(x, diag = attr(x, "Diag"), upper = attr(x, "Upper"), ...) } \arguments{ \item{x}{A "dist" object} \item{diag}{Whether to include the diagonal of the distance matrix. Defaults to whether the distance matrix includes it} \item{upper}{Whether to include the upper right triangle of the distance matrix. 
Defaults to whether the distance matrix includes it} \item{...}{Extra arguments, not used} } \value{ A data frame with one row for each pair of item distances, with columns: \describe{ \item{item1}{First item} \item{item2}{Second item} \item{distance}{Distance between items} } } \description{ Tidy a distance matrix, such as that computed by the \link{dist} function, into a one-row-per-pair table. If the distance matrix does not include an upper triangle and/or diagonal, this will not either. } \examples{ iris_dist <- dist(t(iris[, 1:4])) iris_dist tidy(iris_dist) tidy(iris_dist, upper = TRUE) tidy(iris_dist, diag = TRUE) } broom/man/tidy.manova.Rd0000644000177700017770000000237413204276216016226 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/stats_tidiers.R \name{tidy.manova} \alias{tidy.manova} \title{tidy a MANOVA object} \usage{ \method{tidy}{manova}(x, test = "Pillai", ...) } \arguments{ \item{x}{object of class "manova"} \item{test}{one of "Pillai" (Pillai's trace), "Wilks" (Wilk's lambda), "Hotelling-Lawley" (Hotelling-Lawley trace) or "Roy" (Roy's greatest root) indicating which test statistic should be used. Defaults to "Pillai"} \item{...}{additional arguments passed on to \code{summary.manova}} } \value{ A data.frame with the columns \item{term}{Term in design} \item{statistic}{Approximate F statistic} \item{num.df}{Degrees of freedom} \item{p.value}{P-value} Depending on which test statistic is specified, one of the following columns is also included: \item{pillai}{Pillai's trace} \item{wilks}{Wilk's lambda} \item{hl}{Hotelling-Lawley trace} \item{roy}{Roy's greatest root} } \description{ Constructs a data frame with one row for each of the terms in the model, containing the information from \link{summary.manova}. 
} \examples{ npk2 <- within(npk, foo <- rnorm(24)) npk2.aov <- manova(cbind(yield, foo) ~ block + N*P*K, npk2) } \seealso{ \code{\link{summary.manova}} } broom/man/confint_tidy.Rd0000644000177700017770000000151113204276216016456 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/utilities.R \name{confint_tidy} \alias{confint_tidy} \title{Calculate confidence interval as a tidy data frame} \usage{ confint_tidy(x, conf.level = 0.95, func = stats::confint, ...) } \arguments{ \item{x}{a model object for which \code{\link{confint}} can be calculated} \item{conf.level}{confidence level} \item{func}{Function to use for computing confint} \item{...}{extra arguments passed on to \code{confint}} } \value{ A data frame with two columns: \code{conf.low} and \code{conf.high}. } \description{ Return a confidence interval as a tidy data frame. This directly wraps the \code{\link{confint}} function, but ensures it folllows broom conventions: column names of \code{conf.low} and \code{conf.high}, and no row names } \seealso{ \link{confint} } broom/man/tidy.coeftest.Rd0000644000177700017770000000162313204276216016555 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/lmtest_tidiers.R \name{tidy.coeftest} \alias{tidy.coeftest} \title{Tidying methods for coeftest objects} \usage{ \method{tidy}{coeftest}(x, ...) } \arguments{ \item{x}{coeftest object} \item{...}{extra arguments (not used)} } \value{ A \code{data.frame} with one row for each coefficient, with five columns: \item{term}{The term in the linear model being estimated and tested} \item{estimate}{The estimated coefficient} \item{std.error}{The standard error} \item{statistic}{test statistic} \item{p.value}{p-value} } \description{ This tidies the result of a coefficient test, from the \code{coeftest} function in the \code{lmtest} package. 
} \examples{ if (require("lmtest", quietly = TRUE)) { data(Mandible) fm <- lm(length ~ age, data=Mandible, subset=(age <= 28)) coeftest(fm) tidy(coeftest(fm)) } } broom/man/felm_tidiers.Rd0000644000177700017770000000612113204276216016435 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/felm_tidiers.R \name{felm_tidiers} \alias{felm_tidiers} \alias{tidy.felm} \alias{augment.felm} \alias{glance.felm} \title{Tidying methods for models with multiple group fixed effects} \usage{ \method{tidy}{felm}(x, conf.int = FALSE, conf.level = 0.95, fe = FALSE, fe.error = fe, ...) \method{augment}{felm}(x, data = NULL, ...) \method{glance}{felm}(x, ...) } \arguments{ \item{x}{felm object} \item{conf.int}{whether to include a confidence interval} \item{conf.level}{confidence level of the interval, used only if \code{conf.int=TRUE}} \item{fe}{whether to include estimates of fixed effects} \item{fe.error}{whether to include standard error of fixed effects} \item{...}{extra arguments (not used)} \item{data}{Original data, defaults to extracting it from the model} } \value{ All tidying methods return a data.frame without rownames, whose structure depends on the method chosen. \code{tidy.felm} returns one row for each coefficient. If \code{fe=TRUE}, it also includes rows for for fixed effects estimates. There are five columns: \item{term}{The term in the linear model being estimated and tested} \item{estimate}{The estimated coefficient} \item{std.error}{The standard error from the linear model} \item{statistic}{t-statistic} \item{p.value}{two-sided p-value} If \code{cont.int=TRUE}, it also includes columns for \code{conf.low} and \code{conf.high}, computed with \code{\link{confint}}. 
\code{augment.felm} returns one row for each observation, with multiple columns added to the original data: \item{.fitted}{Fitted values of model} \item{.resid}{Residuals} If fixed effect are present, \item{.comp}{Connected component} \item{.fe_}{Fixed effects (as many columns as factors)} \code{glance.lm} returns a one-row data.frame with the columns \item{r.squared}{The percent of variance explained by the model} \item{adj.r.squared}{r.squared adjusted based on the degrees of freedom} \item{sigma}{The square root of the estimated residual variance} \item{statistic}{F-statistic} \item{p.value}{p-value from the F test} \item{df}{Degrees of freedom used by the coefficients} \item{df.residual}{residual degrees of freedom} } \description{ These methods tidy the coefficients of a linear model with multiple group fixed effects } \details{ If \code{conf.int=TRUE}, the confidence interval is computed } \examples{ if (require("lfe", quietly = TRUE)) { N=1e2 DT <- data.frame( id = sample(5, N, TRUE), v1 = sample(5, N, TRUE), v2 = sample(1e6, N, TRUE), v3 = sample(round(runif(100,max=100),4), N, TRUE), v4 = sample(round(runif(100,max=100),4), N, TRUE) ) result_felm <- felm(v2~v3, DT) tidy(result_felm) augment(result_felm) result_felm <- felm(v2~v3|id+v1, DT) tidy(result_felm, fe = TRUE) augment(result_felm) v1<-DT$v1 v2 <- DT$v2 v3 <- DT$v3 id <- DT$id result_felm <- felm(v2~v3|id+v1) tidy(result_felm) augment(result_felm) glance(result_felm) } } broom/man/sparse_tidiers.Rd0000644000177700017770000000125113204276216017006 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/sparse_tidiers.R \name{sparse_tidiers} \alias{sparse_tidiers} \alias{tidy.dgTMatrix} \alias{tidy.dgCMatrix} \alias{tidy.sparseMatrix} \title{Tidy a sparseMatrix object from the Matrix package} \usage{ \method{tidy}{dgTMatrix}(x, ...) \method{tidy}{dgCMatrix}(x, ...) \method{tidy}{sparseMatrix}(x, ...) 
} \arguments{ \item{x}{A Matrix object} \item{...}{Extra arguments, not used} } \description{ Tidy a sparseMatrix object from the Matrix package into a three-column data frame, row, column, and value (with zeros missing). If there are row names or column names, use those, otherwise use indices } broom/man/smooth.spline_tidiers.Rd0000644000177700017770000000253713204276216020323 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/smooth.spline_tidiers.R \name{smooth.spline_tidiers} \alias{smooth.spline_tidiers} \alias{augment.smooth.spline} \alias{glance.smooth.spline} \title{tidying methods for smooth.spline objects} \usage{ \method{augment}{smooth.spline}(x, data = x$data, ...) \method{glance}{smooth.spline}(x, ...) } \arguments{ \item{x}{a smooth.spline object} \item{data}{defaults to data used to fit model} \item{...}{not used in this method} } \value{ \code{augment} returns the original data with extra columns: \item{.fitted}{Fitted values of model} \item{.resid}{Residuals} \code{glance} returns one row with columns \item{spar}{smoothing parameter} \item{lambda}{choice of lambda corresponding to \code{spar}} \item{df}{equivalent degrees of freedom} \item{crit}{minimized criterion} \item{pen.crit}{penalized criterion} \item{cv.crit}{cross-validation score} } \description{ This combines the original data given to smooth.spline with the fit and residuals } \details{ No \code{tidy} method is provided for smooth.spline objects. 
} \examples{ spl <- smooth.spline(mtcars$wt, mtcars$mpg, df = 4) head(augment(spl, mtcars)) head(augment(spl)) # calls original columns x and y library(ggplot2) ggplot(augment(spl, mtcars), aes(wt, mpg)) + geom_point() + geom_line(aes(y = .fitted)) } broom/man/anova_tidiers.Rd0000644000177700017770000000256413204276216016625 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/anova_tidiers.R \name{anova_tidiers} \alias{anova_tidiers} \alias{tidy.anova} \alias{tidy.aov} \alias{tidy.aovlist} \title{Tidying methods for anova and AOV objects} \usage{ \method{tidy}{anova}(x, ...) \method{tidy}{aov}(x, ...) \method{tidy}{aovlist}(x, ...) } \arguments{ \item{x}{An object of class "anova", "aov", or "aovlist"} \item{...}{extra arguments (not used)} } \value{ A data.frame with columns \item{term}{Term within the model, or "Residuals"} \item{df}{Degrees of freedom used by this term in the model} \item{sumsq}{Sum of squares explained by this term} \item{meansq}{Mean of sum of squares among degrees of freedom} \item{statistic}{F statistic} \item{p.value}{P-value from F test} In the case of an \code{"aovlist"} object, there is also a \code{stratum} column describing the error stratum } \description{ Tidies the result of an analysis of variance into an ANOVA table. Only a \code{tidy} method is provided, not an \code{augment} or \code{glance} method. } \details{ Note that the "term" column of an ANOVA table can come with leading or trailing whitespace, which this tidying method trims. 
} \examples{ a <- anova(lm(mpg ~ wt + qsec + disp, mtcars)) tidy(a) a <- aov(mpg ~ wt + qsec + disp, mtcars) tidy(a) al <- aov(mpg ~ wt + qsec + Error(disp / am), mtcars) tidy(al) } broom/man/inflate.Rd0000644000177700017770000000077013204330346015410 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/utilities.R \name{inflate} \alias{inflate} \title{Expand a dataset to include all factorial combinations of one or more variables} \usage{ inflate(df, ..., stringsAsFactors = FALSE) } \arguments{ \item{df}{a tbl} \item{...}{arguments} \item{stringsAsFactors}{logical specifying if character vectors are converted to factors.} } \value{ A tbl } \description{ This function is deprecated: use \code{tidyr::crossing} instead } broom/man/glance.Rd0000644000177700017770000000073613204330346015221 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/glance.R \name{glance} \alias{glance} \title{Construct a single row summary "glance" of a model, fit, or other object} \usage{ glance(x, ...) } \arguments{ \item{x}{model or other R object to convert to single-row data frame} \item{...}{other arguments passed to methods} } \description{ glance methods always return either a one-row data frame (except on NULL, which returns an empty data frame) } broom/man/gam_tidiers.Rd0000644000177700017770000000363013204276216016260 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/gam_tidiers.R \name{gam_tidiers} \alias{gam_tidiers} \alias{tidy.gam} \alias{glance.gam} \title{Tidying methods for a generalized additive model (gam)} \usage{ \method{tidy}{gam}(x, parametric = FALSE, ...) \method{glance}{gam}(x, ...) } \arguments{ \item{x}{gam object} \item{parametric}{logical. 
Return parametric coefficients (\code{TRUE}) or information about smooth terms (\code{FALSE})?} \item{...}{extra arguments (not used)} } \value{ All tidying methods return a data.frame without rownames, whose structure depends on the method chosen. \code{tidy.gam} called on an object from the gam package, or an object from the mgcv package with \code{parametric = FALSE}, returns the tidied output of the parametric ANOVA with one row for each term in the formula. The columns match those in \link{anova_tidiers}. \code{tidy.gam} called on a gam object from the mgcv package with \code{parametric = TRUE} returns the fixed coefficients. \code{glance.gam} returns a one-row data.frame with the columns \item{df}{Degrees of freedom used by the coefficients} \item{logLik}{the data's log-likelihood under the model} \item{AIC}{the Akaike Information Criterion} \item{BIC}{the Bayesian Information Criterion} \item{deviance}{deviance} \item{df.residual}{residual degrees of freedom} } \description{ These methods tidy the coefficients of a "gam" object (generalized additive model) into a summary, augment the original data with information on the fitted values and residuals, and construct a one-row glance of the model's statistics. } \details{ The "augment" method is handled by \link{lm_tidiers}. } \examples{ if (require("gam", quietly = TRUE)) { data(kyphosis) g <- gam(Kyphosis ~ s(Age,4) + Number, family = binomial, data = kyphosis) tidy(g) augment(g) glance(g) } } \seealso{ \link{lm_tidiers}, \link{anova_tidiers} } broom/man/tidy.table.Rd0000644000177700017770000000132613204276216016030 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/stats_tidiers.R \name{tidy.table} \alias{tidy.table} \title{tidy a table object} \usage{ \method{tidy}{table}(x, ...) 
} \arguments{ \item{x}{An object of class "table"} \item{...}{Extra arguments (not used)} } \description{ A table, typically created by the \link{table} function, contains a contingency table of frequencies across multiple vectors. This directly calls the \code{\link{as.data.frame.table}} method, which melts it into a data frame with one column for each variable and a \code{Freq} column. } \examples{ tab <- with(airquality, table(cut(Temp, quantile(Temp)), Month)) tidy(tab) } \seealso{ \code{\link{as.data.frame.table}} } broom/man/nlme_tidiers.Rd0000644000177700017770000000711313204300607016437 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/nlme_tidiers.R \name{nlme_tidiers} \alias{nlme_tidiers} \alias{tidy.lme} \alias{augment.lme} \alias{glance.lme} \title{Tidying methods for mixed effects models} \usage{ \method{tidy}{lme}(x, effects = "random", ...) \method{augment}{lme}(x, data = x$data, newdata, ...) \method{glance}{lme}(x, ...) } \arguments{ \item{x}{An object of class \code{lme}, such as those from \code{lme} or \code{nlme}} \item{effects}{Either "random" (default) or "fixed"} \item{...}{extra arguments (not used)} \item{data}{original data this was fitted on; if not given this will attempt to be reconstructed} \item{newdata}{new data to be used for prediction; optional} } \value{ All tidying methods return a \code{data.frame} without rownames. The structure depends on the method chosen. \code{tidy} returns one row for each estimated effect, either random or fixed depending on the \code{effects} parameter. 
If \code{effects = "random"}, it contains the columns \item{group}{the group within which the random effect is being estimated} \item{level}{level within group} \item{term}{term being estimated} \item{estimate}{estimated coefficient} If \code{effects="fixed"}, \code{tidy} returns the columns \item{term}{fixed term being estimated} \item{estimate}{estimate of fixed effect} \item{std.error}{standard error} \item{statistic}{t-statistic} \item{p.value}{P-value computed from t-statistic} \code{augment} returns one row for each original observation, with columns (each prepended by a .) added. Included are the columns \item{.fitted}{predicted values} \item{.resid}{residuals} \item{.fixed}{predicted values with no random effects} \code{glance} returns one row with the columns \item{sigma}{the square root of the estimated residual variance} \item{logLik}{the data's log-likelihood under the model} \item{AIC}{the Akaike Information Criterion} \item{BIC}{the Bayesian Information Criterion} \item{deviance}{returned as NA. To quote Brian Ripley on R-help: McCullagh & Nelder (1989) would be the authoritative reference, but the 1982 first edition manages to use 'deviance' in three separate senses on one page. } } \description{ These methods tidy the coefficients of mixed effects models of the \code{lme} class from functions of the \code{nlme} package. } \details{ When the modeling was performed with \code{na.action = "na.omit"} (as is the typical default), rows with NA in the initial data are omitted entirely from the augmented data frame. When the modeling was performed with \code{na.action = "na.exclude"}, one should provide the original data as a second argument, at which point the augmented data will contain those rows (typically with NAs in place of the new columns). If the original data is not provided to \code{augment} and \code{na.action = "na.exclude"}, a warning is raised and the incomplete rows are dropped. 
} \examples{ if (require("nlme") & require("lme4")) { # example regressions are from lme4 documentation, but used for nlme lmm1 <- lme(Reaction ~ Days, random=~ Days|Subject, sleepstudy) tidy(lmm1) tidy(lmm1, effects = "fixed") head(augment(lmm1, sleepstudy)) glance(lmm1) startvec <- c(Asym = 200, xmid = 725, scal = 350) nm1 <- nlme(circumference ~ SSlogis(age, Asym, xmid, scal), data = Orange, fixed = Asym + xmid + scal ~1, random = Asym ~1, start = startvec) tidy(nm1) tidy(nm1, effects = "fixed") head(augment(nm1, Orange)) glance(nm1) } } \seealso{ \link{na.action} } broom/man/zoo_tidiers.Rd0000644000177700017770000000270013204276216016320 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/zoo_tidiers.R \name{zoo_tidiers} \alias{zoo_tidiers} \alias{tidy.zoo} \title{Tidying methods for a zoo object} \usage{ \method{tidy}{zoo}(x, ...) } \arguments{ \item{x}{An object of class \code{"zoo"}} \item{...}{extra arguments (not used)} } \value{ \code{tidy} returns a data frame with one row for each observation in each series, with the following columns: \item{index}{Index (usually date) for the zoo object} \item{series}{Name of the series} \item{value}{Value of the observation} } \description{ Tidies \code{zoo} (Z's ordered observations) time series objects. \code{zoo} objects are not tidy by default because they contain one row for each index and one series per column, rather than one row per observation per series. 
} \examples{ if (require("zoo", quietly = TRUE)) { set.seed(1071) # data generated as shown in the zoo vignette Z.index <- as.Date(sample(12450:12500, 10)) Z.data <- matrix(rnorm(30), ncol = 3) colnames(Z.data) <- c("Aa", "Bb", "Cc") Z <- zoo(Z.data, Z.index) tidy(Z) if (require("ggplot2", quietly = TRUE)) { ggplot(tidy(Z), aes(index, value, color = series)) + geom_line() ggplot(tidy(Z), aes(index, value)) + geom_line() + facet_wrap(~ series, ncol = 1) Zrolled <- rollmean(Z, 5) ggplot(tidy(Zrolled), aes(index, value, color = series)) + geom_line() } } } broom/man/cch_tidiers.Rd0000644000177700017770000000556413204276216016261 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/survival_tidiers.R \name{cch_tidiers} \alias{cch_tidiers} \alias{tidy.cch} \alias{glance.cch} \title{tidiers for case-cohort data} \usage{ \method{tidy}{cch}(x, conf.level = 0.95, ...) \method{glance}{cch}(x, ...) } \arguments{ \item{x}{a "cch" object} \item{conf.level}{confidence level for CI} \item{...}{extra arguments (not used)} } \value{ All tidying methods return a data.frame without rownames, whose structure depends on the method chosen. \code{tidy} returns a data.frame with one row for each term \item{term}{name of term} \item{estimate}{estimate of coefficient} \item{stderror}{standard error} \item{statistic}{Z statistic} \item{p.value}{p-value} \item{conf.low}{low end of confidence interval} \item{conf.high}{high end of confidence interval} \code{glance} returns a one-row data.frame with the following columns: \item{score}{score} \item{rscore}{rscore} \item{p.value}{p-value from Wald test} \item{iter}{number of iterations} \item{n}{number of predictions} \item{nevent}{number of events} } \description{ Tidiers for case-cohort analyses: summarize each estimated coefficient, or test the overall model. } \details{ It is not clear what an \code{augment} method would look like, so none is provided. 
Nor is there currently any way to extract the covariance or the residuals. } \examples{ if (require("survival", quietly = TRUE)) { # examples come from cch documentation subcoh <- nwtco$in.subcohort selccoh <- with(nwtco, rel==1|subcoh==1) ccoh.data <- nwtco[selccoh,] ccoh.data$subcohort <- subcoh[selccoh] ## central-lab histology ccoh.data$histol <- factor(ccoh.data$histol,labels=c("FH","UH")) ## tumour stage ccoh.data$stage <- factor(ccoh.data$stage,labels=c("I","II","III" ,"IV")) ccoh.data$age <- ccoh.data$age/12 # Age in years fit.ccP <- cch(Surv(edrel, rel) ~ stage + histol + age, data = ccoh.data, subcoh = ~subcohort, id= ~seqno, cohort.size = 4028) tidy(fit.ccP) # coefficient plot library(ggplot2) ggplot(tidy(fit.ccP), aes(x = estimate, y = term)) + geom_point() + geom_errorbarh(aes(xmin = conf.low, xmax = conf.high), height = 0) + geom_vline(xintercept = 0) # compare between methods library(dplyr) fits <- data_frame(method = c("Prentice", "SelfPrentice", "LinYing")) \%>\% group_by(method) \%>\% do(tidy(cch(Surv(edrel, rel) ~ stage + histol + age, data = ccoh.data, subcoh = ~subcohort, id= ~seqno, cohort.size = 4028, method = .$method))) # coefficient plots comparing methods ggplot(fits, aes(x = estimate, y = term, color = method)) + geom_point() + geom_errorbarh(aes(xmin = conf.low, xmax = conf.high)) + geom_vline(xintercept = 0) } } \seealso{ \link{cch} } broom/man/mcmc_tidiers.Rd0000644000177700017770000000511713204276216016435 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/mcmc_tidiers.R \name{mcmc_tidiers} \alias{mcmc_tidiers} \alias{tidyMCMC} \alias{tidy.rjags} \alias{tidy.stanfit} \title{Tidying methods for MCMC (Stan, JAGS, etc.) fits} \usage{ tidyMCMC(x, pars, estimate.method = "mean", conf.int = FALSE, conf.level = 0.95, conf.method = "quantile", droppars = "lp__", rhat = FALSE, ess = FALSE, ...) 
\method{tidy}{rjags}(x, pars, estimate.method = "mean", conf.int = FALSE, conf.level = 0.95, conf.method = "quantile", ...) \method{tidy}{stanfit}(x, pars, estimate.method = "mean", conf.int = FALSE, conf.level = 0.95, conf.method = "quantile", droppars = "lp__", rhat = FALSE, ess = FALSE, ...) } \arguments{ \item{x}{an object of class \sQuote{"stanfit"}} \item{pars}{(character) specification of which parameters to include} \item{estimate.method}{method for computing point estimate ("mean" or median")} \item{conf.int}{(logical) include confidence interval?} \item{conf.level}{probability level for CI} \item{conf.method}{method for computing confidence intervals ("quantile" or "HPDinterval")} \item{droppars}{Parameters not to include in the output (such as log-probability information)} \item{rhat, ess}{(logical) include Rhat and/or effective sample size estimates?} \item{...}{unused} } \description{ Tidying methods for MCMC (Stan, JAGS, etc.) fits } \examples{ \dontrun{ # Using example from "RStan Getting Started" # https://github.com/stan-dev/rstan/wiki/RStan-Getting-Started model_file <- system.file("extdata", "8schools.stan", package = "broom") schools_dat <- list(J = 8, y = c(28, 8, -3, 7, -1, 1, 18, 12), sigma = c(15, 10, 16, 11, 9, 11, 10, 18)) if (requireNamespace("rstan", quietly = TRUE)) { set.seed(2015) rstan_example <- stan(file = model_file, data = schools_dat, iter = 100, chains = 2) } } if (requireNamespace("rstan", quietly = TRUE)) { # the object from the above code was saved as rstan_example.rda infile <- system.file("extdata", "rstan_example.rda", package = "broom") load(infile) tidy(rstan_example) tidy(rstan_example, conf.int = TRUE, pars = "theta") td_mean <- tidy(rstan_example, conf.int = TRUE) td_median <- tidy(rstan_example, conf.int = TRUE, estimate.method = "median") library(dplyr) library(ggplot2) tds <- rbind(mutate(td_mean, method = "mean"), mutate(td_median, method = "median")) ggplot(tds, aes(estimate, term)) + geom_errorbarh(aes(xmin = 
conf.low, xmax = conf.high)) + geom_point(aes(color = method)) } } broom/man/augment.Rd0000644000177700017770000000153513204276216015433 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/augment.R \name{augment} \alias{augment} \title{Augment data according to a tidied model} \usage{ augment(x, ...) } \arguments{ \item{x}{model or other R object to convert to data frame} \item{...}{other arguments passed to methods} } \description{ Given an R statistical model or other non-tidy object, add columns to the original dataset such as predictions, residuals and cluster assignments. } \details{ Note that by convention the first argument is almost always \code{data}, which specifies the original data object. This is not part of the S3 signature, partly because it prevents \link{rowwise_df_tidiers} from taking a column name as the first argument. This generic originated in the ggplot2 package, where it was called "fortify." } \seealso{ \code{\link{augment.lm}} } broom/man/sp_tidiers.Rd0000644000177700017770000000243313204276216016136 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/sp_tidiers.R \name{sp_tidiers} \alias{sp_tidiers} \alias{tidy.SpatialPolygonsDataFrame} \alias{tidy.SpatialPolygons} \alias{tidy.Polygons} \alias{tidy.Polygon} \alias{tidy.SpatialLinesDataFrame} \alias{tidy.Lines} \alias{tidy.Line} \title{tidying methods for classes from the sp package.} \usage{ \method{tidy}{SpatialPolygonsDataFrame}(x, region = NULL, ...) \method{tidy}{SpatialPolygons}(x, ...) \method{tidy}{Polygons}(x, ...) \method{tidy}{Polygon}(x, ...) \method{tidy}{SpatialLinesDataFrame}(x, ...) \method{tidy}{Lines}(x, ...) \method{tidy}{Line}(x, ...) 
} \arguments{ \item{x}{\code{SpatialPolygonsDataFrame} to convert into a dataframe.} \item{region}{name of variable used to split up regions} \item{...}{not used by this method} } \description{ Tidy classes from the sp package to allow them to be plotted using ggplot2. To figure out the correct variable name for region, inspect \code{as.data.frame(x)}. } \details{ These functions originated in the ggplot2 package as "fortify" functions. } \examples{ if (require("maptools")) { sids <- system.file("shapes/sids.shp", package="maptools") nc1 <- readShapePoly(sids, proj4string = CRS("+proj=longlat +datum=NAD27")) nc1_df <- tidy(nc1) } } broom/man/lm_tidiers.Rd0000644000177700017770000001557413204276216016136 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/lm_tidiers.R \name{lm_tidiers} \alias{lm_tidiers} \alias{tidy.lm} \alias{tidy.summary.lm} \alias{augment.lm} \alias{glance.lm} \alias{glance.summary.lm} \title{Tidying methods for a linear model} \usage{ \method{tidy}{lm}(x, conf.int = FALSE, conf.level = 0.95, exponentiate = FALSE, quick = FALSE, ...) \method{tidy}{summary.lm}(x, ...) \method{augment}{lm}(x, data = stats::model.frame(x), newdata, type.predict, type.residuals, ...) \method{glance}{lm}(x, ...) \method{glance}{summary.lm}(x, ...) 
} \arguments{ \item{x}{lm object} \item{conf.int}{whether to include a confidence interval} \item{conf.level}{confidence level of the interval, used only if \code{conf.int=TRUE}} \item{exponentiate}{whether to exponentiate the coefficient estimates and confidence intervals (typical for logistic regression)} \item{quick}{whether to compute a smaller and faster version, containing only the \code{term} and \code{estimate} columns.} \item{...}{extra arguments (not used)} \item{data}{Original data, defaults to the extracting it from the model} \item{newdata}{If provided, performs predictions on the new data} \item{type.predict}{Type of prediction to compute for a GLM; passed on to \code{\link{predict.glm}}} \item{type.residuals}{Type of residuals to compute for a GLM; passed on to \code{\link{residuals.glm}}} } \value{ All tidying methods return a \code{data.frame} without rownames. The structure depends on the method chosen. \code{tidy.lm} returns one row for each coefficient, with five columns: \item{term}{The term in the linear model being estimated and tested} \item{estimate}{The estimated coefficient} \item{std.error}{The standard error from the linear model} \item{statistic}{t-statistic} \item{p.value}{two-sided p-value} If the linear model is an "mlm" object (multiple linear model), there is an additional column: \item{response}{Which response column the coefficients correspond to (typically Y1, Y2, etc)} If \code{conf.int=TRUE}, it also includes columns for \code{conf.low} and \code{conf.high}, computed with \code{\link{confint}}. 
When \code{newdata} is not supplied \code{augment.lm} returns one row for each observation, with seven columns added to the original data: \item{.hat}{Diagonal of the hat matrix} \item{.sigma}{Estimate of residual standard deviation when corresponding observation is dropped from model} \item{.cooksd}{Cooks distance, \code{\link{cooks.distance}}} \item{.fitted}{Fitted values of model} \item{.se.fit}{Standard errors of fitted values} \item{.resid}{Residuals} \item{.std.resid}{Standardised residuals} (Some unusual "lm" objects, such as "rlm" from MASS, may omit \code{.cooksd} and \code{.std.resid}. "gam" from mgcv omits \code{.sigma}) When \code{newdata} is supplied, \code{augment.lm} returns one row for each observation, with three columns added to the new data: \item{.fitted}{Fitted values of model} \item{.se.fit}{Standard errors of fitted values} \item{.resid}{Residuals of fitted values on the new data} \code{glance.lm} returns a one-row data.frame with the columns \item{r.squared}{The percent of variance explained by the model} \item{adj.r.squared}{r.squared adjusted based on the degrees of freedom} \item{sigma}{The square root of the estimated residual variance} \item{statistic}{F-statistic} \item{p.value}{p-value from the F test, describing whether the full regression is significant} \item{df}{Degrees of freedom used by the coefficients} \item{logLik}{the data's log-likelihood under the model} \item{AIC}{the Akaike Information Criterion} \item{BIC}{the Bayesian Information Criterion} \item{deviance}{deviance} \item{df.residual}{residual degrees of freedom} } \description{ These methods tidy the coefficients of a linear model into a summary, augment the original data with information on the fitted values and residuals, and construct a one-row glance of the model's statistics. } \details{ If you have missing values in your model data, you may need to refit the model with \code{na.action = na.exclude}. 
If \code{conf.int=TRUE}, the confidence interval is computed with the \code{\link{confint}} function. While \code{tidy} is supported for "mlm" objects, \code{augment} and \code{glance} are not. When the modeling was performed with \code{na.action = "na.omit"} (as is the typical default), rows with NA in the initial data are omitted entirely from the augmented data frame. When the modeling was performed with \code{na.action = "na.exclude"}, one should provide the original data as a second argument, at which point the augmented data will contain those rows (typically with NAs in place of the new columns). If the original data is not provided to \code{augment} and \code{na.action = "na.exclude"}, a warning is raised and the incomplete rows are dropped. Code and documentation for \code{augment.lm} originated in the ggplot2 package, where it was called \code{fortify.lm} } \examples{ library(ggplot2) library(dplyr) mod <- lm(mpg ~ wt + qsec, data = mtcars) tidy(mod) glance(mod) # coefficient plot d <- tidy(mod) \%>\% mutate(low = estimate - std.error, high = estimate + std.error) ggplot(d, aes(estimate, term, xmin = low, xmax = high, height = 0)) + geom_point() + geom_vline(xintercept = 0) + geom_errorbarh() head(augment(mod)) head(augment(mod, mtcars)) # predict on new data newdata <- mtcars \%>\% head(6) \%>\% mutate(wt = wt + 1) augment(mod, newdata = newdata) au <- augment(mod, data = mtcars) plot(mod, which = 1) qplot(.fitted, .resid, data = au) + geom_hline(yintercept = 0) + geom_smooth(se = FALSE) qplot(.fitted, .std.resid, data = au) + geom_hline(yintercept = 0) + geom_smooth(se = FALSE) qplot(.fitted, .std.resid, data = au, colour = factor(cyl)) qplot(mpg, .std.resid, data = au, colour = factor(cyl)) plot(mod, which = 2) qplot(sample =.std.resid, data = au, stat = "qq") + geom_abline() plot(mod, which = 3) qplot(.fitted, sqrt(abs(.std.resid)), data = au) + geom_smooth(se = FALSE) plot(mod, which = 4) qplot(seq_along(.cooksd), .cooksd, data = au) plot(mod, which 
= 5) qplot(.hat, .std.resid, data = au) + geom_smooth(se = FALSE) ggplot(au, aes(.hat, .std.resid)) + geom_vline(size = 2, colour = "white", xintercept = 0) + geom_hline(size = 2, colour = "white", yintercept = 0) + geom_point() + geom_smooth(se = FALSE) qplot(.hat, .std.resid, data = au, size = .cooksd) + geom_smooth(se = FALSE, size = 0.5) plot(mod, which = 6) ggplot(au, aes(.hat, .cooksd)) + geom_vline(xintercept = 0, colour = NA) + geom_abline(slope = seq(0, 3, by = 0.5), colour = "white") + geom_smooth(se = FALSE) + geom_point() qplot(.hat, .cooksd, size = .cooksd / .hat, data = au) + scale_size_area() # column-wise models a <- matrix(rnorm(20), nrow = 10) b <- a + rnorm(length(a)) result <- lm(b ~ a) tidy(result) } \seealso{ \code{\link{summary.lm}} \link{na.action} } broom/man/biglm_tidiers.Rd0000644000177700017770000000461313204276216016610 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/biglm_tidiers.R \name{biglm_tidiers} \alias{biglm_tidiers} \alias{tidy.biglm} \alias{glance.biglm} \title{Tidiers for biglm and bigglm object} \usage{ \method{tidy}{biglm}(x, conf.int = FALSE, conf.level = 0.95, exponentiate = FALSE, quick = FALSE, ...) \method{glance}{biglm}(x, ...) } \arguments{ \item{x}{a "biglm" object} \item{conf.int}{whether to include a confidence interval} \item{conf.level}{confidence level of the interval, used only if \code{conf.int=TRUE}} \item{exponentiate}{whether to exponentiate the coefficient estimates and confidence intervals (typical for logistic regression)} \item{quick}{whether to compute a smaller and faster version, containing only the \code{term} and \code{estimate} columns.} \item{...}{extra arguments (not used)} } \value{ All tidying methods return a data.frame without rownames, whose structure depends on the method chosen. 
\code{tidy.biglm} returns one row for each coefficient, with columns \item{term}{The term in the linear model being estimated and tested} \item{estimate}{The estimated coefficient} \item{std.error}{The standard error from the linear model} \item{p.value}{two-sided p-value} If \code{conf.int=TRUE}, it also includes columns for \code{conf.low} and \code{conf.high}, computed with \code{\link{confint}}. \code{glance.biglm} returns a one-row data frame, with columns \item{r.squared}{The percent of variance explained by the model} \item{AIC}{the Akaike Information Criterion} \item{deviance}{deviance} \item{df.residual}{residual degrees of freedom} } \description{ Tidiers for biglm object from the "biglm" package, which contains a linear model object that is limited in memory usage. Generally the behavior is as similar to the \code{\link{lm_tidiers}} as is possible. Currently no \code{augment} is defined. } \examples{ if (require("biglm", quietly = TRUE)) { bfit <- biglm(mpg ~ wt + disp, mtcars) tidy(bfit) tidy(bfit, conf.int = TRUE) tidy(bfit, conf.int = TRUE, conf.level = .9) glance(bfit) # bigglm: logistic regression bgfit <- bigglm(am ~ mpg, mtcars, family = binomial()) tidy(bgfit) tidy(bgfit, exponentiate = TRUE) tidy(bgfit, conf.int = TRUE) tidy(bgfit, conf.int = TRUE, conf.level = .9) tidy(bgfit, conf.int = TRUE, conf.level = .9, exponentiate = TRUE) glance(bgfit) } } broom/man/loess_tidiers.Rd0000644000177700017770000000362413204276216016644 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/loess_tidiers.R \name{loess_tidiers} \alias{loess_tidiers} \alias{augment.loess} \title{Augmenting methods for loess models} \usage{ \method{augment}{loess}(x, data = stats::model.frame(x), newdata, ...) 
} \arguments{ \item{x}{A "loess" object} \item{data}{Original data, defaults to extracting it from the model} \item{newdata}{If provided, performs predictions on the new data} \item{...}{extra arguments} } \value{ When \code{newdata} is not supplied \code{augment.loess} returns one row for each observation with three columns added to the original data: \item{.fitted}{Fitted values of model} \item{.se.fit}{Standard errors of the fitted values} \item{.resid}{Residuals of the fitted values} When \code{newdata} is supplied \code{augment.loess} returns one row for each observation with two additional columns: \item{.fitted}{Fitted values of model} \item{.se.fit}{Standard errors of the fitted values} } \description{ This method augments the original data with information on the fitted values and residuals, and optionally the standard errors. } \details{ When the modeling was performed with \code{na.action = "na.omit"} (as is the typical default), rows with NA in the initial data are omitted entirely from the augmented data frame. When the modeling was performed with \code{na.action = "na.exclude"}, one should provide the original data as a second argument, at which point the augmented data will contain those rows (typically with NAs in place of the new columns). If the original data is not provided to \code{augment} and \code{na.action = "na.exclude"}, a warning is raised and the incomplete rows are dropped. 
} \examples{ lo <- loess(mpg ~ wt, mtcars) augment(lo) # with all columns of original data augment(lo, mtcars) # with a new dataset augment(lo, newdata = head(mtcars)) } \seealso{ \link{na.action} } broom/man/muhaz_tidiers.Rd0000644000177700017770000000264013204320671016633 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/muhaz_tidiers.R \name{muhaz_tidiers} \alias{muhaz_tidiers} \alias{tidy.muhaz} \alias{glance.muhaz} \title{Tidying methods for kernel based hazard rate estimates} \usage{ \method{tidy}{muhaz}(x, ...) \method{glance}{muhaz}(x, ...) } \arguments{ \item{x}{\code{muhaz} object} \item{...}{extra arguments (not used)} } \value{ All tidying methods return a data.frame without rownames, whose structure depends on the method chosen. \code{tidy.muhaz} returns a tibble containing two columns: \code{time} at which the hazard rate was estimated and \code{estimate}. \code{glance.muhaz} returns a one-row data.frame with the columns \item{nobs}{Number of observations used for estimation} \item{min.time}{The minimum observed event or censoring time} \item{max.time}{The maximum observed event or censoring time} \item{min.hazard}{Minimal estimated hazard} \item{max.hazard}{Maximal estimated hazard} } \description{ These methods tidy the output of \code{muhaz} objects as returned by the \code{\link[muhaz]{muhaz}} function, which provides kernel based non-parametric hazard rate estimators. } \details{ The "augment" method is not useful and therefore not available for \code{muhaz} objects. 
} \examples{ if (require("muhaz", quietly = TRUE)) { data(ovarian, package="survival") x <- muhaz(ovarian$futime, ovarian$fustat) tidy(x) glance(x) } } broom/man/mclust_tidiers.Rd0000644000177700017770000000523013204276216017021 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/mclust_tidiers.R \name{mclust_tidiers} \alias{mclust_tidiers} \alias{tidy.Mclust} \alias{augment.Mclust} \alias{glance.Mclust} \title{Tidying methods for Mclust objects} \usage{ \method{tidy}{Mclust}(x, ...) \method{augment}{Mclust}(x, data, ...) \method{glance}{Mclust}(x, ...) } \arguments{ \item{x}{Mclust object} \item{...}{extra arguments, not used} \item{data}{Original data (required for \code{augment})} } \value{ All tidying methods return a data.frame without rownames, whose structure depends on the method chosen. \code{tidy} returns one row per component, with \item{component}{A factor describing the cluster from 1:k (or 0:k in presence of a noise term in x)} \item{size}{The size of each component} \item{proportion}{The mixing proportion of each component} \item{variance}{In case of one-dimensional and spherical models, the variance for each component, omitted otherwise. NA for noise component} \item{mean}{The mean for each component. In case of two- or more dimensional models, a column with the mean is added for each dimension. 
NA for noise component} \code{augment} returns the original data with two extra columns: \item{.class}{The class assigned by the Mclust algorithm} \item{.uncertainty}{The uncertainty associated with the classification} \code{glance} returns a one-row data.frame with the columns \item{model}{A character string denoting the model at which the optimal BIC occurs} \item{n}{The number of observations in the data} \item{G}{The optimal number of mixture components} \item{BIC}{The optimal BIC value} \item{logLik}{The log-likelihood corresponding to the optimal BIC} \item{df}{The number of estimated parameters} \item{hypvol}{The hypervolume parameter for the noise component if required, otherwise set to NA} } \description{ These methods summarize the results of Mclust clustering into three tidy forms. \code{tidy} describes the size, mixing probability, mean and variability of each class, \code{augment} adds the class assignments and their probabilities to the original data, and \code{glance} summarizes the model parameters of the clustering. } \examples{ library(dplyr) library(ggplot2) library(mclust) set.seed(2016) centers <- data.frame(cluster=factor(1:3), size=c(100, 150, 50), x1=c(5, 0, -3), x2=c(-1, 1, -2)) points <- centers \%>\% group_by(cluster) \%>\% do(data.frame(x1=rnorm(.$size[1], .$x1[1]), x2=rnorm(.$size[1], .$x2[1]))) \%>\% ungroup() m = Mclust(points \%>\% dplyr::select(x1, x2)) tidy(m) head(augment(m, points)) glance(m) } \seealso{ \code{\link[mclust]{Mclust}} } broom/man/tidy.ts.Rd0000644000177700017770000000067313204276216015373 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/stats_tidiers.R \name{tidy.ts} \alias{tidy.ts} \title{tidy a ts timeseries object} \usage{ \method{tidy}{ts}(x, ...) } \arguments{ \item{x}{a "ts" object} \item{...}{extra arguments (not used)} } \value{ a tidy data frame } \description{ Turn a ts object into a tidy data frame. Right now simply uses \code{as.data.frame.ts}. 
} \seealso{ \link{as.data.frame.ts} } broom/man/survreg_tidiers.Rd0000644000177700017770000000626113204276216017214 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/survival_tidiers.R \name{survreg_tidiers} \alias{survreg_tidiers} \alias{tidy.survreg} \alias{survreg_tidiers} \alias{augment.survreg} \alias{glance.survreg} \title{Tidiers for a parametric regression survival model} \usage{ \method{tidy}{survreg}(x, conf.level = 0.95, ...) \method{augment}{survreg}(x, data = stats::model.frame(x), newdata, type.predict = "response", type.residuals = "response", ...) \method{glance}{survreg}(x, conf.level = 0.95, ...) } \arguments{ \item{x}{a "survreg" model} \item{conf.level}{confidence level for CI} \item{...}{extra arguments (not used)} \item{data}{original data; if it is not provided, it is reconstructed as best as possible with \code{\link{model.frame}}} \item{newdata}{New data to use for prediction; optional} \item{type.predict}{type of prediction, default "response"} \item{type.residuals}{type of residuals to calculate, default "response"} } \value{ All tidying methods return a data.frame without rownames, whose structure depends on the method chosen. 
\code{tidy} returns a data.frame with one row for each term \item{term}{name of term} \item{estimate}{estimate of coefficient} \item{std.error}{standard error} \item{statistic}{Z statistic} \item{p.value}{p-value} \item{conf.low}{low end of confidence interval} \item{conf.high}{high end of confidence interval} \code{augment} returns the original data.frame with the following additional columns: \item{.fitted}{Fitted values of model} \item{.se.fit}{Standard errors of fitted values} \item{.resid}{Residuals} \code{glance} returns a one-row data.frame with the columns: \item{iter}{number of iterations} \item{df}{degrees of freedom} \item{statistic}{chi-squared statistic} \item{p.value}{p-value from chi-squared test} \item{logLik}{log likelihood} \item{AIC}{Akaike information criterion} \item{BIC}{Bayesian information criterion} \item{df.residual}{residual degrees of freedom} } \description{ Tidies the coefficients of a parametric survival regression model, from the "survreg" function, adds fitted values and residuals, or summarizes the model statistics. } \details{ When the modeling was performed with \code{na.action = "na.omit"} (as is the typical default), rows with NA in the initial data are omitted entirely from the augmented data frame. When the modeling was performed with \code{na.action = "na.exclude"}, one should provide the original data as a second argument, at which point the augmented data will contain those rows (typically with NAs in place of the new columns). If the original data is not provided to \code{augment} and \code{na.action = "na.exclude"}, a warning is raised and the incomplete rows are dropped. 
} \examples{ if (require("survival", quietly = TRUE)) { sr <- survreg(Surv(futime, fustat) ~ ecog.ps + rx, ovarian, dist="exponential") td <- tidy(sr) augment(sr, ovarian) augment(sr) glance(sr) # coefficient plot library(ggplot2) ggplot(td, aes(estimate, term)) + geom_point() + geom_errorbarh(aes(xmin = conf.low, xmax = conf.high), height = 0) + geom_vline(xintercept = 0) } } \seealso{ \link{na.action} } broom/man/matrix_tidiers.Rd0000644000177700017770000000232213204276216017015 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/matrix_tidiers.R \name{matrix_tidiers} \alias{matrix_tidiers} \alias{tidy.matrix} \alias{glance.matrix} \title{Tidiers for matrix objects} \usage{ \method{tidy}{matrix}(x, ...) \method{glance}{matrix}(x, ...) } \arguments{ \item{x}{A matrix} \item{...}{extra arguments, not used} } \value{ \code{tidy.matrix} returns the original matrix converted into a data.frame, except that it incorporates rownames (if they exist) into a column called \code{.rownames}. \code{glance} returns a one-row data.frame with \item{nrow}{number of rows} \item{ncol}{number of columns} \item{complete.obs}{number of rows that have no missing values} \item{na.fraction}{fraction of values across all rows and columns that are missing} } \description{ These perform tidying operations on matrix objects. \code{tidy} turns the matrix into a data.frame while bringing rownames, if they exist, in as a column called \code{.rownames} (since results of tidying operations never contain rownames). \code{glance} simply reports the number of rows and columns. Note that no augment method exists for matrices. 
} \examples{ mat <- as.matrix(mtcars) tidy(mat) glance(mat) } broom/man/broom.Rd0000644000177700017770000000135713204276216015113 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/broom.R \docType{package} \name{broom} \alias{broom} \alias{broom-package} \alias{broom-package} \title{Convert Statistical Analysis Objects into Tidy Data Frames} \description{ Convert statistical analysis objects from R into tidy data frames, so that they can more easily be combined, reshaped and otherwise processed with tools like dplyr, tidyr and ggplot2. The package provides three S3 generics: tidy, which summarizes a model's statistical findings such as coefficients of a regression; augment, which adds columns to the original data such as predictions, residuals and cluster assignments; and glance, which provides a one-row summary of model-level statistics. } broom/man/robust_tidiers.Rd0000644000177700017770000000341313204276216017031 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/robust_tidiers.R \name{robust_tidiers} \alias{robust_tidiers} \alias{tidy.lmRob} \alias{augment.lmRob} \alias{glance.lmRob} \alias{robust_tidiers} \alias{tidy.glmRob} \alias{augment.glmRob} \alias{glance.glmRob} \title{Tidiers for lmRob and glmRob objects} \usage{ \method{tidy}{lmRob}(x, ...) \method{augment}{lmRob}(x, ...) \method{glance}{lmRob}(x, ...) \method{tidy}{glmRob}(x, ...) \method{augment}{glmRob}(x, ...) \method{glance}{glmRob}(x, ...) } \arguments{ \item{x}{An lmRob or glmRob object with a robust regression} \item{...}{Extra arguments, not used} } \value{ All tidying methods return a data.frame without rownames, whose structure depends on the method chosen. \code{tidy} and \code{augment} return the same results as \code{\link{lm_tidiers}}. 
On an \code{lmRob} \code{glance} returns a one-row data frame with the following columns: \item{r.squared}{R-squared} \item{deviance}{Robust deviance} \item{sigma}{Residual scale estimate} \item{df.residual}{Number of residual degrees of freedom} On a \code{glmRob} \code{glance} returns a one-row data frame with the following columns: \item{deviance}{Robust deviance} \item{null.deviance}{Deviance under the null model} \item{df.residual}{Number of residual degrees of freedom} } \description{ Tidying robust regression objects from the robust package. The tidy and augment methods simply pass it on to the linear model tidiers. } \examples{ if (require("robust", quietly = TRUE)) { m <- lmRob(mpg ~ wt, data = mtcars) tidy(m) augment(m) glance(m) gm <- glmRob(am ~ wt, data = mtcars, family = "binomial") glance(gm) } } \seealso{ \code{\link{lm_tidiers}}, \code{\link[robust]{lmRob}}, \code{\link[robust]{glmRob}} } broom/man/speedlm_tidiers.Rd0000644000177700017770000000475213204317475017156 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/speedlm_tidiers.R \name{speedlm_tidiers} \alias{speedlm_tidiers} \alias{tidy.speedlm} \alias{glance.speedlm} \alias{augment.speedlm} \title{Tidying methods for a speedlm model} \usage{ \method{tidy}{speedlm}(x, conf.int = FALSE, conf.level = 0.95, exponentiate = FALSE, quick = FALSE, ...) \method{glance}{speedlm}(x, ...) \method{augment}{speedlm}(x, data = stats::model.frame(x), newdata = data, ...) 
} \arguments{ \item{x}{speedlm object} \item{conf.int}{whether to include a confidence interval} \item{conf.level}{confidence level of the interval, used only if \code{conf.int=TRUE}} \item{exponentiate}{whether to exponentiate the coefficient estimates and confidence intervals (typical for logistic regression)} \item{quick}{whether to compute a smaller and faster version, containing only the \code{term} and \code{estimate} columns.} \item{...}{extra arguments (not used)} \item{data}{data frame to augment} \item{newdata}{new data to use for predictions, optional} } \value{ All tidying methods return a data.frame without rownames, whose structure depends on the method chosen. \code{tidy.speedlm} returns the tidied output of the lm with one row for each term in the formula. The columns match those in \link{lm_tidiers}. \code{glance.speedlm} returns a one-row data.frame with the columns \item{r.squared}{The percent of variance explained by the model} \item{adj.r.squared}{r.squared adjusted based on the degrees of freedom} \item{statistic}{F-statistic} \item{p.value}{p-value from the F test, describing whether the full regression is significant} \item{df}{Degrees of freedom used by the coefficients} \item{logLik}{the data's log-likelihood under the model} \item{AIC}{the Akaike Information Criterion} \item{BIC}{the Bayesian Information Criterion} \item{deviance}{deviance} \item{df.residual}{residual degrees of freedom} \code{augment.speedlm} returns one row for each observation, with just one column added: \item{.fitted}{Fitted values of model} } \description{ These methods tidy the coefficients of a "speedlm" object into a summary, augment the original data with information on the fitted values and residuals, and construct a one-row glance of the model's statistics. 
} \examples{ if (require("speedglm", quietly = TRUE)) { mod <- speedglm::speedlm(mpg ~ wt + qsec, data = mtcars) tidy(mod) glance(mod) augment(mod) } } \seealso{ \link{lm_tidiers}, \link{biglm_tidiers} } broom/man/process_geeglm.Rd0000644000177700017770000000140113204276216016761 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/geeglm_tidiers.R \name{process_geeglm} \alias{process_geeglm} \title{helper function to process a tidied geeglm object} \usage{ process_geeglm(ret, x, conf.int = FALSE, conf.level = 0.95, exponentiate = FALSE) } \arguments{ \item{ret}{data frame with a tidied version of a coefficient matrix} \item{x}{a "geeglm" object} \item{conf.int}{whether to include a confidence interval} \item{conf.level}{confidence level of the interval, used only if \code{conf.int=TRUE}} \item{exponentiate}{whether to exponentiate the coefficient estimates and confidence intervals (typical for log distributions)} } \description{ Adds a confidence interval, and possibly exponentiates, a tidied object. } broom/man/xyz_tidiers.Rd0000644000177700017770000000161013204276216016342 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/xyz_tidiers.R \name{xyz_tidiers} \alias{xyz_tidiers} \alias{tidy_xyz} \title{Tidiers for x, y, z lists suitable for persp, image, etc.} \usage{ tidy_xyz(x, ...) } \arguments{ \item{x}{list with components x, y and z} \item{...}{extra arguments} } \value{ All tidying methods return a data.frame without rownames, whose structure depends on the method chosen. \code{tidy} returns a data frame with columns x, y and z and one row per value in matrix z. } \description{ Tidies lists with components x, y (vector of coordinates) and z (matrix of values) which are typically used by functions such as \code{\link[graphics]{persp}} or \code{\link[graphics]{image}} and returned by interpolation functions such as \code{\link[akima]{interp}}. 
} \examples{ A <- list(x=1:5, y=1:3, z=matrix(runif(5*3), nrow=5)) image(A) tidy(A) } broom/man/rcorr_tidiers.Rd0000644000177700017770000000325113204276216016642 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/rcorr_tidiers.R \name{rcorr_tidiers} \alias{rcorr_tidiers} \alias{tidy.rcorr} \title{Tidying methods for rcorr objects} \usage{ \method{tidy}{rcorr}(x, diagonal = FALSE, ...) } \arguments{ \item{x}{An object of class "rcorr"} \item{diagonal}{Whether to include diagonal elements (where \code{estimate} is 1 and \code{p.value} is NA), default FALSE} \item{...}{extra arguments (not used)} } \value{ A data.frame with one row for each pairing in the correlation matrix. Columns are: \item{column1}{Name or index of the first column being described} \item{column2}{Name or index of the second column being described} \item{estimate}{Estimate of Pearson's r or Spearman's rho} \item{n}{Number of observations used to compute the correlation} \item{p.value}{P-value of correlation} } \description{ Tidies a correlation matrix from the \code{rcorr} function in the "Hmisc" package, including correlation estimates, p-values, and the number of observations in each pairwise correlation. Note that it returns these in "long", or "melted", format, with one row for each pair of columns being compared. } \details{ Only half the symmetric matrix is shown. 
} \examples{ if (require("Hmisc", quietly = TRUE)) { mat <- replicate(52, rnorm(100)) # add some NAs mat[sample(length(mat), 2000)] <- NA # also column names colnames(mat) <- c(LETTERS, letters) rc <- rcorr(mat) td <- tidy(rc) head(td) library(ggplot2) ggplot(td, aes(p.value)) + geom_histogram(binwidth = .1) ggplot(td, aes(estimate, p.value)) + geom_point() + scale_y_log10() } } broom/man/multinom_tidiers.Rd0000644000177700017770000000455213204276216017364 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/multinom_tidiers.R \name{multinom_tidiers} \alias{multinom_tidiers} \alias{tidy.multinom} \alias{glance.multinom} \title{Tidying methods for multinomial logistic regression models} \usage{ \method{tidy}{multinom}(x, conf.int = FALSE, conf.level = 0.95, exponentiate = TRUE, ...) \method{glance}{multinom}(x, ...) } \arguments{ \item{x}{A model object of class \code{multinom}} \item{conf.int}{whether to include a confidence interval} \item{conf.level}{confidence level of the interval, used only if \code{conf.int=TRUE}} \item{exponentiate}{whether to exponentiate the coefficient estimates and confidence intervals (typical for multinomial logistic regression)} \item{...}{extra arguments, not used} } \value{ All tidying methods return a \code{data.frame} without rownames. The structure depends on the method chosen. \code{tidy.multinom} returns one row for each coefficient at each level of the response variable, with six columns: \item{y.value}{The response level} \item{term}{The term in the model being estimated and tested} \item{estimate}{The estimated coefficient} \item{std.error}{The standard error from the linear model} \item{statistic}{Wald z-statistic} \item{p.value}{two-sided p-value} If \code{conf.int=TRUE}, it also includes columns for \code{conf.low} and \code{conf.high}, computed with \code{\link{confint}}. 
\code{glance.multinom} returns a one-row data.frame with the columns \item{edf}{The effective degrees of freedom} \item{deviance}{deviance} \item{AIC}{the Akaike Information Criterion} } \description{ These methods tidy the coefficients of multinomial logistic regression models generated by \code{multinom} of the \code{nnet} package. } \details{ If \code{conf.int=TRUE}, the confidence interval is computed with the \code{\link{confint}} function. While \code{tidy} and \code{glance} are supported for "multinom" objects, \code{augment} is not. } \examples{ if (require(nnet) & require(MASS)){ example(birthwt) bwt.mu <- multinom(low ~ ., bwt) tidy(bwt.mu) glance(bwt.mu) #* This model is a truly terrible model #* but it should show you what the output looks #* like in a multinomial logistic regression fit.gear <- multinom(gear ~ mpg + factor(am), data=mtcars) tidy(fit.gear) glance(fit.gear) } } broom/man/coxph_tidiers.Rd0000644000177700017770000000625413204276216016642 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/survival_tidiers.R \name{coxph_tidiers} \alias{coxph_tidiers} \alias{tidy.coxph} \alias{augment.coxph} \alias{glance.coxph} \title{Tidiers for coxph object} \usage{ \method{tidy}{coxph}(x, exponentiate = FALSE, conf.int = 0.95, ...) \method{augment}{coxph}(x, data = stats::model.frame(x), newdata, type.predict = "lp", type.residuals = "martingale", ...) \method{glance}{coxph}(x, ...) 
} \arguments{ \item{x}{"coxph" object} \item{exponentiate}{whether to report the estimate and confidence intervals on an exponential scale} \item{conf.int}{confidence level to be used for CI} \item{...}{Extra arguments, not used} \item{data}{original data for \code{augment}} \item{newdata}{new data on which to do predictions} \item{type.predict}{type of predicted value (see \code{\link{predict.coxph}})} \item{type.residuals}{type of residuals (see \code{\link{residuals.coxph}})} } \value{ \code{tidy} returns a data.frame with one row for each term, with columns \item{estimate}{estimate of slope} \item{std.error}{standard error of estimate} \item{statistic}{test statistic} \item{p.value}{p-value} \code{augment} returns the original data.frame with additional columns added: \item{.fitted}{predicted values} \item{.se.fit}{standard errors } \item{.resid}{residuals (not present if \code{newdata} is provided)} \code{glance} returns a one-row data.frame with statistics calculated on the cox regression. } \description{ Tidy the coefficients of a Cox proportional hazards regression model, construct predictions, or summarize the entire model into a single row. } \details{ When the modeling was performed with \code{na.action = "na.omit"} (as is the typical default), rows with NA in the initial data are omitted entirely from the augmented data frame. When the modeling was performed with \code{na.action = "na.exclude"}, one should provide the original data as a second argument, at which point the augmented data will contain those rows (typically with NAs in place of the new columns). If the original data is not provided to \code{augment} and \code{na.action = "na.exclude"}, a warning is raised and the incomplete rows are dropped. 
} \examples{ if (require("survival", quietly = TRUE)) { cfit <- coxph(Surv(time, status) ~ age + sex, lung) tidy(cfit) tidy(cfit, exponentiate = TRUE) lp <- augment(cfit, lung) risks <- augment(cfit, lung, type.predict = "risk") expected <- augment(cfit, lung, type.predict = "expected") glance(cfit) # also works on clogit models resp <- levels(logan$occupation) n <- nrow(logan) indx <- rep(1:n, length(resp)) logan2 <- data.frame(logan[indx,], id = indx, tocc = factor(rep(resp, each=n))) logan2$case <- (logan2$occupation == logan2$tocc) cl <- clogit(case ~ tocc + tocc:education + strata(id), logan2) tidy(cl) glance(cl) library(ggplot2) ggplot(lp, aes(age, .fitted, color = sex)) + geom_point() ggplot(risks, aes(age, .fitted, color = sex)) + geom_point() ggplot(expected, aes(time, .fitted, color = sex)) + geom_point() } } \seealso{ \link{na.action} } broom/man/augment_columns.Rd0000644000177700017770000000203213204276216017164 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/utilities.R \name{augment_columns} \alias{augment_columns} \title{add fitted values, residuals, and other common outputs to an augment call} \usage{ augment_columns(x, data, newdata, type, type.predict = type, type.residuals = type, se.fit = TRUE, ...) } \arguments{ \item{x}{a model} \item{data}{original data onto which columns should be added} \item{newdata}{new data to predict on, optional} \item{type}{Type of prediction and residuals to compute} \item{type.predict}{Type of prediction to compute; by default same as \code{type}} \item{type.residuals}{Type of residuals to compute; by default same as \code{type}} \item{se.fit}{Value to pass to predict's \code{se.fit}, or NULL for no value} \item{...}{extra arguments (not used)} } \description{ Add fitted values, residuals, and other common outputs to the value returned from \code{augment}. 
} \details{ In the case that a residuals or influence generic is not implemented for the model, fail quietly. } broom/man/process_ergm.Rd0000644000177700017770000000143613204276216016463 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ergm_tidiers.R \name{process_ergm} \alias{process_ergm} \title{helper function to process a tidied ergm object} \usage{ process_ergm(ret, x, conf.int = FALSE, conf.level = 0.95, exponentiate = FALSE) } \arguments{ \item{ret}{data frame with a tidied version of a coefficient matrix} \item{x}{an "ergm" object} \item{conf.int}{whether to include a confidence interval} \item{conf.level}{confidence level of the interval, used only if \code{conf.int=TRUE}} \item{exponentiate}{whether to exponentiate the coefficient estimates and confidence intervals (typical for logistic regression)} } \description{ Optionally exponentiates the coefficients, and optionally adds a confidence interval, to a tidied ergm object. } broom/man/kmeans_tidiers.Rd0000644000177700017770000000446513204276216017001 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/kmeans_tidiers.R \name{kmeans_tidiers} \alias{kmeans_tidiers} \alias{tidy.kmeans} \alias{augment.kmeans} \alias{glance.kmeans} \title{Tidying methods for kmeans objects} \usage{ \method{tidy}{kmeans}(x, col.names = paste0("x", 1:ncol(x$centers)), ...) \method{augment}{kmeans}(x, data, ...) \method{glance}{kmeans}(x, ...) } \arguments{ \item{x}{kmeans object} \item{col.names}{The names to call each dimension of the data in \code{tidy}. Defaults to \code{x1, x2...}} \item{...}{extra arguments, not used} \item{data}{Original data (required for \code{augment})} } \value{ All tidying methods return a \code{data.frame} without rownames. The structure depends on the method chosen. 
\code{tidy} returns one row per cluster, with one column for each dimension in the data describing the center, followed by \item{size}{The size of each cluster} \item{withinss}{The within-cluster sum of squares} \item{cluster}{A factor describing the cluster from 1:k} \code{augment} returns the original data with one extra column: \item{.cluster}{The cluster assigned by the k-means algorithm} \code{glance} returns a one-row data.frame with the columns \item{totss}{The total sum of squares} \item{tot.withinss}{The total within-cluster sum of squares} \item{betweenss}{The total between-cluster sum of squares} \item{iter}{The number of (outer) iterations} } \description{ These methods summarize the results of k-means clustering into three tidy forms. \code{tidy} describes the center and size of each cluster, \code{augment} adds the cluster assignments to the original data, and \code{glance} summarizes the total within and between sum of squares of the clustering. } \examples{ library(dplyr) library(ggplot2) set.seed(2014) centers <- data.frame(cluster=factor(1:3), size=c(100, 150, 50), x1=c(5, 0, -3), x2=c(-1, 1, -2)) points <- centers \%>\% group_by(cluster) \%>\% do(data.frame(x1=rnorm(.$size[1], .$x1[1]), x2=rnorm(.$size[1], .$x2[1]))) k <- kmeans(points \%>\% dplyr::select(x1, x2), 3) tidy(k) head(augment(k, points)) glance(k) ggplot(augment(k, points), aes(x1, x2)) + geom_point(aes(color = .cluster)) + geom_text(aes(label = cluster), data = tidy(k), size = 10) } \seealso{ \code{\link{kmeans}} } broom/man/tidy.power.htest.Rd0000644000177700017770000000124713204276216017225 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/stats_tidiers.R \name{tidy.power.htest} \alias{tidy.power.htest} \title{tidy a power.htest} \usage{ \method{tidy}{power.htest}(x, ...) 
} \arguments{ \item{x}{a power.htest object} \item{...}{extra arguments, not used} } \value{ A data frame with one row per parameter passed in, with columns \code{n}, \code{delta}, \code{sd}, \code{sig.level}, and \code{power} (from the \code{power.htest} object). } \description{ tidy a power.htest } \examples{ ptt <- power.t.test(n = 2:30, delta = 1) tidy(ptt) library(ggplot2) ggplot(tidy(ptt), aes(n, power)) + geom_line() } \seealso{ \link{power.t.test} } broom/man/insert_NAs.Rd0000644000177700017770000000070713204276216016040 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/utilities.R \name{insert_NAs} \alias{insert_NAs} \title{insert a row of NAs into a data frame wherever another data frame has NAs} \usage{ insert_NAs(x, original) } \arguments{ \item{x}{data frame that has one row for each non-NA row in original} \item{original}{data frame with NAs} } \description{ insert a row of NAs into a data frame wherever another data frame has NAs } broom/man/Arima_tidiers.Rd0000644000177700017770000000327113204276216016546 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/arima_tidiers.R \name{Arima_tidiers} \alias{Arima_tidiers} \alias{tidy.Arima} \alias{glance.Arima} \title{Tidying methods for ARIMA modeling of time series} \usage{ \method{tidy}{Arima}(x, conf.int = FALSE, conf.level = 0.95, ...) \method{glance}{Arima}(x, ...) } \arguments{ \item{x}{An object of class "Arima"} \item{conf.int}{whether to include a confidence interval} \item{conf.level}{confidence level of the interval, used only if \code{conf.int=TRUE}} \item{...}{extra arguments (not used)} } \value{ All tidying methods return a data.frame without rownames, whose structure depends on the method chosen. 
\code{tidy} returns one row for each coefficient in the model, with five columns: \item{term}{The term in the nonlinear model being estimated and tested} \item{estimate}{The estimated coefficient} \item{std.error}{The standard error from the linear model} If \code{conf.int = TRUE}, also returns \item{conf.low}{low end of confidence interval} \item{conf.high}{high end of confidence interval} \code{glance} returns one row with the columns \item{sigma}{the square root of the estimated residual variance} \item{logLik}{the data's log-likelihood under the model} \item{AIC}{the Akaike Information Criterion} \item{BIC}{the Bayesian Information Criterion} } \description{ These methods tidy the coefficients of ARIMA models of univariate time series. } \details{ \code{augment} is not currently implemented, as it is not clear whether ARIMA predictions can or should be merged with the original data frame. } \examples{ fit <- arima(lh, order = c(1, 0, 0)) tidy(fit) glance(fit) } \seealso{ \link{arima} } broom/man/gmm_tidiers.Rd0000644000177700017770000000773213204276216016303 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/gmm_tidiers.R \name{gmm_tidiers} \alias{gmm_tidiers} \alias{tidy.gmm} \alias{glance.gmm} \title{Tidying methods for generalized method of moments "gmm" objects} \usage{ \method{tidy}{gmm}(x, conf.int = FALSE, conf.level = 0.95, exponentiate = FALSE, quick = FALSE, ...) \method{glance}{gmm}(x, ...) 
} \arguments{ \item{x}{gmm object} \item{conf.int}{whether to include a confidence interval} \item{conf.level}{confidence level of the interval, used only if \code{conf.int=TRUE}} \item{exponentiate}{whether to exponentiate the coefficient estimates and confidence intervals (typical for logistic regression)} \item{quick}{whether to compute a smaller and faster version, containing only the \code{term} and \code{estimate} columns (and confidence interval if requested, which may be slower)} \item{...}{extra arguments (not used)} } \value{ All tidying methods return a \code{data.frame} without rownames. The structure depends on the method chosen. \code{tidy.gmm} returns one row for each coefficient, with six columns: \item{term}{The term in the model being estimated} \item{estimate}{The estimated coefficient} \item{std.error}{The standard error from the linear model} \item{statistic}{t-statistic} \item{p.value}{two-sided p-value} If all the terms have _ in them (e.g. \code{WMK_(Intercept)}), they are split into \code{variable} and \code{term}. If \code{conf.int=TRUE}, it also includes columns for \code{conf.low} and \code{conf.high}, computed with \code{\link{confint}}. \code{glance.gmm} returns a one-row data.frame with the columns \item{df}{Degrees of freedom} \item{statistic}{Statistic from J-test for E(g)=0} \item{p.value}{P-value from J-test} \item{df.residual}{Residual degrees of freedom, if included in "gmm" object} } \description{ These methods tidy the coefficients of "gmm" objects from the gmm package, or glance at the model-wide statistics (especially the J-test). } \details{ If \code{conf.int=TRUE}, the confidence interval is computed with the \code{\link{confint}} function. Note that though the "gmm" object contains residuals and fitted values, there is not yet an \code{augment} method implemented. This is because the input to gmm is not tidy (it's a "wide" matrix), so it is not immediately clear what the augmented results should look like. 
} \examples{ if (require("gmm", quietly = TRUE)) { # examples come from the "gmm" package ## CAPM test with GMM data(Finance) r <- Finance[1:300, 1:10] rm <- Finance[1:300, "rm"] rf <- Finance[1:300, "rf"] z <- as.matrix(r-rf) t <- nrow(z) zm <- rm-rf h <- matrix(zm, t, 1) res <- gmm(z ~ zm, x = h) # tidy result tidy(res) tidy(res, conf.int = TRUE) tidy(res, conf.int = TRUE, conf.level = .99) # coefficient plot library(ggplot2) library(dplyr) tidy(res, conf.int = TRUE) \%>\% mutate(variable = reorder(variable, estimate)) \%>\% ggplot(aes(estimate, variable)) + geom_point() + geom_errorbarh(aes(xmin = conf.low, xmax = conf.high)) + facet_wrap(~ term) + geom_vline(xintercept = 0, color = "red", lty = 2) # from a function instead of a matrix g <- function(theta, x) { e <- x[,2:11] - theta[1] - (x[,1] - theta[1]) \%*\% matrix(theta[2:11], 1, 10) gmat <- cbind(e, e*c(x[,1])) return(gmat) } x <- as.matrix(cbind(rm, r)) res_black <- gmm(g, x = x, t0 = rep(0, 11)) tidy(res_black) tidy(res_black, conf.int = TRUE) ## APT test with Fama-French factors and GMM f1 <- zm f2 <- Finance[1:300, "hml"] - rf f3 <- Finance[1:300, "smb"] - rf h <- cbind(f1, f2, f3) res2 <- gmm(z ~ f1 + f2 + f3, x = h) td2 <- tidy(res2, conf.int = TRUE) td2 # coefficient plot td2 \%>\% mutate(variable = reorder(variable, estimate)) \%>\% ggplot(aes(estimate, variable)) + geom_point() + geom_errorbarh(aes(xmin = conf.low, xmax = conf.high)) + facet_wrap(~ term) + geom_vline(xintercept = 0, color = "red", lty = 2) } } broom/man/mle2_tidiers.Rd0000644000177700017770000000143613204276216016355 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/mle2_tidiers.R \name{mle2_tidiers} \alias{mle2_tidiers} \alias{tidy.mle2} \title{Tidy mle2 maximum likelihood objects} \usage{ \method{tidy}{mle2}(x, conf.int = FALSE, conf.level = 0.95, ...) 
} \arguments{ \item{x}{An "mle2" object} \item{conf.int}{Whether to add \code{conf.low} and \code{conf.high} columns} \item{conf.level}{Confidence level to use for interval} \item{...}{Extra arguments, not used} } \description{ Tidy mle2 objects from the bbmle package. } \examples{ if (require("bbmle", quietly = TRUE)) { x <- 0:10 y <- c(26, 17, 13, 12, 20, 5, 9, 8, 5, 4, 8) d <- data.frame(x,y) fit <- mle2(y ~ dpois(lambda = ymean), start = list(ymean = mean(y)), data = d) tidy(fit) } } broom/man/tidy.default.Rd0000644000177700017770000000135513204276216016367 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/tidy.R \name{tidy.default} \alias{tidy.default} \title{Default tidying method} \usage{ \method{tidy}{default}(x, ...) } \arguments{ \item{x}{an object to be tidied} \item{...}{extra arguments (not used)} } \value{ A data frame, from \code{as.data.frame} applied to the input x. } \description{ By default, tidy uses \code{as.data.frame} to convert its output. This is dangerous, as it may fail with an uninformative error message. Generally tidy is intended to be used on structured model objects such as lm or htest for which a specific S3 object exists. } \details{ If you know that you want to use \code{as.data.frame} on your untidy object, just use it directly. } broom/man/gamlss_tidiers.Rd0000644000177700017770000000226213204276216017002 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/gamlss_tidiers.R \name{gamlss_tidiers} \alias{gamlss_tidiers} \alias{tidy.gamlss} \title{Tidying methods for gamlss objects} \usage{ \method{tidy}{gamlss}(x, quick = FALSE, ...) } \arguments{ \item{x}{A "gamlss" object} \item{quick}{Whether to perform a fast version, and return only the coefficients} \item{...}{Extra arguments (not used)} } \value{ All tidying methods return a data.frame without rownames, whose structure depends on the method chosen. 
A data.frame with one row for each coefficient, containing columns \item{parameter}{Type of coefficient being estimated: \code{mu}, \code{sigma}, \code{nu}, or \code{tau}} \item{term}{The term in the model being estimated and tested} \item{estimate}{The estimated coefficient} \item{std.error}{The standard error from the linear model} \item{statistic}{t-statistic} \item{p.value}{two-sided p-value} if (requireNamespace("gamlss", quietly = TRUE)) { data(abdom) mod<-gamlss(y~pb(x),sigma.fo=~pb(x),family=BCT, data=abdom, method=mixed(1,20)) tidy(mod) } } \description{ Tidying methods for "gamlss" objects from the gamlss package. } broom/man/tidy.ftable.Rd0000644000177700017770000000113413204276216016173 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/stats_tidiers.R \name{tidy.ftable} \alias{tidy.ftable} \title{tidy an ftable object} \usage{ \method{tidy}{ftable}(x, ...) } \arguments{ \item{x}{An object of class "ftable"} \item{...}{Extra arguments (not used)} } \description{ An ftable contains a "flat" contingency table. This melts it into a data.frame with one column for each variable, then a \code{Freq} column. It directly uses the \code{stats:::as.data.frame.ftable} function } \examples{ tidy(ftable(Titanic, row.vars = 1:3)) } \seealso{ \code{\link{ftable}} } broom/man/survfit_tidiers.Rd0000644000177700017770000000676213204276216017227 0ustar herbrandtherbrandt% Generated by roxygen2: do not edit by hand % Please edit documentation in R/survival_tidiers.R \name{survfit_tidiers} \alias{survfit_tidiers} \alias{tidy.survfit} \alias{glance.survfit} \title{tidy survival curve fits} \usage{ \method{tidy}{survfit}(x, ...) \method{glance}{survfit}(x, ...) } \arguments{ \item{x}{"survfit" object} \item{...}{extra arguments, not used} } \value{ All tidying methods return a data.frame without rownames, whose structure depends on the method chosen. 
\code{tidy} returns a row for each time point, with columns \item{time}{timepoint} \item{n.risk}{number of subjects at risk at time t0} \item{n.event}{number of events at time t} \item{n.censor}{number of censored events} \item{estimate}{estimate of survival or cumulative incidence rate when multistate} \item{std.error}{standard error of estimate} \item{conf.high}{upper end of confidence interval} \item{conf.low}{lower end of confidence interval} \item{state}{state if multistate survfit object inputted} \item{strata}{strata if stratified survfit object inputted} \code{glance} returns one-row data.frame with the columns displayed by \code{\link{print.survfit}} \item{records}{number of observations} \item{n.max}{n.max} \item{n.start}{n.start} \item{events}{number of events} \item{rmean}{Restricted mean (see \link[survival]{print.survfit})} \item{rmean.std.error}{Restricted mean standard error} \item{median}{median survival} \item{conf.low}{lower end of confidence interval on median} \item{conf.high}{upper end of confidence interval on median} } \description{ Construct tidied data frames showing survival curves over time. } \details{ \code{glance} does not work on multi-state survival curves, since the values \code{glance} outputs would be calculated for each state. \code{tidy} does work for multi-state survival objects, and includes a \code{state} column to distinguish between them. 
} \examples{ if (require("survival", quietly = TRUE)) { cfit <- coxph(Surv(time, status) ~ age + sex, lung) sfit <- survfit(cfit) head(tidy(sfit)) glance(sfit) library(ggplot2) ggplot(tidy(sfit), aes(time, estimate)) + geom_line() + geom_ribbon(aes(ymin=conf.low, ymax=conf.high), alpha=.25) # multi-state fitCI <- survfit(Surv(stop, status * as.numeric(event), type = "mstate") ~ 1, data = mgus1, subset = (start == 0)) td_multi <- tidy(fitCI) head(td_multi) tail(td_multi) ggplot(td_multi, aes(time, estimate, group = state)) + geom_line(aes(color = state)) + geom_ribbon(aes(ymin = conf.low, ymax = conf.high), alpha = .25) # perform simple bootstrapping library(dplyr) bootstraps <- lung \%>\% bootstrap(100) \%>\% do(tidy(survfit(coxph(Surv(time, status) ~ age + sex, .)))) ggplot(bootstraps, aes(time, estimate, group = replicate)) + geom_line(alpha = .25) bootstraps_bytime <- bootstraps \%>\% group_by(time) \%>\% summarize(median = median(estimate), low = quantile(estimate, .025), high = quantile(estimate, .975)) ggplot(bootstraps_bytime, aes(x = time, y = median)) + geom_line() + geom_ribbon(aes(ymin = low, ymax = high), alpha = .25) # bootstrap for median survival glances <- lung \%>\% bootstrap(100) \%>\% do(glance(survfit(coxph(Surv(time, status) ~ age + sex, .)))) glances qplot(glances$median, binwidth = 15) quantile(glances$median, c(.025, .975)) } } broom/LICENSE0000644000177700017770000000005413204276216013731 0ustar herbrandtherbrandtYEAR: 2015 COPYRIGHT HOLDER: David Robinson