flexmix/
flexmix/NAMESPACE
importFrom("graphics", "hist", "lines", "matplot", "plot", "points", "text")
importFrom("grid", "gpar", "grid.lines", "grid.polygon", "unit")
importFrom("grDevices", "axisTicks", "hcl", "rgb")
importFrom("lattice", "barchart", "do.breaks", "histogram", "lattice.getOption",
           "llines", "panel.abline", "panel.barchart", "panel.rect",
           "panel.segments", "panel.xyplot", "parallelplot", "trellis.par.get",
           "xyplot")
import("methods")
importFrom("modeltools", "clusters", "getModel", "ICL", "KLdiv", "Lapply",
           "parameters", "posterior", "prior", "refit", "relabel")
importFrom("nnet", "multinom", "nnet.default")
importFrom("stats", "AIC", "as.formula", "BIC", "binomial", ".checkMFClasses",
           "coef", "cov.wt", "dbinom", "delete.response", "dexp", "dgamma",
           "dlnorm", "dnorm", "dpois", "dweibull", "factanal", "Gamma",
           ".getXlevels", "glm.fit", "lm.wfit", "model.frame", "model.matrix",
           "model.response", "na.omit", "optim", "pnorm", "poisson", "predict",
           "printCoefmat", "qnorm", "quantile", "rbinom", "residuals", "rgamma",
           "rmultinom", "rnorm", "rpois", "runif", "terms", "update",
           "update.formula", "var", "weighted.mean")
importFrom("stats4", "logLik", "nobs", "plot", "summary")
importFrom("utils", "getS3method")

export(
  "ExLinear", "ExNPreg", "ExNclus", "FLXdist", "FLXgradlogLikfun",
  "FLXlogLikfun", "FLXMCdist1", "FLXMCfactanal", "FLXMCmvbinary",
  "FLXMCmvcombi", "FLXMCmvnorm", "FLXMCmvpois", "FLXMCnorm1", "FLXPmultinom",
  "FLXPconstant", "FLXfit", "FLXglm", "FLXglmFix", "FLXbclust", "FLXmclust",
  "FLXconstant", "FLXmultinom", "FLXMRcondlogit", "FLXMRglm", "FLXMRglmfix",
  "FLXMRglmnet", "FLXMRlmer", "FLXMRlmm", "FLXMRlmmc", "FLXMRmgcv",
  "FLXMRmultinom", "FLXMRrobglm", "FLXMRziglm", "FLXgetDesign",
  "FLXgetParameters", "FLXreplaceParameters", "boot", "existGradient",
  "flexmix", "group", "initFlexmix", "LR_test", "plotEll", "refit_optim",
  "relabel", "stepFlexmix"
)

exportClasses(
  "FLXcomponent", "FLXP", "FLXPmultinom", "FLXcontrol", "FLXdist", "FLXMRfix",
  "FLXMRglmfix", "FLXMRglm", "FLXMRlmer", "FLXMRlmm", "FLXMRlmmfix",
  "FLXMRlmmc", "FLXMRlmc", "FLXMRlmmcfix", "FLXMRlmcfix", "FLXMRmgcv",
  "FLXMRrobglm", "FLXMRziglm", "FLXM", "FLXMR", "FLXMC", "FLXMCsparse",
  "FLXnested", "FLXR", "FLXRoptim", "FLXRmstep", "flexmix", "listOrdata.frame",
  "stepFlexmix", "summary.flexmix"
)

exportMethods(
  "EIC", "FLXcheckComponent", "FLXdeterminePostunscaled", "FLXfit", "FLXgetK",
  "FLXgetModelmatrix", "FLXgetObs", "FLXmstep", "FLXremoveComponent", "KLdiv",
  "ICL", "Lapply", "coerce", "clusters", "fitted", "flexmix", "getModel",
  "initialize", "logLik", "parameters", "plot", "posterior", "predict",
  "prior", "rFLXM", "refit", "relabel", "rflexmix", "show", "summary", "unique"
)

S3method("as.data.frame", "simple_triplet_matrix")

flexmix/data/
[Binary R data sets; serialized contents are not representable as text:
 NPreg.RData, Mehta.RData, bioChemists.RData, dmft.RData, trypanosome.RData,
 whiskey.RData, salmonellaTA98.txt.gz, patent.RData, NregFix.RData,
 tribolium.RData, fabricfault.RData, seizure.RData]

flexmix/man/
flexmix/man/fitted.Rd
%
%  Copyright (C) 2004-2015 Friedrich Leisch and Bettina Gruen
%  $Id: fitted.Rd 5008 2015-01-13 20:30:25Z gruen $
%
\name{fitted-methods}
\docType{methods}
\alias{fitted,flexmix-method}
\alias{fitted,FLXM-method}
\alias{fitted,FLXR-method}
\alias{fitted,FLXRMRglm-method}
\title{Extract Model Fitted Values}
\description{
  Extract fitted values for each component from a flexmix object.
}
\usage{
\S4method{fitted}{flexmix}(object, drop = TRUE, aggregate = FALSE, ...)
}
\arguments{
  \item{object}{an object of class \code{"flexmix"} or \code{"FLXR"}}
  \item{drop}{logical, if \code{TRUE} then the function tries to simplify
    the return object by combining lists of length 1 into matrices.}
  \item{aggregate}{logical, if \code{TRUE} then the fitted values for each
    model aggregated over the components are returned.}
  \item{\dots}{currently not used}
}
\keyword{methods}
\author{Friedrich Leisch and Bettina Gruen}
\examples{
data("NPreg", package = "flexmix")
ex1 <- flexmix(yn ~ x + I(x^2), data = NPreg, k = 2)
matplot(NPreg$x, fitted(ex1), pch = 16, type = "p")
points(NPreg$x, NPreg$yn)
}
flexmix/man/dmft.Rd
%
%  Copyright (C) 2004-2015 Friedrich Leisch and Bettina Gruen
%  $Id: dmft.Rd 5008 2015-01-13 20:30:25Z gruen $
%
\name{dmft}
\alias{dmft}
\docType{data}
\title{Dental Data}
\description{
  Count data from a dental epidemiological study for evaluation of various
  programs for reducing caries collected among school children from an
  urban area of Belo Horizonte (Brazil).
}
\usage{data("dmft")}
\format{
  A data frame with 797 observations on the following 5 variables.
  \describe{
    \item{End}{Number of decayed, missing or filled teeth at the end of
      the study.}
    \item{Begin}{Number of decayed, missing or filled teeth at the
      beginning of the study.}
    \item{Gender}{A factor with levels \code{male} and \code{female}.}
    \item{Ethnic}{A factor with levels \code{brown}, \code{white} and
      \code{black}.}
    \item{Treatment}{A factor with levels \code{control}, \code{educ},
      \code{enrich}, \code{rinse}, \code{hygiene} and \code{all}.}
  }
}
\details{
  The aim of the caries prevention study was to compare four methods to
  prevent dental caries. Interventions were carried out according to the
  following scheme:
  \describe{
    \item{control}{Control group}
    \item{educ}{Oral health education}
    \item{enrich}{Enrichment of the school diet with rice bran}
    \item{rinse}{Mouthwash with 0.2\% sodium fluoride (NaF) solution}
    \item{hygiene}{Oral hygiene}
    \item{all}{All four methods together}
  }
}
\source{
  D. Boehning, E. Dietz, P. Schlattmann, L. Mendonca and U. Kirchner.
  The zero-inflated Poisson model and the decayed, missing and filled
  teeth index in dental epidemiology.
  \emph{Journal of the Royal Statistical Society A}, \bold{162}(2),
  195--209, 1999.
}
\examples{
data("dmft", package = "flexmix")
dmft_flx <- initFlexmix(End ~ 1, data = dmft, k = 2,
                        model = FLXMRglmfix(family = "poisson",
                                            fixed = ~ Gender + Ethnic + Treatment))
}
\keyword{datasets}
flexmix/man/flexmix.Rd
%
%  Copyright (C) 2004-2015 Friedrich Leisch and Bettina Gruen
%  $Id: flexmix.Rd 5115 2017-04-07 08:18:13Z gruen $
%
\name{flexmix}
\alias{flexmix}
\alias{flexmix,formula,ANY,ANY,ANY,missing-method}
\alias{flexmix,formula,ANY,ANY,ANY,list-method}
\alias{flexmix,formula,ANY,ANY,ANY,FLXM-method}
\alias{prior,flexmix-method}
\alias{show,flexmix-method}
\alias{summary,flexmix-method}
\alias{show,summary.flexmix-method}
\title{Flexible Mixture Modeling}
\description{
  FlexMix implements a general framework for finite mixtures of regression
  models. Parameter estimation is performed using the EM algorithm: the
  E-step is implemented by \code{flexmix}, while the user can specify the
  M-step.
}
\usage{
flexmix(formula, data = list(), k = NULL, cluster = NULL, model = NULL,
        concomitant = NULL, control = NULL, weights = NULL)
\S4method{summary}{flexmix}(object, eps = 1e-4, ...)
}
\arguments{
  \item{formula}{A symbolic description of the model to be fit. The
    general form is \code{y~x|g} where \code{y} is the response, \code{x}
    the set of predictors and \code{g} an optional grouping factor for
    repeated measurements.}
  \item{data}{An optional data frame containing the variables in the model.}
  \item{k}{Number of clusters (not needed if \code{cluster} is specified).}
  \item{cluster}{Either a matrix with \code{k} columns of initial cluster
    membership probabilities for each observation; or a factor or integer
    vector with the initial cluster assignments of observations at the
    start of the EM algorithm. Default is random assignment into \code{k}
    clusters.}
  \item{weights}{An optional vector of replication weights to be used in
    the fitting process. Should be \code{NULL}, an integer vector or a
    formula.}
  \item{model}{Object of class \code{FLXM} or list of \code{FLXM} objects.
    Default is the object returned by calling \code{\link{FLXMRglm}()}.}
  \item{concomitant}{Object of class \code{FLXP}.
Default is the object returned by calling \code{\link{FLXPconstant}}.} \item{control}{Object of class \code{FLXcontrol} or a named list.} \item{object}{Object of class \code{flexmix}.} \item{eps}{Probabilities below this threshold are treated as zero in the summary method.} \item{\dots}{Currently not used.} } \details{ FlexMix models are described by objects of class \code{FLXM}, which in turn are created by driver functions like \code{\link{FLXMRglm}} or \code{\link{FLXMCmvnorm}}. Multivariate responses with independent components can be specified using a list of \code{FLXM} objects. The \code{summary} method lists for each component the prior probability, the number of observations assigned to the corresponding cluster, the number of observations with a posterior probability larger than \code{eps} and the ratio of the latter two numbers (which indicates how separated the cluster is from the others). } \value{ Returns an object of class \code{flexmix}. } \author{Friedrich Leisch and Bettina Gruen} \seealso{\code{\link[flexmix]{plot-methods}}} \references{ Friedrich Leisch. FlexMix: A general framework for finite mixture models and latent class regression in R. \emph{Journal of Statistical Software}, \bold{11}(8), 2004. doi:10.18637/jss.v011.i08 Bettina Gruen and Friedrich Leisch. Fitting finite mixtures of generalized linear regressions in R. Computational Statistics & Data Analysis, 51(11), 5247-5252, 2007. doi:10.1016/j.csda.2006.08.014 Bettina Gruen and Friedrich Leisch. FlexMix Version 2: Finite mixtures with concomitant variables and varying and constant parameters Journal of Statistical Software, 28(4), 1-35, 2008. doi:10.18637/jss.v028.i04 } \keyword{regression} \keyword{cluster} \examples{ data("NPreg", package = "flexmix") ## mixture of two linear regression models. Note that control parameters ## can be specified as named list and abbreviated if unique. ex1 <- flexmix(yn ~ x + I(x^2), data = NPreg, k = 2, control = list(verb = 5, iter = 100)) ex1 summary(ex1) plot(ex1) ## now we fit a model with one Gaussian response and one Poisson ## response. Note that the formulas inside the call to FLXMRglm are ## relative to the overall model formula. ex2 <- flexmix(yn ~ x, data = NPreg, k = 2, model = list(FLXMRglm(yn ~ . + I(x^2)), FLXMRglm(yp ~ ., family = "poisson"))) plot(ex2) ex2 table(ex2@cluster, NPreg$class) ## for Gaussian responses we get coefficients and standard deviation parameters(ex2, component = 1, model = 1) ## for Poisson response we get only coefficients parameters(ex2, component = 1, model = 2) ## fitting a model only to the Poisson response is of course ## done like this ex3 <- flexmix(yp ~ x, data = NPreg, k = 2, model = FLXMRglm(family = "poisson")) ## if observations are grouped, i.e., we have several observations per ## individual, fitting is usually much faster: ex4 <- flexmix(yp~x|id1, data = NPreg, k = 2, model = FLXMRglm(family = "poisson")) ## And now a binomial example. 
Mixtures of binomials are not generically ## identified, here the grouping variable is necessary: set.seed(1234) ex5 <- initFlexmix(cbind(yb,1 - yb) ~ x, data = NPreg, k = 2, model = FLXMRglm(family = "binomial"), nrep = 5) table(NPreg$class, clusters(ex5)) ex6 <- initFlexmix(cbind(yb, 1 - yb) ~ x | id2, data = NPreg, k = 2, model = FLXMRglm(family = "binomial"), nrep = 5) table(NPreg$class, clusters(ex6)) } flexmix/man/bioChemists.Rd0000644000176200001440000000240014404637306015244 0ustar liggesusers% % Copyright (C) 2004-2015 Friedrich Leisch and Bettina Gruen % $Id: bioChemists.Rd 5193 2020-10-09 18:38:20Z gruen $ % \name{bioChemists} \alias{bioChemists} \docType{data} \title{Articles by Graduate Students in Biochemistry Ph.D. Programs} \description{ A sample of 915 biochemistry graduate students. } \usage{ data("bioChemists") } \format{ \describe{ \item{art}{count of articles produced during last 3 years of Ph.D.} \item{fem}{factor indicating gender of student, with levels Men and Women} \item{mar}{factor indicating marital status of student, with levels Single and Married} \item{kid5}{number of children aged 5 or younger} \item{phd}{prestige of Ph.D. department} \item{ment}{count of articles produced by Ph.D. mentor during last 3 years} } } \details{ This data set is taken from package \pkg{pscl} provided by Simon Jackman. } \source{ found in Stata format at \url{https://jslsoc.sitehost.iu.edu/stata/spex_data/couart2.dta} } \references{ Long, J. Scott. The origins of sex difference in science. \emph{Social Forces}, \bold{68}, 1297--1315, 1990. Long, J. Scott. \emph{Regression Models for Categorical and Limited Dependent Variables}, 1997. Thousand Oaks, California: Sage. } \keyword{datasets} flexmix/man/FLXmodel-class.Rd0000644000176200001440000000402514404637306015555 0ustar liggesusers% % Copyright (C) 2004-2015 Friedrich Leisch and Bettina Gruen % $Id: FLXmodel-class.Rd 5229 2022-06-02 14:47:26Z gruen $ % \name{FLXM-class} \docType{class} \alias{FLXM-class} \alias{FLXMC-class} \alias{FLXMR-class} \alias{FLXMCsparse-class} \alias{show,FLXM-method} \title{Class "FLXM"} \description{FlexMix model specification.} \section{Objects from the Class}{ Objects can be created by calls of the form \code{new("FLXM", ...)}, typically inside driver functions like \code{\link{FLXMRglm}} or \code{\link{FLXMCmvnorm}}. } \section{Slots}{ \describe{ \item{\code{fit}:}{Function returning an \code{FLXcomponent} object.} \item{\code{defineComponent}:}{Function or expression to determine the \code{FLXcomponent} object given the parameters.} \item{\code{weighted}:}{Logical indicating whether \code{fit} can do weighted likelihood maximization.} \item{\code{name}:}{Character string used in print methods.} \item{\code{formula}:}{Formula describing the model.} \item{\code{fullformula}:}{Resulting formula from updating the model formula with the formula specified in the call to \code{flexmix}.} \item{\code{x}:}{Model matrix.} \item{\code{y}:}{Model response.} \item{\code{terms}, \code{xlevels}, \code{contrasts}:}{Additional information for model matrix.} \item{\code{preproc.x}:}{Function for preprocessing matrix \code{x} before the EM algorithm starts, by default the identity function.} \item{\code{preproc.y}:}{Function for preprocessing matrix \code{y} before the EM algorithm starts, by default the identity function.} } } \details{ The most generic class is the virtual class \code{FLXM}. The classes \code{FLXMC} for model-based clustering and \code{FLXMR} for clusterwise regression extend the virtual class. 
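  For example, the driver constructors return objects whose class
  membership reflects this hierarchy; a small illustration (both
  constructors are simply called with their default arguments):
  \preformatted{
is(FLXMRglm())     ## includes "FLXMR" and "FLXM"
is(FLXMCmvnorm())  ## includes "FLXMC" and "FLXM"
  }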
Both have further more specific model classes which inherit from them. Model class \code{FLXMCsparse} allows for model-based clustering with a sparse matrix as data input. } \author{Friedrich Leisch and Bettina Gruen} \keyword{classes} flexmix/man/candy.Rd0000644000176200001440000000250314404637306014075 0ustar liggesusers% % Copyright (C) 2004-2015 Friedrich Leisch and Bettina Gruen % $Id: candy.Rd 5008 2015-01-13 20:30:25Z gruen $ % \name{candy} \alias{candy} \docType{data} \title{Candy Packs Purchased} \description{ The data is from a new product and concept test where the number of individual packs of hard candy purchased within the past 7 days is recorded. } \usage{data("candy")} \format{ A data frame with 21 observations on the following 2 variables. \describe{ \item{\code{Packages}}{a numeric vector} \item{\code{Freq}}{a numeric vector} } } \source{ D. Boehning, E. Dietz and P. Schlattmann. Recent Developments in Computer-Assisted Analysis of Mixtures. Biometrics 54(2), 525--536, 1998. } \references{ J. Magidson and J. K. Vermunt. Latent Class Models. In D. W. Kaplan (ed.), The Sage Handbook of Quantitative Methodology for the Social Sciences, 175--198, 2004. Thousand Oakes: Sage Publications. D. Boehning, E. Dietz and P. Schlattmann. Recent Developments in Computer-Assisted Analysis of Mixtures. \emph{Biometrics}, \bold{54}(2), 525--536, 1998. W. R. Dillon and A. Kumar. Latent structure and other mixture models in marketing: An integrative survey and overview. In R. P. Bagozzi (ed.), Advanced methods of marketing research, 352--388, 1994. Cambridge, UK: Blackwell. } \keyword{datasets} flexmix/man/Mehta.Rd0000644000176200001440000000265514404637306014045 0ustar liggesusers% % Copyright (C) 2004-2015 Friedrich Leisch and Bettina Gruen % $Id: Mehta.Rd 5008 2015-01-13 20:30:25Z gruen $ % \name{Mehta} \alias{Mehta} \docType{data} \title{Mehta Trial} \description{ For a 22-centre trial the number of responses and the total number of patients is reported for the control group and the group receiving a new drug. } \usage{data("Mehta")} \format{ A data frame with 44 observations on the following 4 variables. \describe{ \item{Response}{Number of responses.} \item{Total}{Total number of observations.} \item{Drug}{A factor indicating treatment with levels \code{New} and \code{Control}.} \item{Site}{A factor indicating the site/centre.} } } \source{ M. Aitkin. Meta-analysis by random effect modelling in generalized linear models. \emph{Statistics in Medicine}, \bold{18}, 2343--2351, 1999. } \references{ C.R. Mehta, N.R. Patel and P. Senchaudhuri. Importance sampling for estimating exact probabilities in permutational inference. \emph{Journal of the American Statistical Association}, \emph{83}, 999--1005, 1988. } \examples{ data("Mehta", package = "flexmix") mehtaMix <- initFlexmix(cbind(Response, Total-Response) ~ 1|Site, data = Mehta, nrep = 5, k = 3, model = FLXMRglmfix(family = "binomial", fixed = ~ Drug), control = list(minprior = 0.04)) } \keyword{datasets} flexmix/man/BregFix.Rd0000644000176200001440000000212114404637306014321 0ustar liggesusers% % Copyright (C) 2004-2015 Friedrich Leisch and Bettina Gruen % $Id: BregFix.Rd 5008 2015-01-13 20:30:25Z gruen $ % \name{BregFix} \alias{BregFix} \docType{data} \title{Artificial Example for Binomial Regression} \description{ A simple artificial regression example data set with 3 latent classes, one independent variable \code{x} and a concomitant variable \code{w}. 
} \usage{data("BregFix")} \format{ A data frame with 200 observations on the following 5 variables. \describe{ \item{\code{yes}}{number of successes} \item{\code{no}}{number of failures} \item{\code{x}}{independent variable} \item{\code{w}}{concomitant variable, a factor with levels \code{0} \code{1}} \item{\code{class}}{latent class memberships} } } \examples{ data("BregFix", package = "flexmix") Model <- FLXMRglmfix(family="binomial", nested = list(formula = c(~x, ~0), k = c(2, 1))) Conc <- FLXPmultinom(~w) FittedBin <- initFlexmix(cbind(yes, no) ~ 1, data = BregFix, k = 3, model = Model, concomitant = Conc) summary(FittedBin) } \keyword{datasets} flexmix/man/patent.Rd0000644000176200001440000000312614404637306014274 0ustar liggesusers% % Copyright (C) 2004-2015 Friedrich Leisch and Bettina Gruen % $Id: patent.Rd 5220 2022-04-18 18:05:04Z gruen $ % \name{patent} \alias{patent} \docType{data} \title{Patents and R&D Spending} \description{ Number of patents, R&D spending and sales in millions of dollar for 70 pharmaceutical and biomedical companies in 1976. } \usage{data("patent")} \format{ A data frame with 70 observations on the following 4 variables. \describe{ \item{Company}{Name of company.} \item{Patents}{Number of patents.} \item{RDS}{R&D spending per sales.} \item{lgRD}{Logarithmized R&D spendings (in millions of dollars).} } } \details{ The data is taken from the National Bureau of Economic Research R&D Masterfile. } \source{ P. Wang, I.M. Cockburn and M.L. Puterman. Analysis of Patent Data -- A Mixed-Poisson-Regression-Model Approach. \emph{Journal of Business & Economic Statistics}, \bold{16}(1), 27--41, 1998. } \references{ B.H. Hall, C. Cummins, E. Laderman and J. Mundy. The R&D Master File Documentation. Technical Working Paper 72, National Bureau of Economic Research, 1988. Cambridge, MA. } \examples{ data("patent", package = "flexmix") patentMix <- initFlexmix(Patents ~ lgRD, k = 3, model = FLXMRglm(family = "poisson"), concomitant = FLXPmultinom(~RDS), nrep = 5, data = patent) plot(Patents ~ lgRD, data = patent, pch = as.character(clusters(patentMix))) ordering <- order(patent$lgRD) apply(fitted(patentMix), 2, function(y) lines(sort(patent$lgRD), y[ordering])) } \keyword{datasets} flexmix/man/FLXMRrobglm.Rd0000644000176200001440000000531014404637306015071 0ustar liggesusers% % Copyright (C) 2008 Friedrich Leisch and Bettina Gruen % $Id: FLXMRrobglm.Rd 5229 2022-06-02 14:47:26Z gruen $ % \name{FLXMRrobglm} \alias{FLXMRrobglm} \alias{FLXMRrobglm-class} \title{FlexMix Driver for Robust Estimation of Generalized Linear Models} \description{ This driver adds a noise component to the mixture model which can be used to model background noise in the data. See the Compstat paper Leisch (2008) cited below for details. } \usage{ FLXMRrobglm(formula = . ~ ., family = c("gaussian", "poisson"), bgw = FALSE, ...) } \arguments{ \item{formula}{A formula which is interpreted relative to the formula specified in the call to \code{flexmix} using \code{\link{update.formula}}. Default is to use the original \code{flexmix} model formula.} \item{family}{A character string naming a \code{\link{glm}} family function.} \item{bgw}{Logical, controls whether the parameters of the background component are fixed to multiples of location and scale of the complete data (the default), or estimated by EM with normal weights for the background (\code{bgw = TRUE}).} \item{\dots}{passed to \code{FLXMRglm}} } \value{ Returns an object of class \code{FLXMRrobglm} inheriting from \code{FLXMRglm}. 
} \author{Friedrich Leisch and Bettina Gruen} \note{ The implementation of this model class is currently under development, and some methods like \code{refit} are still missing. } \references{ Friedrich Leisch. Modelling background noise in finite mixtures of generalized linear regression models. In Paula Brito, editor, Compstat 2008--Proceedings in Computational Statistics, 385--396. Physica Verlag, Heidelberg, Germany, 2008.\cr Preprint available at http://epub.ub.uni-muenchen.de/6332/. } \examples{ ## Example from Compstat paper, see paper for detailed explanation: data("NPreg", package = "flexmix") DATA <- NPreg[, 1:2] set.seed(3) DATA2 <- rbind(DATA, cbind(x = -runif(3), yn = 50 + runif(3))) ## Estimation without (f2) and with (f3) background component f2 <- flexmix(yn ~ x + I(x^2), data = DATA2, k = 2) f3 <- flexmix(yn ~ x + I(x^2), data = DATA2, k = 3, model = FLXMRrobglm(), control = list(minprior = 0)) ## Predict on new data for plots x <- seq(-5,15, by = .1) y2 <- predict(f2, newdata = data.frame(x = x)) y3 <- predict(f3, newdata = data.frame(x = x)) ## f2 was estimated without background component: plot(yn ~ x, data = DATA2, pch = clusters(f2), col = clusters(f2)) lines(x, y2$Comp.1, col = 1) lines(x, y2$Comp.2, col = 2) ## f3 is with background component: plot(yn ~ x, data = DATA2, pch = 4 - clusters(f3), col = 4 - clusters(f3)) lines(x, y3$Comp.2, col = 2) lines(x, y3$Comp.3, col = 1) } \keyword{models} flexmix/man/flexmix-class.Rd0000644000176200001440000000370214404637306015560 0ustar liggesusers% % Copyright (C) 2004-2015 Friedrich Leisch and Bettina Gruen % $Id: flexmix-class.Rd 5008 2015-01-13 20:30:25Z gruen $ % \name{flexmix-class} \docType{class} \alias{flexmix-class} \title{Class "flexmix"} \description{A fitted \code{\link{flexmix}} model.} \section{Slots}{ \describe{ \item{\code{model}:}{List of \code{FLXM} objects.} \item{\code{prior}:}{Numeric vector with prior probabilities of clusters.} \item{\code{posterior}:}{Named list with elements \code{scaled} and \code{unscaled}, both matrices with one row per observation and one column per cluster.} \item{\code{iter}:}{Number of EM iterations.} \item{\code{k}:}{Number of clusters after EM.} \item{\code{k0}:}{Number of clusters at start of EM.} \item{\code{cluster}:}{Cluster assignments of observations.} \item{\code{size}:}{Cluster sizes.} \item{\code{logLik}:}{Log-likelihood at EM convergence.} \item{\code{df}:}{Total number of parameters of the model.} \item{\code{components}:}{List describing the fitted components using \code{FLXcomponent} objects.} \item{\code{formula}:}{Object of class \code{"formula"}.} \item{\code{control}:}{Object of class \code{"FLXcontrol"}.} \item{\code{call}:}{The function call used to create the object.} \item{\code{group}:}{Object of class \code{"factor"}.} \item{\code{converged}:}{Logical, \code{TRUE} if EM algorithm converged.} \item{\code{concomitant}:}{Object of class \code{"FLXP"}..} \item{\code{weights}:}{Optional weights of the observations.} } } \section{Extends}{ Class \code{FLXdist}, directly. 
} \section{Accessor Functions}{ The following functions should be used for accessing the corresponding slots: \describe{ \item{\code{cluster}:}{Cluster assignments of observations.} \item{\code{posterior}:}{A matrix of posterior probabilities for each observation.} } } \author{Friedrich Leisch and Bettina Gruen} \keyword{classes} flexmix/man/NregFix.Rd0000644000176200001440000000243214404637306014342 0ustar liggesusers% % Copyright (C) 2004-2015 Friedrich Leisch and Bettina Gruen % $Id: NregFix.Rd 5008 2015-01-13 20:30:25Z gruen $ % \name{NregFix} \alias{NregFix} \title{Artificial Example for Normal Regression} \description{ A simple artificial regression example with 3 latent classes, two independent variables, one concomitant variable and a dependent variable which follows a Gaussian distribution. } \usage{ data("NregFix") } \format{ A data frame with 200 observations on the following 5 variables. \describe{ \item{\code{x1}}{Independent variable: numeric variable.} \item{\code{x2}}{Independent variable: a factor with two levels: \code{0} and \code{1}.} \item{\code{w}}{Concomitant variable: a factor with two levels: \code{0} and \code{1}.} \item{\code{y}}{Dependent variable.} \item{\code{class}}{Latent class memberships.} } } \examples{ data("NregFix", package = "flexmix") library("lattice") xyplot(y ~ x1 | x2 * w, data = NregFix, groups = class) Model <- FLXMRglmfix(~ 1, fixed = ~ x2, nested = list(k = c(2, 1), formula = c(~x1, ~0))) fittedModel <- initFlexmix(y ~ 1, model = Model, data = NregFix, k = 3, concomitant = FLXPmultinom(~ w), nrep = 5) fittedModel } \keyword{datasets} flexmix/man/FLXMRziglm.Rd0000644000176200001440000000302114404637306014726 0ustar liggesusers% % Copyright (C) 2004-2015 Friedrich Leisch and Bettina Gruen % $Id: FLXMRziglm.Rd 5229 2022-06-02 14:47:26Z gruen $ % \name{FLXMRziglm} \alias{FLXMRziglm} \alias{FLXMRziglm-class} \alias{refit,FLXMRziglm-method} \alias{FLXreplaceParameters,FLXMRziglm-method} \alias{FLXgradlogLikfun,FLXMRziglm-method} \title{FlexMix Interface to Zero Inflated Generalized Linear Models} \description{ This is a driver which allows fitting of zero inflated poisson and binomial models. } \usage{ FLXMRziglm(formula = . ~ ., family = c("binomial", "poisson"), ...) } \arguments{ \item{formula}{A formula which is interpreted relative to the formula specified in the call to \code{flexmix} using \code{\link{update.formula}}. Default is to use the original \code{flexmix} model formula.} \item{family}{A character string naming a \code{\link{glm}} family function.} \item{\dots}{passed to \code{FLXMRglm}} } \value{ Returns an object of class \code{FLXMRziglm} inheriting from \code{FLXMRglm}. } \author{Friedrich Leisch and Bettina Gruen} \note{ In fact this only approximates zero inflated models by fixing the coefficient of the intercept at -Inf and the other coefficients at zero for the first component. 
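  For a fitted model such as \code{Fitted} in the example below, this can
  be seen directly from the estimated coefficients of the first component;
  an illustrative check (not run here):
  \preformatted{
## component 1 is the degenerate "zero" component: its intercept is fixed
## at -Inf and all remaining coefficients at zero
parameters(Fitted, component = 1)
  }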
}
\examples{
data("dmft", package = "flexmix")
Model <- FLXMRziglm(family = "poisson")
Fitted <- flexmix(End ~ log(Begin + 0.5) + Gender + Ethnic + Treatment,
                  model = Model, k = 2, data = dmft,
                  control = list(minprior = 0.01))
summary(refit(Fitted))
}
\keyword{models}
flexmix/man/FLXMRglmnet.Rd
\name{FLXMRglmnet}
\alias{FLXMRglmnet}
\alias{FLXMRglmnet-class}
\title{FlexMix Interface for Adaptive Lasso / Elastic Net with GLMs}
\description{
  This is a driver which allows fitting of mixtures of GLMs where the
  coefficients are penalized using the (adaptive) lasso or the elastic net
  by reusing functionality from package \pkg{glmnet}.
}
\usage{
FLXMRglmnet(formula = . ~ ., family = c("gaussian", "binomial", "poisson"),
            adaptive = TRUE, select = TRUE, offset = NULL, ...)
}
\arguments{
  \item{formula}{A formula which is interpreted relative to the formula
    specified in the call to \code{\link{flexmix}} using
    \code{\link{update.formula}}. Default is to use the original
    \code{\link{flexmix}} model formula.}
  \item{family}{A character string naming a \code{\link{glm}} family
    function.}
  \item{adaptive}{A logical indicating if the adaptive lasso should be
    used. By default equal to \code{TRUE}.}
  \item{select}{A logical vector indicating which variables in the model
    matrix should be included in the penalized part. By default equal to
    \code{TRUE} implying that all variables are penalized.}
  \item{offset}{This can be used to specify an \emph{a priori} known
    component to be included in the linear predictor during fitting.}
  \item{\dots}{Additional arguments to be passed to the
    \code{\link[glmnet]{cv.glmnet}} fitter.}
}
\details{
  Some care is needed to ensure convergence of the algorithm, which is
  computationally more challenging than a standard EM. In the proposed
  method, not only are cluster allocations identified and component
  parameters estimated as commonly done in mixture models, but there is
  also variable selection via penalized regression using \eqn{k}-fold
  cross-validation to choose the penalty parameter. For the algorithm to
  converge, it is necessary that the same cross-validation partitioning be
  used across the EM iterations, i.e., the subsamples for cross-validation
  must be defined at the beginning. This is accomplished using the
  \code{foldid} option as an additional parameter to be passed to
  \code{\link[glmnet]{cv.glmnet}} (see the \pkg{glmnet} package
  documentation).
}
\value{
  Returns an object of class \code{FLXMRglm}.
}
\author{
  Frederic Mortier and Nicolas Picard.
}
\seealso{
  \code{\link{FLXMRglm}}
}
\references{
  Frederic Mortier, Dakis-Yaoba Ouedraogo, Florian Claeys, Mahlet
  G. Tadesse, Guillaume Cornu, Fidele Baya, Fabrice Benedet, Vincent
  Freycon, Sylvie Gourlet-Fleury and Nicolas Picard.
  Mixture of inhomogeneous matrix models for species-rich ecosystems.
  \emph{Environmetrics}, \bold{26}(1), 39-51, 2015.
  doi:10.1002/env.2320
}
\examples{
set.seed(12)
p <- 10
beta <- matrix(0, nrow = p + 1, ncol = 2)
beta[1,] <- c(-1, 1)
beta[cbind(c(5, 10), c(1, 2))] <- 1
nobs <- 100
X <- matrix(rnorm(nobs * p), nobs, p)
mu <- cbind(1, X) \%*\% beta
z <- sample(1:ncol(beta), nobs, replace = TRUE)
y <- mu[cbind(1:nobs, z)] + rnorm(nobs)
data <- data.frame(y, X)
## The maximum number of iterations is reduced to
## avoid a long running time.
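## The cross-validation folds are fixed once via 'foldid' (10 folds
## assigned at random to the rows of 'data') so that the same partitioning
## is reused in every EM iteration, as required for convergence (see
## Details).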
fo <- sample(rep(seq(10), length = nrow(data))) ex1 <- flexmix(y ~ ., data = data, k = 2, cluster = z, model = FLXMRglmnet(foldid = fo), control = list(iter.max = 2)) parameters(ex1) } \keyword{regression} \keyword{cluster} flexmix/man/FLXcomponent-class.Rd0000644000176200001440000000147014404637306016460 0ustar liggesusers% % Copyright (C) 2004-2015 Friedrich Leisch and Bettina Gruen % $Id: FLXcomponent-class.Rd 5008 2015-01-13 20:30:25Z gruen $ % \name{FLXcomponent-class} \docType{class} \alias{FLXcomponent-class} \alias{show,FLXcomponent-method} \title{Class "FLXcomponent"} \description{A fitted component of a \code{\link{flexmix}} model.} \section{Objects from the Class}{ Objects can be created by calls of the form \code{new("FLXcomponent", ...)}. } \section{Slots}{ \describe{ \item{\code{df}:}{Number of parameters used by the component.} \item{\code{logLik}:}{Function computing the log-likelihood of observations.} \item{\code{parameters}:}{List with model parameters.} \item{\code{predict}:}{Function predicting response for new data.} } } \author{Friedrich Leisch and Bettina Gruen} \keyword{classes} flexmix/man/FLXconcomitant.Rd0000644000176200001440000000177514404637306015701 0ustar liggesusers% % Copyright (C) 2004-2015 Friedrich Leisch and Bettina Gruen % $Id: FLXconcomitant.Rd 5008 2015-01-13 20:30:25Z gruen $ % \name{FLXP} \docType{class} \alias{FLXPconstant} \alias{FLXPmultinom} \alias{FLXconstant} \alias{FLXmultinom} \alias{show,FLXP-method} \title{Creates the Concomitant Variable Model} \description{ Creator functions for the concomitant variable model. \code{FLXPconstant} specifies constant priors and \code{FLXPmultinom} multinomial logit models for the priors. } \usage{ FLXPconstant() FLXPmultinom(formula = ~1) } \arguments{ \item{formula}{A formula for determining the model matrix of the concomitant variables.} } \details{ \code{FLXPmultinom} uses \code{nnet.default} from \pkg{nnet} to fit the multinomial logit model. } \value{ Object of class \code{FLXP}. \code{FLXPmultinom} returns an object of class \code{FLXPmultinom} which extends class \code{FLXP} directly and is used for method dispatching. } \author{Friedrich Leisch and Bettina Gruen} \keyword{models} flexmix/man/fabricfault.Rd0000644000176200001440000000227314404637306015265 0ustar liggesusers% % Copyright (C) 2004-2015 Friedrich Leisch and Bettina Gruen % $Id: fabricfault.Rd 5008 2015-01-13 20:30:25Z gruen $ % \name{fabricfault} \alias{fabricfault} \docType{data} \title{Fabric Faults} \description{ Number of faults in rolls of a textile fabric. } \usage{data("fabricfault")} \format{ A data frame with 32 observations on the following 2 variables. \describe{ \item{Length}{Length of role (m).} \item{Faults}{Number of faults.} } } \source{ G. McLachlan and D. Peel. \emph{Finite Mixture Models}, 2000, John Wiley and Sons Inc. \url{http://www.maths.uq.edu.au/~gjm/DATA/mmdata.html} } \references{ A. F. Bissell. A Negative Binomial Model with Varying Element Sizes \emph{Biometrika}, \bold{59}, 435--441, 1972. M. Aitkin. A general maximum likelihood analysis of overdispersion in generalized linear models. \emph{Statistics and Computing}, \bold{6}, 251--262, 1996. 
} \examples{ data("fabricfault", package = "flexmix") fabricMix <- initFlexmix(Faults ~ 1, data = fabricfault, k = 2, model = FLXMRglmfix(family = "poisson", fixed = ~ log(Length)), nrep = 5) } \keyword{datasets} flexmix/man/FLXMRcondlogit.Rd0000644000176200001440000000441614404637306015577 0ustar liggesusers\name{FLXMRcondlogit} \alias{FLXMRcondlogit} \title{FlexMix Interface to Conditional Logit Models} \description{ Model driver for fitting mixtures of conditional logit models. } \usage{ FLXMRcondlogit(formula = . ~ ., strata) } \arguments{ \item{formula}{A formula which is interpreted relative to the formula specified in the call to \code{\link{flexmix}} using \code{\link{update.formula}}. Default is to use the original \code{\link{flexmix}} model formula.} \item{strata}{A formula which is interpreted such that no intercept is fitted and which allows to determine the variable indicating which observations are from the same stratum.} } \details{ The M-step is performed using \code{coxph.fit}. } \value{ Returns an object of class \code{FLXMRcondlogit} inheriting from \code{FLXMRglm}. } \references{ Bettina Gruen and Friedrich Leisch. Identifiability of finite mixtures of multinomial logit models with varying and fixed effects. \emph{Journal of Classification}, \bold{25}, 225--247. 2008. } \author{ Bettina Gruen } \section{Warning}{ To ensure identifiability repeated measurements are necessary. Sufficient conditions are given in Gruen and Leisch (2008). } \seealso{\code{\link{FLXMRmultinom}}} \examples{ if (require("Ecdat")) { data("Catsup", package = "Ecdat") ## To reduce the time needed for the example only a subset is used Catsup <- subset(Catsup, id \%in\% 1:100) Catsup$experiment <- seq_len(nrow(Catsup)) vnames <- c("display", "feature", "price") Catsup_long <- reshape(Catsup, idvar = c("id", "experiment"), times = c(paste("heinz", c(41, 32, 28), sep = ""), "hunts32"), timevar = "brand", varying = matrix(colnames(Catsup)[2:13], nrow = 3, byrow = TRUE), v.names = vnames, direction = "long") Catsup_long$selected <- with(Catsup_long, choice == brand) Catsup_long <- Catsup_long[, c("id", "selected", "experiment", vnames, "brand")] Catsup_long$brand <- relevel(factor(Catsup_long$brand), "hunts32") set.seed(0808) flx1 <- flexmix(selected ~ display + feature + price + brand | id, model = FLXMRcondlogit(strata = ~ experiment), data = Catsup_long, k = 1) } } \keyword{regression} \keyword{models} flexmix/man/flexmix-internal.Rd0000644000176200001440000000732214404637306016271 0ustar liggesusers% % Copyright (C) 2004-2015 Friedrich Leisch and Bettina Gruen % $Id: flexmix-internal.Rd 5184 2020-06-20 18:27:29Z gruen $ % \name{flexmix-internal} \alias{FLXgetModelmatrix} \alias{FLXcheckComponent} \alias{FLXcheckComponent,FLXM-method} \alias{FLXcheckComponent,FLXMRfix-method} \alias{FLXdeterminePostunscaled} \alias{FLXdeterminePostunscaled,FLXM-method} \alias{FLXdeterminePostunscaled,FLXMRfix-method} \alias{FLXdeterminePostunscaled,FLXMRlmc-method} \alias{FLXdeterminePostunscaled,FLXMRlmmc-method} \alias{FLXdeterminePostunscaled,FLXMRcondlogit-method} \alias{FLXgetK} \alias{FLXgetK,FLXM-method} \alias{FLXgetK,FLXMRfix-method} \alias{FLXgetModelmatrix} \alias{FLXgetModelmatrix,FLXM-method} \alias{FLXgetModelmatrix,FLXMCsparse-method} \alias{FLXgetModelmatrix,FLXMCmvcombi-method} \alias{FLXgetModelmatrix,FLXMRcondlogit-method} \alias{FLXgetModelmatrix,FLXMRfix-method} \alias{FLXgetModelmatrix,FLXMRlmc-method} \alias{FLXgetModelmatrix,FLXMRlmmc-method} \alias{FLXgetModelmatrix,FLXMRmgcv-method} 
\alias{FLXgetModelmatrix,FLXMRrobglm-method} \alias{FLXgetModelmatrix,FLXMRziglm-method} \alias{FLXgetModelmatrix,FLXP-method} \alias{FLXgetObs} \alias{FLXgetObs,FLXM-method} \alias{FLXgetObs,FLXMRfix-method} \alias{FLXgetObs,FLXMRlmc-method} \alias{FLXmstep} \alias{FLXmstep,FLXM-method} \alias{FLXmstep,FLXMCmvcombi-method} \alias{FLXmstep,FLXMRcondlogit-method} \alias{FLXmstep,FLXMRfix-method} \alias{FLXmstep,FLXMRlmc-method} \alias{FLXmstep,FLXMRlmcfix-method} \alias{FLXmstep,FLXMRlmmc-method} \alias{FLXmstep,FLXMRlmmcfix-method} \alias{FLXmstep,FLXMRmgcv-method} \alias{FLXmstep,FLXMRrobglm-method} \alias{FLXmstep,FLXMRziglm-method} \alias{FLXremoveComponent} \alias{FLXremoveComponent,FLXM-method} \alias{FLXremoveComponent,FLXMRfix-method} \alias{FLXremoveComponent,FLXMRrobglm-method} \alias{FLXremoveComponent,FLXMRziglm-method} \alias{FLXMRglm-class} \alias{FLXR-class} \alias{FLXRMRglm-class} \alias{FLXRPmultinom-class} \alias{summary.flexmix-class} \alias{posterior,FLXM,listOrdata.frame-method} \alias{FLXMRfix-class} \alias{FLXMRglmfix-class} \alias{FLXRMRglmfix-class} \alias{predict,FLXMRglmfix-method} \alias{fitted,FLXMRglmfix-method} \alias{summary,FLXRMRglmfix-method} \alias{listOrdata.frame-class} \alias{refit_optim} \alias{refit_optim,FLXM-method} \alias{refit_optim,FLXMC-method} \alias{refit_optim,FLXMRglm-method} \alias{refit_optim,FLXMRziglm-method} \alias{refit_optim,FLXP-method} \alias{FLXgetDesign} \alias{FLXgetDesign,FLXM-method} \alias{FLXgetDesign,FLXMRglmfix-method} \alias{FLXgetDesign,FLXMRziglm-method} \alias{FLXgetParameters} \alias{FLXgetParameters,FLXdist-method} \alias{FLXgetParameters,FLXM-method} \alias{FLXgetParameters,FLXMC-method} \alias{FLXgetParameters,FLXMRglm-method} \alias{FLXgetParameters,FLXP-method} \alias{FLXgetParameters,FLXPmultinom-method} \alias{FLXreplaceParameters} \alias{FLXreplaceParameters,FLXdist-method} \alias{FLXreplaceParameters,FLXM-method} \alias{FLXreplaceParameters,FLXMC-method} \alias{FLXreplaceParameters,FLXMRglm-method} \alias{FLXreplaceParameters,FLXP-method} \alias{FLXreplaceParameters,FLXPmultinom-method} \alias{FLXlogLikfun} \alias{FLXlogLikfun,flexmix-method} \alias{FLXgradlogLikfun} \alias{FLXgradlogLikfun,flexmix-method} \alias{FLXgradlogLikfun,FLXM-method} \alias{FLXgradlogLikfun,FLXMRglm-method} \alias{FLXgradlogLikfun,FLXP-method} \alias{existGradient} \alias{existGradient,FLXM-method} \alias{existGradient,FLXMRglm-method} \alias{existGradient,FLXMRcondlogit-method} \alias{existGradient,FLXMRglmfix-method} \alias{existGradient,FLXMRmultinom-method} \alias{existGradient,FLXP-method} \title{Internal FlexMix Functions} \description{ Internal flexmix functions, methods and classes. } \details{ These are not to be called by the user. } \keyword{internal} flexmix/man/FLXbclust.Rd0000644000176200001440000000233314404637306014646 0ustar liggesusers% % Copyright (C) 2004-2015 Friedrich Leisch and Bettina Gruen % $Id: FLXbclust.Rd 5008 2015-01-13 20:30:25Z gruen $ % \name{FLXMCmvbinary} \alias{FLXMCmvbinary} \alias{FLXbclust} \title{FlexMix Binary Clustering Driver} \description{ This is a model driver for \code{\link{flexmix}} implementing model-based clustering of binary data. } \usage{ FLXMCmvbinary(formula = . ~ ., truncated = FALSE) } \arguments{ \item{formula}{A formula which is interpreted relative to the formula specified in the call to \code{\link{flexmix}} using \code{\link{update.formula}}. Only the left-hand side (response) of the formula is used. 
Default is to use the original \code{\link{flexmix}} model formula.} \item{truncated}{logical, if \code{TRUE} the observations for the pattern with only zeros are missing and the truncated likelihood is optimized using an EM-algorithm.} } \details{ This model driver can be used to cluster binary data. The only parameter is the column-wise mean of the data, which equals the probability of observing a 1. } \value{ \code{FLXMCmvbinary} returns an object of class \code{FLXMC}. } \author{Friedrich Leisch and Bettina Gruen} \seealso{\code{\link{flexmix}}} \keyword{cluster} flexmix/man/FLXMCfactanal.Rd0000644000176200001440000000452614404637306015351 0ustar liggesusers\name{FLXMCfactanal} \alias{FLXMCfactanal} \alias{rFLXM,FLXMCfactanal,FLXcomponent-method} \title{Driver for Mixtures of Factor Analyzers} \description{ This driver for \code{\link{flexmix}} implements estimation of mixtures of factor analyzers using ML estimation of factor analysis implemented in \code{factanal} in each M-step. } \usage{ FLXMCfactanal(formula = . ~ ., factors = 1, ...) } \arguments{ \item{formula}{A formula which is interpreted relative to the formula specified in the call to \code{\link{flexmix}} using \code{\link{update.formula}}. Only the left-hand side (response) of the formula is used. Default is to use the original \code{\link{flexmix}} model formula.} \item{factors}{Integer specifying the number of factors in each component.} \item{\dots}{Passed to \code{factanal}} } \value{ \code{FLXMCfactanal} returns an object of class \code{FLXM}. } \references{ G. McLachlan and D. Peel. \emph{Finite Mixture Models}, 2000. John Wiley and Sons Inc. } \author{Bettina Gruen} \section{Warning}{ This does not implement the AECM framework presented in McLachlan and Peel (2000, p.245), but uses the available functionality in R for ML estimation of factor analyzers. The implementation therefore is only experimental and has not been well tested. Please note that in general a good initialization is crucial for the EM algorithm to converge to a suitable solution for this model class. } \seealso{\code{\link{flexmix}}} \examples{ ## Reproduce (partly) Table 8.1. p.255 (McLachlan and Peel, 2000) if (require("gclus")) { data("wine", package = "gclus") wine_data <- as.matrix(wine[,-1]) set.seed(123) wine_fl_diag <- initFlexmix(wine_data ~ 1, k = 3, nrep = 10, model = FLXMCmvnorm(diagonal = TRUE)) wine_fl_fact <- lapply(1:4, function(q) flexmix(wine_data ~ 1, model = FLXMCfactanal(factors = q, nstart = 3), cluster = posterior(wine_fl_diag))) sapply(wine_fl_fact, logLik) ## FULL set.seed(123) wine_full <- initFlexmix(wine_data ~ 1, k = 3, nrep = 10, model = FLXMCmvnorm(diagonal = FALSE)) logLik(wine_full) ## TRUE wine_true <- flexmix(wine_data ~ 1, cluster = wine$Class, model = FLXMCmvnorm(diagonal = FALSE)) logLik(wine_true) } } \keyword{models} flexmix/man/FLXfit.Rd0000644000176200001440000000227014404637306014134 0ustar liggesusers% % Copyright (C) 2004-2015 Friedrich Leisch and Bettina Gruen % $Id: FLXfit.Rd 5008 2015-01-13 20:30:25Z gruen $ % \name{FLXfit} \alias{FLXfit} \alias{FLXfit,list-method} \title{Fitter Function for FlexMix Models} \description{ This is the basic computing engine called by \code{\link{flexmix}}, it should usually not be used directly. 
} \usage{ FLXfit(model, concomitant, control, postunscaled = NULL, groups, weights) } \arguments{ \item{model}{List of \code{FLXM} objects.} \item{concomitant}{Object of class \code{FLXP}.} \item{control}{Object of class \code{FLXcontrol}.} \item{weights}{A numeric vector of weights to be used in the fitting process.} \item{postunscaled}{Initial a-posteriori probabilities of the observations at the start of the EM algorithm.} \item{groups}{List with components \code{group} which is a factor with optional grouping of observations and \code{groupfirst} which is a logical vector for the first observation of each group.} } \value{ Returns an object of class \code{flexmix}. } \author{Friedrich Leisch and Bettina Gruen} \seealso{\code{\link{flexmix}}, \code{\link{flexmix-class}}} \keyword{regression} \keyword{cluster} flexmix/man/FLXcontrol-class.Rd0000644000176200001440000000333414404637306016137 0ustar liggesusers% % Copyright (C) 2004-2015 Friedrich Leisch and Bettina Gruen % $Id: FLXcontrol-class.Rd 5008 2015-01-13 20:30:25Z gruen $ % \name{FLXcontrol-class} \docType{class} \alias{FLXcontrol-class} \alias{coerce,list,FLXcontrol-method} \alias{coerce,NULL,FLXcontrol-method} \title{Class "FLXcontrol"} \description{Hyperparameters for the EM algorithm.} \section{Objects from the Class}{ Objects can be created by calls of the form \code{new("FLXcontrol", ...)}. In addition, named lists can be coerced to \code{FLXcontrol} objects, names are completed if unique (see examples). } \section{Slots}{ \describe{ \item{\code{iter.max}:}{Maximum number of iterations.} \item{\code{minprior}:}{Minimum prior probability of clusters, components falling below this threshold are removed during the iteration.} \item{\code{tolerance}:}{The EM algorithm is stopped when the (relative) change of log-likelihood is smaller than \code{tolerance}.} \item{\code{verbose}:}{If a positive integer, then the log-likelihood is reported every \code{verbose} iterations. If 0, no output is generated during model fitting.} \item{\code{classify}:}{Character string, one of \code{"auto"}, \code{"weighted"}, \code{"hard"} (or \code{"CEM"}), \code{"random"} or (\code{"SEM"}).} \item{\code{nrep}:}{Reports the number of random initializations used in \code{\link{stepFlexmix}()} to determine the mixture.} } Run \code{new("FLXcontrol")} to see the default settings of all slots. } \author{Friedrich Leisch and Bettina Gruen} \keyword{classes} \examples{ ## have a look at the defaults new("FLXcontrol") ## corce a list mycont <- list(iter = 200, tol = 0.001, class = "r") as(mycont, "FLXcontrol") } flexmix/man/logLik-methods.Rd0000644000176200001440000000105414404637306015661 0ustar liggesusers% % Copyright (C) 2004-2015 Friedrich Leisch and Bettina Gruen % $Id: logLik-methods.Rd 5008 2015-01-13 20:30:25Z gruen $ % \name{logLik-methods} \docType{methods} \title{Methods for Function logLik in Package \pkg{flexmix}} \alias{logLik,flexmix-method} \alias{logLik,stepFlexmix-method} \description{Evaluate the log-likelihood. 
This function is defined as an S4 generic in the \code{stats4} package.} \section{Methods}{ \describe{ \item{object = flexmix}{Evaluate the log-likelihood of an \code{flexmix} object} } } \keyword{methods} flexmix/man/FLXdist-class.Rd0000644000176200001440000000464314404637306015426 0ustar liggesusers% % Copyright (C) 2004-2015 Friedrich Leisch and Bettina Gruen % $Id: FLXdist-class.Rd 5008 2015-01-13 20:30:25Z gruen $ % \name{FLXdist-class} \docType{class} \alias{FLXdist-class} \alias{predict,FLXdist-method} \alias{predict,FLXM-method} \alias{predict,FLXMRglm-method} \alias{predict,FLXMRmgcv-method} \alias{parameters,FLXdist-method} \alias{prior} \alias{prior,FLXdist-method} \title{Class "FLXdist"} \description{ Objects of class \code{FLXdist} represent unfitted finite mixture models. } \usage{ \S4method{parameters}{FLXdist}(object, component = NULL, model = NULL, which = c("model", "concomitant"), simplify = TRUE, drop = TRUE) \S4method{predict}{FLXdist}(object, newdata = list(), aggregate = FALSE, ...) } \arguments{ \item{object}{An object of class "FLXdist".} \item{component}{Number of component(s), if \code{NULL} all components are returned.} \item{model}{Number of model(s), if \code{NULL} all models are returned.} \item{which}{Specifies if the parameters of the component specific model or the concomitant variable model are returned.} \item{simplify}{Logical, if \code{TRUE} the returned values are simplified to a vector or matrix if possible.} \item{drop}{Logical, if \code{TRUE} the function tries to simplify the return object by omitting lists of length one.} \item{newdata}{Dataframe containing new data.} \item{aggregate}{Logical, if \code{TRUE} then the predicted values for each model aggregated over the components are returned.} \item{\dots}{Passed to the method of the model class.} } \section{Slots}{ \describe{ \item{model}{List of \code{FLXM} objects.} \item{prior}{Numeric vector with prior probabilities of clusters.} \item{components}{List describing the components using \code{FLXcomponent} objects.} \item{\code{concomitant}:}{Object of class \code{"FLXP"}.} \item{formula}{Object of class \code{"formula"}.} \item{call}{The function call used to create the object.} \item{k}{Number of clusters.} } } \section{Accessor Functions}{ The following functions should be used for accessing the corresponding slots: \describe{ \item{\code{parameters}:}{The parameters for each model and component, return value depends on the model.} \item{\code{prior}:}{Numeric vector of prior class probabilities/component weights} } } \author{Friedrich Leisch and Bettina Gruen} \seealso{\code{FLXdist}} \keyword{classes} flexmix/man/KLdiv.Rd0000644000176200001440000000532414404637306014014 0ustar liggesusers% % Copyright (C) 2004-2015 Friedrich Leisch and Bettina Gruen % $Id: KLdiv.Rd 5008 2015-01-13 20:30:25Z gruen $ % \name{KLdiv} \alias{KLdiv,matrix-method} \alias{KLdiv,flexmix-method} \alias{KLdiv,FLXMRglm-method} \alias{KLdiv,FLXMC-method} \title{Kullback-Leibler Divergence} \description{ Estimate the Kullback-Leibler divergence of several distributions.} \usage{ \S4method{KLdiv}{matrix}(object, eps = 10^-4, overlap = TRUE,...) \S4method{KLdiv}{flexmix}(object, method = c("continuous", "discrete"), ...) 
} \arguments{ \item{object}{See Methods section below.}  \item{method}{The method to be used; "continuous" determines the Kullback-Leibler divergence between the unweighted theoretical component distributions, while "discrete" uses the unweighted posterior probabilities at the observed points.} \item{eps}{Probabilities below this threshold are replaced by this threshold for numerical stability.} \item{overlap}{Logical; if \code{TRUE}, do not determine the KL divergence for those pairs where, for each point, at least one of the densities has a value smaller than \code{eps}.} \item{...}{Passed to the matrix method.} } \section{Methods}{ \describe{ \item{object = "matrix":}{Takes as input a matrix of density values with one row per observation and one column per distribution.} \item{object = "flexmix":}{Returns the Kullback-Leibler divergence of the mixture components.} }} \details{ Estimates \deqn{\int f(x) (\log f(x) - \log g(x)) dx} for distributions with densities \eqn{f()} and \eqn{g()}. } \value{ A matrix of KL divergences where the rows correspond to using the respective distribution as \eqn{f()} in the formula above. } \note{ The density functions are modified to have equal support. A weight of at least \code{eps} is given to each observation point for the modified densities. } \keyword{methods} \author{Friedrich Leisch and Bettina Gruen} \references{ S. Kullback and R. A. Leibler. On information and sufficiency. \emph{The Annals of Mathematical Statistics}, \bold{22}(1), 79--86, 1951. Friedrich Leisch. Exploring the structure of mixture model components. In Jaromir Antoch, editor, Compstat 2004--Proceedings in Computational Statistics, 1405--1412. Physika Verlag, Heidelberg, Germany, 2004. ISBN 3-7908-1554-3. } \examples{ ## Gaussian and Student t are much closer to each other than ## to the uniform: x <- seq(-3, 3, length = 200) y <- cbind(u = dunif(x), n = dnorm(x), t = dt(x, df = 10)) matplot(x, y, type = "l") KLdiv(y) if (require("mlbench")) { set.seed(2606) x <- mlbench.smiley()$x model1 <- flexmix(x ~ 1, k = 9, model = FLXmclust(diag = FALSE), control = list(minprior = 0)) plotEll(model1, x) KLdiv(model1) } }flexmix/man/relabel.Rd0000644000176200001440000000357314404637306014413 0ustar liggesusers% % Copyright (C) 2004-2015 Friedrich Leisch and Bettina Gruen % $Id: posterior.Rd 3937 2008-03-28 14:56:01Z leisch $ % \name{relabel} \alias{relabel} \alias{relabel,FLXdist,missing-method} \alias{relabel,FLXdist,character-method} \alias{relabel,FLXdist,integer-method} \title{Relabel the Components} \description{ The components are sorted by the value of one of the parameters or according to an integer vector containing the permutation of the numbers from 1 to the number of components. } \usage{ relabel(object, by, ...) \S4method{relabel}{FLXdist,character}(object, by, which = NULL, ...) } \arguments{ \item{object}{An object of class \code{"flexmix"}.} \item{by}{If a character vector, it needs to be one of \code{"prior"}, \code{"model"}, \code{"concomitant"} indicating if the parameter should be from the component-specific or the concomitant variable model. If an integer vector, it indicates how the components should be sorted.
If missing, the components are sorted by component size.} \item{which}{Name (or unique substring) of a parameter if \code{by} is equal to "model" or "concomitant".} \item{\dots}{Currently not used.} } \author{Friedrich Leisch and Bettina Gruen} \keyword{methods} \examples{ set.seed(123) beta <- matrix(1:16, ncol = 4) beta df1 <- ExLinear(beta, n = 100, sd = .5) f1 <- flexmix(y~., data = df1, k = 4) ## There was label switching, parameters are not in the same order ## as in beta: round(parameters(f1)) betas <- rbind(beta, .5) betas ## This makes no sense: summary(abs(as.vector(betas-parameters(f1)))) ## We relabel the components by sorting the coefficients of x1: r1 <- relabel(f1, by = "model", which = "x1") round(parameters(r1)) ## Now we can easily compare the fit with the true parameters: summary(abs(as.vector(betas-parameters(r1)))) } flexmix/man/FLXMRlmmc.Rd0000644000176200001440000000300314404637306014534 0ustar liggesusers\name{FLXMRlmmc} \alias{FLXMRlmmc} \alias{FLXMRlmmc-class} \alias{FLXMRlmmcfix-class} \alias{FLXMRlmc-class} \alias{FLXMRlmcfix-class} \alias{predict,FLXMRlmc-method} \title{FlexMix Interface to Linear Mixed Models with Left-Censoring} \description{ This is a driver which allows fitting of mixtures of linear models with random effects and left-censored observations. } \usage{ FLXMRlmmc(formula = . ~ ., random, censored, varFix, eps = 10^-6, ...) } \arguments{ \item{formula}{A formula which is interpreted relative to the formula specified in the call to \code{flexmix} using \code{\link{update.formula}}. Default is to use the original \code{flexmix} model formula.} \item{random}{A formula for specifying the random effects. If missing no random effects are fitted.} \item{varFix}{If random effects are specified a named logical vector of length 2 indicating if the variance of the random effects and the residuals are fixed over the components. Otherwise a logical indicating if the variance of the residuals are fixed over the components.} \item{censored}{A formula for specifying the censoring variable.} \item{eps}{Observations with an a-posteriori probability smaller or equal to \code{eps} are omitted in the M-step.} \item{\dots}{Additional arguments to be passed to \code{lm.wfit}.} } \value{ Returns an object of class \code{FLXMRlmmc}, \code{FLXMRlmmcfix}, \code{FLXMRlmc} or \code{FLXMRlmcfix} inheriting from \code{FLXMR}. } \author{Bettina Gruen} \keyword{models} flexmix/man/FLXdist.Rd0000644000176200001440000000255614404637306014324 0ustar liggesusers\name{FLXdist} \alias{FLXdist} \alias{simulate,FLXdist-method} \alias{show,FLXdist-method} \title{Finite Mixtures of Distributions} \description{ Constructs objects of class \code{FLXdist} which represent unfitted finite mixture models. } \usage{ FLXdist(formula, k = NULL, model = FLXMRglm(), components, concomitant = FLXPconstant()) } \arguments{ \item{formula}{A symbolic description of the model to be fit. The general form is \code{~x|g} where \code{x} is the set of predictors and \code{g} an optional grouping factor for repeated measurements.} \item{k}{Integer specifying the number of cluster or a numeric vector of length equal to the length of components, specifying the prior probabilities of clusters.} \item{model}{Object of class \code{FLXM} or a list of \code{FLXM} objects. 
Default is the object returned by calling \code{FLXMRglm()}.} \item{components}{A list of length equal to the number of components containing a list of length equal to the number of models which again contains a list of named elements for defining the parameters of the component-specific model.} \item{concomitant}{Object of class \code{FLXconcomitant} specifying the model for concomitant variables.} } \value{ Returns an object of class \code{FLXdist}. } \author{Bettina Gruen} \seealso{\code{FLXdist-class}} \keyword{utilities} flexmix/man/refit.Rd0000644000176200001440000001204614404637306014113 0ustar liggesusers% % Copyright (C) 2004-2015 Friedrich Leisch and Bettina Gruen % $Id: refit.Rd 5115 2017-04-07 08:18:13Z gruen $ % \name{refit-methods} \alias{refit,flexmix-method} \alias{FLXRmstep-class} \alias{FLXRoptim-class} \alias{show,FLXR-method} \alias{show,Coefmat-method} \alias{summary,FLXRoptim-method} \alias{summary,FLXRmstep-method} \alias{plot,FLXRoptim,missing-method} \title{Refit a Fitted Model} \description{ Refits an estimated flexmix model to obtain additional information like coefficient significance p-values for GLM regression. } \usage{ \S4method{refit}{flexmix}(object, newdata, method = c("optim", "mstep"), ...) \S4method{summary}{FLXRoptim}(object, model = 1, which = c("model", "concomitant"), ...) \S4method{summary}{FLXRmstep}(object, model = 1, which = c("model", "concomitant"), ...) \S4method{plot}{FLXRoptim,missing}(x, y, model = 1, which = c("model", "concomitant"), bycluster = TRUE, alpha = 0.05, components, labels = NULL, significance = FALSE, xlab = NULL, ylab = NULL, ci = TRUE, scales = list(), as.table = TRUE, horizontal = TRUE, ...) } \arguments{ \item{object}{An object of class \code{"flexmix"}} \item{newdata}{Optional new data.} \item{method}{Specifies if the variance covariance matrix is determined using \code{\link{optim}} or if the posteriors are assumed as given and an M-step is performed.} \item{model}{The model (for a multivariate response) that shall be used.} \item{which}{Specifies if a component specific model or the concomitant variable model is used.} \item{x}{An object of class \code{"FLXRoptim"}} \item{y}{Missing object.} \item{bycluster}{A logical if the parameters should be group by cluster or by variable.} \item{alpha}{Numeric indicating the significance level.} \item{components}{Numeric vector specifying which components are plotted. The default is to plot all components.} \item{labels}{Character vector specifying the variable names used.} \item{significance}{A logical indicating if non-significant coefficients are shaded in a lighter grey.} \item{xlab}{String for the x-axis label.} \item{ylab}{String for the y-axis label.} \item{ci}{A logical indicating if significant and insignificant parameter estimates are shaded differently.} \item{scales}{See argument of the same name for function \code{\link[lattice]{xyplot}}.} \item{as.table}{See arguments of the same name for function \code{\link[lattice]{xyplot}}.} \item{horizontal}{See arguments of the same name for function \code{\link[lattice]{xyplot}}.} \item{\dots}{Currently not used} } \value{ An object inheriting form class \code{FLXR} is returned. For the method using \code{optim} the object has class \code{FLXRoptim} and for the M-step method it has class \code{FLXRmstep}. Both classes give similar results for their \code{summary} methods. Objects of class \code{FLXRoptim} have their own \code{plot} method. 
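For example, \code{summary(refit(ex1))} for a fitted \code{flexmix} object \code{ex1} (see the example below) prints component-wise coefficient tables with approximate significance tests.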
\code{Lapply} can be used to further analyse the refitted component specific models of objects of class \code{FLXRmstep}. } \details{ The \code{refit} method for \code{FLXMRglm} models in combination with the \code{summary} method can be used to obtain the usual tests for significance of coefficients. Note that the tests are valid only if \code{flexmix} returned the maximum likelihood estimator of the parameters. If \code{refit} is used with \code{method = "mstep"} for these component specific models the returned object contains a \code{glm} object for each component where the elements \code{model} which is the model frame and \code{data} which contains the original dataset are missing. } \keyword{methods} \author{Friedrich Leisch and Bettina Gruen} \references{ Friedrich Leisch. FlexMix: A general framework for finite mixture models and latent class regression in R. \emph{Journal of Statistical Software}, \bold{11}(8), 2004. doi:10.18637/jss.v011.i08 } \section{Warning}{ For \code{method = "mstep"} the standard deviations are determined separately for each of the components using the a-posteriori probabilities as weights without accounting for the fact that the components have been simultaneously estimated. The derived standard deviations are hence approximative and should only be used in an exploratory way, as they are underestimating the uncertainty given that the missing information of the component memberships are replaced by the expected values. The \code{newdata} argument can only be specified when using \code{method = "mstep"} for refitting \code{FLXMRglm} components. A variant of \code{glm} for weighted ML estimation is used for fitting the components and full \code{glm} objects are returned. Please note that in this case the data and the model frame are stored for each component which can significantly increase the object size. } \examples{ data("NPreg", package = "flexmix") ex1 <- flexmix(yn ~ x + I(x^2), data = NPreg, k = 2) ex1r <- refit(ex1) ## in one component all coefficients should be highly significant, ## in the other component only the linear term summary(ex1r) } flexmix/man/FLXglm.Rd0000644000176200001440000000261114404637306014130 0ustar liggesusers% % Copyright (C) 2004-2015 Friedrich Leisch and Bettina Gruen % $Id: FLXglm.Rd 5229 2022-06-02 14:47:26Z gruen $ % \name{FLXMRglm} \alias{FLXMRglm} \alias{FLXglm} \title{FlexMix Interface to Generalized Linear Models} \description{ This is the main driver for FlexMix interfacing the \code{\link{glm}} family of models. } \usage{ FLXMRglm(formula = . ~ ., family = c("gaussian", "binomial", "poisson", "Gamma"), offset = NULL) } \arguments{ \item{formula}{A formula which is interpreted relative to the formula specified in the call to \code{\link{flexmix}} using \code{\link{update.formula}}. Default is to use the original \code{\link{flexmix}} model formula.} \item{family}{A character string naming a \code{\link{glm}} family function.} \item{offset}{This can be used to specify an \emph{a priori} known component to be included in the linear predictor during fitting.} } \details{ See \code{\link{flexmix}} for examples. } \value{ Returns an object of class \code{FLXMRglm} inheriting from \code{FLXMR}. } \author{Friedrich Leisch and Bettina Gruen} \references{ Friedrich Leisch. FlexMix: A general framework for finite mixture models and latent class regression in R. \emph{Journal of Statistical Software}, \bold{11}(8), 2004. 
doi:10.18637/jss.v011.i08 } \seealso{\code{\link{flexmix}}, \code{\link{glm}}} \keyword{regression} \keyword{models} flexmix/man/FLXMRmultinom.Rd0000644000176200001440000000222614404637306015456 0ustar liggesusers\name{FLXMRmultinom} \alias{FLXMRmultinom} \title{FlexMix Interface to Multinomial Logit Models} \description{ Model driver for fitting mixtures of multinomial logit models. } \usage{ FLXMRmultinom(formula = . ~ ., ...) } \arguments{ \item{formula}{A formula which is interpreted relative to the formula specified in the call to \code{\link{flexmix}} using \code{\link{update.formula}}. Default is to use the original \code{\link{flexmix}} model formula.} \item{\dots}{Additional arguments to be passed to \code{nnet.default}.} } \details{ The M-step is performed using \code{nnet.default}. } \value{ Returns an object of class \code{FLXMRmultinom} inheriting from \code{FLXMRglm}. } \references{ Bettina Gruen and Friedrich Leisch. Identifiability of finite mixtures of multinomial logit models with varying and fixed effects. \emph{Journal of Classification}, \bold{25}, 225--247, 2008. } \author{ Bettina Gruen } \section{Warning}{ To ensure identifiability, repeated measurements are necessary. Sufficient conditions are given in Gruen and Leisch (2008). } \seealso{\code{\link{FLXMRcondlogit}}} \keyword{regression} \keyword{models} flexmix/man/betablocker.Rd0000644000176200001440000000276614404637306015263 0ustar liggesusers% % Copyright (C) 2004-2015 Friedrich Leisch and Bettina Gruen % $Id: betablocker.Rd 5008 2015-01-13 20:30:25Z gruen $ % \name{betablocker} \alias{betablocker} \docType{data} \title{Clinical Trial of Beta-Blockers} \description{ 22-centre clinical trial of beta-blockers for reducing mortality after myocardial infarction. } \usage{data("betablocker")} \format{ A data frame with 44 observations on the following 4 variables. \describe{ \item{Deaths}{Number of deaths.} \item{Total}{Total number of patients.} \item{Center}{Number of clinical centre.} \item{Treatment}{A factor with levels \code{Control} and \code{Treated}.} } } \source{ G. McLachlan and D. Peel. \emph{Finite Mixture Models}, 2000. John Wiley and Sons Inc. \url{http://www.maths.uq.edu.au/~gjm/DATA/mmdata.html} } \references{ M. Aitkin. Meta-analysis by random effect modelling in generalized linear models. \emph{Statistics in Medicine}, \bold{18}, 2343--2351, 1999. S. Yusuf, R. Peto, J. Lewis, R. Collins and P. Sleight. Beta blockade during and after myocardial infarction: an overview of the randomized trials. \emph{Progress in Cardiovascular Diseases}, \bold{27}, 335--371, 1985. } \examples{ data("betablocker", package = "flexmix") betaMix <- initFlexmix(cbind(Deaths, Total - Deaths) ~ 1 | Center, data = betablocker, k = 3, nrep = 5, model = FLXMRglmfix(family = "binomial", fixed = ~Treatment)) } \keyword{datasets} flexmix/man/FLXMCmvcombi.Rd0000644000176200001440000000407614404637306015234 0ustar liggesusers% % Copyright (C) 2009 Friedrich Leisch and Bettina Gruen % $Id: FLXMCmvcombi.Rd 5008 2015-01-13 20:30:25Z gruen $ % \name{FLXMCmvcombi} \alias{FLXMCmvcombi} \title{FlexMix Binary and Gaussian Clustering Driver} \description{ This is a model driver for \code{\link{flexmix}} implementing model-based clustering of a combination of binary and Gaussian data. } \usage{ FLXMCmvcombi(formula = . ~ .) } \arguments{ \item{formula}{A formula which is interpreted relative to the formula specified in the call to \code{\link{flexmix}} using \code{\link{update.formula}}.
Only the left-hand side (response) of the formula is used. Default is to use the original \code{\link{flexmix}} model formula.} } \details{ This model driver can be used to cluster mixed-mode binary and Gaussian data. It checks which columns of a matrix contain only zeros and ones, and does the same as \code{\link{FLXMCmvbinary}} for them. For the remaining columns of the data matrix independent Gaussian distributions are used (same as \code{\link{FLXMCmvnorm}} with \code{diagonal = TRUE}). The same could be obtained by creating a corresponding list of two models for the respective columns, but \code{FLXMCmvcombi} does a better job in reporting parameters. } \value{ \code{FLXMCmvcombi} returns an object of class \code{FLXMC}. } \author{Friedrich Leisch} \seealso{\code{\link{flexmix}}, \code{\link{FLXMCmvbinary}}, \code{\link{FLXMCmvnorm}}} \keyword{cluster} \examples{ ## create some artificial data x1 <- cbind(rnorm(300), sample(0:1, 300, replace = TRUE, prob = c(0.25, 0.75))) x2 <- cbind(rnorm(300, mean = 2, sd = 0.5), sample(0:1, 300, replace = TRUE, prob = c(0.75, 0.25))) x <- rbind(x1, x2) ## fit the model f1 <- flexmix(x ~ 1, k = 2, model = FLXMCmvcombi()) ## should be similar to the original parameters parameters(f1) table(clusters(f1), rep(1:2, c(300,300))) ## a column with noise should not hurt too much x <- cbind(x, rnorm(600)) f2 <- flexmix(x ~ 1, k = 2, model = FLXMCmvcombi()) parameters(f2) table(clusters(f2), rep(1:2, c(300,300))) } flexmix/man/group.Rd0000644000176200001440000000073314404637306014136 0ustar liggesusers\name{group} \docType{methods} \alias{group} \alias{group-methods} \alias{group,flexmix-method} \alias{group,FLXM-method} \alias{group,FLXMRglmfix-method} \title{Extract Grouping Variable} \description{ Extract grouping variable for all observations. } \usage{ \S4method{group}{flexmix}(object) \S4method{group}{FLXM}(object) \S4method{group}{FLXMRglmfix}(object) } \arguments{ \item{object}{An object of class \code{flexmix}.} } \keyword{methods} \author{Bettina Gruen} flexmix/man/FLXMRlmer.Rd0000644000176200001440000000767614404637306014557 0ustar liggesusers\name{FLXMRlmer} \alias{FLXMRlmer} \alias{FLXMRlmer-class} \alias{FLXMRlmm-class} \alias{FLXMRlmmfix-class} \alias{FLXdeterminePostunscaled,FLXMRlmer-method} \alias{FLXdeterminePostunscaled,FLXMRlmm-method} \alias{FLXmstep,FLXMRlmer-method} \alias{FLXmstep,FLXMRlmm-method} \alias{FLXgetModelmatrix,FLXMRlmer-method} \alias{FLXgetModelmatrix,FLXMRlmm-method} \alias{FLXMRlmm} \alias{FLXgetObs,FLXMRlmm-method} \alias{FLXmstep,FLXMRlmmfix-method} \alias{predict,FLXMRlmm-method} \alias{rFLXM,FLXMRlmm,FLXcomponent-method} \alias{rFLXM,FLXMRlmm,list-method} \alias{rFLXM,FLXMRlmc,FLXcomponent-method} \alias{rFLXM,FLXMRlmer,FLXcomponent-method} \title{FlexMix Interface to Linear Mixed Models} \description{ This is a driver which allows fitting of mixtures of linear models with random effects. } \usage{ FLXMRlmm(formula = . ~ ., random, lm.fit = c("lm.wfit", "smooth.spline"), varFix = c(Random = FALSE, Residual = FALSE), \dots) FLXMRlmer(formula = . ~ ., random, weighted = TRUE, control = list(), eps = .Machine$double.eps) } \arguments{ \item{formula}{A formula which is interpreted relative to the formula specified in the call to \code{flexmix} using \code{\link{update.formula}}.
Default is to use the original \code{flexmix} model formula.} \item{random}{A formula for specifying the random effects.} \item{weighted}{A logical indicating if the model should be estimated with weighted ML.} \item{control}{A list of control parameters. See \code{\link[lme4]{lmer}} for details.} \item{eps}{Observations with a component-specific posterior smaller than \code{eps} are omitted in the M-step for this component.} \item{lm.fit}{A character string indicating if the coefficients should be fitted using either a linear model or the function \code{smooth.spline}} \item{varFix}{Named logical vector of length 2 indicating if the variance of the random effects and the residuals are fixed over the components.} \item{\dots}{Additional arguments to be passed to \code{smooth.spline}.} } \details{ \code{FLXMRlmm} allows only one random effect. \code{FLXMRlmer} allows an arbitrary number of random effects if \code{weighted = FALSE}; a certain structure of the model matrix of the random effects has to be given for weighted ML estimation, i.e. where \code{weighted = TRUE}. } \value{ Returns an object of class \code{FLXMRlmer} and \code{FLXMRlmm} inheriting from \code{FLXMRglm} and \code{FLXMR}, respectively. } \section{Warning}{ For \code{FLXMRlmer} the weighted ML estimation is only correct if the covariate matrix of the random effects is the same for each observation. By default weighted ML estimation is made and the condition on the covariate matrix of the random effects is checked. If this fails, only estimation with \code{weighted = FALSE} is possible which will maximize the classification likelihood. } \author{Bettina Gruen} \examples{ id <- rep(1:50, each = 10) x <- rep(1:10, 50) sample <- data.frame(y = rep(rnorm(unique(id)/2, 0, c(5, 2)), each = 10) + rnorm(length(id), rep(c(3, 8), each = 10)) + rep(c(0, 3), each = 10) * x, x = x, id = factor(id)) fitted <- flexmix(.~.|id, k = 2, model = FLXMRlmm(y ~ x, random = ~ 1), data = sample, control = list(tolerance = 10^-3), cluster = rep(rep(1:2, each = 10), 25)) parameters(fitted) fitted1 <- flexmix(.~.|id, k = 2, model = FLXMRlmer(y ~ x, random = ~ 1), data = sample, control = list(tolerance = 10^-3), cluster = rep(rep(1:2, each = 10), 25)) parameters(fitted1) fitted2 <- flexmix(.~.|id, k = 2, model = FLXMRlmm(y ~ 0 + x, random = ~ 1, lm.fit = "smooth.spline"), data = sample, control = list(tolerance = 10^-3), cluster = rep(rep(1:2, each = 10), 25)) parameters(fitted2) } \keyword{models} flexmix/man/BIC-methods.Rd0000644000176200001440000000105014404637306015031 0ustar liggesusers% % Copyright (C) 2004-2015 Friedrich Leisch and Bettina Gruen % $Id: BIC-methods.Rd 5008 2015-01-13 20:30:25Z gruen $ % \name{BIC-methods} \docType{methods} \title{Methods for Function BIC} \alias{BIC,flexmix-method} \alias{BIC,stepFlexmix-method} \description{Compute the Bayesian Information Criterion.} \section{Methods}{ \describe{ \item{object = flexmix:}{Compute the BIC of a \code{flexmix} object} \item{object = stepFlexmix:}{Compute the BIC of all models contained in the \code{stepFlexmix} object.} } } \keyword{methods} flexmix/man/plotEll.Rd0000644000176200001440000000253514404637306014417 0ustar liggesusers% % Copyright (C) 2004-2015 Friedrich Leisch and Bettina Gruen % $Id: plotEll.Rd 5008 2015-01-13 20:30:25Z gruen $ % \name{plotEll} \alias{plotEll} \title{Plot Confidence Ellipses for FLXMCmvnorm Results} \description{ Plot 50\% and 95\% confidence ellipses for mixtures of Gaussians fitted using \code{\link{FLXMCmvnorm}}. 
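For example, for a fit obtained via \code{ex1 <- flexmix(Nclus ~ 1, k = 4, model = FLXMCmvnorm())} the ellipses are drawn with \code{plotEll(ex1, Nclus)}.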
} \usage{ plotEll(object, data, which = 1:2, model = 1, project = NULL, points = TRUE, eqscale = TRUE, col = NULL, number = TRUE, cex = 1.5, numcol = "black", pch = NULL, ...) } \arguments{ \item{object}{An object of class \code{flexmix} with a fitted \code{FLXMCmvnorm} model.} \item{data}{The response variable in a data frame or as a matrix.} \item{which}{Index numbers of dimensions of (projected) input space to plot.} \item{model}{The model (for a multivariate response) that shall be plotted.} \item{project}{Projection object, currently only the result of \code{\link[stats]{prcomp}} is supported.} \item{points}{Logical, shall data points be plotted?} \item{eqscale}{Logical, plot using \code{\link[MASS]{eqscplot}}?} \item{number}{Logical, plot number labels at cluster centers?} \item{cex, numcol}{Size and color of number labels.} \item{pch, col, \dots}{Graphical parameters.} } \author{Friedrich Leisch and Bettina Gruen} \seealso{\code{\link{FLXMCmvnorm}}} \keyword{cluster} flexmix/man/ExNclus.Rd0000644000176200001440000000151314404637306014360 0ustar liggesusers% % Copyright (C) 2004-2015 Friedrich Leisch and Bettina Gruen % $Id: ExNclus.Rd 5008 2015-01-13 20:30:25Z gruen $ % \name{ExNclus} \alias{ExNclus} \alias{Nclus} \title{Artificial Example with 4 Gaussians} \description{ A simple artificial example for normal clustering with 4 latent classes, all of them having a Gaussian distribution. See the function definition for true means and covariances. } \usage{ ExNclus(n) data("Nclus") } \arguments{ \item{n}{Number of observations in the two small latent classes.} } \details{ The \code{Nclus} data set can be re-created by \code{ExNclus(100)} using \code{set.seed(2602)}, it has been saved as a data set for simplicity of examples only. } \examples{ data("Nclus", package = "flexmix") require("MASS") eqscplot(Nclus, col = rep(1:4, c(100, 100, 150, 200))) } \keyword{datasets} flexmix/man/salmonellaTA98.Rd0000644000176200001440000000342414404637306015537 0ustar liggesusers% % Copyright (C) 2004-2015 Friedrich Leisch and Bettina Gruen % $Id: salmonellaTA98.Rd 5008 2015-01-13 20:30:25Z gruen $ % \name{salmonellaTA98} \alias{salmonellaTA98} \title{Salmonella Reverse Mutagenicity Assay} \usage{data("salmonellaTA98")} \description{ Data on Ames Salmonella reverse mutagenicity assay. } \format{ This data frame contains the following columns: \describe{ \item{x}{Dose levels of quinoline.} \item{y}{Numbers of revertant colonies of TA98 Salmonella observed on each of three replicate plates tested at each of six dose levels of quinoline diameter.} } } \details{ This data set is taken from package \pkg{dispmod} provided by Luca Scrucca. } \source{ Margolin, B.J., Kaplan, N. and Zeiger, E. Statistical analysis of the Ames Salmonella/microsome test, \emph{Proc. Natl. Acad. Sci. USA}, \bold{76}, 3779--3783, 1981. } \references{ Breslow, N.E. Extra-Poisson variation in log-linear models, \emph{Applied Statistics}, \bold{33}, 38--44, 1984. Wang, P., Puterman, M.L., Cockburn, I.M., and Le, N.D. Mixed Poisson regression models with covariate dependent rates, \emph{Biometrics}, \bold{52}, 381--400, 1996. 
} \examples{ data("salmonellaTA98", package = "flexmix") salmonMix <- initFlexmix(y ~ 1, data = salmonellaTA98, model = FLXMRglmfix(family = "poisson", fixed = ~ x + log(x + 10)), k = 2, nrep = 5) salmonMix.pr <- predict(salmonMix, newdata = salmonellaTA98) plot(y ~ x, data = salmonellaTA98, pch = as.character(clusters(salmonMix)), ylim = range(c(salmonellaTA98$y, unlist(salmonMix.pr)))) for (i in 1:2) lines(salmonellaTA98$x, salmonMix.pr[[i]], lty = i) } \keyword{datasets} flexmix/man/FLXMCmvpois.Rd0000644000176200001440000000175714404637306015120 0ustar liggesusers% % Copyright (C) 2004-2015 Friedrich Leisch and Bettina Gruen % $Id: FLXMCmvpois.Rd 5008 2015-01-13 20:30:25Z gruen $ % \name{FLXMCmvpois} \alias{FLXMCmvpois} \title{FlexMix Poisson Clustering Driver} \description{ This is a model driver for \code{\link{flexmix}} implementing model-based clustering of Poisson distributed data. } \usage{ FLXMCmvpois(formula = . ~ .) } \arguments{ \item{formula}{A formula which is interpreted relative to the formula specified in the call to \code{\link{flexmix}} using \code{\link{update.formula}}. Only the left-hand side (response) of the formula is used. Default is to use the original \code{\link{flexmix}} model formula.} } \details{ This can be used to cluster Poisson distributed data where given the component membership the variables are mutually independent. } \value{ \code{FLXMCmvpois} returns an object of class \code{FLXMC}. } \author{Friedrich Leisch and Bettina Gruen} \seealso{\code{\link{flexmix}}} \keyword{cluster} flexmix/man/boot.Rd0000644000176200001440000000607314404637306013750 0ustar liggesusers\name{boot} \alias{boot} \alias{boot,flexmix-method} \alias{LR_test} \alias{LR_test,flexmix-method} \alias{boot,flexmix-method} \alias{show,FLXboot-method} \alias{FLXboot-class} \alias{plot,FLXboot,missing-method} \alias{parameters,FLXboot-method} \alias{clusters,FLXboot,listOrdata.frame-method} \alias{predict,FLXboot-method} \alias{posterior,FLXboot,listOrdata.frame-method} \title{Bootstrap a flexmix Object} \description{ Given a \code{flexmix} object perform parametric or empirical bootstrap. } \usage{ boot(object, ...) \S4method{boot}{flexmix}(object, R, sim = c("ordinary", "empirical", "parametric"), initialize_solution = FALSE, keep_weights = FALSE, keep_groups = TRUE, verbose = 0, control, k, model = FALSE, ...) LR_test(object, ...) \S4method{LR_test}{flexmix}(object, R, alternative = c("greater", "less"), control, ...) } \arguments{ \item{object}{A fitted finite mixture model of class \code{flexmix}.} \item{R}{The number of bootstrap replicates.} \item{sim}{A character string indicating the type of simulation required. Possible values are \code{"ordinary"} (the default), \code{"parametric"}, or \code{"empirical"}.} \item{initialize_solution}{A logical. If \code{TRUE} the EM algorithm is initialized in the given solution.} \item{keep_weights}{A logical. If \code{TRUE} the weights are kept.} \item{keep_groups}{A logical. If \code{TRUE} the groups are kept.} \item{verbose}{If a positive integer, then progress information is reported every \code{verbose} iterations. If 0, no output is generated during the bootstrap replications.} \item{control}{Object of class \code{FLXcontrol} or a named list. If missing the control of the fitted \code{object} is taken.} \item{k}{Vector of integers specifying for which number of components finite mixtures are fitted to the bootstrap samples. 
If missing, the number of components of the fitted \code{object} is taken.} \item{alternative}{A character string specifying the alternative hypothesis; must be either \code{"greater"} (default) or \code{"less"}, indicating whether the alternative hypothesis is that the mixture has one more component or one less.} \item{model}{A logical. If \code{TRUE} the model and the weights slot for each sample are stored and returned.} \item{\dots}{Further arguments to be passed to or from methods.} } \value{ \code{boot} returns an object of class \code{FLXboot} which contains the fitted parameters, the fitted priors, the log likelihoods, the number of components of the fitted mixtures and the information whether the EM algorithm has converged. \code{LR_test} returns an object of class \code{htest} containing the number of valid bootstrap replicates, the p-value, and the values of minus twice the log-likelihood ratio test statistic for the original data and the bootstrap replicates. } \author{Bettina Gruen} \examples{ data("NPreg", package = "flexmix") fitted <- initFlexmix(yn ~ x + I(x^2) | id2, data = NPreg, k = 2) \dontrun{ lrtest <- LR_test(fitted, alternative = "greater", R = 20, verbose = 1) } } \keyword{methods} flexmix/man/FLXmclust.Rd0000644000176200001440000000465314404637306014670 0ustar liggesusers% % Copyright (C) 2004-2015 Friedrich Leisch and Bettina Gruen % $Id: FLXmclust.Rd 5115 2017-04-07 08:18:13Z gruen $ % \name{FLXMCmvnorm} \alias{FLXMCmvnorm} \alias{FLXMCnorm1} \alias{FLXmclust} \title{FlexMix Clustering Demo Driver} \description{ These are demo drivers for \code{\link{flexmix}} implementing model-based clustering of Gaussian data. } \usage{ FLXMCmvnorm(formula = . ~ ., diagonal = TRUE) FLXMCnorm1(formula = . ~ .) } \arguments{ \item{formula}{A formula which is interpreted relative to the formula specified in the call to \code{\link{flexmix}} using \code{\link{update.formula}}. Only the left-hand side (response) of the formula is used. Default is to use the original \code{\link{flexmix}} model formula.} \item{diagonal}{If \code{TRUE}, then the covariance matrix of the components is restricted to diagonal matrices.} } \details{ This is mostly meant as a demo for FlexMix driver programming; you should also look at package \pkg{mclust} for real applications. \code{FLXMCmvnorm} clusters multivariate data, \code{FLXMCnorm1} univariate data. In the latter case smart initialization is important; see the example below. } \value{ \code{FLXMCmvnorm} returns an object of class \code{FLXMC}. } \author{Friedrich Leisch and Bettina Gruen} \references{ Friedrich Leisch. FlexMix: A general framework for finite mixture models and latent class regression in R. \emph{Journal of Statistical Software}, \bold{11}(8), 2004.
doi:10.18637/jss.v011.i08 } \seealso{\code{\link{flexmix}}} \keyword{cluster} \examples{ data("Nclus", package = "flexmix") require("MASS") eqscplot(Nclus) ## This model is wrong (one component has a non-diagonal cov matrix) ex1 <- flexmix(Nclus ~ 1, k = 4, model = FLXMCmvnorm()) print(ex1) plotEll(ex1, Nclus) ## True model, wrong number of components ex2 <- flexmix(Nclus ~ 1, k = 6, model = FLXMCmvnorm(diagonal = FALSE)) print(ex2) plotEll(ex2, Nclus) ## Get parameters of first component parameters(ex2, component = 1) ## Have a look at the posterior probabilies of 10 random observations ok <- sample(1:nrow(Nclus), 10) p <- posterior(ex2)[ok, ] p ## The following two should be the same max.col(p) clusters(ex2)[ok] \testonly{ stopifnot(all.equal(max.col(p), clusters(ex2)[ok])) } ## Now try the univariate case plot(density(Nclus[, 1])) ex3 <- flexmix(Nclus[, 1] ~ 1, cluster = cut(Nclus[, 1], 3), model = FLXMCnorm1()) ex3 parameters(ex3) } flexmix/man/whiskey.Rd0000644000176200001440000000300314404637306014456 0ustar liggesusers% % Copyright (C) 2004-2015 Friedrich Leisch and Bettina Gruen % $Id: whiskey.Rd 5008 2015-01-13 20:30:25Z gruen $ % \name{whiskey} \alias{whiskey} \alias{whiskey_brands} \docType{data} \title{Survey Data on Brands of Scotch whiskey Consumed} \description{ The data set is from Simmons Study of Media and Markets and contains the incidence matrix for scotch brands used in last year for those households who report consuming scotch. } \usage{data("whiskey")} \format{ A data frame \code{whiskey} with 484 observations on the following 2 variables. \describe{ \item{\code{Freq}}{a numeric vector} \item{\code{Incidence}}{a matrix with 21 columns} } Additional information on the brands is contained in the data frame \code{whiskey_brands} which is simultaneously loaded. This data frame contains 21 observations on the following 3 variables. \describe{ \item{\code{Brand}}{a character vector} \item{\code{Type}}{a factor with levels \code{Blend} \code{Single Malt}} \item{\code{Bottled}}{a factor with levels \code{Domestic} \code{Foreign}} } } \details{ The dataset is taken from the \pkg{bayesm} package. } \source{ Peter Rossi and Rob McCulloch. bayesm: Bayesian Inference for Marketing/Micro-econometrics. R package version 2.0-8, 2006. http://gsbwww.uchicago.edu/fac/peter.rossi/research/bsm.html } \references{ Edwards, Y. and G. Allenby. Multivariate Analysis of Multiple Response Data, \emph{Journal of Marketing Research}, \bold{40}, 321--334, 2003. } \keyword{datasets} flexmix/man/FLXMRmgcv.Rd0000644000176200001440000000402214404637306014542 0ustar liggesusers\name{FLXMRmgcv} \alias{FLXMRmgcv} \alias{FLXMRmgcv-class} \title{FlexMix Interface to GAMs} \description{ This is a driver which allows fitting of mixtures of GAMs. } \usage{ FLXMRmgcv(formula = . ~ ., family = c("gaussian", "binomial", "poisson"), offset = NULL, control = NULL, optimizer = c("outer", "newton"), in.out = NULL, eps = .Machine$double.eps, ...) } \arguments{ \item{formula}{A formula which is interpreted relative to the formula specified in the call to \code{\link{flexmix}} using \code{\link{update.formula}}. 
Default is to use the original \code{\link{flexmix}} model formula.} \item{family}{A character string naming a \code{\link{glm}} family function.} \item{offset}{This can be used to specify an \emph{a priori} known component to be included in the linear predictor during fitting.} \item{control}{A list of fit control parameters returned by \code{gam.control}.} \item{optimizer}{An array specifying the numerical optimization method to use to optimize the smoothing parameter estimation criterion; for more details see \code{\link[mgcv]{gam}}.} \item{in.out}{Optional list for initializing outer iteration; for more details see \code{\link[mgcv]{gam}}.} \item{eps}{Observations with an a-posteriori probability smaller or equal to \code{eps} are omitted in the M-step.} \item{\dots}{Additional arguments to be pased to the GAM fitter.} } \value{ Returns an object of class \code{FLXMRmgcv} inheriting from \code{FLXMRglm}. } \author{ Bettina Gruen } \seealso{ \code{\link{FLXMRglm}} } \examples{ set.seed(2012) x <- seq(0, 1, length.out = 100) z <- sample(0:1, length(x), replace = TRUE) y <- rnorm(length(x), ifelse(z, 5 * sin(x * 2 * pi), 10 * x - 5)) fitted_model <- flexmix(y ~ s(x), model = FLXMRmgcv(), cluster = z + 1, control = list(tolerance = 10^-3)) plot(y ~ x, col = clusters(fitted_model)) matplot(x, fitted(fitted_model), type = "l", add = TRUE) } \keyword{regression} \keyword{cluster} flexmix/man/EIC.Rd0000644000176200001440000000241414404637306013400 0ustar liggesusers% % Copyright (C) 2004-2009 Friedrich Leisch and Bettina Gruen % $Id: EIC.Rd 3912 2008-03-13 15:10:24Z gruen $ % \name{EIC} \alias{EIC} \alias{EIC,flexmix-method} \alias{EIC,stepFlexmix-method} \title{Entropic Measure Information Criterion} \description{ Compute the entropic measure information criterion for model selection. } \usage{ \S4method{EIC}{flexmix}(object, \dots) \S4method{EIC}{stepFlexmix}(object, \dots) } \arguments{ \item{object}{See Methods section below} \item{\dots}{Some methods for this generic function may take additional, optional arguments. At present none do.} } \section{Methods}{ \describe{ \item{object = "flexmix":}{Compute the EIC of a \code{flexmix} object.} \item{object = "stepFlexmix":}{Compute the EIC of all models contained in the \code{stepFlexmix} object.} }} \value{ Returns a numeric vector with the corresponding EIC value(s). } \keyword{methods} \author{Bettina Gruen} \references{ V. Ramaswamy, W. S. DeSarbo, D. J. Reibstein, and W. T. Robinson. An empirical pooling approach for estimating marketing mix elasticities with PIMS data. \emph{Marketing Science}, \bold{12}(1), 103--124, 1993. } \examples{ data("NPreg", package = "flexmix") ex1 <- flexmix(yn ~ x + I(x^2), data = NPreg, k = 2) EIC(ex1) } flexmix/man/Lapply-methods.Rd0000644000176200001440000000313014404637306015676 0ustar liggesusers% % Copyright (C) 2004-2015 Friedrich Leisch and Bettina Gruen % $Id: Lapply-methods.Rd 5008 2015-01-13 20:30:25Z gruen $ % \name{Lapply-methods} \docType{methods} \title{Methods for Function Lapply} \alias{Lapply,FLXRmstep-method} \description{Apply a function to each component of a finite mixture} \usage{ \S4method{Lapply}{FLXRmstep}(object, FUN, model = 1, component = TRUE, ...) 
} \arguments{ \item{object}{S4 class object.} \item{FUN}{The function to be applied.} \item{model}{The model (for a multivariate response) that shall be used.} \item{component}{Index vector for selecting the components.} \item{\dots}{Optional arguments to \code{FUN}.} } \section{Methods}{ \describe{ \item{object = FLXRmstep:}{Apply a function to each component of a refitted \code{flexmix} object using method = \code{"mstep"}.} } } \details{ \code{FUN} is found by a call to \code{match.fun} and typically is specified as a function or a symbol (e.g. a backquoted name) or a character string specifying a function to be searched for from the environment of the call to \code{Lapply}. } \value{ A list of the length equal to the number of components specified is returned, each element of which is the result of applying \code{FUN} to the specified component of the refitted mixture model. } \keyword{methods} \author{Friedrich Leisch and Bettina Gruen} \examples{ data("NPreg", package = "flexmix") ex2 <- flexmix(yn ~ x, data = NPreg, k = 2, model = list(FLXMRglm(yn ~ . + I(x^2)), FLXMRglm(yp ~ ., family = "poisson"))) ex2r <- refit(ex2, method = "mstep") Lapply(ex2r, "vcov", 2) } flexmix/man/FLXP-class.Rd0000644000176200001440000000211414404637306014651 0ustar liggesusers% % Copyright (C) 2004-2015 Friedrich Leisch and Bettina Gruen % $Id: FLXP-class.Rd 5008 2015-01-13 20:30:25Z gruen $ % \name{FLXP-class} \docType{class} \alias{FLXP-class} \title{Class "FLXP"} \alias{initialize,FLXP-method} \alias{FLXPconstant-class} \alias{FLXPmultinom-class} \description{ Concomitant model class. } \section{Objects from the Class}{ Objects can be created by calls of the form \code{new("FLXP", ...)}, typically inside driver functions like \code{\link{FLXPconstant}} or \code{\link{FLXPmultinom}}. } \section{Slots}{ \describe{ \item{\code{name}:}{Character string used in print methods.} \item{\code{formula}:}{Formula describing the model.} \item{\code{x}:}{Model matrix.} \item{\code{fit}:}{Function returning the fitted prior probabilities.} \item{\code{refit}:}{Function returning the fitted concomitant model.} \item{\code{coef}:}{Matrix containing the fitted parameters.} \item{\code{df}:}{Function for determining the number of degrees of freedom used.} } } \author{Friedrich Leisch and Bettina Gruen} \keyword{classes} flexmix/man/FLXMCdist1.Rd0000644000176200001440000000373614404637306014626 0ustar liggesusers% % Copyright (C) 2016 Bettina Gruen % $Id: FLXMCdist1.Rd 5008 2015-01-13 20:30:25Z gruen $ % \name{FLXMCdist1} \alias{FLXMCdist1} \title{FlexMix Clustering of Univariate Distributions} \description{ These are drivers for \code{\link{flexmix}} implementing model-based clustering of univariate data using different distributions for the component-specific models. } \usage{ FLXMCdist1(formula = . ~ ., dist, ...) } \arguments{ \item{formula}{A formula which is interpreted relative to the formula specified in the call to \code{\link{flexmix}} using \code{\link{update.formula}}. Only the left-hand side (response) of the formula is used. 
Default is to use the original \code{\link{flexmix}} model formula.} \item{dist}{Character string indicating the component-specific univariate distribution.} \item{...}{Arguments for the specific model drivers.} } \details{ Currently drivers for the following distributions are available: \enumerate{ \item Lognormal (\code{"lnorm"}) \item inverse Gaussian (\code{"invGauss"} using \code{\link[SuppDists:invGauss]{dinvGauss}}) \item gamma (\code{"gamma"}) \item exponential (\code{"exp"}) \item Weibull (\code{"weibull"}) \item Burr (\code{"burr"} using \code{\link[actuar:Burr]{dburr}}) \item Inverse Burr (\code{"invburr"} using \code{\link[actuar:Burr]{dinvburr}}) } } \value{ \code{FLXMCdist1} returns an object of class \code{FLXMC}. } \author{Friedrich Leisch and Bettina Gruen} \references{ Tatjana Miljkovic and Bettina Gruen. Modeling loss data using mixtures of distributions. \emph{Insurance: Mathematics and Economics}, \bold{70}, 387-396, 2016. doi:10.1016/j.insmatheco.2016.06.019 } \seealso{\code{\link{flexmix}}} \keyword{cluster} \examples{ if (require("actuar")) { set.seed(123) y <- c(rexp(100, 10), rexp(100, 1)) ex <- flexmix(y ~ 1, cluster = rep(1:2, each = 100), model = FLXMCdist1(dist = "exp")) parameters(ex) } } flexmix/man/FLXnested-class.Rd0000644000176200001440000000200214404637306015730 0ustar liggesusers% % Copyright (C) 2004-2015 Friedrich Leisch and Bettina Gruen % $Id: FLXnested-class.Rd 5008 2015-01-13 20:30:25Z gruen $ % \name{FLXnested-class} \docType{class} \alias{FLXnested-class} \alias{coerce,list,FLXnested-method} \alias{coerce,NULL,FLXnested-method} \alias{coerce,numeric,FLXnested-method} \alias{initialize,FLXnested-method} \title{Class "FLXnested"} \description{Specification of nesting structure for regression coefficients.} \section{Objects from the Class}{ Objects can be created by calls of the form \code{new("FLXnested", formula, k, ...)}. In addition, named lists can be coerced to \code{FLXnested} objects, names are completed if unique. } \section{Slots}{ \describe{ \item{\code{formula}:}{Object of class \code{"list"} containing the formula for determining the model matrix for each nested parameter.} \item{\code{k}:}{Object of class \code{"numeric"} specifying the number of components in each group.} } } \author{Friedrich Leisch and Bettina Gruen} \keyword{classes} flexmix/man/ExNPreg.Rd0000644000176200001440000000165114404637306014312 0ustar liggesusers% % Copyright (C) 2004-2015 Friedrich Leisch and Bettina Gruen % $Id: ExNPreg.Rd 5008 2015-01-13 20:30:25Z gruen $ % \name{ExNPreg} \alias{ExNPreg} \alias{NPreg} \title{Artificial Example for Normal, Poisson and Binomial Regression} \description{ A simple artificial regression example with 2 latent classes, one independent variable (uniform on \eqn{[0,10]}), and three dependent variables with Gaussian, Poisson and Binomial distribution, respectively. } \usage{ ExNPreg(n) data("NPreg") } \arguments{ \item{n}{Number of observations per latent class.} } \details{ The \code{NPreg} data frame can be re-created by \code{ExNPreg(100)} using \code{set.seed(2602)}, it has been saved as a data set for simplicity of examples only. 
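Besides the covariate \code{x}, the data frame contains the responses \code{yn} (Gaussian), \code{yp} (Poisson) and \code{yb} (Binomial) as well as the true class memberships in column \code{class}; a two-component fit to the Gaussian response can, for example, be obtained with \code{flexmix(yn ~ x + I(x^2), data = NPreg, k = 2)}.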
} \examples{ data("NPreg", package = "flexmix") plot(yn ~ x, data = NPreg, col = class) plot(yp ~ x, data = NPreg, col = class) plot(yb ~ x, data = NPreg, col = class) } \keyword{datasets} flexmix/man/ExLinear.Rd0000644000176200001440000000560514404637306014514 0ustar liggesusers% % Copyright (C) 2004-2015 Friedrich Leisch and Bettina Gruen % $Id: ExNclus.Rd 3912 2008-03-13 15:10:24Z gruen $ % \name{ExLinear} \alias{ExLinear} \title{Artificial Data from a Generalized Linear Regression Mixture} \description{ Generate random data mixed from k generalized linear regressions (GLMs). } \usage{ ExLinear(beta, n, xdist = "runif", xdist.args = NULL, family = c("gaussian","poisson"), sd = 1, ...) } \arguments{ \item{beta}{A matrix of regression coefficients. Each row corresponds to a variable, each column to a mixture component. The first row is used as intercept.} \item{n}{Integer, the number of observations per component.} \item{xdist}{Character, name of a random number function for the explanatory variables.} \item{xdist.args}{List, arguments for the random number functions.} \item{family}{A character string naming a GLM family.Only \code{"gaussian"} and \code{"poisson"} are implemented at the moment.} \item{sd}{Numeric, the error standard deviation for each component for Gaussian responses.} \item{\dots}{Used as default for \code{xdist.args} if that is not specified.} } \details{ First, arguments \code{n} (and \code{sd} for Gaussian response) are recycled to the number of mixture components \code{ncol(beta)}, and arguments \code{xdist} and \code{xdist.args} are recycled to the number of explanatory variables \code{nrow(beta)-1}. Then a design matrix is created for each mixture component by drawing random numbers from \code{xdist}. For each component, the design matrix is multiplied by the regression coefficients to form the linear predictor. For Gaussian responses the identity link is used, for Poisson responses the log link. The true cluster memberships are returned as attribute \code{"clusters"}. } \examples{ ## simple example in 2d beta <- matrix(c(1, 2, 3, -1), ncol = 2) sigma <- c(.5, 1) df1 <- ExLinear(beta, 100, sd = sigma, min = -1, max = 2) plot(y~x1, df1, col = attr(df1, "clusters")) ## add a second explanatory variable with exponential distribution beta2 <- rbind(beta, c(-2, 2)) df2 <- ExLinear(beta2, 100, sd = c(.5, 1), xdist = c("runif", "rexp"), xdist.args = list(list(min = -1, max = 2), list(rate = 3))) summary(df2) opar = par("mfrow") par(mfrow = 1:2) hist(df2$x1) hist(df2$x2) par(opar) f1 <- flexmix(y ~ ., data = df2, k = 2) ## sort components on Intercept f1 <- relabel(f1, "model", "Intercept") ## the parameters should be close to the true beta and sigma round(parameters(f1), 3) rbind(beta2, sigma) ### A simple Poisson GLM df3 <- ExLinear(beta/2, 100, min = -1, max = 2, family = "poisson") plot(y ~ x1, df3, col = attr(df3, "clusters")) f3 <- flexmix(y ~ ., data = df3, k = 2, model = FLXMRglm(family = "poisson")) round(parameters(relabel(f3, "model", "Intercept")), 3) beta/2 } \keyword{datasets} flexmix/man/seizure.Rd0000644000176200001440000000414514404637306014471 0ustar liggesusers% % Copyright (C) 2004-2015 Friedrich Leisch and Bettina Gruen % $Id: seizure.Rd 5008 2015-01-13 20:30:25Z gruen $ % \name{seizure} \alias{seizure} \docType{data} \title{Epileptic Seizure Data} \description{ Data from a clinical trial where the effect of intravenous gamma-globulin on suppression of epileptic seizures is studied. 
Daily observations for a period of 140 days on one patient are given, where the first 27 days are a baseline period without treatment, the remaining 113 days are the treatment period. } \usage{data("seizure")} \format{ A data frame with 140 observations on the following 4 variables. \describe{ \item{Seizures}{A numeric vector, daily counts of epileptic seizures.} \item{Hours}{A numeric vector, hours of daily parental observation.} \item{Treatment}{A factor with levels \code{No} and \code{Yes}.} \item{Day}{A numeric vector.} } } \source{ P. Wang, M. Puterman, I. Cockburn, and N. Le. Mixed poisson regression models with covariate dependent rates. \emph{Biometrics}, \bold{52}, 381--400, 1996. } \references{ B. Gruen and F. Leisch. Bootstrapping finite mixture models. In J. Antoch, editor, Compstat 2004--Proceedings in Computational Statistics, 1115--1122. Physika Verlag, Heidelberg, Germany, 2004. ISBN 3-7908-1554-3. } \examples{ data("seizure", package = "flexmix") plot(Seizures/Hours ~ Day, col = as.integer(Treatment), pch = as.integer(Treatment), data = seizure) abline(v = 27.5, lty = 2, col = "grey") legend(140, 9, c("Baseline", "Treatment"), pch = 1:2, col = 1:2, xjust = 1, yjust = 1) set.seed(123) ## The model presented in the Wang et al paper: two components for ## "good" and "bad" days, respectively, each a Poisson GLM with hours of ## parental observation as offset seizMix <- flexmix(Seizures ~ Treatment * log(Day), data = seizure, k = 2, model = FLXMRglm(family = "poisson", offset = log(seizure$Hours))) summary(seizMix) summary(refit(seizMix)) matplot(seizure$Day, fitted(seizMix)/seizure$Hours, type = "l", add = TRUE, col = 3:4) } \keyword{datasets} flexmix/man/stepFlexmix.Rd0000644000176200001440000001451414404637306015314 0ustar liggesusers% % Copyright (C) 2004-2015 Friedrich Leisch and Bettina Gruen % $Id: stepFlexmix.Rd 5115 2017-04-07 08:18:13Z gruen $ % \name{stepFlexmix} \alias{stepFlexmix} \alias{initFlexmix} \alias{initMethod} \alias{stepFlexmix-class} \alias{initMethod-class} \alias{plot,stepFlexmix,missing-method} \alias{show,stepFlexmix-method} \alias{getModel,stepFlexmix-method} \alias{unique,stepFlexmix-method} \title{Run FlexMix Repeatedly} \description{ Runs flexmix repeatedly for different numbers of components and returns the maximum likelihood solution for each. } \usage{ initFlexmix(..., k, init = list(), control = list(), nrep = 3L, verbose = TRUE, drop = TRUE, unique = FALSE) initMethod(name = c("tol.em", "cem.em", "sem.em"), step1 = list(tolerance = 10^-2), step2 = list(), control = list(), nrep = 3L) stepFlexmix(..., k = NULL, nrep = 3, verbose = TRUE, drop = TRUE, unique = FALSE) \S4method{plot}{stepFlexmix,missing}(x, y, what = c("AIC", "BIC", "ICL"), xlab = NULL, ylab = NULL, legend = "topright", ...) \S4method{getModel}{stepFlexmix}(object, which = "BIC") \S4method{unique}{stepFlexmix}(x, incomparables = FALSE, ...) 
} \arguments{ \item{\dots}{Passed to \code{\link{flexmix}} (or \code{\link{matplot}} in the \code{plot} method).} \item{k}{A vector of integers passed in turn to the \code{k} argument of \code{\link{flexmix}}.} \item{init}{An object of class \code{"initMethod"} or a named list where \code{initMethod} is called with it as arguments in addition to the \code{control} argument.} \item{name}{A character string indication which initialization strategy should be employed: short runs of EM followed by a long (\code{"tol.em"}), short runs of CEM followed by a long EM run (\code{"cem.em"}), short runs of SEM followed by a long EM run (\code{"sem.em"}).} \item{step1}{A named list which combined with the \code{control} argument is coercable to a \code{"FLXcontrol"} object. This control setting is used for the short runs.} \item{step2}{A named list which combined with the \code{control} argument is coercable to a \code{"FLXcontrol"} object. This control setting is used for the long run.} \item{control}{A named list which combined with the \code{step1} or the \code{step2} argument is coercable to a \code{"FLXcontrol"} object.} \item{nrep}{For each value of \code{k} run \code{\link{flexmix}} \code{nrep} times and keep only the solution with maximum likelihood. If \code{nrep} is set for the long run, it is ignored, because the EM algorithm is deterministic using the best solution discovered in the short runs for initialization.} \item{verbose}{If \code{TRUE}, show progress information during computations.} \item{drop}{If \code{TRUE} and \code{k} is of length 1, then a single flexmix object is returned instead of a \code{"stepFlexmix"} object.} \item{unique}{If \code{TRUE}, then \code{unique()} is called on the result, see below.} \item{x, object}{An object of class \code{"stepFlexmix"}.} \item{y}{Not used.} \item{what}{Character vector naming information criteria to plot. Functions of the same name must exist, which take a \code{stepFlexmix} object as input and return a numeric vector like \code{AIC,stepFlexmix-method} (see examples below).} \item{xlab,ylab}{Graphical parameters.} \item{legend}{If not \code{FALSE} and \code{what} contains more than 1 element, a legend is placed at the specified location, see \code{\link{legend}} for details.} \item{which}{Number of model to get. If character, interpreted as number of components or name of an information criterion.} \item{incomparables}{A vector of values that cannot be compared. Currently, \code{FALSE} is the only possible value, meaning that all values can be compared.} } \value{ An object of class \code{"stepFlexmix"} containing the best models with respect to the log likelihood for the different number of components in a slot if \code{length(k)>1}, else directly an object of class \code{"flexmix"}. If \code{unique = FALSE}, then the resulting object contains one model per element of \code{k} (which is the number of clusters the EM algorithm started with). If \code{unique = TRUE}, then the result is resorted according to the number of clusters contained in the fitted models (which may be less than the number with which the EM algorithm started), and only the maximum likelihood solution for each number of fitted clusters is kept. This operation can also be done manually by calling \code{unique()} on objects of class \code{"stepFlexmix"}. } \author{Friedrich Leisch and Bettina Gruen} \references{ Friedrich Leisch. FlexMix: A general framework for finite mixture models and latent class regression in R. 
\emph{Journal of Statistical Software}, \bold{11}(8), 2004. doi:10.18637/jss.v011.i08 Christophe Biernacki, Gilles Celeux and Gerard Govaert. Choosing starting values for the EM algorithm for getting the highest likelihood in multivariate Gaussian mixture models. \emph{Computational Statistics & Data Analysis}, \bold{41}(3--4), 561--575, 2003. Theresa Scharl, Bettina Gruen and Friedrch Leisch. Mixtures of regression models for time-course gene expression data: Evaluation of initialization and random effects. \emph{Bioinformatics}, \bold{26}(3), 370--377, 2010. } \examples{ data("Nclus", package = "flexmix") ## try 2 times for k = 4 set.seed(511) ex1 <- initFlexmix(Nclus~1, k = 4, model = FLXMCmvnorm(diagonal = FALSE), nrep = 2) ex1 ## now 2 times each for k = 2:5, specify control parameter ex2 <- initFlexmix(Nclus~1, k = 2:5, model = FLXMCmvnorm(diagonal = FALSE), control = list(minprior = 0), nrep = 2) ex2 plot(ex2) ## get BIC values BIC(ex2) ## get smallest model getModel(ex2, which = 1) ## get model with 3 components getModel(ex2, which = "3") ## get model with smallest ICL (here same as for AIC and BIC: true k = 4) getModel(ex2, which = "ICL") ## now 1 time each for k = 2:5, with larger minimum prior ex3 <- initFlexmix(Nclus~1, k = 2:5, model = FLXMCmvnorm(diagonal = FALSE), control = list(minprior = 0.1), nrep = 1) ex3 ## keep only maximum likelihood solution for each unique number of ## fitted clusters: unique(ex3) } \keyword{cluster} \keyword{regression} flexmix/man/AIC-methods.Rd0000644000176200001440000000104614404637306015035 0ustar liggesusers% % Copyright (C) 2004-2015 Friedrich Leisch and Bettina Gruen % $Id: AIC-methods.Rd 5008 2015-01-13 20:30:25Z gruen $ % \name{AIC-methods} \docType{methods} \title{Methods for Function AIC} \alias{AIC,flexmix-method} \alias{AIC,stepFlexmix-method} \description{Compute the Akaike Information Criterion.} \section{Methods}{ \describe{ \item{object = flexmix:}{Compute the AIC of a \code{flexmix} object} \item{object = stepFlexmix:}{Compute the AIC of all models contained in the \code{stepFlexmix} object.} } } \keyword{methods} flexmix/man/plot-methods.Rd0000644000176200001440000000727614404637306015432 0ustar liggesusers% % Copyright (C) 2004-2015 Friedrich Leisch and Bettina Gruen % $Id: plot-methods.Rd 5115 2017-04-07 08:18:13Z gruen $ % \name{plot-methods} \docType{methods} \alias{plot-methods} \alias{plot,flexmix,missing-method} \title{Rootogram of Posterior Probabilities} \description{ The \code{plot} method for \code{\link{flexmix-class}} objects gives a rootogram or histogram of the posterior probabilities. } \usage{ \S4method{plot}{flexmix,missing}(x, y, mark = NULL, markcol = NULL, col = NULL, eps = 1e-4, root = TRUE, ylim = TRUE, main = NULL, xlab = "", ylab = "", as.table = TRUE, endpoints = c(-0.04, 1.04), ...) } \arguments{ \item{x}{An object of class \code{"flexmix"}.} \item{y}{Not used.} \item{mark}{Integer: mark posteriors of this component.} \item{markcol}{Color used for marking components.} \item{col}{Color used for the bars.} \item{eps}{Posteriors smaller than \code{eps} are ignored.} \item{root}{If \code{TRUE}, a rootogram of the posterior probabilities is drawn, otherwise a standard histogram.} \item{ylim}{A logical value or a numeric vector of length 2. If \code{TRUE}, the y axes of all rootograms are aligned to have the same limits, if \code{FALSE} each y axis is scaled separately. 
If a numeric vector is specified it is used as usual.} \item{main}{Main title of the plot.} \item{xlab}{Label of x-axis.} \item{ylab}{Label of y-axis.} \item{as.table}{Logical that controls the order in which panels should be plotted: if \code{FALSE} (the default), panels are drawn left to right, bottom to top (as in a graph); if \code{TRUE}, left to right, top to bottom.} \item{endpoints}{Vector of length 2 indicating the range of x-values that is to be covered by the histogram. This applies only when \code{breaks} is unspecified. In \code{do.breaks}, this specifies the interval that is to be divided up.} \item{...}{Further graphical parameters for the lattice function histogram.} } \details{ For each mixture component a rootogram or histogram of the posterior probabilities of all observations is drawn. Rootograms are very similar to histograms, the only difference is that the height of the bars correspond to square roots of counts rather than the counts themselves, hence low counts are more visible and peaks less emphasized. Please note that the y-axis denotes the number of observations in each bar in any case. Usually in each component a lot of observations have posteriors close to zero, resulting in a high count for the corresponding bin in the rootogram which obscures the information in the other bins. To avoid this problem, all probabilities with a posterior below \code{eps} are ignored. A peak at probability one indicates that a mixture component is well seperated from the other components, while no peak at one and/or significant mass in the middle of the unit interval indicates overlap with other components. } \references{ Friedrich Leisch. FlexMix: A general framework for finite mixture models and latent class regression in R. \emph{Journal of Statistical Software}, \bold{11}(8), 2004. doi:10.18637/jss.v011.i08 Jeremy Tantrum, Alejandro Murua and Werner Stuetzle. Assessment and pruning of hierarchical model based clustering. Proceedings of the 9th ACM SIGKDD international conference on Knowledge Discovery and Data Mining, 197--205. ACM Press, New York, NY, USA, 2003. Friedrich Leisch. Exploring the structure of mixture model components. In Jaromir Antoch, editor, Compstat 2004--Proceedings in Computational Statistics, 1405--1412. Physika Verlag, Heidelberg, Germany, 2004. ISBN 3-7908-1554-3. } \author{Friedrich Leisch and Bettina Gruen} \keyword{methods} \keyword{hplot} flexmix/man/ICL.Rd0000644000176200001440000000245414404637306013413 0ustar liggesusers% % Copyright (C) 2004-2015 Friedrich Leisch and Bettina Gruen % $Id: ICL.Rd 5008 2015-01-13 20:30:25Z gruen $ % \name{ICL} \alias{ICL,flexmix-method} \alias{ICL,stepFlexmix-method} \title{Integrated Completed Likelihood Criterion} \description{ Compute the Integrated Completed Likelihood criterion for model selection. } \usage{ \S4method{ICL}{flexmix}(object, \dots) \S4method{ICL}{stepFlexmix}(object, \dots) } \arguments{ \item{object}{see Methods section below} \item{\dots}{Some methods for this generic function may take additional, optional arguments. At present none do.} } \section{Methods}{ \describe{ \item{object = "flexmix":}{Compute the ICL of a \code{flexmix} object.} \item{object = "stepFlexmix":}{Compute the ICL of all models contained in the \code{stepFlexmix} object.} }} \value{ Returns a numeric vector with the corresponding ICL value(s). } \keyword{methods} \author{Friedrich Leisch and Bettina Gruen} \references{ C. Biernacki, G. Celeux, and G. Govaert. 
Assessing a mixture model for clustering with the integrated completed likelihood. \emph{IEEE Transactions on Pattern Analysis and Machine Intelligence}, \emph{22}(7), 719--725, 2000. } \examples{ data("NPreg", package = "flexmix") ex1 <- flexmix(yn ~ x + I(x^2), data = NPreg, k = 2) ICL(ex1) } flexmix/man/trypanosome.Rd0000644000176200001440000000321514404637306015360 0ustar liggesusers% % Copyright (C) 2004-2015 Friedrich Leisch and Bettina Gruen % $Id: trypanosome.Rd 5008 2015-01-13 20:30:25Z gruen $ % \name{trypanosome} \alias{trypanosome} \docType{data} \title{Trypanosome} \description{ Trypanosome data from a dosage-response analysis to assess the proportion of organisms belonging to different populations. It is assumed that organisms belonging to different populations are indistinguishable other than in terms of their reaction to the stimulus. } \usage{data("trypanosome")} \format{ A data frame with 426 observations on the following 2 variables. \describe{ \item{\code{Dead}}{A logical vector.} \item{\code{Dose}}{A numeric vector.} } } \details{ The experimental technique involved inspection under the microscope of a representative aliquot of a suspension, all organisms appearing within two fields of view being classified either alive or dead. Hence the total numbers of organisms present at each dose and the number showing the quantal response were both random variables. } \source{ R. Ashford and P.J. Walker. Quantal Response Analysis for a Mixture of Populations. \emph{Biometrics}, \bold{28}, 981--988, 1972. } \references{ D.A. Follmann and D. Lambert. Generalizing Logistic Regression by Nonparametric Mixing. \emph{Journal of the American Statistical Association}, \bold{84}(405), 195--300, 1989. } \examples{ data("trypanosome", package = "flexmix") trypMix <- initFlexmix(cbind(Dead, 1-Dead) ~ 1, k = 2, nrep = 5, data = trypanosome, model = FLXMRglmfix(family = "binomial", fixed = ~log(Dose))) } \keyword{datasets} flexmix/man/posterior.Rd0000644000176200001440000000225514404637306015031 0ustar liggesusers% % Copyright (C) 2004-2015 Friedrich Leisch and Bettina Gruen % $Id: posterior.Rd 5008 2015-01-13 20:30:25Z gruen $ % \name{posterior} \alias{clusters,flexmix,missing-method} \alias{clusters,FLXdist,ANY-method} \alias{posterior,flexmix,missing-method} \alias{posterior,FLXdist,listOrdata.frame-method} \title{Determine Cluster Membership and Posterior Probabilities} \description{Determine posterior probabilities or cluster memberships for a fitted \code{flexmix} or unfitted \code{FLXdist} model.} \usage{ \S4method{posterior}{flexmix,missing}(object, newdata, unscaled = FALSE, ...) \S4method{posterior}{FLXdist,listOrdata.frame}(object, newdata, unscaled = FALSE, ...) \S4method{clusters}{flexmix,missing}(object, newdata, ...) \S4method{clusters}{FLXdist,ANY}(object, newdata, ...) } \arguments{ \item{object}{An object of class "flexmix" or "FLXdist".} \item{newdata}{Data frame or list containing new data. 
If missing the posteriors of the original observations are returned.} \item{unscaled}{Logical, if \code{TRUE} the component-specific likelihoods are returned.} \item{\dots}{Currently not used.} } \author{Friedrich Leisch and Bettina Gruen} \keyword{methods} flexmix/man/rflexmix.Rd0000644000176200001440000000315614404637306014642 0ustar liggesusers\name{rflexmix} \alias{rflexmix} \alias{rflexmix,flexmix,missing-method} \alias{rflexmix,FLXdist,numeric-method} \alias{rflexmix,FLXdist,listOrdata.frame-method} \alias{rFLXM} \alias{rFLXM,FLXM,list-method} \alias{rFLXM,FLXMC,FLXcomponent-method} \alias{rFLXM,FLXMCmultinom,FLXcomponent-method} \alias{rFLXM,FLXMCbinom,FLXcomponent-method} \alias{rFLXM,FLXMRglm,list-method} \alias{rFLXM,FLXMRglmfix,list-method} \alias{rFLXM,FLXM,FLXcomponent-method} \alias{rFLXM,FLXMRglm,FLXcomponent-method} \title{Random Number Generator for Finite Mixtures} \description{ Given a finite mixture model generate random numbers from it. } \usage{ rflexmix(object, newdata, ...) } \arguments{ \item{object}{A fitted finite mixture model of class \code{flexmix} or an unfitted of class \code{FLXdist}.} \item{newdata}{Optionally, a data frame in which to look for variables with which to predict or an integer specifying the number of random draws for model-based clustering. If omitted, the data to which the model was fitted is used.} \item{\dots}{Further arguments to be passed to or from methods.} } \details{ \code{rflexmix} provides the creation of the model matrix for new data and the sampling of the cluster memberships. The sampling of the component distributions given the classification is done by calling \code{rFLXM}. This step has to be provided for the different model classes. } \value{ A list with components \item{y}{Random sample} \item{group}{Grouping factor} \item{class}{Class membership} } \author{Bettina Gruen} \examples{ example(flexmix) sample <- rflexmix(ex1) } \keyword{distribution} flexmix/man/FLXglmFix.Rd0000644000176200001440000000432514404637306014603 0ustar liggesusers% % Copyright (C) 2004-2015 Friedrich Leisch and Bettina Gruen % $Id: FLXglmFix.Rd 5229 2022-06-02 14:47:26Z gruen $ % \name{FLXMRglmfix} \alias{FLXMRglmfix} \alias{FLXglmFix} \title{FlexMix Interface to GLMs with Fixed Coefficients} \description{ This implements a driver for FlexMix which interfaces the \code{glm} family of models and where it is possible to specify fixed (constant) or nested varying coefficients or to ensure that in the Gaussian case the variance estimate is equal for all components. } \usage{ FLXMRglmfix(formula = . ~ ., fixed = ~0, varFix = FALSE, nested = NULL, family = c("gaussian", "binomial", "poisson", "Gamma"), offset = NULL) } \arguments{ \item{formula}{A formula which is interpreted relative to the formula specified in the call to \code{flexmix} using \code{update.formula}. Default is to use the original \code{flexmix} model formula.} \item{fixed}{A formula which specifies the additional regressors for the fixed (constant) coefficients.} \item{varFix}{A logical indicating if the variance estimate for Gaussian components should be constrained to be equal for all components. 
It can be also a vector specifying the number of components with equal variance.} \item{nested}{An object of class \code{FLXnested} or a list specifying the nested structure.} \item{family}{A character string naming a \code{glm} family function.} \item{offset}{This can be used to specify an \emph{a priori} known component to be included in the linear predictor during fitting.} } \value{ Returns an object of class \code{FLXMRglmfix} inheriting from \code{FLXMRglm} and \code{FLXMRfix}. } \author{Friedrich Leisch and Bettina Gruen} \seealso{\code{FLXMRglm}} \examples{ data("NPreg", package = "flexmix") ex <- flexmix(yn ~ x | id2, data = NPreg, k = 2, cluster = NPreg$class, model = FLXMRglm(yn ~ . + I(x^2))) ex.fix <- flexmix(yn ~ x | id2, data = NPreg, cluster = posterior(ex), model = FLXMRglmfix(nested = list(k = c(1, 1), formula = c(~0, ~I(x^2))))) summary(refit(ex)) \dontrun{ summary(refit(ex.fix)) } } \keyword{regression} \keyword{models} flexmix/man/tribolium.Rd0000644000176200001440000000356614404637306015017 0ustar liggesusers% % Copyright (C) 2004-2015 Friedrich Leisch and Bettina Gruen % $Id: tribolium.Rd 5008 2015-01-13 20:30:25Z gruen $ % \name{tribolium} \alias{tribolium} \docType{data} \title{Tribolium Beetles} \description{ The data investigates whether the adult Tribolium species Castaneum has developed an evolutionary advantage to recognize and avoid eggs of their own species while foraging. } \usage{data("tribolium")} \format{ A data frame with 27 observations on the following 4 variables. \describe{ \item{\code{Remaining}}{A numeric vector.} \item{\code{Total}}{A numeric vector.} \item{\code{Replicate}}{A factor with levels \code{1}, \code{2}, \code{3}.} \item{\code{Species}}{A factor with levels \code{Castaneum} \code{Confusum} \code{Madens}.} } } \details{ Beetles of the genus Tribolium are cannibalistic in the sense that adults eat the eggs of their own species as well as those of closely related species. The experiment isolated a number of adult beetles of the same species and presented them with a vial of 150 eggs (50 of each type), the eggs being thoroughly mixed to ensure uniformity throughout the vial. The data gives the consumption data for adult Castaneum species. It reports the number of Castaneum, Confusum and Madens eggs, respectively, that remain uneaten after two day exposure to the adult beetles. Replicates 1, 2, and 3 correspond to different occasions on which the experiment was conducted. } \source{ P. Wang and M.L. Puterman. Mixed Logistic Regression Models. \emph{Journal of Agricultural, Biological, and Environmental Statistics}, \bold{3} (2), 175--200, 1998. 
} \examples{ data("tribolium", package = "flexmix") tribMix <- initFlexmix(cbind(Remaining, Total - Remaining) ~ Species, k = 2, nrep = 5, data = tribolium, model = FLXMRglm(family = "binomial")) } \keyword{datasets} flexmix/DESCRIPTION0000644000176200001440000000344214404700400013430 0ustar liggesusersPackage: flexmix Type: Package Title: Flexible Mixture Modeling Version: 2.3-19 Authors@R: c(person("Bettina", "Gruen", role = c("aut", "cre"), email = "Bettina.Gruen@R-project.org", comment = c(ORCID = "0000-0001-7265-4773")), person("Friedrich", "Leisch", role = "aut", comment = c(ORCID = "0000-0001-7278-1983")), person("Deepayan", "Sarkar", role = "ctb", comment = c(ORCID = "0000-0003-4107-1553")), person("Frederic", "Mortier", role = "ctb"), person("Nicolas", "Picard", role = "ctb", comment = c(ORCID = "0000-0001-5548-9171"))) Description: A general framework for finite mixtures of regression models using the EM algorithm is implemented. The E-step and all data handling are provided, while the M-step can be supplied by the user to easily define new models. Existing drivers implement mixtures of standard linear models, generalized linear models and model-based clustering. Depends: R (>= 2.15.0), lattice Imports: graphics, grid, grDevices, methods, modeltools (>= 0.2-16), nnet, stats, stats4, utils Suggests: actuar, codetools, diptest, Ecdat, ellipse, gclus, glmnet, lme4 (>= 1.1), MASS, mgcv (>= 1.8-0), mlbench, multcomp, mvtnorm, SuppDists, survival License: GPL (>= 2) LazyLoad: yes NeedsCompilation: no Packaged: 2023-03-16 18:46:58 UTC; gruen Author: Bettina Gruen [aut, cre] (), Friedrich Leisch [aut] (), Deepayan Sarkar [ctb] (), Frederic Mortier [ctb], Nicolas Picard [ctb] () Maintainer: Bettina Gruen Repository: CRAN Date/Publication: 2023-03-16 20:50:08 UTC flexmix/build/0000755000176200001440000000000014404662037013033 5ustar liggesusersflexmix/build/vignette.rds0000644000176200001440000000077214404662037015400 0ustar liggesusersTMo@uВHP)8 9 .V52TEPNq޵vJ?n!'(;o1 a#Nu+~Z)kƢr3_ɭҌv̈́Zs;M3MM%Mn3a^dT]ݏ,k2X2XHe |3ȔpIV%|8?\'1HcN[B4 )@$-\eh +|AHX9ztpC_ l+%c LxZ7\̻*S-2GˤNeۋіuBZ^CMI!vt$nb 65/{u\l8 ^{8nktRwaS}L%ylϻm55J ;!?jƕqROUݦ>⁕V?5[ {&x8_=LF׋ɂ +gۂ]V۹{Q^32jyZs \hflexmix/vignettes/0000755000176200001440000000000014404662042013740 5ustar liggesusersflexmix/vignettes/regression-examples.Rnw0000644000176200001440000012407214404637307020440 0ustar liggesusers\documentclass[nojss]{jss} \usepackage{amsfonts,bm,amsmath,amssymb} %%\usepackage{Sweave} %% already provided by jss.cls %%%\VignetteIndexEntry{Applications of finite mixtures of regression models} %%\VignetteDepends{flexmix} %%\VignetteKeywords{R, finite mixture model, generalized linear model, latent class regression} %%\VignettePackage{flexmix} \title{Applications of finite mixtures of regression models} <>= library("stats") library("graphics") library("flexmix") @ \author{Bettina Gr{\"u}n\\ Wirtschaftsuniversit{\"a}t Wien \And Friedrich Leisch\\ Universit\"at f\"ur Bodenkultur Wien} \Plainauthor{Bettina Gr{\"u}n, Friedrich Leisch} \Address{ Bettina Gr\"un\\ Institute for Statistics and Mathematics\\ Wirtschaftsuniversit{\"a}t Wien\\ Welthandelsplatz 1\\ 1020 Wien, Austria\\ E-mail: \email{Bettina.Gruen@R-project.org}\\ Friedrich Leisch\\ Institut f\"ur Angewandte Statistik und EDV\\ Universit\"at f\"ur Bodenkultur Wien\\ Peter Jordan Stra\ss{}e 82\\ 1190 Wien, Austria\\ E-mail: \email{Friedrich.Leisch@boku.ac.at} } \Abstract{ Package \pkg{flexmix} provides functionality for fitting finite mixtures of regression models. 
The available model class includes generalized linear models with varying and fixed effects for the component specific models and multinomial logit models for the concomitant variable models. This model class includes random intercept models where the random part is modelled by a finite mixture instead of a-priori selecting a suitable distribution. The application of the package is illustrated on various datasets which have been previously used in the literature to fit finite mixtures of Gaussian, binomial or Poisson regression models. The \proglang{R} commands are given to fit the proposed models and additional insights are gained by visualizing the data and the fitted models as well as by fitting slightly modified models. } \Keywords{\proglang{R}, finite mixture models, generalized linear models, concomitant variables} \Plainkeywords{R, finite mixture models, generalized linear models, concomitant variables} %%------------------------------------------------------------------------- %%------------------------------------------------------------------------- \begin{document} \SweaveOpts{engine=R, echo=true, height=5, width=8, eps=FALSE, keep.source=TRUE} \setkeys{Gin}{width=0.8\textwidth} <>= options(width=70, prompt = "R> ", continue = "+ ", useFancyQuotes = FALSE) suppressWarnings(RNGversion("3.5.0")) set.seed(1802) library("lattice") ltheme <- canonical.theme("postscript", FALSE) lattice.options(default.theme=ltheme) @ %%------------------------------------------------------------------------- %%------------------------------------------------------------------------- \section{Introduction} Package \pkg{flexmix} provides infrastructure for flexible fitting of finite mixtures models. The design principles of the package allow easy extensibility and rapid prototyping. In addition, the main focus of the available functionality is on fitting finite mixtures of regression models, as other packages in \proglang{R} exist which have specialized functionality for model-based clustering, such as e.g.~\pkg{mclust} \citep{flexmix:Fraley+Raftery:2002a} for finite mixtures of Gaussian distributions. \cite{flexmix:Leisch:2004a} gives a general introduction into the package outlining the main implementational principles and illustrating the use of the package. The paper is also contained as a vignette in the package. An example for fitting mixtures of Gaussian regression models is given in \cite{flexmix:Gruen+Leisch:2006}. This paper focuses on examples of finite mixtures of binomial logit and Poisson regression models. Several datasets which have been previously used in the literature to demonstrate the use of finite mixtures of regression models have been selected to illustrate the application of the package. The model class covered are finite mixtures of generalized linear model with focus on binomial logit and Poisson regressions. The regression coefficients as well as the dispersion parameters of the component specific models are assumed to vary for all components, vary between groups of components, i.e.~to have a nesting, or to be fixed over all components. In addition it is possible to specify concomitant variable models in order to be able to characterize the components. Random intercept models are a special case of finite mixtures with varying and fixed effects as fixed effects are assumed for the coefficients of all covariates and varying effects for the intercept. 
These models are often used to capture overdispersion in the data which can occur for example if important covariates are omitted in the regression. It is then assumed that the influence of these covariates can be captured by allowing a random distribution for the intercept. This illustration does not only show how the package \pkg{flexmix} can be used for fitting finite mixtures of regression models but also indicates the advantages of using an extension package of an environment for statistical computing and graphics instead of a stand-alone package as available visualization techniques can be used for inspecting the data and the fitted models. In addition users already familiar with \proglang{R} and its formula interface should find the model specification and a lot of commands for exploring the fitted model intuitive. %%------------------------------------------------------------------------- %%------------------------------------------------------------------------- \section{Model specification} Finite mixtures of Gaussian regressions with concomitant variable models are given by: \begin{align*} H(y\,|\,\bm{x}, \bm{w}, \bm{\Theta}) &= \sum_{s = 1}^S \pi_s(\bm{w}, \bm{\alpha}) \textrm{N}(y\,|\, \mu_s(\bm{x}), \sigma^2_s), \end{align*} where $\textrm{N}(\cdot\,|\, \mu_s(\bm{x}), \sigma^2_s)$ is the Gaussian distribution with mean $\mu_s(\bm{x}) = \bm{x}' \bm{\beta}^s$ and variance $\sigma^2_s$. $\Theta$ denotes the vector of all parameters of the mixture distribution and the dependent variables are $y$, the independent $\bm{x}$ and the concomitant $\bm{w}$. Finite mixtures of binomial regressions with concomitant variable models are given by: \begin{align*} H(y\,|\,T, \bm{x}, \bm{w}, \bm{\Theta}) &= \sum_{s = 1}^S \pi_s(\bm{w}, \bm{\alpha}) \textrm{Bi}(y\,|\,T, \theta_s(\bm{x})), \end{align*} where $\textrm{Bi}(\cdot\,|\,T, \theta_s(\bm{x}))$ is the binomial distribution with number of trials equal to $T$ and success probability $\theta_s(\bm{x}) \in (0,1)$ given by $\textrm{logit}(\theta_s(\bm{x})) = \bm{x}' \bm{\beta}^s$. Finite mixtures of Poisson regressions are given by: \begin{align*} H(y \,|\, \bm{x}, \bm{w}, \bm{\Theta}) &= \sum_{s = 1}^S \pi_s(\bm{w}, \bm{\alpha}) \textrm{Poi} (y \,|\, \lambda_s(\bm{x})), \end{align*} where $\textrm{Poi}(\cdot\,|\,\lambda_s(\bm{x}))$ denotes the Poisson distribution and $\log(\lambda_s(\bm{x})) = \bm{x}'\bm{\beta}^s$. For all these mixture distributions the coefficients are split into three different groups depending on if fixed, nested or varying effects are specified: \begin{align*} \bm{\beta}^s &= (\bm{\beta}_1, \bm{\beta}^{c(s)}_{2}, \bm{\beta}^{s}_3) \end{align*} where the first group represents the fixed, the second the nested and the third the varying effects. For the nested effects a partition $\mathcal{C} = \{c_s \,|\, s = 1,\ldots S\}$ of the $S$ components is determined where $c_s = \{s^* = 1,\ldots,S \,|\, c(s^*) = c(s)\}$. A similar splitting is possible for the variance of mixtures of Gaussian regression models. The function for maximum likelihood (ML) estimation with the Expectation-Maximization (EM) algorithm is \code{flexmix()} which is described in detail in \cite{flexmix:Leisch:2004a}. It takes as arguments a specification of the component specific model and of the concomitant variable model. 
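To give an impression of how these building blocks fit together, a schematic call might look as follows (a sketch only, not evaluated: the response \code{y}, the covariates \code{x1} and \code{x2}, the concomitant variable \code{w} and the data frame \code{d} are placeholders; the two drivers used here are described in the following).
<<eval=FALSE>>=
flexmix(y ~ 1, data = d, k = 3,
        model = FLXMRglmfix(fixed = ~ x2,
                            nested = list(k = c(1, 2), formula = c(~ 0, ~ x1)),
                            family = "gaussian"),
        concomitant = FLXPmultinom(~ w))
@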
The component specific model with varying, nested and fixed effects can be specified with the M-step driver \code{FLXMRglmfix()} which has arguments \code{formula} for the varying, \code{nested} for the nested and \code{fixed} for the fixed effects. \code{formula} and \code{fixed} take an argument of class \code{"formula"}, whereas \code{nested} expects an object of class \code{"FLXnested"} or a named list specifying the nested structure with a component \code{k} which is a vector of the number of components in each group of the partition and a component \code{formula} which is a vector of formulas for each group of the partition. In addition there is an argument \code{family} which has to be one of \code{gaussian}, \code{binomial}, \code{poisson} or \code{Gamma} and determines the component specific distribution function as well as an \code{offset} argument. The argument \code{varFix} can be used to determine the structure of the dispersion parameters. If only varying effects are specified the M-step driver \code{FLXMRglm()} can be used which only has an argument \code{formula} for the varying effects and also a \code{family} and an \code{offset} argument. This driver has the advantage that in the M-step the weighted ML estimation is made separately for each component which signifies that smaller model matrices are used. If a mixture model with a lot of components $S$ is fitted to a large data set with $N$ observations and the model matrix used in the M-step of \code{FLXMRglm()} has $N$ rows and $K$ columns, the model matrix used in the M-step of \code{FLXMRglmfix()} has $S N$ rows and up to $S K$ columns. In general the concomitant variable model is assumed to be a multinomial logit model, i.e.~: \begin{align*} \pi_s(\bm{w},\bm{\alpha}) &= \frac{e^{\bm{w}'\bm{\alpha}_s}}{\sum_{u = 1}^S e^{\bm{w}'\bm{\alpha}_u}} \quad \forall s, \end{align*} with $\bm{\alpha} = (\bm{\alpha}'_s)_{s=1,\ldots,S}$ and $\bm{\alpha}_1 \equiv \bm{0}$. This model can be fitted in \pkg{flexmix} with \code{FLXPmultinom()} which takes as argument \code{formula} the formula specification of the multinomial logit part. For fitting the function \code{nnet()} is used from package \pkg{MASS} \citep{flexmix:Venables+Ripley:2002} with the independent variables specified by the formula argument and the dependent variables are given by the a-posteriori probability estimates. %%------------------------------------------------------------------------- %%------------------------------------------------------------------------- \section[Using package flexmix]{Using package \pkg{flexmix}} In the following datasets from different areas such as medicine, biology and economics are used. There are three subsections: for finite mixtures of Gaussian regressions, for finite mixtures of binomial regression models and for finite mixtures of Poisson regression models. %%------------------------------------------------------------------------- \subsection{Finite mixtures of Gaussian regressions} This artificial dataset with 200 observations is given in \cite{flexmix:Gruen+Leisch:2006}. The data is generated from a mixture of Gaussian regression models with three components. There is an intercept with varying effects, an independent variable $x1$, which is a numeric variable, with fixed effects and another independent variable $x2$, which is a categorical variable with two levels, with nested effects. The prior probabilities depend on a concomitant variable $w$, which is also a categorical variable with two levels. 
Fixed effects are also assumed for the variance. The data is illustrated in Figure~\ref{fig:artificialData} and the true underlying model is given by: \begin{align*} H(y\,|\,(x1, x2), w, \bm{\Theta}) &= \sum_{s = 1}^S \pi_s(w, \bm{\alpha}) \textrm{N}(y\,|\, \mu_s, \sigma^2), \end{align*} with $\bm{\beta}^s = (\beta^s_{\textrm{Intercept}}, \beta^{c(s)}_{\textrm{x1}}, \beta_{\textrm{x2}})$. The nesting signifies that $c(1) = c(2)$ and $\beta^{c(3)}_{\textrm{x1}} = 0$. The mixture model is fitted by first loading the package and the dataset and then specifying the component specific model. In a first step a component specific model with only varying effects is specified. Then the fitting function \code{flexmix()} is called repeatedly using \code{stepFlexmix()}. Finally, we order the components such that they are in ascending order with respect to the coefficients of the variable \code{x1}. <>= set.seed(2807) library("flexmix") data("NregFix", package = "flexmix") Model <- FLXMRglm(~ x2 + x1) fittedModel <- stepFlexmix(y ~ 1, model = Model, nrep = 3, k = 3, data = NregFix, concomitant = FLXPmultinom(~ w)) fittedModel <- relabel(fittedModel, "model", "x1") summary(refit(fittedModel)) @ The estimated coefficients indicate that the components differ for the intercept, but that they are not significantly different for the coefficients of $x2$. For $x1$ the coefficient of the first component is not significantly different from zero and the confidence intervals for the other two components overlap. Therefore we fit a modified model, which is equivalent to the true underlying model. The previously fitted model is used for initializing the EM algorithm: <>= Model2 <- FLXMRglmfix(fixed = ~ x2, nested = list(k = c(1, 2), formula = c(~ 0, ~ x1)), varFix = TRUE) fittedModel2 <- flexmix(y ~ 1, model = Model2, cluster = posterior(fittedModel), data = NregFix, concomitant = FLXPmultinom(~ w)) BIC(fittedModel) BIC(fittedModel2) @ The BIC suggests that the restricted model should be preferred. \begin{figure}[tb] \centering \setkeys{Gin}{width=0.95\textwidth} <>= plotNregFix <- NregFix plotNregFix$w <- factor(NregFix$w, levels = 0:1, labels = paste("w =", 0:1)) plotNregFix$x2 <- factor(NregFix$x2, levels = 0:1, labels = paste("x2 =", 0:1)) plotNregFix$class <- factor(NregFix$class, levels = 1:3, labels = paste("Class", 1:3)) print(xyplot(y ~ x1 | x2*w, groups = class, data = plotNregFix, cex = 0.6, auto.key = list(space="right"), layout = c(2,2))) @ \setkeys{Gin}{width=0.8\textwidth} \caption{Sample with 200 observations from the artificial example.} \label{fig:artificialData} \end{figure} <>= summary(refit(fittedModel2)) @ The coefficients are ordered such that the fixed coefficients are first, the nested varying coefficients second and the varying coefficients last. %%------------------------------------------------------------------------- \subsection{Finite mixtures of binomial logit regressions} %%------------------------------------------------------------------------- \subsubsection{Beta blockers} The dataset is analyzed in \cite{flexmix:Aitkin:1999, flexmix:Aitkin:1999a} using a finite mixture of binomial regression models. Furthermore, it is described in \cite{flexmix:McLachlan+Peel:2000} on page 165. The dataset is from a 22-center clinical trial of beta-blockers for reducing mortality after myocardial infarction. A two-level model is assumed to represent the data, where centers are at the upper level and patients at the lower level. 
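The aggregated data is shipped with the package; each row contains the number of deaths and the total number of patients for one center and treatment arm, which can be inspected with, e.g., the following (not evaluated here):
<<eval=FALSE>>=
data("betablocker", package = "flexmix")
head(betablocker)
@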
The data is illustrated in Figure~\ref{fig:beta} and the model is given by: \begin{align*} H(\textrm{Deaths} \,|\, \textrm{Total}, \textrm{Treatment}, \textrm{Center}, \bm{\Theta}) &= \sum_{s = 1}^S \pi_s \textrm{Bi}( \textrm{Deaths} \,|\, \textrm{Total}, \theta_s). \end{align*} First, the center classification is ignored and a binomial logit regression model with treatment as covariate is fitted using \code{glm}, i.e.~$S=1$: <>= data("betablocker", package = "flexmix") betaGlm <- glm(cbind(Deaths, Total - Deaths) ~ Treatment, family = "binomial", data = betablocker) betaGlm @ In the next step the center classification is included by allowing a random effect for the intercept given the centers, i.e.~the coefficients $\bm{\beta}^s$ are given by $(\beta^s_{\textrm{Intercept|Center}}, \beta_{\textrm{Treatment}})$. This signifies that the component membership is fixed for each center. In order to determine the suitable number of components, the mixture is fitted with different numbers of components and the BIC information criterion is used to select an appropriate model. In this case a model with three components is selected. The fitted values for the model with three components are given in Figure~\ref{fig:beta}. <>= betaMixFix <- stepFlexmix(cbind(Deaths, Total - Deaths) ~ 1 | Center, model = FLXMRglmfix(family = "binomial", fixed = ~ Treatment), k = 2:4, nrep = 3, data = betablocker) betaMixFix @ \begin{figure} \centering <>= library("grid") betaMixFix_3 <- getModel(betaMixFix, "3") betaMixFix_3 <- relabel(betaMixFix_3, "model", "Intercept") betablocker$Center <- with(betablocker, factor(Center, levels = Center[order((Deaths/Total)[1:22])])) clusters <- factor(clusters(betaMixFix_3), labels = paste("Cluster", 1:3)) print(dotplot(Deaths/Total ~ Center | clusters, groups = Treatment, as.table = TRUE, data = betablocker, xlab = "Center", layout = c(3, 1), scales = list(x = list(draw = FALSE)), key = simpleKey(levels(betablocker$Treatment), lines = TRUE, corner = c(1,0)))) betaMixFix.fitted <- fitted(betaMixFix_3) for (i in 1:3) { seekViewport(trellis.vpname("panel", i, 1)) grid.lines(unit(1:22, "native"), unit(betaMixFix.fitted[1:22, i], "native"), gp = gpar(lty = 1)) grid.lines(unit(1:22, "native"), unit(betaMixFix.fitted[23:44, i], "native"), gp = gpar(lty = 2)) } @ \caption{Relative number of deaths for the treatment and the control group for each center in the beta blocker dataset. The centers are sorted by the relative number of deaths in the control group. The lines indicate the fitted values for each component of the 3-component mixture model with random intercept and fixed effect for treatment.} \label{fig:beta} \end{figure} In addition the treatment effect can also be included in the random part of the model. As then all coefficients for the covariates and the intercept follow a mixture distribution the component specific model can be specified using \code{FLXMRglm()}. The coefficients are $\bm{\beta}^s=(\beta^s_{\textrm{Intercept|Center}}, \beta^s_{\textrm{Treatment|Center}})$, i.e.~it is assumed that the heterogeneity is only between centers and therefore the aggregated data for each center can be used. <>= betaMix <- stepFlexmix(cbind(Deaths, Total - Deaths) ~ Treatment | Center, model = FLXMRglm(family = "binomial"), k = 3, nrep = 3, data = betablocker) summary(betaMix) @ The full model with a random effect for treatment has a higher BIC and therefore the smaller would be preferred. 
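This comparison can be made explicit by computing the information criteria for both fitted models (not evaluated here):
<<eval=FALSE>>=
BIC(betaMixFix_3)
BIC(betaMix)
@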
The default plot of the returned \code{flexmix} object is a rootogramm of the a-posteriori probabilities where observations with a-posteriori probabilities smaller than \code{eps} are omitted. With argument \code{mark} the component is specified to have those observations marked which are assigned to this component based on the maximum a-posteriori probabilities. This indicates which components overlap. <>= print(plot(betaMixFix_3, mark = 1, col = "grey", markcol = 1)) @ The default plot of the fitted model indicates that the components are well separated. In addition component 1 has a slight overlap with component 2 but none with component 3. The fitted parameters of the component specific models can be accessed with: <>= parameters(betaMix) @ The cluster assignments using the maximum a-posteriori probabilities are obtained with: <>= table(clusters(betaMix)) @ The estimated probabilities for each component for the treated patients and those in the control group can be obtained with: <>= predict(betaMix, newdata = data.frame(Treatment = c("Control", "Treated"))) @ or <>= fitted(betaMix)[c(1, 23), ] @ A further analysis of the model is possible with function \code{refit()} which returns the estimated coefficients together with the standard deviations, z-values and corresponding p-values: <>= summary(refit(getModel(betaMixFix, "3"))) @ The printed coefficients are ordered to have the fixed effects before the varying effects. %%----------------------------------------------------------------------- \subsubsection{Mehta et al. trial} This dataset is similar to the beta blocker dataset and is also analyzed in \cite{flexmix:Aitkin:1999a}. The dataset is visualized in Figure~\ref{fig:mehta}. The observation for the control group in center 15 is slightly conspicuous and might classify as an outlier. The model is given by: \begin{align*} H(\textrm{Response} \,|\, \textrm{Total}, \bm{\Theta}) &= \sum_{s = 1}^S \pi_s \textrm{Bi}( \textrm{Response} \,|\, \textrm{Total}, \theta_s), \end{align*} with $\bm{\beta}^s = (\beta^s_{\textrm{Intercept|Site}}, \beta_{\textrm{Drug}})$. This model is fitted with: <>= data("Mehta", package = "flexmix") mehtaMix <- stepFlexmix(cbind(Response, Total - Response)~ 1 | Site, model = FLXMRglmfix(family = "binomial", fixed = ~ Drug), control = list(minprior = 0.04), nrep = 3, k = 3, data = Mehta) summary(mehtaMix) @ One component only contains the observations for center 15 and in order to be able to fit a mixture with such a small component it is necessary to modify the default argument for \code{minprior} which is 0.05. The fitted values for this model are given separately for each component in Figure~\ref{fig:mehta}. \begin{figure} \centering <>= Mehta$Site <- with(Mehta, factor(Site, levels = Site[order((Response/Total)[1:22])])) clusters <- factor(clusters(mehtaMix), labels = paste("Cluster", 1:3)) print(dotplot(Response/Total ~ Site | clusters, groups = Drug, layout = c(3,1), data = Mehta, xlab = "Site", scales = list(x = list(draw = FALSE)), key = simpleKey(levels(Mehta$Drug), lines = TRUE, corner = c(1,0)))) mehtaMix.fitted <- fitted(mehtaMix) for (i in 1:3) { seekViewport(trellis.vpname("panel", i, 1)) sapply(1:nlevels(Mehta$Drug), function(j) grid.lines(unit(1:22, "native"), unit(mehtaMix.fitted[Mehta$Drug == levels(Mehta$Drug)[j], i], "native"), gp = gpar(lty = j))) } @ \caption{Relative number of responses for the treatment and the control group for each site in the Mehta et al.~trial dataset together with the fitted values. 
The sites are sorted by the relative number of responses in the control group.} \label{fig:mehta} \end{figure} If also a random effect for the coefficient of $\textrm{Drug}$ is fitted, i.e.~$\bm{\beta}^s = (\beta^s_{\textrm{Intercept|Site}}, \beta^s_{\textrm{Drug|Site}})$, this is estimated by: <>= mehtaMix <- stepFlexmix(cbind(Response, Total - Response) ~ Drug | Site, model = FLXMRglm(family = "binomial"), k = 3, data = Mehta, nrep = 3, control = list(minprior = 0.04)) summary(mehtaMix) @ The BIC is smaller for the larger model and this indicates that the assumption of an equal drug effect for all centers is not confirmed by the data. Given Figure~\ref{fig:mehta} a two-component model with fixed treatment is also fitted to the data where site 15 is omitted: <>= Mehta.sub <- subset(Mehta, Site != 15) mehtaMix <- stepFlexmix(cbind(Response, Total - Response) ~ 1 | Site, model = FLXMRglmfix(family = "binomial", fixed = ~ Drug), data = Mehta.sub, k = 2, nrep = 3) summary(mehtaMix) @ %%----------------------------------------------------------------------- \subsubsection{Tribolium} A finite mixture of binomial regressions is fitted to the Tribolium dataset given in \cite{flexmix:Wang+Puterman:1998}. The data was collected to investigate whether the adult Tribolium species Castaneum has developed an evolutionary advantage to recognize and avoid eggs of its own species while foraging, as beetles of the genus Tribolium are cannibalistic in the sense that adults eat the eggs of their own species as well as those of closely related species. The experiment isolated a number of adult beetles of the same species and presented them with a vial of 150 eggs (50 of each type), the eggs being thoroughly mixed to ensure uniformity throughout the vial. The data gives the consumption data for adult Castaneum species. It reports the number of Castaneum, Confusum and Madens eggs, respectively, that remain uneaten after two day exposure to the adult beetles. Replicates 1, 2, and 3 correspond to different occasions on which the experiment was conducted. The data is visualized in Figure~\ref{fig:tribolium} and the model is given by: \begin{align*} H(\textrm{Remaining} \,|\, \textrm{Total}, \bm{\Theta}) &= \sum_{s = 1}^S \pi_s(\textrm{Replicate}, \bm{\alpha}) \textrm{Bi}( \textrm{Remaining} \,|\, \textrm{Total}, \theta_s), \end{align*} with $\bm{\beta}^s = (\beta^s_{\textrm{Intercept}}, \bm{\beta}_{\textrm{Species}})$. This model is fitted with: <>= data("tribolium", package = "flexmix") TribMix <- stepFlexmix(cbind(Remaining, Total - Remaining) ~ 1, k = 2:3, model = FLXMRglmfix(fixed = ~ Species, family = "binomial"), concomitant = FLXPmultinom(~ Replicate), data = tribolium) @ The model which is selected as the best in \cite{flexmix:Wang+Puterman:1998} can be estimated with: <>= modelWang <- FLXMRglmfix(fixed = ~ I(Species == "Confusum"), family = "binomial") concomitantWang <- FLXPmultinom(~ I(Replicate == 3)) TribMixWang <- stepFlexmix(cbind(Remaining, Total - Remaining) ~ 1, data = tribolium, model = modelWang, concomitant = concomitantWang, k = 2) summary(refit(TribMixWang)) @ \begin{figure} \centering <>= clusters <- factor(clusters(TribMixWang), labels = paste("Cluster", 1:TribMixWang@k)) print(dotplot(Remaining/Total ~ factor(Replicate) | clusters, groups = Species, data = tribolium[rep(1:9, each = 3) + c(0:2)*9,], xlab = "Replicate", auto.key = list(corner = c(1,0)))) @ \caption{Relative number of remaining beetles for the number of replicate. 
The different panels are according to the cluster assignemnts based on the a-posteriori probabilities of the model suggested in \cite{flexmix:Wang+Puterman:1998}.} \label{fig:tribolium} \end{figure} \cite{flexmix:Wang+Puterman:1998} also considered a model where they omit one conspicuous observation. This model can be estimated with: <>= TribMixWangSub <- stepFlexmix(cbind(Remaining, Total - Remaining) ~ 1, k = 2, data = tribolium[-7,], model = modelWang, concomitant = concomitantWang) @ %%----------------------------------------------------------------------- \subsubsection{Trypanosome} The data is used in \cite{flexmix:Follmann+Lambert:1989}. It is from a dosage-response analysis where the proportion of organisms belonging to different populations shall be assessed. It is assumed that organisms belonging to different populations are indistinguishable other than in terms of their reaction to the stimulus. The experimental technique involved inspection under the microscope of a representative aliquot of a suspension, all organisms appearing within two fields of view being classified either alive or dead. Hence the total numbers of organisms present at each dose and the number showing the quantal response were both random variables. The data is illustrated in Figure~\ref{fig:trypanosome}. The model which is proposed in \cite{flexmix:Follmann+Lambert:1989} is given by: \begin{align*} H(\textrm{Dead} \,|\,\bm{\Theta}) &= \sum_{s = 1}^S \pi_s \textrm{Bi}( \textrm{Dead} \,|\, \theta_s), \end{align*} where $\textrm{Dead} \in \{0,1\}$ and with $\bm{\beta}^s = (\beta^s_{\textrm{Intercept}}, \bm{\beta}_{\textrm{log(Dose)}})$. This model is fitted with: <>= data("trypanosome", package = "flexmix") TrypMix <- stepFlexmix(cbind(Dead, 1-Dead) ~ 1, k = 2, nrep = 3, data = trypanosome, model = FLXMRglmfix(family = "binomial", fixed = ~ log(Dose))) summary(refit(TrypMix)) @ The fitted values are given in Figure~\ref{fig:trypanosome} together with the fitted values of a generalized linear model in order to facilitate comparison of the two models. \begin{figure} \centering <>= tab <- with(trypanosome, table(Dead, Dose)) Tryp.dat <- data.frame(Dead = tab["1",], Alive = tab["0",], Dose = as.numeric(colnames(tab))) plot(Dead/(Dead+Alive) ~ Dose, data = Tryp.dat) Tryp.pred <- predict(glm(cbind(Dead, 1-Dead) ~ log(Dose), family = "binomial", data = trypanosome), newdata=Tryp.dat, type = "response") TrypMix.pred <- predict(TrypMix, newdata = Tryp.dat, aggregate = TRUE)[[1]] lines(Tryp.dat$Dose, Tryp.pred, lty = 2) lines(Tryp.dat$Dose, TrypMix.pred, lty = 3) legend(4.7, 1, c("GLM", "Mixture model"), lty=c(2, 3), xjust=0, yjust=1) @ \caption{Relative number of deaths for each dose level together with the fitted values for the generalized linear model (``GLM'') and the random intercept model (``Mixture model'').} \label{fig:trypanosome} \end{figure} %%------------------------------------------------------------------------- \subsection{Finite mixtures of Poisson regressions} % %%----------------------------------------------------------------------- \subsubsection{Fabric faults} The dataset is analyzed using a finite mixture of Poisson regression models in \cite{flexmix:Aitkin:1996}. Furthermore, it is described in \cite{flexmix:McLachlan+Peel:2000} on page 155. It contains 32 observations on the number of faults in rolls of a textile fabric. 
A random intercept model is used where a fixed effect is assumed for the logarithm of length: <<>>= data("fabricfault", package = "flexmix") fabricMix <- stepFlexmix(Faults ~ 1, model = FLXMRglmfix(family="poisson", fixed = ~ log(Length)), data = fabricfault, k = 2, nrep = 3) summary(fabricMix) summary(refit(fabricMix)) Lnew <- seq(0, 1000, by = 50) fabricMix.pred <- predict(fabricMix, newdata = data.frame(Length = Lnew)) @ The intercept of the first component is not significantly different from zero for a significance level of 0.05. We therefore also fit a modified model where the intercept is a-priori set to zero for the first component. This nested structure is given as part of the model specification with argument \code{nested}. <<>>= fabricMix2 <- flexmix(Faults ~ 0, data = fabricfault, cluster = posterior(fabricMix), model = FLXMRglmfix(family = "poisson", fixed = ~ log(Length), nested = list(k=c(1,1), formula=list(~0,~1)))) summary(refit(fabricMix2)) fabricMix2.pred <- predict(fabricMix2, newdata = data.frame(Length = Lnew)) @ The data and the fitted values for each of the components for both models are given in Figure~\ref{fig:fabric}. \begin{figure} \centering <<fig=true, echo=false>>= plot(Faults ~ Length, data = fabricfault) sapply(fabricMix.pred, function(y) lines(Lnew, y, lty = 1)) sapply(fabricMix2.pred, function(y) lines(Lnew, y, lty = 2)) legend(190, 25, paste("Model", 1:2), lty=c(1, 2), xjust=0, yjust=1) @ \caption{Observed values of the fabric faults dataset together with the fitted values for the components of each of the two fitted models.} \label{fig:fabric} \end{figure} %%----------------------------------------------------------------------- \subsubsection{Patent} The patent data given in \cite{flexmix:Wang+Cockburn+Puterman:1998} consist of 70 observations on patent applications, R\&D spending and sales in millions of dollars from pharmaceutical and biomedical companies in 1976 taken from the National Bureau of Economic Research R\&D Masterfile. The observations are displayed in Figure~\ref{fig:patent}. The model which is chosen as the best in \cite{flexmix:Wang+Cockburn+Puterman:1998} is given by: \begin{align*} H(\textrm{Patents} \,|\, \textrm{lgRD}, \textrm{RDS}, \bm{\Theta}) &= \sum_{s = 1}^S \pi_s(\textrm{RDS}, \bm{\alpha}) \textrm{Poi} ( \textrm{Patents} \,|\, \lambda_s), \end{align*} and $\bm{\beta}^s = (\beta^s_{\textrm{Intercept}}, \beta^s_{\textrm{lgRD}})$. The model is fitted with: <<>>= data("patent", package = "flexmix") ModelPat <- FLXMRglm(family = "poisson") FittedPat <- stepFlexmix(Patents ~ lgRD, k = 3, nrep = 3, model = ModelPat, data = patent, concomitant = FLXPmultinom(~ RDS)) summary(FittedPat) @ The fitted values for the component specific models and the concomitant variable model are given in Figure~\ref{fig:patent}. The plotting symbol of the observations corresponds to the induced clustering given by \code{clusters(FittedPat)}. This model is modified to have fixed effects for the logarithmized R\&D spending, i.e.~$\bm{\beta}^s = (\beta^s_{\textrm{Intercept}}, \beta_{\textrm{lgRD}})$. The already fitted model is used for initialization, i.e.~the EM algorithm is started with an M-step given the a-posteriori probabilities.
<>= ModelFixed <- FLXMRglmfix(family = "poisson", fixed = ~ lgRD) FittedPatFixed <- flexmix(Patents ~ 1, model = ModelFixed, cluster = posterior(FittedPat), concomitant = FLXPmultinom(~ RDS), data = patent) summary(FittedPatFixed) @ The fitted values for the component specific models and the concomitant variable model of this model are also given in Figure~\ref{fig:patent}. \begin{figure} \centering \setkeys{Gin}{width=0.95\textwidth} <>= lgRDv <- seq(-3, 5, by = 0.05) newdata <- data.frame(lgRD = lgRDv) plotData <- function(fitted) { with(patent, data.frame(Patents = c(Patents, unlist(predict(fitted, newdata = newdata))), lgRD = c(lgRD, rep(lgRDv, 3)), class = c(clusters(fitted), rep(1:3, each = nrow(newdata))), type = rep(c("data", "fit"), c(nrow(patent), nrow(newdata)*3)))) } plotPatents <- cbind(plotData(FittedPat), which = "Wang et al.") plotPatentsFixed <- cbind(plotData(FittedPatFixed), which = "Fixed effects") plotP <- rbind(plotPatents, plotPatentsFixed) rds <- seq(0, 3, by = 0.02) x <- model.matrix(FittedPat@concomitant@formula, data = data.frame(RDS = rds)) plotConc <- function(fitted) { E <- exp(x%*%fitted@concomitant@coef) data.frame(Probability = as.vector(E/rowSums(E)), class = rep(1:3, each = nrow(x)), RDS = rep(rds, 3)) } plotConc1 <- cbind(plotConc(FittedPat), which = "Wang et al.") plotConc2 <- cbind(plotConc(FittedPatFixed), which = "Fixed effects") plotC <- rbind(plotConc1, plotConc2) print(xyplot(Patents ~ lgRD | which, data = plotP, groups=class, xlab = "log(R&D)", panel = "panel.superpose", type = plotP$type, panel.groups = function(x, y, type = "p", subscripts, ...) { ind <- plotP$type[subscripts] == "data" panel.xyplot(x[ind], y[ind], ...) panel.xyplot(x[!ind], y[!ind], type = "l", ...) }, scales = list(alternating=FALSE), layout=c(1,2), as.table=TRUE), more=TRUE, position=c(0,0,0.6, 1)) print(xyplot(Probability ~ RDS | which, groups = class, data = plotC, type = "l", scales = list(alternating=FALSE), layout=c(1,2), as.table=TRUE), position=c(0.6, 0.01, 1, 0.99)) @ \caption{Patent data with the fitted values of the component specific models (left) and the concomitant variable model (right) for the model in \citeauthor{flexmix:Wang+Cockburn+Puterman:1998} and with fixed effects for $\log(\textrm{R\&D})$. The plotting symbol for each observation is determined by the component with the maximum a-posteriori probability.} \label{fig:patent} \end{figure} \setkeys{Gin}{width=0.8\textwidth} With respect to the BIC the full model is better than the model with the fixed effects. However, fixed effects have the advantage that the different components differ only in their baseline and the relation between the components in return of investment for each additional unit of R\&D spending is constant. Due to a-priori domain knowledge this model might seem more plausible. The fitted values for the constrained model are also given in Figure~\ref{fig:patent}. %%----------------------------------------------------------------------- \subsubsection{Seizure} The data is used in \cite{flexmix:Wang+Puterman+Cockburn:1996} and is from a clinical trial where the effect of intravenous gamma-globulin on suppression of epileptic seizures is studied. There are daily observations for a period of 140 days on one patient, where the first 27 days are a baseline period without treatment, the remaining 113 days are the treatment period. 
The model proposed in \cite{flexmix:Wang+Puterman+Cockburn:1996} is given by: \begin{align*} H(\textrm{Seizures} \,|\, (\textrm{Treatment}, \textrm{log(Day)}, \textrm{log(Hours)}), \bm{\Theta}) &= \sum_{s = 1}^S \pi_s \textrm{Poi} ( \textrm{Seizures} \,|\, \lambda_s), \end{align*} where $\bm{\beta}^s = (\beta^s_{\textrm{Intercept}}, \beta^s_{\textrm{Treatment}}, \beta^s_{\textrm{log(Day)}}, \beta^s_{\textrm{Treatment:log(Day)}})$ and $\textrm{log(Hours)}$ is used as an offset. This model is fitted with: <<>>= data("seizure", package = "flexmix") seizMix <- stepFlexmix(Seizures ~ Treatment * log(Day), data = seizure, k = 2, nrep = 3, model = FLXMRglm(family = "poisson", offset = log(seizure$Hours))) summary(seizMix) summary(refit(seizMix)) @ A different model with different contrasts to directly estimate the coefficients for the jump when changing between base and treatment period is given by: <<>>= seizMix2 <- flexmix(Seizures ~ Treatment * log(Day/27), data = seizure, cluster = posterior(seizMix), model = FLXMRglm(family = "poisson", offset = log(seizure$Hours))) summary(seizMix2) summary(refit(seizMix2)) @ A different model which allows no jump at the change between base and treatment period is fitted with: <<>>= seizMix3 <- flexmix(Seizures ~ log(Day/27)/Treatment, data = seizure, cluster = posterior(seizMix), model = FLXMRglm(family = "poisson", offset = log(seizure$Hours))) summary(seizMix3) summary(refit(seizMix3)) @ With respect to the BIC criterion the smaller model with no jump is preferred. This is also the more intuitive model from a practitioner's point of view, as it does not seem to be plausible that starting the treatment already gives a significant improvement, but rather that the improvement develops over time. The data points together with the fitted values for each component of the two models are given in Figure~\ref{fig:seizure}. It can clearly be seen that the fitted values are nearly equal which also supports the smaller model. \begin{figure} \centering <<fig=true, echo=false>>= plot(Seizures/Hours~Day, pch = c(1,3)[as.integer(Treatment)], data=seizure) abline(v=27.5, lty=2, col="grey") legend(140, 9, c("Baseline", "Treatment"), pch=c(1, 3), xjust=1, yjust=1) matplot(seizure$Day, fitted(seizMix)/seizure$Hours, type="l", add=TRUE, lty = 1, col = 1) matplot(seizure$Day, fitted(seizMix3)/seizure$Hours, type="l", add=TRUE, lty = 3, col = 1) legend(140, 7, paste("Model", c(1,3)), lty=c(1, 3), xjust=1, yjust=1) @ \caption{Observed values for the seizure dataset together with the fitted values for the components of the two different models.} \label{fig:seizure} \end{figure} %%----------------------------------------------------------------------- \subsubsection{Ames salmonella assay data} The Ames salmonella assay dataset was used in \cite{flexmix:Wang+Puterman+Cockburn:1996}. They propose a model given by: \begin{align*} H(\textrm{y} \,|\, \textrm{x}, \bm{\Theta}) &= \sum_{s = 1}^S \pi_s \textrm{Poi} ( \textrm{y} \,|\, \lambda_s), \end{align*} where $\bm{\beta}^s = (\beta^s_{\textrm{Intercept}}, \beta_{\textrm{x}}, \beta_{\textrm{log(x+10)}})$.
The model is fitted with: <>= data("salmonellaTA98", package = "flexmix") salmonMix <- stepFlexmix(y ~ 1, data = salmonellaTA98, k = 2, nrep = 3, model = FLXMRglmfix(family = "poisson", fixed = ~ x + log(x + 10))) @ \begin{figure} \centering <>= salmonMix.pr <- predict(salmonMix, newdata=salmonellaTA98) plot(y~x, data=salmonellaTA98, pch=as.character(clusters(salmonMix)), xlab="Dose of quinoline", ylab="Number of revertant colonies of salmonella", ylim=range(c(salmonellaTA98$y, unlist(salmonMix.pr)))) for (i in 1:2) lines(salmonellaTA98$x, salmonMix.pr[[i]], lty=i) @ \caption{Means and classification for assay data according to the estimated posterior probabilities based on the fitted model.} \label{fig:almes} \end{figure} %%----------------------------------------------------------------------- \section{Conclusions and future work} Package \pkg{flexmix} can be used to fit finite mixtures of regressions to datasets used in the literature to illustrate these models. The results can be reproduced and additional insights can be gained using visualization methods available in \proglang{R}. The fitted model is an object in \proglang{R} which can be explored using \code{show()}, \code{summary()} or \code{plot()}, as suitable methods have been implemented for objects of class \code{"flexmix"} which are returned by \code{flexmix()}. In the future it would be desirable to have more diagnostic tools available to analyze the model fit and compare different models. The use of resampling methods would be convenient as they can be applied to all kinds of mixtures models and would therefore suit well the purpose of the package which is flexible modelling of various finite mixture models. Furthermore, an additional visualization method for the fitted coefficients of the mixture would facilitate the comparison of the components. %%----------------------------------------------------------------------- \section*{Computational details} <>= SI <- sessionInfo() pkgs <- paste(sapply(c(SI$otherPkgs, SI$loadedOnly), function(x) paste("\\\\pkg{", x$Package, "} ", x$Version, sep = "")), collapse = ", ") @ All computations and graphics in this paper have been done using \proglang{R} version \Sexpr{getRversion()} with the packages \Sexpr{pkgs}. %%----------------------------------------------------------------------- \section*{Acknowledgments} This research was supported by the the Austrian Science Foundation (FWF) under grant P17382 and the Austrian Academy of Sciences ({\"O}AW) through a DOC-FFORTE scholarship for Bettina Gr{\"u}n. %%----------------------------------------------------------------------- \bibliography{flexmix} \end{document} flexmix/vignettes/ziglm.R0000644000176200001440000000220514404637307015212 0ustar liggesuserssetClass("FLXMRziglm", contains = "FLXMRglm") FLXMRziglm <- function(formula = . ~ ., family = c("binomial", "poisson"), ...) { family <- match.arg(family) new("FLXMRziglm", FLXMRglm(formula, family, ...), name = paste("FLXMRziglm", family, sep=":")) } setMethod("FLXgetModelmatrix", signature(model="FLXMRziglm"), function(model, data, formula, lhs=TRUE, ...) { model <- callNextMethod(model, data, formula, lhs) if (attr(terms(model@fullformula), "intercept") == 0) stop("please include an intercept") model }) setMethod("FLXremoveComponent", signature(model = "FLXMRziglm"), function(model, nok, ...) { if (1 %in% nok) as(model, "FLXMRglm") else model }) setMethod("FLXmstep", signature(model = "FLXMRziglm"), function(model, weights, components, ...) 
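## The first component is held fixed at a point mass in zero: its intercept is
## set to -Inf (all remaining coefficients to 0) and it is not re-estimated;
## only the other components are updated via the FLXMRglm M-step.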
{ coef <- c(-Inf, rep(0, ncol(model@x)-1)) names(coef) <- colnames(model@x) comp.1 <- with(list(coef = coef, df = 0, offset = NULL, family = model@family), eval(model@defineComponent)) c(list(comp.1), FLXmstep(as(model, "FLXMRglm"), weights[, -1, drop=FALSE], components[-1])) }) flexmix/vignettes/mixture.bib0000644000176200001440000003621114404637307016124 0ustar liggesusers@STRING{csda = {Computational Statistics \& Data Analysis} } @STRING{jasa = {Journal of the American Statistical Association} } @STRING{jcgs = {Journal of Computational and Graphical Statistics} } @STRING{jrssa = {Journal of the Royal Statistical Society A} } @STRING{jrssb = {Journal of the Royal Statistical Society B} } @Article{ mixtures:aitkin:1999, author = {Murray Aitkin}, title = {A General Maximum Likelihood Analysis of Variance Components in Generalized Linear Models}, journal = {Biometrics}, year = 1999, volume = 55, pages = {117--128} } @Article{ mixtures:aitkin:1999a, author = {Murray Aitkin}, title = {Meta-Analysis by Random Effect Modelling in Generalized Linear Models}, journal = {Statistics in Medicine}, year = 1999, volume = 18, number = {17--18}, month = {September}, pages = {2343--2351} } @Article{ mixtures:biernacki+celeux+govaert:2000, author = {Christophe Biernacki and Gilles Celeux and G{\'e}rard Govaert}, title = {Assessing a Mixture Model for Clustering with the Integrated Completed Likelihood}, journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence}, year = 2000, volume = 22, number = 7, pages = {719--725}, month = {July} } @Article{ mixtures:biernacki+celeux+govaert:2003, author = {Christophe Biernacki and Gilles Celeux and G{\'e}rard Govaert}, title = {Choosing Starting Values for the {EM} Algorithm for Getting the Highest Likelihood in Multivariate {G}aussian Mixture Models}, journal = csda, year = 2003, volume = 41, pages = {561--575} } @Article{ mixtures:boehning+dietz+schlattmann:1999, author = {Dankmar B{\"o}hning and Ekkehart Dietz and Peter Schlattmann and Lisette Mendon{\c c}a and Ursula Kirchner}, title = {The Zero-Inflated {P}oisson Model and the Decayed, Missing and Filled Teeth Index in Dental Epidemiology}, journal = jrssa, year = 1999, volume = 162, number = 2, pages = {195--209}, month = {August} } @Book{ mixtures:boehning:1999, author = {Dankmar B{\"o}hning}, title = {Computer Assisted Analysis of Mixtures and Applications: Meta-Analysis, Disease Mapping, and Others}, publisher = {Chapman \& Hall/CRC}, year = 1999, volume = 81, series = {Monographs on Statistics and Applied Probability}, address = {London} } @Manual{ mixtures:canty+ripley:2010, title = {boot: Bootstrap R (S-Plus) Functions}, author = {Angelo Canty and Brian Ripley}, year = 2010, note = {R package version 1.2-43}, url = {http://CRAN.R-project.org/package=boot} } @TechReport{ mixtures:celeux+diebolt:1988, author = {Gilles Celeux and Jean Diebolt}, title = {A Random Imputation Principle: The Stochastic {EM} Algorithm}, institution = {INRIA}, year = 1988, type = {Rapports de Recherche}, number = 901, month = {September} } @Article{ mixtures:celeux+govaert:1992, author = {Gilles Celeux and G{\'e}rard Govaert}, title = {A {C}lassification {EM} Algorithm for Clustering and Two Stochastic Versions}, journal = {Computational Statistics \& Data Analysis}, year = 1992, volume = 14, number = 3, pages = {315--332}, month = {October} } @Book{ mixtures:chambers:1998, author = {John M. 
Chambers}, title = {Programming with Data}, publisher = {Springer-Verlag}, year = 1998, address = {New York}, isbn = {0-387-98503-4} } @Article{ mixtures:dasgupta+raftery:1998, author = {Abhijit Dasgupta and Adrian E. Raftery}, title = {Detecting Features in Spatial Point Processes with Clutter Via Model-Based Clustering}, journal = jasa, year = 1998, volume = 93, number = 441, pages = {294--302}, month = {March} } @Book{mixtures:davison+hinkley:1997, author = {A. C. Davison and D. V. Hinkley}, title = {Bootstrap Methods and Their Application}, publisher = {Cambridge University Press}, address = {Cambridge, UK}, year = 1997, isbn = {0-521-57391-2 (hardcover), 0-521-57471-4 (paperback)}, series = {Cambridge Series on Statistical and Probabilistic Mathematics}, } @Article{ mixtures:dayton+macready:1988, author = {C. Mitchell Dayton and George B. Macready}, title = {Concomitant-Variable Latent-Class Models}, journal = jasa, year = 1988, volume = 83, number = 401, pages = {173--178}, month = {March} } @Article{ mixtures:dempster+laird+rubin:1977, author = {A. P. Dempster and N. M. Laird and D. B. Rubin}, title = {Maximum Likelihood from Incomplete Data Via the {EM}-Algorithm}, journal = jrssb, volume = 39, pages = {1--38}, year = 1977 } @InCollection{ mixtures:diebolt+ip:1996, author = {Jean Diebolt and Eddie H. S. Ip}, editor = {W. R. Gilks and S. Richardson and D. J. Spiegelhalter}, booktitle = {Markov Chain {M}onte {C}arlo in Practice}, title = {Stochastic {EM}: Method and Application}, publisher = {Chapman and Hall}, year = 1996, pages = {259--273} } @Book{ mixtures:everitt+hand:1981, author = {B. S. Everitt and D. J. Hand}, title = {Finite Mixture Distributions}, publisher = {Chapman and Hall}, address = {London}, year = 1981 } @Article{ mixtures:follmann+lambert:1989, author = {Dean A. Follmann and Diane Lambert}, title = {Generalizing Logistic Regression by Non-Parametric Mixing}, journal = jasa, volume = 84, number = 405, month = {March}, pages = {295--300}, year = 1989 } @Article{ mixtures:follmann+lambert:1991, author = {Dean A. 
Follmann and Diane Lambert}, title = {Identifiability of Finite Mixtures of Logistic Regression Models}, journal = {Journal of Statistical Planning and Inference}, volume = 27, number = 3, month = {March}, pages = {375--381}, year = 1991 } @Book{ mixtures:fowler:2004, author = {Martin Fowler}, title = {{UML} Distilled: A Brief Guide to the Standard Object Modeling Language}, publisher = {Addison-Wesley Professional}, year = 2004, edition = {3rd} } @Book{ mixtures:fruehwirth-schnatter:2006, author = {Sylvia Fr{\"u}hwirth-Schnatter}, title = {Finite Mixture and Markov Switching Models}, publisher = {Springer-Verlag}, address = {New York}, series = {Springer-Verlag Series in Statistics}, isbn = {0-387-32909-9}, year = 2006 } @Article{ mixtures:gentleman+ihaka:2000, author = {Robert Gentleman and Ross Ihaka}, title = {Lexical Scope and Statistical Computing}, journal = jcgs, year = 2000, volume = 9, number = 3, pages = {491--508} } @InProceedings{ mixtures:gruen+leisch:2004, author = {Bettina Gr{\" u}n and Friedrich Leisch}, title = {Bootstrapping Finite Mixture Models}, booktitle = {COMPSTAT 2004 -- Proceedings in Computational Statistics}, year = 2004, editor = {Jaromir Antoch}, publisher = {Physica Verlag, Heidelberg}, isbn = {3-7908-1554-3}, pages = {1115--1122} } @InProceedings{ mixtures:gruen+leisch:2006, author = {Bettina Gr{\" u}n and Friedrich Leisch}, title = {Fitting Finite Mixtures of Linear Regression Models with Varying \& Fixed Effects in \proglang{R}}, booktitle = {COMPSTAT 2006 -- Proceedings in Computational Statistics}, pages = {853--860}, editor = {Alfredo Rizzi and Maurizio Vichi}, publisher = {Physica Verlag, Heidelberg, Germany}, isbn = {3-7908-1708-2}, year = 2006 } @Article{ mixtures:gruen+leisch:2007a, author = {Bettina Gr{\" u}n and Friedrich Leisch}, title = {Fitting Finite Mixtures of Generalized Linear Regressions in \proglang{R}}, journal = csda, year = 2007, volume = 51, number = 11, month = {July}, pages = {5247--5252} } @Article{ mixtures:gruen+leisch:2008, author = {Bettina Gr\"un and Friedrich Leisch}, title = {Identifiability of Finite Mixtures of Multinomial Logit Models with Varying and Fixed Effects}, journal = {Journal of Classification}, year = 2008, note = {Accepted for publication on 2008-03-28} } @Article{ mixtures:gruen+leisch:2008a, author = {Bettina Gr\"un and Friedrich Leisch}, title = {{F}lex{M}ix Version 2: Finite Mixtures with Concomitant Variables and Varying and Constant Parameters}, journal = {Journal of Statistical Software}, year = 2008, volume = 28, number = 4, pages = {1--35}, doi = {10.18637/jss.v028.i04}, } @Unpublished{mixtures:gruen+leisch:2010, author = {Bettina Gr\"un and Friedrich Leisch}, title = {Finite Mixture Model Diagnostics Using Resampling Methods}, note = {Unpublished manuscript}, year = 2010, } @PhDThesis{ mixtures:gruen:2006, author = {Bettina Gr{\"u}n}, title = {Identification and Estimation of Finite Mixture Models}, school = {Institut f{\"u}r Statistik und Wahrscheinlichkeitstheorie, Technische Universit{\"a}t Wien}, year = 2006, month = {September}, note = {{Friedrich Leisch, advisor}} } @Book{ mixtures:harrell:2001, author = {Frank E. 
Harrell}, title = {Regression Modeling Strategies}, publisher = {Springer-Verlag}, address = {New York}, year = 2001 } @Article{ mixtures:hastie+tibshirani:1993, author = {Trevor Hastie and Robert Tibshirani}, title = {Varying-Coefficient Models}, journal = jrssb, year = 1993, volume = 55, number = 4, pages = {757--796} } @Article{mixtures:hennig:2000, author = {Christian Hennig}, title = {Identifiability of Models for Clusterwise Linear Regression}, journal = {Journal of Classification}, volume = 17, number = 2, month = {July}, pages = {273--296}, year = 2000 } @Article{ mixtures:karlis+xekalaki:2003, author = {Dimitris Karlis and Evdokia Xekalaki}, title = {Choosing Initial Values for the {EM} Algorithm for Finite Mixtures}, journal = csda, year = 2003, volume = 41, pages = {577--590} } @InProceedings{ mixtures:leisch:2002, author = {Friedrich Leisch}, title = {Sweave: Dynamic Generation of Statistical Reports Using Literate Data Analysis}, booktitle = {COMPSTAT 2002 -- Proceedings in Computational Statistics}, pages = {575--580}, year = 2002, editor = {Wolfgang H{\"a}rdle and Bernd R{\"o}nz}, publisher = {Physica Verlag, Heidelberg}, note = {ISBN 3-7908-1517-9} } @Article{ mixtures:leisch:2003, author = {Friedrich Leisch}, title = {Sweave, Part {II}: Package Vignettes}, journal = {\proglang{R} News}, year = 2003, volume = 3, number = 2, pages = {21--24}, month = {October}, url = {http://CRAN.R-project.org/doc/Rnews/} } @Article{ mixtures:leisch:2004, author = {Friedrich Leisch}, title = {\pkg{FlexMix}: A General Framework for Finite Mixture Models and Latent Class Regression in \proglang{R}}, journal = {Journal of Statistical Software}, year = 2004, volume = 11, number = 8, pages = {1--18}, doi = {10.18637/jss.v011.i08}, } @InProceedings{ mixtures:leisch:2004a, author = {Friedrich Leisch}, title = {Exploring the Structure of Mixture Model Components}, booktitle = {COMPSTAT 2004 -- Proceedings in Computational Statistics}, year = 2004, editor = {Jaromir Antoch}, publisher = {Physica Verlag, Heidelberg}, isbn = {3-7908-1554-3}, pages = {1405--1412} } @InProceedings{ mixtures:leisch:2008, author = {Friedrich Leisch}, title = {Modelling Background Noise in Finite Mixtures of Generalized Linear Regression Models}, booktitle = {COMPSTAT 2008 -- Proceedings in Computational Statistics}, volume = {I}, pages = {385-396}, editor = {Paula Brito}, publisher = {Physica Verlag, Heidelberg, Germany}, isbn = {978-3-7908-2083-6}, year = 2008 } @Article{ mixtures:long:1990, author = {J. Scott Long}, title = {The Origins of Sex Differences in Science}, journal = {Social Forces}, year = 1990, volume = 68, number = 4, pages = {1297--1315}, month = {June} } @Book{ mixtures:mccullagh+nelder:1989, author = {Peter McCullagh and John A. Nelder}, title = {Generalized Linear Models}, edition = {2nd}, publisher = {Chapman and Hall}, year = 1989, location = {London} } @Book{ mixtures:mclachlan+basford:1988, author = {Geoffrey J. McLachlan and Kaye E. Basford}, title = {Mixture Models: Inference and Applications to Clustering}, publisher = {Marcel Dekker}, year = 1988, address = {New York} } @Book{ mixtures:mclachlan+peel:2000, author = {Geoffrey J. McLachlan and David Peel}, title = {Finite Mixture Models}, publisher = {John Wiley \& Sons}, year = 2000 } @Book{ mixtures:pinheiro+bates:2000, author = {Jose C. Pinheiro and Douglas M. 
Bates}, title = {Mixed-Effects Models in \proglang{S} and \proglang{S-Plus}}, publisher = {Springer-Verlag}, year = 2000, isbn = {0-387-98957-0} } @Book{ mixtures:sarkar:2008, title = {\pkg{lattice}: Multivariate Data Visualization with \proglang{R}}, author = {Deepayan Sarkar}, year = 2008, publisher = {Springer-Verlag}, address = {New York}, isbn = {978-0-387-75968-5} } @Book{ mixtures:titterington+smith+makov:1985, author = {D. M. Titterington and A. F. M. Smith and U. E. Makov}, title = {Statistical Analysis of Finite Mixture Distributions}, publisher = {John Wiley \& Sons}, year = 1985 } @Book{ mixtures:venables+ripley:2002, title = {Modern Applied Statistics with \proglang{S}}, author = {William N. Venables and Brian D. Ripley}, publisher = {Springer-Verlag}, edition = {4th}, address = {New York}, year = 2002, isbn = {0-387-95457-0} } @Article{ mixtures:wang+puterman+cockburn:1996, author = {Peiming Wang and Martin L. Puterman and Iain M. Cockburn and Nhu D. Le}, title = {Mixed {P}oisson Regression Models with Covariate Dependent Rates}, journal = {Biometrics}, year = 1996, volume = 52, pages = {381--400} } @Article{ mixtures:wedel+desarbo:1995, author = {Michel Wedel and Wagner S. DeSarbo}, title = {A Mixture Likelihood Approach for Generalized Linear Models}, journal = {Journal of Classification}, year = 1995, volume = 12, number = 1, month = {March}, pages = {21--55} } @Article{ mixtures:wehrens+buydens+fraley:2004, author = {Ron Wehrens and Lutgarde M.C. Buydens and Chris Fraley and Adrian E. Raftery}, title = {Model-Based Clustering for Image Segmentation and Large Datasets Via Sampling}, journal = {Journal of Classification}, year = 2004, volume = 21, number = 2, pages = {231--253} } flexmix/vignettes/bootstrapping.Rnw0000644000176200001440000004660014404637307017337 0ustar liggesusers\documentclass[nojss]{jss} \usepackage{amsfonts,bm,amsmath,amssymb} %%\usepackage{Sweave} %% already provided by jss.cls %%%\VignetteIndexEntry{Finite Mixture Model Diagnostics Using Resampling Methods} %%\VignetteDepends{flexmix} %%\VignetteKeywords{R, finite mixture model, resampling, bootstrap} %%\VignettePackage{flexmix} \title{Finite Mixture Model Diagnostics Using Resampling Methods} <>= options(useFancyQuotes = FALSE) digits <- 3 Colors <- c("#E495A5", "#39BEB1") critical_values <- function(n, p = "0.95") { data("qDiptab", package = "diptest") if (n %in% rownames(qDiptab)) { return(qDiptab[as.character(n), p]) }else { n.approx <- as.numeric(rownames(qDiptab)[which.min(abs(n - as.numeric(rownames(qDiptab))))]) return(sqrt(n.approx)/sqrt(n) * qDiptab[as.character(n.approx), p]) } } library("graphics") library("flexmix") combine <- function(x, sep, width) { cs <- cumsum(nchar(x)) remaining <- if (any(cs[-1] > width)) combine(x[c(FALSE, cs[-1] > width)], sep, width) c(paste(x[c(TRUE, cs[-1] <= width)], collapse= sep), remaining) } prettyPrint <- function(x, sep = " ", linebreak = "\n\t", width = getOption("width")) { x <- strsplit(x, sep)[[1]] paste(combine(x, sep, width), collapse = paste(sep, linebreak, collapse = "")) } @ \author{Bettina Gr{\"u}n\\ Wirtschaftsuniversit{\"a}t Wien \And Friedrich Leisch\\ Universit\"at f\"ur Bodenkultur Wien} \Plainauthor{Bettina Gr{\"u}n, Friedrich Leisch} \Address{ Bettina Gr\"un\\ Institute for Statistics and Mathematics\\ Wirtschaftsuniversit{\"a}t Wien\\ Welthandelsplatz 1\\ 1020 Wien, Austria\\ E-mail: \email{Bettina.Gruen@R-project.org}\\ Friedrich Leisch\\ Institut f\"ur Angewandte Statistik und EDV\\ Universit\"at f\"ur Bodenkultur Wien\\ Peter Jordan 
Stra\ss{}e 82\\ 1190 Wien, Austria\\ E-mail: \email{Friedrich.Leisch@boku.ac.at} } \Abstract{ This paper illustrates the implementation of resampling methods in \pkg{flexmix} as well as the application of resampling methods for model diagnostics of fitted finite mixture models. Convenience functions to perform these methods are available in package \pkg{flexmix}. The use of the methods is illustrated with an artificial example and the \code{seizure} data set. } \Keywords{\proglang{R}, finite mixture models, resampling, bootstrap} \Plainkeywords{R, finite mixture models, resampling, bootstrap} %%------------------------------------------------------------------------- %%------------------------------------------------------------------------- \begin{document} \SweaveOpts{engine=R, echo=true, height=5, width=8, eps=FALSE, keep.source=TRUE} \setkeys{Gin}{width=0.95\textwidth} \section{Implementation of resampling methods}\label{sec:implementation} The proposed framework for model diagnostics using resampling \citep{mixtures:gruen+leisch:2004} equally allows to investigate model fit for all kinds of mixture models. The procedure is applicable to mixture models with different component specific models and does not impose any limitation such as for example on the dimension of the parameter space of the component specific model. In addition to the fitting step different component specific models only require different random number generators for the parametric bootstrap. The \code{boot()} function in \pkg{flexmix} is a generic \proglang{S4} function with a method for fitted finite mixtures of class \code{"flexmix"} and is applicable to general finite mixture models. The function with arguments and their defaults is given by: <>= cat(prettyPrint(gsub("boot_flexmix", "boot", prompt(flexmix:::boot_flexmix, filename = NA)$usage[[2]]), sep = ", ", linebreak = paste("\n", paste(rep(" ", 2), collapse = ""), sep= ""), width = 70)) @ The interface is similar to the \code{boot()} function in package \pkg{boot} \citep{mixtures:Davison+Hinkley:1997, mixtures:Canty+Ripley:2010}. The \code{object} is a fitted finite mixture of class \code{"flexmix"} and \code{R} denotes the number of resamples. The possible bootstrapping method are \code{"empirical"} (also available as \code{"ordinary"}) and \code{"parametric"}. For the parametric bootstrap sampling from the fitted mixture is performed using \code{rflexmix()}. For mixture models with different component specific models \code{rflexmix()} requires a sampling method for the component specific model. Argument \code{initialize\_solution} allows to select if the EM algorithm is started in the original finite mixture solution or if random initialization is performed. The fitted mixture model might contain weights and group indicators. The weights are case weights and allow to reduce the amount of data if observations are identical. This is useful for example for latent class analysis of multivariate binary data. The argument \code{keep\_weights} allows to indicate if they should be kept for the bootstrapping. Group indicators allow to specify that the component membership is identical over several observations, e.g., for repeated measurements of the same individual. Argument \code{keep\_groups} allows to indicate if the grouping information should also be used in the bootstrapping. \code{verbose} indicates if information on the progress should be printed. 
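As a minimal sketch, a typical call (not evaluated here) could look as follows, where \code{fitted\_model} is a placeholder for a fitted finite mixture of class \code{"flexmix"}; the remaining arguments \code{control}, \code{k} and \code{model} are described below.
<<eval=FALSE>>=
bs <- boot(fitted_model, R = 200, sim = "parametric",
           initialize_solution = FALSE, keep_weights = FALSE,
           keep_groups = TRUE)
@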
The \code{control} argument allows to control the EM algorithm for fitting the model to each of the bootstrap samples. By default the \code{control} argument is extracted from the fitted model provided by \code{object}. \code{k} allows to specify the number of components and by default this is also taken from the fitted model provided. The \code{model} argument determines if also the model and the weights slot for each sample are stored and returned. The returned object is of class \code{"FLXboot"} and otherwise only contains the fitted parameters, the fitted priors, the log likelihoods, the number of components of the fitted mixtures and the information if the EM algorithm has converged. The likelihood ratio test is implemented based on \code{boot()} in function \code{LR\_test()} and returns an object of class \code{"htest"} containing the number of valid bootstrap replicates, the p-value, the double negative log likelihood ratio test statistics for the original data and the bootstrap replicates. The \code{plot} method for \code{"FLXboot"} objects returns a parallel coordinate plot with the fitted parameters separately for each of the components. \section{Artificial data set} In the following a finite mixture model is used as the underlying data generating process which is theoretically not identifiable. We are assuming a finite mixture of linear regression models with two components of equal size where the coverage condition is not fulfilled \citep{mixtures:Hennig:2000}. Hence, intra-component label switching is possible, i.e., there exist two parameterizations implying the same mixture distribution which differ how the components between the covariate points are combined. We assume that one measurement per object and a single categorical regressor with two levels are given. The usual design matrix for a model with intercept uses the two covariate points $\mathbf{x}_1 = (1, 0)'$ and $\mathbf{x}_2 = (1, 1)'$. The mixture distribution is given by \begin{eqnarray*} H(y|\mathbf{x}, \Theta) &=& \frac{1}{2} N(\mu_1, 0.1) + \frac{1}{2} N(\mu_2, 0.1), \end{eqnarray*} where $\mu_k(\mathbf{x}) = \mathbf{x}'\bm{\alpha}_k$ and $N(\mu, \sigma^2)$ is the normal distribution. Now let $\mu_1(\mathbf{x}_1) = 1$, $\mu_2(\mathbf{x}_1) = 2$, $\mu_1(\mathbf{x}_2) = -1$ and $\mu_2(\mathbf{x}_2) = 4$. As Gaussian mixture distributions are generically identifiable the means, variances and component weights are uniquely determined in each covariate point given the mixture distribution. However, as the coverage condition is not fulfilled, the two possible solutions for $\bm{\alpha}$ are: \begin{description} \item[Solution 1:] $\bm{\alpha}_1^{(1)} = (2,\phantom{-}2)'$, $\bm{\alpha}_2^{(1)} = (1,-2)'$, \item[Solution 2:] $\bm{\alpha}_1^{(2)} = (2,-3)'$, $\bm{\alpha}_2^{(2)} = (1,\phantom{-}3)'$. \end{description} We specify this artificial mixture distribution using \code{FLXdist()}. \code{FLXdist()} returns an unfitted finite mixture of class \code{"FLXdist"}. The class of fitted finite mixture models \code{"flexmix"} extends class \code{"FLXdist"}. Each component follows a normal distribution. The parameters specified in a named list therefore consist of the regression coefficients and the standard deviation. Function \code{FLXdist()} has an argument \code{formula} for specifying the regression in each of the components, an argument \code{k} for the component weights and \code{components} for the parameters of each of the components. 
<<>>= library("flexmix") Component_1 <- list(Model_1 = list(coef = c(1, -2), sigma = sqrt(0.1))) Component_2 <- list(Model_1 = list(coef = c(2, 2), sigma = sqrt(0.1))) ArtEx.mix <- FLXdist(y ~ x, k = rep(0.5, 2), components = list(Component_1, Component_2)) @ We draw a balanced sample with 50 observations in each covariate point from the mixture model using \code{rflexmix()} after defining the data points for the covariates. \code{rflexmix()} can either have an unfitted or a fitted finite mixture as input. For unfitted mixtures data has to be provided using the \code{newdata} argument. For already fitted mixtures data can be optionally provided, otherwise the data used for fitting the mixture is used. <<>>= ArtEx.data <- data.frame(x = rep(0:1, each = 100/2)) suppressWarnings(RNGversion("3.5.0")) set.seed(123) ArtEx.sim <- rflexmix(ArtEx.mix, newdata = ArtEx.data) ArtEx.data$y <- ArtEx.sim$y[[1]] ArtEx.data$class <- ArtEx.sim$class @ In Figure~\ref{fig:art} the sample is plotted together with the two solutions for combining $x_1$ and $x_2$, i.e., this illustrates intra-component label switching. \begin{figure} \centering <>= par(mar = c(5, 4, 2, 0) + 0.1) plot(y ~ x, data = ArtEx.data, pch = with(ArtEx.data, 2*class + x)) pars <- list(matrix(c(1, -2, 2, 2), ncol = 2), matrix(c(1, 3, 2, -3), ncol = 2)) for (i in 1:2) apply(pars[[i]], 2, abline, col = Colors[i]) @ \caption{Balanced sample from the artificial example with the two theoretical solutions.} \label{fig:art} \end{figure} We fit a finite mixture to the sample using \code{stepFlexmix()}. <<>>= set.seed(123) ArtEx.fit <- stepFlexmix(y ~ x, data = ArtEx.data, k = 2, nrep = 5, control = list(iter = 1000, tol = 1e-8, verbose = 0)) @ The fitted mixture can be inspected using \code{summary()} and \code{parameters()}. <<>>= summary(ArtEx.fit) parameters(ArtEx.fit) @ Obviously the fitted mixture parameters correspond to the parameterization we used to specify the mixture distribution. Using standard asymptotic theory to analyze the fitted mixture model gives the following estimates for the standard deviations. <<>>= ArtEx.refit <- refit(ArtEx.fit) summary(ArtEx.refit) @ The fitted mixture can also be analyzed using resampling techniques. For analyzing the stability of the parameter estimates where the possibility of identifiability problems is also taken into account the parametric bootstrap is used with random initialization. Function \code{boot()} can be used for empirical or parametric bootstrap (specified by the argument \code{sim}). The logical argument \code{initialize_solution} specifies if the initialization is in the original solution or random. By default random initialization is made. The number of bootstrap samples is set by the argument \code{R}. Please note that the arguments are chosen to correspond to those for function \code{boot} in package \pkg{boot} \citep{mixtures:Davison+Hinkley:1997}. <>= set.seed(123) ArtEx.bs <- boot(ArtEx.fit, R = 200, sim = "parametric") ArtEx.bs @ <>= if (file.exists("ArtEx.bs.rda")) { load("ArtEx.bs.rda") } else { set.seed(123) ArtEx.bs <- boot(ArtEx.fit, R = 200, sim = "parametric") save(ArtEx.bs, file = "ArtEx.bs.rda") } ArtEx.bs @ Function \code{boot()} returns an object of class \code{"\Sexpr{class(ArtEx.bs)}"}. The default plot compares the bootstrap parameter estimates to the confidence intervals derived using standard asymptotic theory in a parallel coordinate plot (see Figure~\ref{fig:plot.FLXboot-art}). 
Clearly two groups of parameter estimates can be distinguished which are of about equal size. One subset of the parameter estimates stays within the confidence intervals induced by standard asymptotic theory, while the second group corresponds to the second solution and clusters around these parameter values. \begin{figure}[h!] \centering <>= print(plot(ArtEx.bs, ordering = "coef.x", col = Colors)) @ \caption{Diagnostic plot of the bootstrap results for the artificial example.} \label{fig:plot.FLXboot-art} \end{figure} In the following the DIP-test is applied to check if the parameter estimates follow a unimodal distribution. This is done for the aggregated parameter estimates, where unimodality implies that the respective parameter is not suitable for imposing an ordering constraint which induces a unique labelling. For the separate component analysis, which is made after imposing an ordering constraint on the coefficient of $x$, rejecting the null hypothesis of unimodality implies that identifiability problems are present, e.g.~due to intra-component label switching. <<>>= require("diptest") parameters <- parameters(ArtEx.bs) Ordering <- factor(as.vector(apply(matrix(parameters[,"coef.x"], nrow = 2), 2, order))) Comp1 <- parameters[Ordering == 1,] Comp2 <- parameters[Ordering == 2,] dip.values.art <- matrix(nrow = ncol(parameters), ncol = 3, dimnames=list(colnames(parameters), c("Aggregated", "Comp 1", "Comp 2"))) dip.values.art[,"Aggregated"] <- apply(parameters, 2, dip) dip.values.art[,"Comp 1"] <- apply(Comp1, 2, dip) dip.values.art[,"Comp 2"] <- apply(Comp2, 2, dip) dip.values.art @ The critical value for column \code{Aggregated} is \Sexpr{round(critical_values(nrow(parameters)), digits = digits)} and for the columns of the separate components \Sexpr{round(critical_values(nrow(Comp1)), digits = digits)}. The component sizes as well as the standard deviations follow a unimodal distribution for the aggregated data as well as for each of the components. The regression coefficients are multimodal for the aggregate data as well as for each of the components. While from the aggregated case it might be concluded that imposing an ordering constraint on the intercept or the coefficient of $x$ is suitable, the component-specific analyses reveal that a unique labelling was not achieved. \section{Seizure} In \cite{mixtures:Wang+Puterman+Cockburn:1996} a Poisson mixture regression is fitted to data from a clinical trial where the effect of intravenous gammaglobulin on suppression of epileptic seizures is investigated. The data used were 140 observations from one treated patient, where treatment started on the $28^\textrm{th}$ day. In the regression model three independent variables were included: treatment, trend and interaction treatment-trend. Treatment is a dummy variable indicating if the treatment period has already started. Furthermore, the number of parental observation hours per day was available and it is assumed that the number of epileptic seizures per observation hour follows a Poisson mixture distribution. The number of epileptic seizures per parental observation hour for each day is plotted in Figure~\ref{fig:seizure}. The fitted mixture distribution consists of two components which can be interpreted as representing 'good' and 'bad' days of the patient.
The mixture model can be formulated by \begin{equation*} H(y|\mathbf{x}, \Theta) = \pi_1 P(\lambda_1) + \pi_2 P(\lambda_2), \end{equation*} where $\lambda_k = e^{\mathbf{x}'\bm{\alpha}_k}$ for $k = 1,2$ and $P(\lambda)$ is the Poisson distribution. The data is loaded and the mixture fitted with two components. <<>>= data("seizure", package = "flexmix") model <- FLXMRglm(family = "poisson", offset = log(seizure$Hours)) control <- list(iter = 1000, tol = 1e-10, verbose = 0) set.seed(123) seizMix <- stepFlexmix(Seizures ~ Treatment * log(Day), data = seizure, k = 2, nrep = 5, model = model, control = control) @ The fitted regression lines for each of the two components are shown in Figure~\ref{fig:seizure}. \begin{figure}[h!] \begin{center} <>= par(mar = c(5, 4, 2, 0) + 0.1) plot(Seizures/Hours~Day, data=seizure, pch = as.integer(seizure$Treatment)) abline(v = 27.5, lty = 2, col = "grey") matplot(seizure$Day, fitted(seizMix)/seizure$Hours, type="l", add = TRUE, col = 1, lty = 1, lwd = 2) @ \caption{Seizure data with the fitted values for the \citeauthor{mixtures:Wang+Puterman+Cockburn:1996} model. The plotting character for the observed values in the base period is a circle and for those in the treatment period a triangle.} \label{fig:seizure} \end{center} \end{figure} The parametric bootstrap with random initialization is used to investigate identifiability problems and parameter stability. The diagnostic plot is given in Figure~\ref{fig:plot.FLXboot-seiz}. The coloring is according to an ordering constraint on the intercept. Clearly the parameter estimates corresponding to the solution where the bad days from the base period are combined with the good days from the treatment period (and vice versa for the good days of the base period) can be distinguished and indicate the slight identifiability problems of the fitted mixture. <>= set.seed(123) seizMix.bs <- boot(seizMix, R = 200, sim = "parametric") seizMix.bs @ <>= if (file.exists("seizMix.bs.rda")) { load("seizMix.bs.rda") } else { set.seed(123) seizMix.bs <- boot(seizMix, R = 200, sim = "parametric") save(seizMix.bs, file = "seizMix.bs.rda") } seizMix.bs @ \begin{figure}[h!] \centering <>= print(plot(seizMix.bs, ordering = "coef.(Intercept)", col = Colors)) @ \caption{Diagnostic plot of the bootstrap results for the \code{seizure} data.} \label{fig:plot.FLXboot-seiz} \end{figure} <<>>= parameters <- parameters(seizMix.bs) Ordering <- factor(as.vector(apply(matrix(parameters[,"coef.(Intercept)"], nrow = 2), 2, order))) Comp1 <- parameters[Ordering == 1,] Comp2 <- parameters[Ordering == 2,] @ For applying the DIP test an ordering constraint on the intercept is again used. The critical value for column \code{Aggregated} is \Sexpr{round(critical_values(nrow(parameters)), digits = digits)} and for the columns of the separate components \Sexpr{round(critical_values(nrow(Comp1)), digits = digits)}. <<>>= dip.values.art <- matrix(nrow = ncol(parameters), ncol = 3, dimnames = list(colnames(parameters), c("Aggregated", "Comp 1", "Comp 2"))) dip.values.art[,"Aggregated"] <- apply(parameters, 2, dip) dip.values.art[,"Comp 1"] <- apply(Comp1, 2, dip) dip.values.art[,"Comp 2"] <- apply(Comp2, 2, dip) dip.values.art @ For the aggregate results the hypothesis of unimodality cannot be rejected for the trend. For the component-specific analyses unimodality cannot be rejected only for the intercept (where the ordering condition was imposed on) and again the trend.
For all other parameter estimates unimodality is rejected which indicates that the ordering constraint was able to impose a unique labelling only for the own parameter and not for the other parameters. This suggests identifiability problems. %%------------------------------------------------------------------------- %%------------------------------------------------------------------------- \bibliography{mixture} \end{document} flexmix/vignettes/mymclust.R0000644000176200001440000000205314404637307015746 0ustar liggesusersmymclust <- function (formula = .~., diagonal = TRUE) { retval <- new("FLXMC", weighted = TRUE, formula = formula, dist = "mvnorm", name = "my model-based clustering") retval@defineComponent <- function(para) { logLik <- function(x, y) { mvtnorm::dmvnorm(y, mean = para$center, sigma = para$cov, log = TRUE) } predict <- function(x) { matrix(para$center, nrow = nrow(x), ncol = length(para$center), byrow = TRUE) } new("FLXcomponent", parameters = list(center = para$center, cov = para$cov), df = para$df, logLik = logLik, predict = predict) } retval@fit <- function(x, y, w, ...) { para <- cov.wt(y, wt = w)[c("center", "cov")] df <- (3 * ncol(y) + ncol(y)^2)/2 if (diagonal) { para$cov <- diag(diag(para$cov)) df <- 2 * ncol(y) } retval@defineComponent(c(para, df = df)) } retval } flexmix/vignettes/flexmix.bib0000644000176200001440000002335214404637307016105 0ustar liggesusers@STRING{jcgs = {Journal of Computational and Graphical Statistics} } @STRING{tuwien = {Technische Universit{\"a}t Wien, Vienna, Austria} } @STRING{jasa = {Journal of the American Statistical Association} } @Article{ flexmix:Aitkin:1996, author = {Murray Aitkin}, title = {A General Maximum Likelihood Analysis of Overdispersion in Generalized Linear Models}, journal = {Statistics and Computing}, year = 1996, volume = 6, pages = {251--262} } @Article{ flexmix:Aitkin:1999, author = {Murray Aitkin}, title = {A General Maximum Likelihood Analysis of Variance Components in Generalized Linear Models}, journal = {Biometrics}, year = 1999, volume = 55, pages = {117--128} } @Article{ flexmix:Aitkin:1999a, author = {Murray Aitkin}, title = {Meta-Analysis by Random Effect Modelling in Generalized Linear Models}, journal = {Statistics in Medicine}, year = 1999, volume = 18, number = {17--18}, month = {September}, pages = {2343--2351} } @Manual{ flexmix:Buyske:2003, title = {{R} Package \texttt{mmlcr}: Mixed-Mode Latent Class Regression}, author = {Steve Buyske}, year = 2003, note = {version 1.3.2}, url = {http://www.stat.rutgers.edu/~buyske/software.html} } @Book{ flexmix:Chambers:1998, author = {John M. Chambers}, title = {Programming with Data: A Guide to the {S} Language}, publisher = {Springer Verlag}, year = 1998, address = {Berlin, Germany} } @Article{ flexmix:DeSarbo+Cron:1988, author = {Wayne S. DeSarbo and W. L. Cron}, title = {A Maximum Likelihood Methodology for Clusterwise Linear Regression}, journal = {Journal of Classification}, year = 1988, volume = 5, pages = {249--282} } @Article{ flexmix:Dempster+Laird+Rubin:1977, author = {A.P. Dempster and N.M. Laird and D.B. Rubin}, title = {Maximum Likelihood from Incomplete Data via the {EM}-Alogrithm}, journal = {Journal of the Royal Statistical Society, B}, volume = 39, pages = {1--38}, year = 1977 } @Article{ flexmix:Diebolt+Robert:1994, author = {J. Diebolt and C. P. 
Robert}, title = {Estimation of Finite Mixture Distributions Through {B}ayesian Sampling}, journal = {Journal of the Royal Statistical Society, Series B}, year = 1994, volume = 56, pages = {363--375} } @Book{ flexmix:Everitt+Hand:1981, author = {Brian S. Everitt and David J. Hand}, title = {Finite Mixture Distributions}, publisher = {Chapman and Hall}, address = {London}, year = 1981 } @Article{ flexmix:Follmann+Lambert:1989, author = {Dean A. Follmann and Diane Lambert}, title = {Generalizing Logistic Regression by Non-Parametric Mixing}, journal = jasa, volume = 84, number = 405, month = {March}, pages = {295--300}, year = 1989 } @Article{ flexmix:Fraley+Raftery:2002, author = {Chris Fraley and Adrian E. Raftery}, title = {Model-Based Clustering, Discriminant Analysis and dDnsity Estimation}, journal = jasa, year = 2002, volume = 97, pages = {611-631} } @TechReport{ flexmix:Fraley+Raftery:2002a, author = {Chris Fraley and Adrian E. Raftery}, title = {{MCLUST}: Software for Model-Based Clustering, Discriminant Analysis and Density Estimation}, institution = {Department of Statistics, University of Washington}, year = 2002, number = 415, address = {Seattle, WA, USA}, url = {http://www.stat.washington.edu/raftery} } @Article{ flexmix:Gentleman+Ihaka:2000, author = {Robert Gentleman and Ross Ihaka}, title = {Lexical Scope and Statistical Computing}, journal = jcgs, year = 2000, volume = 9, number = 3, pages = {491--508}, keywords = {statistical computing, function closure, lexical scope, random number generators} } @InProceedings{ flexmix:Gruen+Leisch:2006, author = {Bettina Gr{\" u}n and Friedrich Leisch}, title = {Fitting Finite Mixtures of Linear Regression Models with Varying \& Fixed Effects in \textsf{R}}, booktitle = {Compstat 2006---Proceedings in Computational Statistics}, pages = {853--860}, editor = {Alfredo Rizzi and Maurizio Vichi}, publisher = {Physica Verlag}, address = {Heidelberg, Germany}, isbn = {3-7908-1708-2}, year = 2006 } @InProceedings{ flexmix:Grun+Leisch:2004, author = {Bettina Gr{\" u}n and Friedrich Leisch}, title = {Bootstrapping Finite Mixture Models}, booktitle = {Compstat 2004---Proceedings in Computational Statistics}, year = 2004, editor = {Jaromir Antoch}, publisher = {Physica Verlag}, address = {Heidelberg, Germany}, isbn = {3-7908-1554-3}, pages = {1115--1122}, pdf = {http://www.stat.uni-muenchen.de/~leisch/papers/Grun+Leisch-2004.pdf} } @MastersThesis{ flexmix:Grun:2002, author = {Bettina Gr{\"u}n}, title = {{I}dentifizierbarkeit von multinomialen {M}ischmodellen}, school = tuwien, year = 2002, note = {Kurt Hornik and Friedrich Leisch, advisors} } @Article{ flexmix:Hennig:2000, author = {Christian Hennig}, title = {Identifiability of Models for Clusterwise Linear Regression}, journal = {Journal of Classification}, volume = 17, pages = {273--296}, year = 2000 } @InProceedings{ flexmix:Leisch:2004, author = {Friedrich Leisch}, title = {Exploring the Structure of Mixture Model Components}, booktitle = {Compstat 2004---Proceedings in Computational Statistics}, year = 2004, editor = {Jaromir Antoch}, publisher = {Physica Verlag}, address = {Heidelberg, Germany}, isbn = {3-7908-1554-3}, pages = {1405--1412}, pdf = {http://www.stat.uni-muenchen.de/~leisch/papers/Leisch-2004.pdf} } @Article{ flexmix:Leisch:2004a, author = {Friedrich Leisch}, title = {{FlexMix}: A General Framework for Finite Mixture Models and Latent Class Regression in {R}}, journal = {Journal of Statistical Software}, year = 2004, volume = 11, number = 8, doi = {10.18637/jss.v011.i08}, } 
@Book{ flexmix:McLachlan+Peel:2000, author = {Geoffrey McLachlan and David Peel}, title = {Finite Mixture Models}, publisher = {John Wiley and Sons Inc.}, year = 2000 } @Manual{ flexmix:R-Core:2004, title = {R: A Language and Environment for Statistical Computing}, author = {{R Development Core Team}}, organization = {R Foundation for Statistical Computing}, address = {Vienna, Austria}, year = 2004, isbn = {3-900051-07-0}, url = {http://www.R-project.org} } @InProceedings{ flexmix:Tantrum+Murua+Stuetzle:2003, author = {Jeremy Tantrum and Alejandro Murua and Werner Stuetzle}, title = {Assessment and Pruning of Hierarchical Model Based Clustering}, booktitle = {Proceedings of the ninth ACM SIGKDD International Conference on Knowledge Discovery and Data Mining}, pages = {197--205}, year = 2003, publisher = {ACM Press}, address = {New York, NY, USA}, isbn = {1-58113-737-0}, } @Book{ flexmix:Titterington+Smith+Makov:1985, author = {D.M. Titterington and A.F.M. Smith and U.E. Makov}, title = {Statistical Analysis of Finite Mixture Distributions}, publisher = {John Wiley and Sons Inc.}, year = 1985 } @InProceedings{ flexmix:Urbanek+Theus:2003, author = {Simon Urbanek and Martin Theus}, title = {{iPlots}---High Interaction Graphics for {R}}, booktitle = {Proceedings of the 3rd International Workshop on Distributed Statistical Computing, Vienna, Austria}, editor = {Kurt Hornik and Friedrich Leisch and Achim Zeileis}, year = 2003, url = {http://www.ci.tuwien.ac.at/Conferences/DSC-2003/Proceedings/}, note = {{ISSN 1609-395X}} } @Book{ flexmix:Venables+Ripley:2002, title = {Modern Applied Statistics with S}, author = {William N. Venables and Brian D. Ripley}, publisher = {Springer Verlag}, edition = {Fourth}, address = {New York}, year = 2002, isbn = {0-387-95457-0} } @Article{ flexmix:Wang+Cockburn+Puterman:1998, author = {Peiming Wang and Iain M. Cockburn and Martin L. Puterman}, title = {Analysis of Patent Data---{A} Mixed-{P}oisson-Regression-Model Approach}, journal = {Journal of Business \& Economic Statistics}, year = 1998, volume = 16, number = 1, pages = {27--41} } @Article{ flexmix:Wang+Puterman+Cockburn:1996, author = {Peiming Wang and Martin L. Puterman and Iain M. Cockburn and Nhu D. Le}, title = {Mixed {P}oisson Regression Models with Covariate Dependent Rates}, journal = {Biometrics}, year = 1996, volume = 52, pages = {381--400} } @Article{ flexmix:Wang+Puterman:1998, author = {Peiming Wang and Martin L. Puterman}, title = {Mixed Logistic Regression Models}, journal = {Journal of Agricultural, Biological, and Environmental Statistics}, year = 1998, volume = 3, number = 2, pages = {175--200} } @Article{ flexmix:Wedel+DeSarbo:1995, author = {Michel Wedel and Wayne S. DeSarbo}, title = {A Mixture Likelihood Approach for Generalized Linear Models}, journal = {Journal of Classification}, year = 1995, volume = 12, pages = {21--55} } @Book{ flexmix:Wedel+Kamakura:2001, author = {Michel Wedel and Wagner A. 
Kamakura}, title = {Market Segmentation -- Conceptual and Methodological Foundations}, publisher = {Kluwer Academic Publishers}, year = 2001, address = {Boston, MA, USA}, edition = {2nd} } flexmix/vignettes/.install_extras0000644000176200001440000000004614404637307017003 0ustar liggesusersmyConcomitant.R$ mymclust.R$ ziglm.R$ flexmix/vignettes/mixture-regressions.Rnw0000644000176200001440000022017614404637307020504 0ustar liggesusers% % Copyright (C) 2008 Bettina Gruen and Friedrich Leisch % $Id: mixture-regressions.Rnw $ % \documentclass[nojss]{jss} \usepackage{amsfonts} \title{FlexMix Version 2: Finite Mixtures with Concomitant Variables and Varying and Constant Parameters} \Plaintitle{FlexMix Version 2: Finite Mixtures with Concomitant Variables and Varying and Constant Parameters} \Shorttitle{FlexMix Version 2} \author{Bettina Gr{\"u}n\\ Wirtschaftsuniversit{\"a}t Wien \And Friedrich Leisch\\ Universit\"at f\"ur Bodenkultur Wien} \Plainauthor{Bettina Gr{\"u}n, Friedrich Leisch} \Address{ Bettina Gr\"un\\ Institute for Statistics and Mathematics\\ Wirtschaftsuniversit{\"a}t Wien\\ Welthandelsplatz 1\\ 1020 Wien, Austria\\ E-mail: \email{Bettina.Gruen@R-project.org}\\ Friedrich Leisch\\ Institut f\"ur Angewandte Statistik und EDV\\ Universit\"at f\"ur Bodenkultur Wien\\ Peter Jordan Stra\ss{}e 82\\ 1190 Wien, Austria\\ E-mail: \email{Friedrich.Leisch@boku.ac.at} } \Abstract{ This article is a (slightly) modified version of \cite{mixtures:Gruen+Leisch:2008a}, published in the \emph{Journal of Statistical Software}. \pkg{flexmix} provides infrastructure for flexible fitting of finite mixture models in \proglang{R} using the expectation-maximization (EM) algorithm or one of its variants. The functionality of the package was enhanced. Now concomitant variable models as well as varying and constant parameters for the component specific generalized linear regression models can be fitted. The application of the package is demonstrated on several examples, the implementation described and examples given to illustrate how new drivers for the component specific models and the concomitant variable models can be defined. } \Keywords{\proglang{R}, finite mixture models, generalized linear models, concomitant variables} \Plainkeywords{R, finite mixture models, generalized linear models, concomitant variables} \usepackage{amsmath, listings} \def\argmax{\mathop{\rm arg\,max}} %% \usepackage{Sweave} prevent automatic inclusion \SweaveOpts{width=9, height=4.5, eps=FALSE, keep.source=TRUE} <>= options(width=60, prompt = "R> ", continue = "+ ", useFancyQuotes = FALSE) library("graphics") library("stats") library("flexmix") library("lattice") ltheme <- canonical.theme("postscript", FALSE) lattice.options(default.theme=ltheme) data("NPreg", package = "flexmix") data("dmft", package = "flexmix") source("myConcomitant.R") @ %%\VignetteIndexEntry{FlexMix Version 2: Finite Mixtures with Concomitant Variables and Varying and Constant Parameters} %%\VignetteDepends{flexmix} %%\VignetteKeywords{R, finite mixture models, model based clustering, latent class regression} %%\VignettePackage{flexmix} \begin{document} %%----------------------------------------------------------------------- %%----------------------------------------------------------------------- \section{Introduction}\label{sec:introduction} Finite mixture models are a popular technique for modelling unobserved heterogeneity or to approximate general distribution functions in a semi-parametric way. 
They are used in a lot of different areas such as astronomy, biology, economics, marketing or medicine. An overview on mixture models is given in \cite{mixtures:Everitt+Hand:1981}, \cite{mixtures:Titterington+Smith+Makov:1985}, \cite{mixtures:McLachlan+Basford:1988}, \cite{mixtures:Boehning:1999}, \cite{mixtures:McLachlan+Peel:2000} and \cite{mixtures:Fruehwirth-Schnatter:2006}. Version 1 of \proglang{R} package \pkg{flexmix} was introduced in \cite{mixtures:Leisch:2004}. The main design principles of the package are extensibility and fast prototyping for new types of mixture models. It uses \proglang{S}4 classes and methods \citep{mixtures:Chambers:1998} as implemented in the \proglang{R} package \pkg{methods} and exploits advanced features of \proglang{R} such as lexical scoping \citep{mixtures:Gentleman+Ihaka:2000}. The package implements a framework for maximum likelihood estimation with the expectation-maximization (EM) algorithm \citep{mixtures:Dempster+Laird+Rubin:1977}. The main focus is on finite mixtures of regression models and it allows for multiple independent responses and repeated measurements. The EM algorithm can be controlled through arguments such as the maximum number of iterations or a minimum improvement in the likelihood to continue. Newly introduced features in the current package version are concomitant variable models \citep{mixtures:Dayton+Macready:1988} and varying and constant parameters in the component specific regressions. Varying parameters follow a finite mixture, i.e., several groups exist in the population which have different parameters. Constant parameters are fixed for the whole population. This model is similar to mixed-effects models \citep{mixtures:Pinheiro+Bates:2000}. The main difference is that in this application the distribution of the varying parameters is unknown and has to be estimated. Thus the model is actually closer to the varying-coefficients modelling framework \citep{mixtures:Hastie+Tibshirani:1993}, using convex combinations of discrete points as functional form for the varying coefficients. The extension to constant and varying parameters allows for example to fit varying intercept models as given in \cite{mixtures:Follmann+Lambert:1989} and \cite{mixtures:Aitkin:1999}. These models are frequently applied to account for overdispersion in the data where the components follow either a binomial or Poisson distribution. The model was also extended to include nested varying parameters, i.e.~this allows to have groups of components with the same parameters \citep{mixtures:Gruen+Leisch:2006, mixtures:Gruen:2006}. In Section~\ref{sec:model-spec-estim} the extended model class is presented together with the parameter estimation using the EM algorithm. In Section~\ref{sec:using-new-funct} examples are given to demonstrate how the new functionality can be used. An overview on the implementational details is given in Section~\ref{sec:implementation}. The new model drivers are presented and changes made to improve the flexibility of the software and to enable the implementation of the new features are discussed. Examples for writing new drivers for the component specific models and the concomitant variable models are given in Section~\ref{sec:writing-your-own}. This paper gives a short overview on finite mixtures and the package in order to be self-contained. A more detailed introduction to finite mixtures and the package \pkg{flexmix} can be found in \cite{mixtures:Leisch:2004}. 
All computations and graphics in this paper have been done with \pkg{flexmix} version \Sexpr{packageDescription("flexmix",fields="Version")} and \proglang{R} version \Sexpr{getRversion()} using Sweave \citep{mixtures:Leisch:2002}. The newest release version of \pkg{flexmix} is always available from the Comprehensive \proglang{R} Archive Network at \url{http://CRAN.R-project.org/package=flexmix}. An up-to-date version of this paper is contained in the package as a vignette, giving full access to the \proglang{R} code behind all examples shown below. See \code{help("vignette")} or \cite{mixtures:Leisch:2003} for details on handling package vignettes. %%----------------------------------------------------------------------- %%----------------------------------------------------------------------- \section{Model specification and estimation}\label{sec:model-spec-estim} A general model class of finite mixtures of regression models is considered in the following. The mixture is assumed to consist of $K$ components where each component follows a parametric distribution. Each component has a weight assigned which indicates the a-priori probability for an observation to come from this component and the mixture distribution is given by the weighted sum over the $K$ components. If the weights depend on further variables, these are referred to as concomitant variables. In marketing choice behaviour is often modelled in dependence of marketing mix variables such as price, promotion and display. Under the assumption that groups of respondents with different price, promotion and display elasticities exist mixtures of regressions are fitted to model consumer heterogeneity and segment the market. Socio-demographic variables such as age and gender have often been shown to be related to the different market segments even though they generally do not perform well when used to a-priori segment the market. The relationships between the behavioural and the socio-demographic variables is then modelled through concomitant variable models where the group sizes (i.e.~the weights of the mixture) depend on the socio-demographic variables. The model class is given by \begin{align*} h(y|x, w, \psi) &= \sum_{k = 1}^K \pi_k(w, \alpha) f_k(y|x,\theta_{k})\\ &= \sum_{k = 1}^K \pi_k(w, \alpha) \prod_{d=1}^D f_{kd}(y_d|x_d,\theta_{kd}), \end{align*} where $\psi$ denotes the vector of all parameters for the mixture density $h()$ and is given by $(\alpha, (\theta_k)_{k=1,\ldots,K})$. $y$ denotes the response, $x$ the predictor and $w$ the concomitant variables. $f_k$ is the component specific density function. Multivariate variables $y$ are assumed to be dividable into $D$ subsets where the component densities are independent between the subsets, i.e.~the component density $f_k$ is given by a product over $D$ densities which are defined for the subset variables $y_d$ and $x_d$ for $d=1,\ldots,D$. The component specific parameters are given by $\theta_k = (\theta_{kd})_{d=1,\ldots,D}$. Under the assumption that $N$ observations are available the dimensions of the variables are given by $y = (y_d)_{d=1,\ldots,D} \in \mathbb{R}^{N \times \sum_{d=1}^D k_{yd}}$, $x = (x_d)_{d=1,\ldots,D} \in \mathbb{R}^{N \times \sum_{d=1}^D k_{xd}}$ for all $d = 1,\ldots,D$ and $w \in \mathbb{R}^{N \times k_w}$. In this notation $k_{yd}$ denotes the dimension of the $d^{\textrm{th}}$ response, $k_{xd}$ the dimension of the $d^{\textrm{th}}$ predictors and $k_w$ the dimension of the concomitant variables. 
For mixtures of GLMs each of the $d$ responses will in general be univariate, i.e.~multivariate responses will be conditionally independent given the segment memberships. For the component weights $\pi_k$ it holds $\forall w$ that \begin{equation}\label{eq:prior} \sum_{k=1}^K \pi_k(w,\alpha) = 1 \quad \textrm{and} \quad \pi_k(w, \alpha) > 0, \, \forall k, \end{equation} where $\alpha$ are the parameters of the concomitant variable model. For the moment focus is given to finite mixtures where the component specific densities are from the same parametric family, i.e.~$f_{kd} \equiv f_d$ for notational simplicity. If $f_d$ is from the exponential family of distributions and for each component a generalized linear model is fitted \citep[GLMs;][]{mixtures:McCullagh+Nelder:1989} these models are also called GLIMMIX models \citep{mixtures:Wedel+DeSarbo:1995}. In this case the component specific parameters are given by $\theta_{kd} = (\beta'_{kd}, \phi_{kd})$ where $\beta_{kd}$ are the regression coefficients and $\phi_{kd}$ is the dispersion parameter. The component specific parameters $\theta_{kd}$ are either restricted to be equal over all components, to vary between groups of components or to vary between all components. The varying between groups is referred to as varying parameters with one level of nesting. A disjoint partition $K_c$, $c = 1,\ldots,C$ of the set $\tilde{K} := \{1\ldots,K\}$ is defined for the regression coefficients. $C$ is the number of groups of the regression coefficients at the nesting level. The regression coefficients are accordingly split into three groups: \begin{align*} \beta_{kd} &= (\beta'_{1d}, \beta'_{2,c(k)d}, \beta'_{3,kd})', \end{align*} where $c(k) = \{c = 1,\ldots, C: k \in K_c\}$. Similar a disjoint partition $K_v$, $v = 1,\ldots,V$, of $\tilde{K}$ can be defined for the dispersion parameters if nested varying parameters are present. $V$ denotes the number of groups of the dispersion parameters at the nesting level. This gives: \begin{align*} \phi_{kd} &= \left\{\begin{array}{ll} \phi_{d} & \textrm{for constant parameters}\\ \phi_{kd} & \textrm{for varying parameters}\\ \phi_{v(k)d} & \textrm{for nested varying parameters} \end{array}\right. \end{align*} where $v(k) = \{v = 1,\ldots,V: k \in K_v\}$. The nesting structure of the component specific parameters is also described in \cite{mixtures:Gruen+Leisch:2006}. Different concomitant variable models are possible to determine the component weights \citep{mixtures:Dayton+Macready:1988}. The mapping function only has to fulfill condition \eqref{eq:prior}. In the following a multinomial logit model is assumed for the $\pi_k$ given by \begin{equation*} \pi_k(w,\alpha) = \frac{e^{w'\alpha_k}}{\sum_{u = 1}^K e^{w'\alpha_u}} \quad \forall k, \end{equation*} with $\alpha = (\alpha'_k)'_{k=1,\ldots,K}$ and $\alpha_1 \equiv 0$. %%------------------------------------------------------------------------- \subsection{Parameter estimation}\label{sec:estimation} The EM algorithm \citep{mixtures:Dempster+Laird+Rubin:1977} is the most common method for maximum likelihood estimation of finite mixture models where the number of components $K$ is fixed. The EM algorithm applies a missing data augmentation scheme. It is assumed that a latent variable $z_n \in \{0,1\}^K$ exists for each observation $n$ which indicates the component membership, i.e.~$z_{nk}$ equals 1 if observation $n$ comes from component $k$ and 0 otherwise. Furthermore it holds that $\sum_{k=1}^K z_{nk}=1$ for all $n$. 
In the EM algorithm these unobserved component memberships $z_{nk}$ of the observations are treated as missing values and the data is augmented by estimates of the component membership, i.e.~the estimated a-posteriori probabilities $\hat{p}_{nk}$. For a sample of $N$ observations $\{(y_1, x_1, w_1), \ldots, (y_N, x_N, w_N)\}$ the EM algorithm is given by: \begin{description} \item[E-step:] Given the current parameter estimates $\psi^{(i)}$ in the $i$-th iteration, replace the missing data $z_{nk}$ by the estimated a-posteriori probabilities \begin{align*} \hat{p}_{nk} & = \frac{\displaystyle \pi_k(w_n, \alpha^{(i)}) f(y_n| x_n, \theta_k^{(i)}) }{\displaystyle \sum_{u = 1}^K \pi_u(w_n, \alpha^{(i)}) f(y_n |x_n, \theta_u^{(i)}) }. \end{align*} \item[M-step:] Given the estimates for the a-posteriori probabilities $\hat{p}_{nk}$ (which are functions of $\psi^{(i)}$), obtain new estimates $\psi^{(i+1)}$ of the parameters by maximizing \begin{align*} Q(\psi^{(i+1)}|\psi^{(i)}) &= Q_1(\theta^{(i+1)} | \psi^{(i)}) + Q_2(\alpha^{(i+1)} | \psi^{(i)}), \end{align*} where \begin{align*} Q_1(\theta^{(i+1)} | \psi^{(i)}) &= \sum_{n = 1}^N \sum_{k = 1}^K \hat{p}_{nk} \log(f(y_n | x_n, \theta_k^{(i+1)})) \end{align*} and \begin{align*} Q_2(\alpha^{(i+1)}| \psi^{(i)}) &= \sum_{n = 1}^N \sum_{k = 1}^K \hat{p}_{nk} \log(\pi_k(w_n, \alpha^{(i+1)})). \end{align*} $Q_1$ and $Q_2$ can be maximized separately. The maximization of $Q_1$ gives new estimates $\theta^{(i+1)}$ and the maximization of $Q_2$ gives $\alpha^{(i+1)}$. $Q_1$ is maximized separately for each $d=1,\ldots,D$ using weighted ML estimation of GLMs and $Q_2$ using weighted ML estimation of multinomial logit models. \end{description} Different variants of the EM algorithm exist such as the stochastic EM \citep[SEM;][]{mixtures:Diebolt+Ip:1996} or the classification EM \citep[CEM;][]{mixtures:Celeux+Govaert:1992}. These two variants are also implemented in package \pkg{flexmix}. For both variants an additional step is made between the expectation and maximization steps. This step uses the estimated a-posteriori probabilities and assigns each observation to only one component, i.e.~classifies it into one component. For SEM this assignment is determined in a stochastic way while it is a deterministic assignment for CEM. For the SEM algorithm the additional step is given by: \begin{description} \item[S-step:] Given the a-posteriori probabilities draw \begin{align*} \hat{z}_n &\sim \textrm{Mult}((\hat{p}_{nk})_{k=1,\ldots,K}, 1) \end{align*} where $\textrm{Mult}(\theta, T)$ denotes the multinomial distribution with success probabilities $\theta$ and number of trials $T$. \end{description} Afterwards, the $\hat{z}_{nk}$ are used instead of the $\hat{p}_{nk}$ in the M-step. For the CEM the additional step is given by: \begin{description} \item[C-step:] Given the a-posteriori probabilities define \begin{align*} \hat{z}_{nk} &= \left\{\begin{array}{ll} 1&\textrm{if } k = \min\{ l : \hat{p}_{nl} \geq \hat{p}_{nk}\, \forall k=1,\ldots,K\}\\ 0&\textrm{otherwise}. \end{array}\right. \end{align*} \end{description} Please note that in this step the observation is assigned to the component with the smallest index if the same maximum a-posteriori probability is observed for several components. Both of these variants have been proposed to improve the performance of the EM algorithm, because the ordinary EM algorithm tends to converge rather slowly and only to a local optimum. 
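In package \pkg{flexmix} the variant of the algorithm is selected through the \code{classify} element of the \code{control} argument. The following call is only meant as an illustration using the artificial dataset \code{NPreg} introduced in Section~\ref{sec:artificial-example} and is not evaluated here; see \code{help("FLXcontrol")} for the complete set of possible values.
<<eval=FALSE>>=
flexmix(yn ~ x, data = NPreg, k = 2,
        control = list(classify = "random"))
@
Here \code{"weighted"} corresponds to the ordinary EM algorithm, while \code{"hard"} and \code{"random"} request the additional classification step of the CEM and SEM variant, respectively.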
The convergence behavior can be expected to be better for the CEM than ordinary EM algorithm, while SEM can escape convergence to a local optimum. However, the CEM algorithm does not give ML estimates because it maximizes the complete likelihood. For SEM good approximations of the ML estimator are obtained if the parameters where the maximum likelihood was encountered are used as estimates. Another possibility for determining parameter estimates from the SEM algorithm could be the mean after discarding a suitable number of burn-ins. An implementational advantage of both variants is that no weighted maximization is necessary in the M-step. It has been shown that the values of the likelihood are monotonically increased during the EM algorithm. On the one hand this ensures the convergence of the EM algorithm if the likelihood is bounded, but on the other hand only the detection of a local maximum can be guaranteed. Therefore, it is recommended to repeat the EM algorithm with different initializations and choose as final solution the one with the maximum likelihood. Different initialization strategies for the EM algorithm have been proposed, as its convergence to the optimal solution depends on the initialization \citep{mixtures:Biernacki+Celeux+Govaert:2003,mixtures:Karlis+Xekalaki:2003}. Proposed strategies are for example to first make several runs of the SEM or CEM algorithm with different random initializations and then start the EM at the best solution encountered. The component specific parameter estimates can be determined separately for each $d=1,\ldots,D$. For simplicity of presentation the following description assumes $D=1$. If all parameter estimates vary between the component distributions they can be determined separately for each component in the M-step. However, if also constant or nested varying parameters are specified, the component specific estimation problems are not independent from each other any more. Parameters have to be estimated which occur in several or all components and hence, the parameters of the different components have to be determined simultaneously for all components. The estimation problem for all component specific parameters is then obtained by replicating the vector of observations $y = (y_n)_{n=1,\ldots,N}$ $K$ times and defining the covariate matrix $X = (X_{\textrm{constant}}, X_{\textrm{nested}}, X_{\textrm{varying}})$ by \begin{align*} &X_{\textrm{constant}} = \mathbf{1}_K \otimes (x'_{1,n})_{n=1,\ldots,N}\\ &X_{\textrm{nested}} = \mathbf{J} \odot (x'_{2,n})_{n=1,\ldots,N}\\ &X_{\textrm{varying}} = \mathbf{I}_K \otimes(x'_{3,n})_{n=1,\ldots,N}, \end{align*} where $\mathbf{1}_K$ is a vector of 1s of length $K$, $\mathbf{J}$ is the incidence matrix for each component $k=1,\ldots,K$ and each nesting group $c \in C$ and hence is of dimension $K \times |C|$, and $\mathbf{I}_K$ is the identity matrix of dimension $K \times K$. $\otimes$ denotes the Kronecker product and $\odot$ the Khatri-Rao product (i.e., the column-wise Kronecker product). $x_{m,n}$ are the covariates of the corresponding coefficients $\beta_{m,.}$ for $m=1,2,3$. Please note that the weights used for the estimation are the a-posteriori probabilities which are stacked for all components, i.e.~a vector of length $N K$ is obtained. 
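As an illustration of this construction, the following sketch builds the stacked model matrix for $K=3$ components with hypothetical toy covariates and an arbitrary nesting structure; it is not code from the package.
<<eval=FALSE>>=
## Illustrative sketch of the joint model matrix for K = 3 components with
## one constant, one nested (groups {1,2} and {3}) and one varying
## coefficient; all objects are hypothetical toy data.
K <- 3; N <- 5
x1 <- matrix(1, nrow = N)          ## covariate of the constant coefficient
x2 <- matrix(rnorm(N), nrow = N)   ## covariate of the nested coefficient
x3 <- matrix(rnorm(N), nrow = N)   ## covariate of the varying coefficient
J <- cbind(c(1, 1, 0), c(0, 0, 1)) ## K x |C| incidence matrix of the nesting
## column-wise (Khatri-Rao type) Kronecker product of J with the covariates
khatri_rao <- function(A, B)
  do.call(cbind, lapply(seq_len(ncol(A)),
                        function(j) kronecker(A[, j, drop = FALSE], B)))
X <- cbind(kronecker(matrix(1, nrow = K), x1),  ## X_constant
           khatri_rao(J, x2),                   ## X_nested
           kronecker(diag(K), x3))              ## X_varying
dim(X)  ## N * K rows and one column block per coefficient group
@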
Due to the replication of data in the case of constant or nested varying parameters the amount of memory needed for fitting the mixture model to large datasets is substantially increased and it might be easier to fit only varying coefficients to these datasets. To overcome this problem it could be considered to implement special data structures in order to avoid storing the same data multiple times for large datasets.

Before each M-step the average component sizes (over the given data points) are checked and components which are smaller than a given (relative) minimum size are omitted in order to avoid too small components where fitting problems might arise. This strategy has already been recommended for the SEM algorithm \citep{mixtures:Celeux+Diebolt:1988} because it allows to determine the suitable number of components in an automatic way given that the a-priori specified number of components is large enough. This recommendation is based on the assumption that the redundant components will be omitted during the estimation process if the algorithm is started with too many components. If omission of small components is not desired the minimum size required can be set to zero. All components will then be retained throughout the EM algorithm and a mixture with the number of components specified in the initialization will be returned. The algorithm is stopped if the relative change in the log-likelihood is smaller than a pre-specified $\epsilon$ or the maximum number of iterations is reached.

For model selection different information criteria are available: AIC, BIC and ICL \citep[Integrated Complete Likelihood;][]{mixtures:Biernacki+Celeux+Govaert:2000}. They are of the form twice the negative log-likelihood plus the number of parameters times a penalty factor, which equals 2 for the AIC and the logarithm of the number of observations for the BIC. The ICL is the same as the BIC except that the complete likelihood (where the missing class memberships are replaced by the assignments induced by the maximum a-posteriori probabilities) is used instead of the likelihood.

%%-----------------------------------------------------------------------
%%-----------------------------------------------------------------------
\section{Using the new functionality}
\label{sec:using-new-funct}

In the following model fitting and model selection in \proglang{R} is illustrated on several examples including mixtures of Gaussian, binomial and Poisson regression models, see also \cite{mixtures:Gruen:2006} and \cite{mixtures:Gruen+Leisch:2007a}.

More examples for mixtures of GLMs are provided as part of the software package through a collection of artificial and real world datasets, most of which have been previously used in the literature (see references in the online help pages). Each dataset can be loaded into \proglang{R} with \code{data("}\textit{name}\code{")} and the fitting of the proposed models can be replayed using \code{example("}\textit{name}\code{")}. Further details on these examples are given in a user guide which can be accessed using \code{vignette("regression-examples", package="flexmix")} from within \proglang{R}.

%%-----------------------------------------------------------------------
\subsection{Artificial example}\label{sec:artificial-example}

In the following the artificial dataset \code{NPreg} is used which has already been used in \cite{mixtures:Leisch:2004} to illustrate the application of package \pkg{flexmix}.
The data comes from two latent classes of size \Sexpr{nrow(NPreg)/2} each and for each of the classes the data is drawn with respect to the following structure: \begin{center} \begin{tabular}{ll} Class~1: & $ \mathit{yn} = 5x+\epsilon$\\ Class~2: & $ \mathit{yn} = 15+10x-x^2+\epsilon$ \end{tabular} \end{center} with $\epsilon\sim N(0,9)$, see the left panel of Figure~\ref{fig:npreg}. The dataset \code{NPreg} also includes a response $\mathit{yp}$ which is given by a generalized linear model following a Poisson distribution and using the logarithm as link function. The parameters of the mean are given for the two classes by: \begin{center} \begin{tabular}{ll} Class~1: & $ \mu_1 = 2 - 0.2x$\\ Class~2: & $ \mu_2 = 1 + 0.1x$. \end{tabular} \end{center} This signifies that given $x$ the response $\mathit{yp}$ in group $k$ follows a Poisson distribution with mean $e^{\mu_k}$, see the right panel of Figure~\ref{fig:npreg}. \setkeys{Gin}{width=\textwidth} \begin{figure} \centering <>= par(mfrow=c(1,2)) plot(yn~x, col=class, pch=class, data=NPreg) plot(yp~x, col=class, pch=class, data=NPreg) @ \caption{Standard regression example (left) and Poisson regression (right).} \label{fig:npreg} \end{figure} This model can be fitted in \proglang{R} using the commands: <<>>= suppressWarnings(RNGversion("3.5.0")) set.seed(1802) library("flexmix") data("NPreg", package = "flexmix") Model_n <- FLXMRglm(yn ~ . + I(x^2)) Model_p <- FLXMRglm(yp ~ ., family = "poisson") m1 <- flexmix(. ~ x, data = NPreg, k = 2, model = list(Model_n, Model_p), control = list(verbose = 10)) @ If the dimensions are independent the component specific model for multivariate observations can be specified as a list of models for each dimension. The estimation can be controlled with the \code{control} argument which is specified with an object of class \code{"FLXcontrol"}. For convenience also a named list can be provided which is used to construct and set the respective slots of the \code{"FLXcontrol"} object. Elements of the control object are \code{classify} to select ordinary EM, CEM or SEM, \code{minprior} for the minimum relative size of components, \code{iter.max} for the maximum number of iterations and \code{verbose} for monitoring. If \code{verbose} is a positive integer the log-likelihood is reported every \code{verbose} iterations and at convergence together with the number of iterations made. The default is to not report any log-likelihood information during the fitting process. The estimated model \code{m1} is of class \code{"flexmix"} and the result of the default plot method for this class is given in Figure~\ref{fig:root1}. This plot method uses package \pkg{lattice} \citep{mixtures:Sarkar:2008} and the usual parameters can be specified to alter the plot, e.g.~the argument \code{layout} determines the arrangement of the panels. The returned object is of class \code{"trellis"} and the plotting can also be influenced by the arguments of its show method. The default plot prints rootograms (i.e., a histogram of the square root of counts) of the a-posteriori probabilities of each observation separately for each component. For each component the observations with a-posteriori probabilities less than a pre-specified $\epsilon$ (default is $10^{-4}$) for this component are omitted in order to avoid that the bar at zero dominates the plot \citep{mixtures:Leisch:2004a}. Please note that the labels of the y-axis report the number of observations in each bar, i.e.~the squared values used for the rootograms. 
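As a brief illustration of the control options described above, the same mixture could, for instance, be refitted with the CEM variant and a stricter minimum component size; the values in the following sketch are arbitrary example settings.
<<eval=FALSE>>=
## Illustrative use of further control options (arbitrary example values).
m1_cem <- flexmix(. ~ x, data = NPreg, k = 2,
                  model = list(Model_n, Model_p),
                  control = list(classify = "CEM", minprior = 0.1,
                                 iter.max = 500, verbose = 0))
@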
\begin{figure}
\centering
<<fig = TRUE, echo = FALSE>>=
print(plot(m1))
@
\caption{The plot method for \code{"flexmix"} objects, here obtained by \code{plot(m1)}, shows rootograms of the posterior class probabilities.}
\label{fig:root1}
\end{figure}

More detailed information on the estimated parameters with respect to standard deviations and significance tests can be obtained with function \code{refit()}. This function determines the variance-covariance matrix of the estimated parameters by using the inverted negative Hessian matrix as computed by the general purpose optimizer \code{optim()} on the full likelihood of the model. \code{optim()} is initialized in the solution obtained with the EM algorithm. For mixtures of GLMs we also implemented the gradient, which speeds up convergence and gives more precise estimates of the Hessian.

Naturally, function \code{refit()} will also work for models which have been determined by applying some model selection strategy depending on the data (AIC, BIC, \ldots). The same caution is necessary as when using \code{summary()} on standard linear models selected using \code{step()}: The p-values shown are not correct because they have not been adjusted for the fact that the same data are used to select the model and compute the p-values. So use them only in an exploratory manner in this context, see also \cite{mixtures:Harrell:2001} for more details on the general problem.

The returned object can be inspected using \code{summary()} with arguments \code{which} to specify if information for the component model or the concomitant variable model should be shown and \code{model} to indicate for which dimension of the component models this should be done. Selecting \code{model=1} gives the parameter estimates for the dimension where the response variable follows a Gaussian distribution.
<<>>=
m1.refit <- refit(m1)
summary(m1.refit, which = "model", model = 1)
@

\begin{figure}
\centering
<<fig = TRUE, echo = FALSE>>=
print(plot(m1.refit, layout = c(1, 3), bycluster = FALSE,
           main = expression(paste(yn *tilde(" ")* x + x^2))),
      split = c(1, 1, 2, 1), more = TRUE)
print(plot(m1.refit, model = 2, main = expression(paste(yp *tilde(" ")* x)),
           layout = c(1, 2), bycluster = FALSE),
      split = c(2, 1, 2, 1))
@
\caption{The default plot for refitted \code{"flexmix"} objects, here obtained by \code{plot(refit(m1), model = 1)} and \code{plot(refit(m1), model = 2)}, shows the coefficient estimates and their confidence intervals.}
\label{fig:refit}
\end{figure}

The default plot method for the refitted \code{"flexmix"} object depicts the estimated coefficients with corresponding confidence intervals and is given in Figure~\ref{fig:refit}. It can be seen that for the first model the confidence intervals of the coefficients of the intercept and the quadratic term of \code{x} contain zero. A model where these coefficients are set to zero can be estimated with the model driver function \code{FLXMRglmfix()} and the following commands for specifying the nesting structure. The argument \code{nested} needs input for the number of components in each group (given by \code{k}) and the formula which determines the model matrix for the nesting (given by \code{formula}). This information can be provided in a named list. For the restricted model the element \code{k} is a vector with two 1s because each of the components has different parameters.
The formulas specifying the model matrices of these coefficients are \verb/~ 1 + I(x^2)/ for an intercept and a quadratic term of $x$ for component 1 and \code{~ 0} for no additional coefficients for component 2. The EM algorithm is initialized in the previously fitted model by passing the posterior probabilities in the argument \code{cluster}.
<<>>=
Model_n2 <- FLXMRglmfix(yn ~ . + 0,
  nested = list(k = c(1, 1), formula = c(~ 1 + I(x^2), ~ 0)))
m2 <- flexmix(. ~ x, data = NPreg, cluster = posterior(m1),
  model = list(Model_n2, Model_p))
m2
@

Model selection based on the BIC would suggest the smaller model which also corresponds to the true underlying model.
<<>>=
c(BIC(m1), BIC(m2))
@

%%-----------------------------------------------------------------------
\subsection{Beta-blockers dataset}
\label{sec:beta-blockers}

The dataset is analyzed in \cite{mixtures:Aitkin:1999, mixtures:Aitkin:1999a} using a finite mixture of binomial regression models. Furthermore, it is described in \citet[p.~165]{mixtures:McLachlan+Peel:2000}. The dataset is from a 22-center clinical trial of beta-blockers for reducing mortality after myocardial infarction. A two-level model is assumed to represent the data, where centers are at the upper level and patients at the lower level. The data is illustrated in Figure~\ref{fig:beta}.

First, the center information is ignored and a binomial logit regression model with treatment as covariate is fitted using \code{glm}, i.e.~$K=1$ and it is assumed that the different centers are comparable:
<<>>=
data("betablocker", package = "flexmix")
betaGlm <- glm(cbind(Deaths, Total - Deaths) ~ Treatment,
  family = "binomial", data = betablocker)
betaGlm
@

The residual deviance suggests that overdispersion is present in the data. In the next step the intercept is allowed to follow a mixture distribution given the centers. This signifies that the component membership is fixed for each center. This grouping is specified in \proglang{R} by adding \code{| Center} to the formula similar to the notation used in \pkg{nlme} \citep{mixtures:Pinheiro+Bates:2000}. Under the assumption of homogeneity within centers identifiability of the model class can be ensured by the sufficient conditions for identifiability given in \cite{mixtures:Follmann+Lambert:1991} for binomial logit models with varying intercepts and in \cite{mixtures:Gruen+Leisch:2008} for multinomial logit models with varying and constant parameters. In order to determine the suitable number of components, the mixture is fitted with different numbers of components.
<<>>=
betaMixFix <- stepFlexmix(cbind(Deaths, Total - Deaths) ~ 1 | Center,
  model = FLXMRglmfix(family = "binomial", fixed = ~ Treatment),
  k = 2:4, nrep = 5, data = betablocker)
@

The returned object is of class \code{"stepFlexmix"} and printing the object gives the information on the number of iterations until termination of the EM algorithm, a logical indicating if the EM algorithm has converged, the log-likelihood and some model information criteria. The plot method compares the fitted models using the different model information criteria.
<<>>=
betaMixFix
@

A specific \code{"flexmix"} model contained in the \code{"stepFlexmix"} object can be selected using \code{getModel()} with argument \code{which} to specify the selection criterion.
The best model with respect to the BIC is selected with: <<>>= betaMixFix_3 <- getModel(betaMixFix, which = "BIC") betaMixFix_3 <- relabel(betaMixFix_3, "model", "Intercept") @ The components of the selected model are ordered with respect to the estimated intercept values. In this case a model with three components is selected with respect to the BIC. The fitted values for the model with three components are given in Figure~\ref{fig:beta} separately for each component and the treatment and control groups. The fitted parameters of the component specific models can be accessed with: <<>>= parameters(betaMixFix_3) @ Please note that the coefficients of variable \code{Treatment} are the same for all three components. \begin{figure} \centering <>= library("grid") betablocker$Center <- with(betablocker, factor(Center, levels = Center[order((Deaths/Total)[1:22])])) clusters <- factor(clusters(betaMixFix_3), labels = paste("Cluster", 1:3)) print(dotplot(Deaths/Total ~ Center | clusters, groups = Treatment, as.table = TRUE, data = betablocker, xlab = "Center", layout = c(3, 1), scales = list(x = list(cex = 0.7, tck = c(1, 0))), key = simpleKey(levels(betablocker$Treatment), lines = TRUE, corner = c(1,0)))) betaMixFix.fitted <- fitted(betaMixFix_3) for (i in 1:3) { seekViewport(trellis.vpname("panel", i, 1)) grid.lines(unit(1:22, "native"), unit(betaMixFix.fitted[1:22, i], "native"), gp = gpar(lty = 1)) grid.lines(unit(1:22, "native"), unit(betaMixFix.fitted[23:44, i], "native"), gp = gpar(lty = 2)) } @ \setkeys{Gin}{width=0.8\textwidth} \caption{Relative number of deaths for the treatment and the control group for each center in the beta-blocker dataset. The centers are sorted by the relative number of deaths in the control group. The lines indicate the fitted values for each component of the 3-component mixture model with varying intercept and constant parameters for treatment.} \label{fig:beta} \end{figure} The variable \code{Treatment} can also be included in the varying part of the model. This signifies that a mixture distribution is assumed where for each component different values are allowed for the intercept and the treatment coefficient. This mixture distribution can be specified using function \code{FLXMRglm()}. Again it is assumed that the heterogeneity is only between centers and therefore the aggregated data for each center can be used. <<>>= betaMix <- stepFlexmix(cbind(Deaths, Total - Deaths) ~ Treatment | Center, model = FLXMRglm(family = "binomial"), k = 3, nrep = 5, data = betablocker) betaMix <- relabel(betaMix, "model", "Treatment") parameters(betaMix) c(BIC(betaMixFix_3), BIC(betaMix)) @ The difference between model \code{betaMix} and \code{betaMixFix\_3} is that the treatment coefficients are the same for all three components for \code{betaMixFix\_3} while they have different values for \code{betaMix} which can easily be seen when comparing the fitted component specific parameters. The larger model \code{betaMix} which also allows varying parameters for treatment has a higher BIC and therefore the smaller model \code{betaMixFix\_3} would be preferred. The default plot for \code{"flexmix"} objects gives a rootogram of the posterior probabilities for each component. Argument \code{mark} can be used to inspect with which components the specified component overlaps as all observations are coloured in the different panels which are assigned to this component based on the maximum a-posteriori probabilities. 
\begin{figure}
\centering
<<fig = TRUE, echo = FALSE>>=
print(plot(betaMixFix_3, nint = 10, mark = 1, col = "grey",
           layout = c(3, 1)))
@
\caption{Default plot of \code{"flexmix"} objects where the observations assigned to the first component are marked.}\label{fig:default}
\end{figure}

\begin{figure}
\centering
<<fig = TRUE, echo = FALSE>>=
print(plot(betaMixFix_3, nint = 10, mark = 2, col = "grey",
           layout = c(3, 1)))
@
\caption{Default plot of \code{"flexmix"} objects where the observations assigned to the second component are marked.}\label{fig:default-2}
\end{figure}

The rootogram indicates that the components are well separated. In Figure~\ref{fig:default} it can be seen that component 1 is completely separated from the other two components, while Figure~\ref{fig:default-2} shows that component 2 has a slight overlap with both other components.

The cluster assignments using the maximum a-posteriori probabilities are obtained with:
<<>>=
table(clusters(betaMix))
@

The estimated probabilities of death for each component for the treated patients and those in the control group can be obtained with:
<<>>=
predict(betaMix, newdata = data.frame(Treatment = c("Control", "Treated")))
@
or by obtaining the fitted values for two observations (e.g.~rows 1 and 23) with the desired levels of the predictor \code{Treatment}:
<<>>=
betablocker[c(1, 23), ]
fitted(betaMix)[c(1, 23), ]
@

A further analysis of the model is possible with function \code{refit()} which returns the estimated coefficients together with the standard deviations, z-values and corresponding p-values. Please note that the p-values are only approximate in the sense that they have not been corrected for the fact that the data has already been used to determine the specific fitted model.
<<>>=
summary(refit(betaMix))
@

Given the estimated treatment coefficients we now also compare this model to a model where the treatment coefficient is assumed to be the same for components 1 and 2. Such a model is specified using the model driver \code{FLXMRglmfix()}. As the first two components are assumed to have the same coefficients for treatment and for the third component the coefficient for treatment shall be set to zero, the argument \code{nested} has \code{k = c(2,1)} and \code{formula = c(\~{}Treatment, \~{}0)}.
<<>>=
ModelNested <- FLXMRglmfix(family = "binomial",
  nested = list(k = c(2, 1), formula = c(~ Treatment, ~ 0)))
betaMixNested <- flexmix(cbind(Deaths, Total - Deaths) ~ 1 | Center,
  model = ModelNested, k = 3, data = betablocker,
  cluster = posterior(betaMix))
parameters(betaMixNested)
c(BIC(betaMix), BIC(betaMixNested), BIC(betaMixFix_3))
@

The comparison of the BIC values suggests that the nested model with the same treatment effect for two components and no treatment effect for the third component is the best.

%%-----------------------------------------------------------------------
\subsection[Productivity of Ph.D. students in biochemistry]{Productivity of Ph.D.~students in biochemistry}
\label{sec:bioChemists}
<<echo=FALSE>>=
data("bioChemists", package = "flexmix")
@

This dataset is taken from \cite{mixtures:Long:1990}. It contains \Sexpr{nrow(bioChemists)} observations from academics who obtained their Ph.D.~degree in biochemistry in the 1950s and 60s. It includes \Sexpr{sum(bioChemists$fem=="Women")} women and \Sexpr{sum(bioChemists$fem=="Men")} men. The productivity was measured by counting the number of publications in scientific journals during the three-year period ending the year after the Ph.D.~was received.
In addition data on the productivity and the prestige of the mentor and the Ph.D.~department was collected. Two measures of family characteristics were recorded: marriage status and number of children of age 5 and lower by the year of the Ph.D.

First, mixtures with one, two and three components and only varying parameters are fitted, and the model minimizing the BIC is selected. This is based on the assumption that unobserved heterogeneity is present in the data due to latent differences between the students in their ability to be productive and achieve publications. Starting with the most general model to determine the number of components using information criteria and checking for possible model restrictions after having the number of components fixed is a common strategy in finite mixture modelling \citep[see][]{mixtures:Wang+Puterman+Cockburn:1996}. Function \code{refit()} is used to determine confidence intervals for the parameters in order to choose suitable alternative models. However, it has to be noted that in the course of the procedure these confidence intervals will not be correct any more because the specific fitted models have already been determined using the same data.
<<>>=
data("bioChemists", package = "flexmix")
Model1 <- FLXMRglm(family = "poisson")
ff_1 <- stepFlexmix(art ~ ., data = bioChemists, k = 1:3, model = Model1)
ff_1 <- getModel(ff_1, "BIC")
@

The selected model has \Sexpr{ff_1@k} components. The estimated coefficients of the components are given in Figure~\ref{fig:coefficients-1} together with the corresponding 95\% confidence intervals using the plot method for objects returned by \code{refit()}. The plot shows that the confidence intervals of the parameters for \code{kid5}, \code{mar}, \code{ment} and \code{phd} overlap for the two components. In a next step a mixture with two components is therefore fitted where only a varying intercept and a varying coefficient for \code{fem} is specified and all other coefficients are constant. The EM algorithm is initialized with the fitted mixture model using \code{posterior()}.

\begin{figure}
\centering
<<fig = TRUE, echo = FALSE>>=
print(plot(refit(ff_1), bycluster = FALSE,
           scales = list(x = list(relation = "free"))))
@
\caption{Coefficient estimates and confidence intervals for the model with only varying parameters.}\label{fig:coefficients-1}
\end{figure}

<<>>=
Model2 <- FLXMRglmfix(family = "poisson", fixed = ~ kid5 + mar + ment)
ff_2 <- flexmix(art ~ fem + phd, data = bioChemists,
  cluster = posterior(ff_1), model = Model2)
c(BIC(ff_1), BIC(ff_2))
@

If the BIC is used for model comparison the smaller model including only varying coefficients for the intercept and \code{fem} is preferred. The coefficients of the fitted model can be obtained using \code{refit()}:
<<>>=
summary(refit(ff_2))
@

It can be seen that the coefficient of \code{phd} does not differ significantly from zero for either component and might be omitted. This again improves the BIC.
<<>>=
Model3 <- FLXMRglmfix(family = "poisson", fixed = ~ kid5 + mar + ment)
ff_3 <- flexmix(art ~ fem, data = bioChemists,
  cluster = posterior(ff_2), model = Model3)
c(BIC(ff_2), BIC(ff_3))
@

The coefficients of the restricted model without \code{phd} are given in Figure~\ref{fig:coefficients-2}.
\begin{figure}[t]
\centering
<<fig = TRUE, echo = FALSE>>=
print(plot(refit(ff_3), bycluster = FALSE,
           scales = list(x = list(relation = "free"))))
@
\caption{Coefficient estimates and confidence intervals for the model with varying and constant parameters where the variable \code{phd} is not used in the regression.}\label{fig:coefficients-2}
\end{figure}

An alternative model would be to assume that gender does not directly influence the number of articles but has an impact on the segment sizes.
<<>>=
Model4 <- FLXMRglmfix(family = "poisson", fixed = ~ kid5 + mar + ment)
ff_4 <- flexmix(art ~ 1, data = bioChemists, cluster = posterior(ff_2),
  concomitant = FLXPmultinom(~ fem), model = Model4)
parameters(ff_4)
summary(refit(ff_4), which = "concomitant")
BIC(ff_4)
@

This suggests that the proportion of women is lower in the second component which is the more productive segment.

The alternative modelling strategy where homogeneity is assumed at the beginning and a varying intercept is added if overdispersion is observed leads to the following model which is the best with respect to the BIC.
<<>>=
Model5 <- FLXMRglmfix(family = "poisson", fixed = ~ kid5 + ment + fem)
ff_5 <- flexmix(art ~ 1, data = bioChemists, cluster = posterior(ff_2),
  model = Model5)
BIC(ff_5)
@

\begin{figure}
\centering
\setkeys{Gin}{width=0.8\textwidth}
<<fig = TRUE, echo = FALSE>>=
pp <- predict(ff_5, newdata = data.frame(kid5 = 0,
  mar = factor("Married", levels = c("Single", "Married")),
  fem = c("Men", "Women"), ment = mean(bioChemists$ment)))
matplot(0:12, sapply(unlist(pp), function(x) dpois(0:12, x)),
        type = "b", lty = 1, xlab = "Number of articles",
        ylab = "Probability")
legend("topright", paste("Comp.", rep(1:2, each = 2), ":",
  c("Men", "Women")), lty = 1, col = 1:4, pch = paste(1:4), bty = "n")
@
\caption{The estimated productivity for each component for men and women.}
\label{fig:estimated}
\end{figure}
\setkeys{Gin}{width=0.98\textwidth}

In Figure~\ref{fig:estimated} the estimated distribution of productivity for model \code{ff\_5} is given separately for men and women as well as for each component where for all other variables the mean values are used for the numeric variables and the most frequent category for the categorical variables. The two components differ in that component 1 contains the students who publish no article or only a single article, while the students in component 2 write on average several articles. With a constant coefficient for gender women publish fewer articles than men in both components.

This example shows that different optimal models are chosen for different modelling procedures. However, the distributions induced by the different variants of the model class may be similar and therefore it is not surprising that they then will have similar BIC values.

%%-----------------------------------------------------------------------
%%-----------------------------------------------------------------------
\section{Implementation}\label{sec:implementation}

The new features extend the available model class described in \cite{mixtures:Leisch:2004} by providing infrastructure for concomitant variable models and for fitting mixtures of GLMs with varying and constant parameters for the component specific parameters. The implementation of the extensions of the model class made it necessary to define a better class structure for the component specific models and to modify the fit functions \code{flexmix()} and \code{FLXfit()}. An overview on the \proglang{S}4 class structure of the package is given in Figure~\ref{fig:class structure}.
There is a class for unfitted finite mixture distributions given by \code{"FLXdist"} which contains a list of \code{"FLXM"} objects which determine the component specific models, a list of \code{"FLXcomponent"} objects which specify functions to determine the component specific log-likelihoods and predictions and which contain the component specific parameters, and an object of class \code{"FLXP"} which specifies the concomitant variable model. Class \code{"flexmix"} extends \code{"FLXdist"}. It represents a fitted finite mixture distribution and it contains the information about the fitting with the EM algorithm in the object of class \code{"FLXcontrol"}. Repeated fitting with the EM algorithm with different numbers of components is provided by function \code{stepFlexmix()} which returns an object of class \code{"stepFlexmix"}. Objects of class \code{"stepFlexmix"} contain the list of the fitted mixture models for each number of components in the slot \code{"models"}.

\setkeys{Gin}{width=.9\textwidth}
\begin{figure}[t]
\centering
\includegraphics{flexmix}
\caption{UML class diagram \citep[see][]{mixtures:Fowler:2004} of the \pkg{flexmix} package.}
\label{fig:class structure}
\end{figure}
\setkeys{Gin}{width=\textwidth}

For the component specific model a virtual class \code{"FLXM"} is introduced which (currently) has two subclasses: \code{"FLXMC"} for model-based clustering and \code{"FLXMR"} for clusterwise regression, where predictor variables are given. Additional slots have been introduced to allow for data preprocessing and the construction of the components was separated from the fit and is implemented using lexical scoping \citep{mixtures:Gentleman+Ihaka:2000} in the slot \code{defineComponent}. \code{"FLXMC"} has an additional slot \code{dist} to specify the name of the distribution of the variable. In the future functionality shall be provided for sampling from a fitted or unfitted finite mixture. Using this slot observations can be generated by using the function which results from adding an \code{r} at the beginning of the distribution name. This allows to only implement the (missing) random number generator functions and otherwise use the same method for sampling from mixtures with component specific models of class \code{"FLXMC"}.

For \code{flexmix()} and \code{FLXfit()} code blocks which are model dependent have been identified and different methods implemented. Finite mixtures of regressions with varying, nested and constant parameters were a suitable model class for this identification task as they are different from models previously implemented. The main differences are:
\begin{itemize}
\item The number of components is related to the component specific model and the omission of small components during the EM algorithm impacts the model.
\item The parameters of the component specific models cannot be determined separately in the M-step and a joint model matrix is needed.
\end{itemize}
This also makes it necessary to have different model dependent methods for \code{fitted()} which extracts the fitted values from a \code{"flexmix"} object, \code{predict()} which predicts new values for a \code{"flexmix"} object and \code{refit()} which refits an estimated model to obtain additional information for a \code{"flexmix"} object.
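The class structure described above can also be explored interactively with standard tools from the \pkg{methods} package; the following calls are only illustrative examples.
<<eval=FALSE>>=
## Interactive inspection of the S4 class structure using standard tools
## from the methods package.
isVirtualClass("FLXM")    ## the component specific model class is virtual
extends("FLXMC", "FLXM")  ## model-based clustering models extend "FLXM"
getSlots("FLXMC")         ## slots of "FLXMC", including "dist"
showMethods("FLXmstep")   ## model specific methods for the M-step
@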
%%----------------------------------------------------------------------- \subsection{Component specific models with varying and constant parameters}\label{sec:comp-models-with} A new M-step driver is provided which fits finite mixtures of GLMs with constant and nested varying parameters for the coefficients and the dispersion parameters. The class \code{"FLXMRglmfix"} returned by the driver \code{FLXMRglmfix()} has the following additional slots with respect to \code{"FLXMRglm"}: \begin{description} \item[\code{design}:] An incidence matrix indicating which columns of the model matrix are used for which component, i.e.~$\mathbf{D}=(\mathbf{1}_K,\mathbf{J}, \mathbf{I}_K)$. \item[\code{nestedformula}:] An object of class \code{"FLXnested"} containing the formula for the nested regression coefficients and the number of components in each $K_c$, $c \in C$. \item[\code{fixed}:] The formula for the constant regression coefficients. \item[\code{variance}:] A logical indicating if different variances shall be estimated for the components following a Gaussian distribution or a vector specifying the nested structure for estimating these variances. \end{description} The difference between estimating finite mixtures including only varying parameters using models specified with \code{FLXMRglm()} and those with varying and constant parameters using function \code{FLXMRglmfix()} is hidden from the user, as only the specified model is different. The fitted model is also of class \code{"flexmix"} and can be analyzed using the same functions as for any model fitted using package \pkg{flexmix}. The methods used are the same except if the slot containing the model is accessed and method dispatching is made via the model class. New methods are provided for models of class \code{"FLXMRglmfix"} for functions \code{refit()}, \code{fitted()} and \code{predict()} which can be used for analyzing the fitted model. The implementation allows repeated measurements by specifying a grouping variable in the formula argument of \code{flexmix()}. Furthermore, it has to be noticed that the model matrix is determined by updating the formula of the varying parameters successively with the formula of the constant and then of the nested varying parameters. This ensures that if a mixture distribution is fitted for the intercept, the model matrix of a categorical variable includes only the remaining columns for the constant parameters to have full column rank. However, this updating scheme makes it impossible to estimate a constant intercept while allowing varying parameters for a categorical variable. For this model one big model matrix is constructed where the observations are repeated $K$ times and suitable columns of zero added. The coefficients of all $K$ components are determined simultaneously in the M-step, while if only varying parameters are specified the maximization of the likelihood is made separately for all components. For large datasets the estimation of a combination of constant and varying parameters might therefore be more challenging than only varying parameters. %% ----------------------------------------------------------------------- \subsection{Concomitant variable models}\label{sec:conc-vari-models} For representing concomitant variable models the class \code{"FLXP"} is defined. It specifies how the concomitant variable model is fitted using the concomitant variable model matrix as predictor variables and the current a-posteriori probability estimates as response variables. 
The object has the following slots:
\begin{description}
\item[\code{fit}:] A \code{function (x, y, ...)} returning the fitted values for the component weights during the EM algorithm.
\item[\code{refit}:] A \code{function (x, y, ...)} used for refitting the model.
\item[\code{df}:] A \code{function (x, k, ...)} returning the degrees of freedom used for estimating the concomitant variable model given the model matrix \code{x} and the number of components \code{k}.
\item[\code{x}:] A matrix containing the model matrix of the concomitant variables.
\item[\code{formula}:] The formula for determining the model matrix \code{x}.
\item[\code{name}:] A character string describing the model, which is only used for print output.
\end{description}

Two constructor functions for concomitant variable models are provided at the moment. \code{FLXPconstant()} is for constant component weights without concomitant variables and for multinomial logit models \code{FLXPmultinom()} can be used. \code{FLXPmultinom()} has its own class \code{"FLXPmultinom"} which extends \code{"FLXP"} and has an additional slot \code{coef} for the fitted coefficients. The multinomial logit models are fitted using package \pkg{nnet} \citep{mixtures:Venables+Ripley:2002}.

%%-----------------------------------------------------------------------
\subsection{Further changes}

The estimation of the model with the EM algorithm was improved by adapting the variants to correspond to the CEM and SEM variants as outlined in the literature. To make this more explicit it is now also possible to use \code{"CEM"} or \code{"SEM"} to specify an EM variant in the \code{classify} argument of the \code{"FLXcontrol"} object. Even though the SEM algorithm can in general not be expected to converge, the fitting procedure is also terminated for the SEM algorithm if the relative change in the log-likelihood is smaller than the pre-specified threshold. This is motivated by the fact that for well separated clusters the posteriors might converge to an indicator function with all weight concentrated in one component. The fitted model with the maximum likelihood encountered during the SEM algorithm is returned.

For discrete data, a dataset in general contains multiple observations with the same values. A \code{weights} argument was added to the fitting function \code{flexmix()} in order to avoid repeating these observations in the provided dataset. The specification is through a \code{formula} in order to allow selecting a column of the data frame given in the \code{data} argument. The weights argument allows to avoid replicating the same observations and hence enables more efficient memory use in these applications. This possibility is especially useful in the context of model-based clustering for mixtures of Poisson distributions or latent class analysis with multivariate binary observations.

In order to be able to apply different initialization strategies, such as for example first running several different random initializations with CEM and then switching to ordinary EM using the best solution found by CEM for initialization, a \code{posterior()} function was implemented. \code{posterior()} also takes a \code{newdata} argument and hence, it is possible to apply subset strategies for large datasets as suggested in \cite{mixtures:Wehrens+Buydens+Fraley:2004}. The returned matrix of the posterior probabilities can be used to specify the \code{cluster} argument for \code{flexmix()} and the posteriors are then used as weights in the first M-step.
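A minimal sketch of such a CEM-then-EM strategy, assuming the \code{NPreg} data and the model specification from Section~\ref{sec:artificial-example} (the number of random starts and the iteration limit below are arbitrary), could look as follows:
<<eval=FALSE>>=
## Sketch of a CEM-then-EM initialization strategy: several short CEM runs
## with random starts, then ordinary EM started from the posteriors of the
## best CEM solution (number of runs and iterations chosen arbitrarily).
cem_runs <- lapply(1:5, function(i)
  flexmix(yn ~ x + I(x^2), data = NPreg, k = 2,
          control = list(classify = "CEM", iter.max = 15)))
best <- cem_runs[[which.max(sapply(cem_runs, logLik))]]
m_em <- flexmix(yn ~ x + I(x^2), data = NPreg, k = 2,
                cluster = posterior(best))
@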
The default plot methods now use trellis graphics as implemented in package \pkg{lattice} \citep{mixtures:Sarkar:2008}. Users familiar with the syntax of these graphics and with the plotting and printing arguments will find the application intuitive as a lot of plotting arguments are passed to functions from \pkg{lattice} as for example \code{xyplot()} and \code{histogram()}. In fact only new panel, pre-panel and group-panel functions were implemented. The returned object is of class \code{"trellis"} and the show method for this class is used to create the plot.

Function \code{refit()} was modified and now has two different estimation methods: \code{"optim"} and \code{"mstep"}. The default method \code{"optim"} determines the variance-covariance matrix of the parameters from the inverse Hessian of the full log-likelihood. The general purpose optimizer \code{optim()} is used to maximize the log-likelihood and initialized in the solution obtained with the EM algorithm. For mixtures of GLMs there are also functions implemented to determine the gradient which can be used to speed up convergence.

The second method \code{"mstep"} is only a raw approximation. It performs an M-step where the a-posteriori probabilities are treated as given instead of estimated and returns for the component specific models nearly complete \code{"glm"} objects which can be further analyzed. The advantage of this method is that the return value is basically a list of standard \code{"glm"} objects, such that the regular methods for this class can be used.

%%-----------------------------------------------------------------------
%%-----------------------------------------------------------------------
\section{Writing your own drivers}\label{sec:writing-your-own}

Two examples are given in the following to demonstrate how new drivers can be provided for concomitant variable models and for component specific models. Easy extensibility is one of the main implementation aims of the package and it can be seen that writing new drivers requires only a few lines of code for providing the constructor functions which include the fit functions.

%%-----------------------------------------------------------------------
\subsection{Component specific models: Zero-inflated models}\label{sec:component-models}
\lstset{frame=trbl,basicstyle=\small\tt,stepnumber=5,numbers=left}

In Poisson or binomial regression models it is often encountered that the observed number of zeros is higher than expected. A mixture with two components where one has mean zero can be used to model such data. These models are also referred to as zero-inflated models \citep[see for example][]{mixtures:Boehning+Dietz+Schlattmann:1999}. A generalization of this model class would be to fit mixtures with more than two components where one component has a mean fixed at zero. So this model class is a special case of a mixture of generalized linear models where (a) the family is restricted to Poisson and binomial and (b) the parameters of one component are fixed. For simplicity the implementation assumes that the component with mean zero is the first component. In addition we assume that the model matrix contains an intercept and, to have the first component absorb the excess zeros, the coefficient of the intercept is set to $-\infty$ and all other coefficients are set to zero. Hence, to implement this model using package \pkg{flexmix} an appropriate model class is needed with a corresponding convenience function for construction.
During the fitting of the EM algorithm using \code{flexmix()} different methods for this model class are needed when determining the model matrix (to check the presence of an intercept), to check the model after a component is removed and for the M-step to account for the fact that the coefficients of the first component are fixed. For all other methods those available for \code{"FLXMRglm"} can be re-used. The code is given in Figure~\ref{fig:ziglm.R}.

\begin{figure}
\centering
\begin{minipage}{0.98\textwidth}
\lstinputlisting{ziglm.R}
\end{minipage}
\caption{Driver for a zero-inflated component specific model.}
\label{fig:ziglm.R}
\end{figure}

The model class \code{"FLXMRziglm"} is defined as extending \code{"FLXMRglm"} in order to be able to inherit methods from this model class. For construction of a \code{"FLXMRziglm"} class the convenience function \code{FLXMRziglm()} is used which calls \code{FLXMRglm()}. The only differences are that the family is restricted to binomial or Poisson, that a different name is assigned and that an object of the correct class is returned.

The presence of the intercept in the model matrix is checked in \code{FLXgetModelmatrix()} after using the method available for \code{"FLXMRglm"} models as indicated by the call to \code{callNextMethod()}. During the EM algorithm \code{FLXremoveComponent()} is called if one component is removed. For this model class it checks if the first component has been removed and if this is the case the model class is changed to \code{"FLXMRglm"}.

In the M-step the coefficients of the first component are fixed and not estimated, while for the remaining components the M-step of \code{"FLXMRglm"} objects can be used. During the EM algorithm \code{FLXmstep()} is called to perform the M-step and returns a list of \code{"FLXcomponent"} objects with the fitted parameters. A new method for this function is needed for \code{"FLXMRziglm"} objects in order to account for the fixed coefficients in the first component, i.e.~for the first component the \code{"FLXcomponent"} object is constructed and concatenated with the list of \code{"FLXcomponent"} objects returned by using the \code{FLXmstep()} method for \code{"FLXMRglm"} models for the remaining components.

Similar modifications are necessary in order to be able to use \code{refit()} for this model class. The code for implementing the \code{refit()} method using \code{optim()} for \code{"FLXMRziglm"} is not shown, but can be inspected in the source code of the package.

\subsubsection{Example: Using the driver}

This new M-step driver can be used to fit a zero-inflated Poisson model to the data given in \cite{mixtures:Boehning+Dietz+Schlattmann:1999}. The dataset \code{dmft} consists of count data from a dental epidemiological study for the evaluation of various programs for reducing caries, collected among school children from an urban area of Belo Horizonte (Brazil). The variables included are the number of decayed, missing or filled teeth (DMFT index) at the beginning and at the end of the observation period, the gender, the ethnic background and the specific treatment for \Sexpr{nrow(dmft)} children.
The model can be fitted with the new driver function using the following commands: <<>>= data("dmft", package = "flexmix") Model <- FLXMRziglm(family = "poisson") Fitted <- flexmix(End ~ log(Begin + 0.5) + Gender + Ethnic + Treatment, model = Model, k = 2 , data = dmft, control = list(minprior = 0.01)) summary(refit(Fitted)) @ Please note that \cite{mixtures:Boehning+Dietz+Schlattmann:1999} added the predictor \code{log(Begin + 0.5)} to serve as an offset in order to be able to analyse the improvement in the DMFT index from the beginning to the end of the study. The linear predictor with the offset subtracted is intended to be an estimate for $\log(\mathbb{E}(\textrm{End})) - \log(\mathbb{E}(\textrm{Begin}))$. This is justified by the fact that for a Poisson distributed variable $Y$ with mean between 1 and 10 it holds that $\mathbb{E}(\log(Y + 0.5))$ is approximately equal to $\log(\mathbb{E}(Y))$. $\log(\textrm{Begin} + 0.5)$ can therefore be seen as an estimate for $\log(\mathbb{E}(\textrm{Begin}))$. The estimated coefficients with corresponding confidence intervals are given in Figure~\ref{fig:dmft}. As the coefficients of the first component are restricted a-priori to minus infinity for the intercept and to zero for the other variables, they are of no interest and only the second component is plotted. The box ratio can be modified as for \code{barchart()} in package \pkg{lattice}. The code to produce this plot is given by: <>= print(plot(refit(Fitted), components = 2, box.ratio = 3)) @ \begin{figure} \centering \setkeys{Gin}{width=0.9\textwidth} <>= <> @ \caption{The estimated coefficients of the zero-inflated model for the \code{dmft} dataset. The first component is not plotted as this component captures the inflated zeros and its coefficients are fixed a-priori.} \label{fig:dmft} \end{figure} %%----------------------------------------------------------------------- \subsection{Concomitant variable models}\label{sec:concomitant-models} If the concomitant variable is a categorical variable, the multinomial logit model is equivalent to a model where the component weights for each level of the concomitant variable are determined by the mean values of the a-posteriori probabilities. The driver which implements this \code{"FLXP"} model is given in Figure~\ref{fig:myConcomitant.R}. A name for the driver has to be specified and a \code{fit()} function. In the \code{fit()} function the mean posterior probability for all observations with the same covariate points is determined, assigned to the corresponding observations and the full new a-posteriori probability matrix returned. By contrast \code{refit()} only returns the new a-posteriori probability matrix for the number of unique covariate points. \lstset{frame=trbl,basicstyle=\small\tt,stepnumber=5,numbers=left} \begin{figure} \centering \begin{minipage}{0.98\textwidth} \lstinputlisting{myConcomitant.R} \end{minipage} \caption{Driver for a concomitant variable model where the component weights are determined by averaging over the a-posteriori probabilities for each level of the concomitant variable.} \label{fig:myConcomitant.R} \end{figure} \subsubsection{Example: Using the driver} If the concomitant variable model returned by \code{myConcomitant()} is used for the artificial example in Section~\ref{sec:using-new-funct} the same fitted model is returned as if a multinomial logit model is specified. An advantage is that in this case no problems occur if the fitted probabilities are close to zero or one. 
<<>>=
Concomitant <- FLXPmultinom(~ yb)
MyConcomitant <- myConcomitant(~ yb)
set.seed(1234)
m2 <- flexmix(. ~ x, data = NPreg, k = 2, model = list(Model_n, Model_p),
              concomitant = Concomitant)
m3 <- flexmix(. ~ x, data = NPreg, k = 2, model = list(Model_n, Model_p),
              cluster = posterior(m2), concomitant = MyConcomitant)
@
<<>>=
summary(m2)
summary(m3)
@

For comparing the estimated component weights for each value of $\mathit{yb}$ the following function can be used:
<<>>=
determinePrior <- function(object) {
  object@concomitant@fit(object@concomitant@x,
    posterior(object))[!duplicated(object@concomitant@x), ]
}
@
<<>>=
determinePrior(m2)
determinePrior(m3)
@
Obviously the fitted values of the two models correspond to each other.

%%-----------------------------------------------------------------------
%%-----------------------------------------------------------------------
\section{Summary and outlook}\label{sec:summary-outlook}

Package \pkg{flexmix} was extended to cover finite mixtures of GLMs with (nested) varying and constant parameters. This allows for example the estimation of varying intercept models. In order to be able to characterize the components given some variables, concomitant variable models can be estimated for the component weights.

The implementation of these extensions has triggered some modifications in the class structure and in the fit functions \code{flexmix()} and \code{FLXfit()}. For certain steps, such as the M-step, methods which depend on the component specific models are defined in order to enable the estimation of finite mixtures of GLMs with only varying parameters and those with (nested) varying and constant parameters with the same fit function. The flexibility of this modified implementation is demonstrated by illustrating how a driver for zero-inflated models can be defined.

In the future, diagnostic tools based on resampling methods shall be implemented, as bootstrap results can give valuable insights into the model fit \citep{mixtures:Gruen+Leisch:2004}. A function which conveniently allows to test linear hypotheses about the parameters using the variance-covariance matrix returned by \code{refit()} would be a further valuable diagnostic tool.

The implementation of zero-inflated Poisson and binomial regression models is a first step towards relaxing the assumption that all component specific distributions are from the same parametric family. As mixtures with components which follow distributions from different parametric families can be useful for example to model outliers \citep{mixtures:Dasgupta+Raftery:1998,mixtures:Leisch:2008}, it is intended to also make this functionality available in \pkg{flexmix} in the future.

%%-----------------------------------------------------------------------
%%-----------------------------------------------------------------------
\section*{Computational details}

<<echo=FALSE>>=
SI <- sessionInfo()
pkgs <- paste(sapply(c(SI$otherPkgs, SI$loadedOnly), function(x)
  paste("\\\\pkg{", x$Package, "} ", x$Version, sep = "")),
  collapse = ", ")
@
All computations and graphics in this paper have been done using \proglang{R} version \Sexpr{getRversion()} with the packages \Sexpr{pkgs}.

%%-----------------------------------------------------------------------
%%-----------------------------------------------------------------------
\section*{Acknowledgments}

This research was supported by the Austrian Science Foundation (FWF) under grants P17382 and T351.
Thanks also to Achim Zeileis for helpful discussions on implementation details and an anonymous referee for asking a good question about parameter significance which initiated the new version of function \code{refit()}. %%----------------------------------------------------------------------- %%----------------------------------------------------------------------- \bibliography{mixture} %%----------------------------------------------------------------------- %%----------------------------------------------------------------------- \end{document} flexmix/vignettes/flexmix-intro.Rnw0000644000176200001440000010256514404637307017254 0ustar liggesusers% % Copyright (C) 2004-2005 Friedrich Leisch % $Id: flexmix-intro.Rnw 5187 2020-06-25 17:59:39Z gruen $ % \documentclass[nojss]{jss} \title{FlexMix: A General Framework for Finite Mixture Models and Latent Class Regression in \proglang{R}} \Plaintitle{FlexMix: A General Framework for Finite Mixture Models and Latent Class Regression in R} \Shorttitle{FlexMix: Finite Mixture Models in \proglang{R}} \author{Friedrich Leisch\\Universit\"at f\"ur Bodenkultur Wien} \Plainauthor{Friedrich Leisch} \Address{ Friedrich Leisch\\ Institut f\"ur Angewandte Statistik und EDV\\ Universit\"at f\"ur Bodenkultur Wien\\ Peter Jordan Stra\ss{}e 82\\ 1190 Wien, Austria\\ E-mail: \email{Friedrich.Leisch@boku.ac.at} } \usepackage[utf8]{inputenc} \usepackage{listings} \newcommand{\R}{\proglang{R}} <>= suppressWarnings(RNGversion("3.5.0")) set.seed(1504) options(width=70, prompt = "R> ", continue = "+ ", useFancyQuotes = FALSE) grDevices::ps.options(family="Times") library("graphics") library("flexmix") data("NPreg") @ \Abstract{ This article was originally published as \cite{flexmix:Leisch:2004a} in the \emph{Journal of Statistical Software}. FlexMix implements a general framework for fitting discrete mixtures of regression models in the \R{} statistical computing environment: three variants of the EM algorithm can be used for parameter estimation, regressors and responses may be multivariate with arbitrary dimension, data may be grouped, e.g., to account for multiple observations per individual, the usual formula interface of the \proglang{S} language is used for convenient model specification, and a modular concept of driver functions allows to interface many different types of regression models. Existing drivers implement mixtures of standard linear models, generalized linear models and model-based clustering. FlexMix provides the E-step and all data handling, while the M-step can be supplied by the user to easily define new models. } \Keywords{\proglang{R}, finite mixture models, model based clustering, latent class regression} \Plainkeywords{R, finite mixture models, model based clustering, latent class regression} \Volume{11} \Issue{8} \Month{October} \Year{2004} \Submitdate{2004-04-19} \Acceptdate{2004-10-18} %%\usepackage{Sweave} %% already provided by jss.cls %%\VignetteIndexEntry{FlexMix: A General Framework for Finite Mixture Models and Latent Class Regression in R} %%\VignetteDepends{flexmix} %%\VignetteKeywords{R, finite mixture models, model based clustering, latent class regression} %%\VignettePackage{flexmix} \begin{document} \section{Introduction} \label{sec:introduction} Finite mixture models have been used for more than 100 years, but have seen a real boost in popularity over the last decade due to the tremendous increase in available computing power. 
The areas of application of mixture models range from biology and medicine to physics, economics and marketing. On the one hand these models can be applied to data where observations originate from various groups and the group affiliations are not known, and on the other hand to provide approximations for multi-modal distributions \citep{flexmix:Everitt+Hand:1981,flexmix:Titterington+Smith+Makov:1985,flexmix:McLachlan+Peel:2000}. In the 1990s finite mixture models have been extended by mixing standard linear regression models as well as generalized linear models \citep{flexmix:Wedel+DeSarbo:1995}. An important area of application of mixture models is market segmentation \citep{flexmix:Wedel+Kamakura:2001}, where finite mixture models replace more traditional cluster analysis and cluster-wise regression techniques as state of the art. Finite mixture models with a fixed number of components are usually estimated with the expectation-maximization (EM) algorithm within a maximum likelihood framework \citep{flexmix:Dempster+Laird+Rubin:1977} and with MCMC sampling \citep{flexmix:Diebolt+Robert:1994} within a Bayesian framework. \newpage The \R{} environment for statistical computing \citep{flexmix:R-Core:2004} features several packages for finite mixture models, including \pkg{mclust} for mixtures of multivariate Gaussian distributions \citep{flexmix:Fraley+Raftery:2002,flexmix:Fraley+Raftery:2002a}, \pkg{fpc} for mixtures of linear regression models \citep{flexmix:Hennig:2000} and \pkg{mmlcr} for mixed-mode latent class regression \citep{flexmix:Buyske:2003}. There are three main reasons why we have chosen to write yet another software package for EM estimation of mixture models: \begin{itemize} \item The existing implementations did not cover all cases we needed for our own research (mainly marketing applications). \item While all \R{} packages mentioned above are open source and hence can be extended by the user by modifying the source code, we wanted an implementation where extensibility is a main design principle to enable rapid prototyping of new mixture models. \item We include a sampling-based variant of the EM-algorithm for models where weighted maximum likelihood estimation is not available. FlexMix has a clean interface between E- and M-step such that variations of both are easy to combine. \end{itemize} This paper is organized as follows: First we introduce the mathematical models for latent class regression in Section~\ref{sec:latent-class-regr} and shortly discuss parameter estimation and identifiability. Section~\ref{sec:using-flexmix} demonstrates how to use FlexMix to fit models with the standard driver for generalized linear models. Finally, Section~\ref{sec:extending-flexmix} shows how to extend FlexMix by writing new drivers using the well-known model-based clustering procedure as an example. \section{Latent class regression} \label{sec:latent-class-regr} Consider finite mixture models with $K$ components of form \begin{equation}\label{eq:1} h(y|x,\psi) = \sum_{k = 1}^K \pi_k f(y|x,\theta_k) \end{equation} \begin{displaymath} \pi_k \geq 0, \quad \sum_{k = 1}^K \pi_k = 1 \end{displaymath} where $y$ is a (possibly multivariate) dependent variable with conditional density $h$, $x$ is a vector of independent variables, $\pi_k$ is the prior probability of component $k$, $\theta_k$ is the component specific parameter vector for the density function $f$, and $\psi=(\pi_1,,\ldots,\pi_K,\theta_1',\ldots,\theta_K')'$ is the vector of all parameters. 
If $f$ is a univariate normal density with component-specific mean $\beta_k'x$ and variance $\sigma^2_k$, we have $\theta_k = (\beta_k', \sigma_k^2)'$ and Equation~(\ref{eq:1}) describes a mixture of standard linear regression models, also called \emph{latent class regression} or \emph{cluster-wise regression} \citep{flexmix:DeSarbo+Cron:1988}. If $f$ is a member of the exponential family, we get a mixture of generalized linear models \citep{flexmix:Wedel+DeSarbo:1995}, known as \emph{GLIMMIX} models in the marketing literature \citep{flexmix:Wedel+Kamakura:2001}. For multivariate normal $f$ and $x\equiv1$ we get a mixture of Gaussians without a regression part, also known as \emph{model-based clustering}. The posterior probability that observation $(x,y)$ belongs to class $j$ is given by \begin{equation}\label{eq:3} \Prob(j|x, y, \psi) = \frac{\pi_j f(y | x, \theta_j)}{\sum_k \pi_k f(y | x, \theta_k)} \end{equation} The posterior probabilities can be used to segment data by assigning each observation to the class with maximum posterior probability. In the following we will refer to $f(\cdot|\cdot, \theta_k)$ as \emph{mixture components} or \emph{classes}, and the groups in the data induced by these components as \emph{clusters}. \subsection{Parameter estimation} \label{sec:parameter-estimation} The log-likelihood of a sample of $N$ observations $\{(x_1,y_1),\ldots,(x_N,y_N)\}$ is given by \begin{equation}\label{eq:4} \log L = \sum_{n=1}^N \log h(y_n|x_n,\psi) = \sum_{n=1}^N \log\left(\sum_{k = 1}^K \pi_kf(y_n|x_n,\theta_k) \right) \end{equation} and can usually not be maximized directly. The most popular method for maximum likelihood estimation of the parameter vector $\psi$ is the iterative EM algorithm \citep{flexmix:Dempster+Laird+Rubin:1977}: \begin{description} \item[Estimate] the posterior class probabilities for each observation \begin{displaymath} \hat p_{nk} = \Prob(k|x_n, y_n, \hat \psi) \end{displaymath} using Equation~(\ref{eq:3}) and derive the prior class probabilities as \begin{displaymath} \hat\pi_k = \frac1N \sum_{n=1}^N \hat p_{nk} \end{displaymath} \item[Maximize] the log-likelihood for each component separately using the posterior probabilities as weights \begin{equation}\label{eq:2} \max_{\theta_k} \sum_{n=1}^N \hat p_{nk} \log f(y_n | x_n, \theta_k) \end{equation} \end{description} The E- and M-steps are repeated until the likelihood improvement falls under a pre-specified threshold or a maximum number of iterations is reached. The EM algorithm cannot be used for mixture models only, but rather provides a general framework for fitting models on incomplete data. Suppose we augment each observation $(x_n,y_n)$ with an unobserved multinomial variable $z_n = (z_{n1},\ldots,z_{nK})$, where $z_{nk}=1$ if $(x_n,y_n)$ belongs to class $k$ and $z_{nk}=0$ otherwise. The EM algorithm can be shown to maximize the likelihood on the ``complete data'' $(x_n,y_n,z_n)$; the $z_n$ encode the missing class information. If the $z_n$ were known, maximum likelihood estimation of all parameters would be easy, as we could separate the data set into the $K$ classes and estimate the parameters $\theta_k$ for each class independently from the other classes. 
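For illustration, the E-step in Equation~(\ref{eq:3}) and the
weighted M-step in Equation~(\ref{eq:2}) are easy to write down
directly for a simple two-component univariate Gaussian mixture
without a regression part.  The following sketch is not part of the
FlexMix implementation; it assumes a numeric vector \texttt{y} of
observations together with current values of the priors and the
component parameters:
\begin{Sinput}
> prior <- c(0.4, 0.6)
> mu <- c(0, 3); sigma <- c(1, 1)
> dens <- cbind(dnorm(y, mu[1], sigma[1]), dnorm(y, mu[2], sigma[2]))
> post <- sweep(dens, 2, prior, "*")
> post <- post / rowSums(post)
> prior.new <- colMeans(post)
> mu.new <- colSums(post * y) / colSums(post)
\end{Sinput}
Here \texttt{post} contains the posterior probabilities $\hat
p_{nk}$, \texttt{prior.new} the updated prior class probabilities and
\texttt{mu.new} the weighted mean estimates of the M-step.  FlexMix
performs the same basic computations, but on the log-scale and for
arbitrary component models.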
If the weighted likelihood estimation in Equation~(\ref{eq:2}) is infeasible for analytical, computational, or other reasons, then we have to resort to approximations of the true EM procedure by assigning the observations to disjoint classes and do unweighted estimation within the groups: \begin{displaymath} \max_{\theta_k} \sum_{n: z_{nk=1}} \log f(y_n | x_n, \theta_k) \end{displaymath} This corresponds to allow only 0 and 1 as weights. Possible ways of assigning the data into the $K$ classes are \begin{itemize} \item \textbf{hard} \label{hard} assignment to the class with maximum posterior probability $p_{nk}$, the resulting procedure is called maximizing the \emph{classification likelihood} by \cite{flexmix:Fraley+Raftery:2002}. Another idea is to do \item \textbf{random} assignment to classes with probabilities $p_{nk}$, which is similar to the sampling techniques used in Bayesian estimation (although for the $z_n$ only). \end{itemize} Well known limitations of the EM algorithm include that convergence can be slow and is to a local maximum of the likelihood surface only. There can also be numerical instabilities at the margin of parameter space, and if a component gets to contain only a few observations during the iterations, parameter estimation in the respective component may be problematic. E.g., the likelihood of Gaussians increases without bounds for $\sigma^2\to 0$. As a result, numerous variations of the basic EM algorithm described above exist, most of them exploiting features of special cases for $f$. \subsection{Identifiability} \label{sec:identifiability} An open question is still identifiability of many mixture models. A comprehensive overview of this topic is beyond the scope of this paper, however, users of mixture models should be aware of the problem: \begin{description} \item[Relabelling of components:] Mixture models are only identifiable up to a permutation of the component labels. For EM-based approaches this only affects interpretation of results, but is no problem for parameter estimation itself. \item[Overfitting:] If a component is empty or two or more components have the same parameters, the data generating process can be represented by a smaller model with fewer components. This kind of unidentifiability can be avoided by requiring that the prior weights $\pi_k$ are not equal to zero and that the component specific parameters are different. \item[Generic unidentifiability:] It has been shown that mixtures of univariate normal, gamma, exponential, Cauchy and Poisson distributions are identifiable, while mixtures of discrete or continuous uniform distributions are not identifiable. A special case is the class of mixtures of binomial and multinomial distributions which are only identifiable if the number of components is limited with respect to, e.g., the number of observations per person. See \cite{flexmix:Everitt+Hand:1981}, \cite{flexmix:Titterington+Smith+Makov:1985}, \cite{flexmix:Grun:2002} and references therein for details. \end{description} FlexMix tries to avoid overfitting because of vanishing prior probabilities by automatically removing components where the prior $\pi_k$ falls below a user-specified threshold. Automated diagnostics for generic identifiability are currently under investigation. 
Relabelling of components is in some cases more of a nuisance than a
real problem (``component 2 of the first run may be component 3 in
the second run''), more serious are interactions of component
relabelling and categorical predictor variables, see
\cite{flexmix:Grun+Leisch:2004} for a discussion and how
bootstrapping can be used to assess identifiability of mixture
models.

\pagebreak[4]

\section{Using FlexMix}
\label{sec:using-flexmix}

\SweaveOpts{width=12,height=8,eps=FALSE,keep.source=TRUE}

The standard M-step \texttt{FLXMRglm()} of FlexMix is an interface to
R's generalized linear modelling facilities (the \texttt{glm()}
function). As a simple example we use artificial data with two latent
classes of size \Sexpr{nrow(NPreg)/2} each:
\begin{center}
  \begin{tabular}{ll}
    Class~1: & $ y = 5x+\epsilon$\\
    Class~2: & $ y = 15+10x-x^2+\epsilon$\\
  \end{tabular}
\end{center}
with $\epsilon\sim N(0,9)$ and prior class probabilities
$\pi_1=\pi_2=0.5$, see the left panel of Figure~\ref{fig:npreg}.

We can fit this model in \R{} using the commands
<<>>=
library("flexmix")
data("NPreg")
m1 <- flexmix(yn ~ x + I(x^2), data = NPreg, k = 2)
m1
@
and get a first look at the estimated parameters of mixture
component~1 by
<<>>=
parameters(m1, component = 1)
@
and
<<>>=
parameters(m1, component = 2)
@
for component~2. The parameter estimates of both components are close
to the true values. A cross-tabulation of true classes and cluster
memberships can be obtained by
<<>>=
table(NPreg$class, clusters(m1))
@

The summary method
<<>>=
summary(m1)
@
gives the estimated prior probabilities $\hat\pi_k$, the number of
observations assigned to the corresponding clusters, the number of
observations where $p_{nk}>\delta$ (with a default of
$\delta=10^{-4}$), and the ratio of the latter two numbers. For
well-separated components, a large proportion of observations with
non-vanishing posteriors $p_{nk}$ should also be assigned to the
corresponding cluster, giving a ratio close to 1. For our example
data the ratios of both components are approximately 0.7, indicating
the overlap of the classes at the cross-section of line and parabola.

\begin{figure}[htbp]
  \centering
<>=
par(mfrow=c(1,2))
plot(yn~x, col=class, pch=class, data=NPreg)
plot(yp~x, col=class, pch=class, data=NPreg)
@
  \caption{Standard regression example (left) and Poisson regression
    (right).}
  \label{fig:npreg}
\end{figure}

Histograms or rootograms of the posterior class probabilities can be
used to visually assess the cluster structure
\citep{flexmix:Tantrum+Murua+Stuetzle:2003}, this is now the default
plot method for \texttt{"flexmix"} objects \citep{flexmix:Leisch:2004}.
Rootograms are very similar to histograms, the only difference is
that the heights of the bars correspond to square roots of counts
rather than the counts themselves, hence low counts are more visible
and peaks less emphasized.

\begin{figure}[htbp]
  \centering
<>=
print(plot(m1))
@
  \caption{The plot method for \texttt{"flexmix"} objects, here
    obtained by \texttt{plot(m1)}, shows rootograms of the posterior
    class probabilities.}
  \label{fig:root1}
\end{figure}

Usually in each component a lot of observations have posteriors close
to zero, resulting in a high count for the corresponding bin in the
rootogram which obscures the information in the other bins. To avoid
this problem, all probabilities with a posterior below a threshold
are ignored (we again use $10^{-4}$).
A peak at probability 1 indicates that a mixture component is well seperated from the other components, while no peak at 1 and/or significant mass in the middle of the unit interval indicates overlap with other components. In our simple example the components are medium well separated, see Figure~\ref{fig:root1}. Tests for significance of regression coefficients can be obtained by <<>>= rm1 <- refit(m1) summary(rm1) @ Function \texttt{refit()} fits weighted generalized linear models to each component using the standard \R{} function \texttt{glm()} and the posterior probabilities as weights, see \texttt{help("refit")} for details. The data set \texttt{NPreg} also includes a response from a generalized linear model with a Poisson distribution and exponential link function. The two classes of size \Sexpr{nrow(NPreg)/2} each have parameters \begin{center} \begin{tabular}{ll} Class~1: & $ \mu_1 = 2 - 0.2x$\\ Class~2: & $ \mu_2 = 1 + 0.1x$\\ \end{tabular} \end{center} and given $x$ the response $y$ in group $k$ has a Poisson distribution with mean $e^{\mu_k}$, see the right panel of Figure~\ref{fig:npreg}. The model can be estimated using <>= options(width=55) @ <<>>= m2 <- flexmix(yp ~ x, data = NPreg, k = 2, model = FLXMRglm(family = "poisson")) summary(m2) @ <>= options(width=65) @ \begin{figure}[htbp] \centering <>= print(plot(m2)) @ \caption{\texttt{plot(m2)}} \label{fig:root2} \end{figure} Both the summary table and the rootograms in Figure~\ref{fig:root2} clearly show that the clusters of the Poisson response have much more overlap. For our simple low-dimensional example data the overlap of the classes is obvious by looking at scatterplots of the data. For data in higher dimensions this is not an option. The rootograms and summary tables for \texttt{"flexmix"} objects work off the densities or posterior probabilities of the observations and thus do not depend on the dimensionality of the input space. While we use simple 2-dimensional examples to demonstrate the techniques, they can easily be used on high-dimensional data sets or models with complicated covariate structures. \subsection{Multiple independent responses} \label{sec:mult-indep-resp} If the response $y=(y_1,\ldots,y_D)'$ is $D$-dimensional and the $y_d$ are mutually independent the mixture density in Equation~(\ref{eq:1}) can be written as \begin{eqnarray*} h(y|x,\psi) &=& \sum_{k = 1}^K \pi_k f(y|x,\theta_k)\\ &=& \sum_{k = 1}^K \pi_k \prod_{d=1}^D f_d(y|x,\theta_{kd}) \end{eqnarray*} To specify such models in FlexMix we pass it a list of models, where each list element corresponds to one $f_d$, and each can have a different set of dependent and independent variables. To use the Gaussian and Poisson responses of data \texttt{NPreg} simultaneously, we use the model specification \begin{Sinput} > m3 = flexmix(~x, data=NPreg, k=2, + model=list(FLXMRglm(yn~.+I(x^2)), + FLXMRglm(yp~., family="poisson"))) \end{Sinput} <>= m3 <- flexmix(~ x, data = NPreg, k = 2, model=list(FLXMRglm(yn ~ . + I(x^2)), FLXMRglm(yp ~ ., family = "poisson"))) @ Note that now three model formulas are involved: An overall formula as first argument to function \texttt{flexmix()} and one formula per response. The latter ones are interpreted relative to the overall formula such that common predictors have to be specified only once, see \texttt{help("update.formula")} for details on the syntax. The basic principle is that the dots get replaced by the respective terms from the overall formula. 
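The dot replacement can be checked interactively using the standard
\texttt{update()} mechanism for formulas, which is essentially what
\texttt{flexmix()} uses internally:
\begin{Sinput}
> update(~ x, yn ~ . + I(x^2))
\end{Sinput}
returns \verb|yn ~ x + I(x^2)|, i.e., the formula that is effectively
used for the Gaussian response in the example above.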
The rootograms show that the posteriors of the two-response model are shifted towards 0 and 1 (compared with either of the two univariate models), the clusters are now well-separated. \begin{figure}[htbp] \centering <>= print(plot(m3)) @ \caption{\texttt{plot(m3)}} \label{fig:root3} \end{figure} \subsection{Repeated measurements} \label{sec:repe-meas} If the data are repeated measurements on $M$ individuals, and we have $N_m$ observations from individual $m$, then the log-likelihood in Equation~(\ref{eq:4}) can be written as \begin{displaymath} \log L = \sum_{m=1}^M \sum_{n=1}^{N_m} \log h(y_{mn}|x_{mn},\psi), \qquad \sum_{m=1}^M N_m = N \end{displaymath} and the posterior probability that individual $m$ belongs to class $j$ is given by \begin{displaymath} \Prob(j|m) = \frac{\pi_j \prod_{n=1}^{N_m} f(y_{mn} | x_{mn}, \theta_j)}{\sum_k \pi_k \prod_{n=1}^{N_m} f(y_{mn} | x_{mn}, \theta_k)} \end{displaymath} where $(x_{mn}, y_{mn})$ is the $n$-th observation from individual $m$. As an example, assume that the data in \texttt{NPreg} are not 200 independent observations, but 4 measurements each from 50 persons such that $\forall m: N_m=4$. Column \texttt{id2} of the data frame encodes such a grouping and can easily be used in FlexMix: <<>>= m4 <- flexmix(yn ~ x + I(x^2) | id2, data = NPreg, k = 2) summary(m4) @ Note that convergence of the EM algorithm is much faster with grouping and the two clusters are now perfectly separated. \subsection{Control of the EM algorithm} \label{sec:control-em-algorithm} Details of the EM algorithm can be tuned using the \texttt{control} argument of function \texttt{flexmix()}. E.g., to use a maximum number of 15 iterations, report the log-likelihood at every 3rd step and use hard assignment of observations to clusters (cf. page~\pageref{hard}) the call is <<>>= m5 <- flexmix(yn ~ x + I(x^2), data = NPreg, k = 2, control = list(iter.max = 15, verbose = 3, classify = "hard")) @ Another control parameter (\texttt{minprior}, see below for an example) is the minimum prior probability components are enforced to have, components falling below this threshold (the current default is 0.05) are removed during EM iteration to avoid numerical instabilities for components containing only a few observations. Using a minimum prior of 0 disables component removal. \subsection{Automated model search} In real applications the number of components is unknown and has to be estimated. Tuning the minimum prior parameter allows for simplistic model selection, which works surprisingly well in some situations: <<>>= m6 <- flexmix(yp ~ x + I(x^2), data = NPreg, k = 4, control = list(minprior = 0.2)) m6 @ Although we started with four components, the algorithm converged at the correct two component solution. A better approach is to fit models with an increasing number of components and compare them using AIC or BIC. As the EM algorithm converges only to the next local maximum of the likelihood, it should be run repeatedly using different starting values. The function \texttt{stepFlexmix()} can be used to repeatedly fit models, e.g., <<>>= m7 <- stepFlexmix(yp ~ x + I(x^2), data = NPreg, control = list(verbose = 0), k = 1:5, nrep = 5) @ runs \texttt{flexmix()} 5 times for $k=1,2,\ldots,5$ components, totalling in 25 runs. It returns a list with the best solution found for each number of components, each list element is simply an object of class \texttt{"flexmix"}. To find the best model we can use <<>>= getModel(m7, "BIC") @ and choose the number of components minimizing the BIC. 
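Provided the corresponding methods are defined for the object
returned by \texttt{stepFlexmix()} (which should be the case for
current versions of the package), the information criteria of all
fitted models can also be inspected directly, e.g., using
\begin{Sinput}
> BIC(m7)
> AIC(m7)
\end{Sinput}
and \texttt{plot(m7)} gives a graphical comparison of the criteria
for the different numbers of components.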
\section{Extending FlexMix} \label{sec:extending-flexmix} One of the main design principles of FlexMix was extensibility, users can provide their own M-step for rapid prototyping of new mixture models. FlexMix was written using S4 classes and methods \citep{flexmix:Chambers:1998} as implemented in \R{} package \pkg{methods}. The central classes for writing M-steps are \texttt{"FLXM"} and \texttt{"FLXcomponent"}. Class \texttt{"FLXM"} specifies how the model is fitted using the following slots: \begin{description} \item[fit:] A \texttt{function(x,y,w)} returning an object of class \texttt{"FLXcomponent"}. \item[defineComponent:] Expression or function constructing the object of class \texttt{"FLXcomponent"}. \item[weighted:] Logical, specifies if the model may be fitted using weighted likelihoods. If \texttt{FALSE}, only hard and random classification are allowed (and hard classification becomes the default). \item[formula:] Formula relative to the overall model formula, default is \verb|.~.| \item[name:] A character string describing the model, this is only used for print output. \end{description} The remaining slots of class \texttt{"FLXM"} are used internally by FlexMix to hold data, etc. and omitted here, because they are not needed to write an M-step driver. The most important slot doing all the work is \texttt{fit} holding a function performing the maximum likelihood estimation described in Equation~(\ref{eq:2}). The \texttt{fit()} function returns an object of class \texttt{"FLXcomponent"} which holds a fitted component using the slots: \begin{description} \item[logLik:] A \texttt{function(x,y)} returning the log-likelihood for observations in matrices \texttt{x} and \texttt{y}. \item[predict:] A \texttt{function(x)} predicting \texttt{y} given \texttt{x}. \item[df:] The degrees of freedom used by the component, i.e., the number of estimated parameters. \item[parameters:] An optional list containing model parameters. \end{description} In a nutshell class \texttt{"FLXM"} describes an \emph{unfitted} model, whereas class \texttt{"FLXcomponent"} holds a \emph{fitted} model. \lstset{frame=trbl,basicstyle=\small\tt,stepnumber=5,numbers=left} \begin{figure}[tb] \centering \begin{minipage}{0.94\textwidth} \lstinputlisting{mymclust.R} \end{minipage} \caption{M-step for model-based clustering: \texttt{mymclust} is a simplified version of the standard FlexMix driver \texttt{FLXmclust}.} \label{fig:mymclust.R} \end{figure} \subsection{Writing an M-step driver} \label{sec:writing-an-m} Figure~\ref{fig:mymclust.R} shows an example driver for model-based clustering. We use function \texttt{dmvnorm()} from package \pkg{mvtnorm} for calculation of multivariate Gaussian densities. In line~5 we create a new \texttt{"FLXMC"} object named \texttt{retval}, which is also the return value of the driver. Class \texttt{"FLXMC"} extends \texttt{"FLXM"} and is used for model-based clustering. It contains an additional slot with the name of the distribution used. All drivers should take a formula as their first argument, this formula is directly passed on to \texttt{retval}. In most cases authors of new FlexMix drivers need not worry about formula parsing etc., this is done by \texttt{flexmix} itself. In addition we have to declare whether the driver can do weighted ML estimation (\texttt{weighted=TRUE}) and give a name to our model. The remainder of the driver creates a \texttt{fit()} function, which takes regressors \texttt{x}, response \texttt{y} and weights \texttt{w}. 
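In essence, the weighted estimation of Equation~(\ref{eq:2}) for this
model boils down to computing a weighted mean vector and covariance
matrix, e.g., along the lines of the following sketch (with
\texttt{y} and \texttt{w} as passed to \texttt{fit()}):
\begin{Sinput}
> est <- cov.wt(y, wt = w)
> para <- list(center = est$center, cov = est$cov)
\end{Sinput}
The driver in Figure~\ref{fig:mymclust.R} builds on the same
function, as explained in the following.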
For multivariate Gaussians the maximum likelihood estimates
correspond to mean and covariance matrix, the standard R function
\texttt{cov.wt()} returns a list containing estimates of the weighted
covariance matrix and the mean for given data. Our simple example
performs clustering without a regression part, hence $x$ is
ignored. If \texttt{y} has $D$ columns, we estimate $D$ parameters
for the mean and $D(D+1)/2$ parameters for the covariance matrix,
giving a total of $(3D+D^2)/2$ parameters (line~11). As an additional
feature we allow the user to specify whether the covariance matrix is
assumed to be diagonal or a full matrix. For \texttt{diagonal=TRUE}
we use only the main diagonal of the covariance matrix (line~14) and
the number of parameters is reduced to $2D$.

In addition to parameter estimates, \texttt{flexmix()} needs a
function calculating the log-likelihood of given data $x$ and $y$,
which in our example is the log-density of a multivariate
Gaussian. In addition we have to provide a function predicting $y$
given $x$, in our example simply the mean of the Gaussian. Finally we
create a new \texttt{"FLXcomponent"} as return value of function
\texttt{fit()}.

Note that our internal functions \texttt{fit()}, \texttt{logLik()}
and \texttt{predict()} take only \texttt{x}, \texttt{y} and
\texttt{w} as arguments, but none of the model-specific parameters
like means and covariances, although they use them of course. \R{}
uses \emph{lexical scoping} rules for finding free variables
\citep{flexmix:Gentleman+Ihaka:2000}, hence it searches for them
first in the environment where a function is defined. E.g., the
\texttt{fit()} function uses the variable \texttt{diagonal} in
line~24, and finds it in the environment where the function itself
was defined, which is the body of function \texttt{mymclust()}.
Function \texttt{logLik()} uses the list \texttt{para} in lines~8 and
9, and uses the one found in the body of \texttt{defineComponent()}.

Function \texttt{flexmix()} on the other hand never sees the model
parameters, all it uses are function calls of form
\texttt{fit(x,y,w)} or \texttt{logLik(x,y)}, which are exactly the
same for all kinds of mixture models. In fact, it would not be
necessary to even store the component parameters in the
\texttt{"FLXcomponent"} object, they are there only for convenience
such that users can easily extract and use them after
\texttt{flexmix()} has finished. Lexical scope allows writing clean
interfaces in a very elegant way; the driver abstracts all model
details from the FlexMix main engine.

\subsection{Example: Using the driver}
\label{sec:example:-model-based}

\SweaveOpts{width=12,height=6,eps=FALSE}

<>=
library("flexmix")
set.seed(1504)
options(width=60)
grDevices::ps.options(family="Times")
suppressMessages(require("ellipse"))
suppressMessages(require("mvtnorm"))
source("mymclust.R")
@

As a simple example we use the four 2-dimensional Gaussian clusters
from data set \texttt{Nclus}. Fitting a wrong model with diagonal
covariance matrix is done by
<<>>=
data("Nclus")
m1 <- flexmix(Nclus ~ 1, k = 4, model = mymclust())
summary(m1)
@
The result can be seen in the left panel of Figure~\ref{fig:ell}; it
is ``wrong'' because we forced the ellipses to be parallel to the
axes. The overlap between three of the four clusters can also be
inferred from the low ratio statistics in the summary table (around
0.5 for components 1, 3 and 4), while the much better separated upper
left cluster has a much higher ratio of 0.85.
Using the correct model with a full covariance matrix can be done by setting \texttt{diagonal=FALSE} in the call to our driver \texttt{mymclust()}: <<>>= m2 <- flexmix(Nclus ~ 1, k = 4, model = mymclust(diagonal = FALSE)) summary(m2) @ \begin{figure}[htbp] \centering <>= par(mfrow=1:2) plotEll(m1, Nclus) plotEll(m2, Nclus) @ \caption{Fitting a mixture model with diagonal covariance matrix (left) and full covariance matrix (right).} \label{fig:ell} \end{figure} \pagebreak[4] \section{Summary and outlook} \label{sec:summary} The primary goal of FlexMix is extensibility, this makes the package ideal for rapid development of new mixture models. There is no intent to replace packages implementing more specialized mixture models like \pkg{mclust} for mixtures of Gaussians, FlexMix should rather be seen as a complement to those. By interfacing R's facilities for generalized linear models, FlexMix allows the user to estimate complex latent class regression models. Using lexical scope to resolve model-specific parameters hides all model details from the programming interface, FlexMix can in principle fit almost arbitrary finite mixture models for which the EM algorithm is applicable. The downside of this is that FlexMix can in principle fit almost arbitrary finite mixture models, even models where no proper theoretical results for model identification etc.\ are available. We are currently working on a toolset for diagnostic checks on mixture models to test necessary identifiability conditions for those cases where results are available. We also want to implement newer variations of the classic EM algorithm, especially for faster convergence. Another plan is to have an interactive version of the rootograms using \texttt{iPlots} \citep{flexmix:Urbanek+Theus:2003} such that the user can explore the relations between mixture components, possibly linked to background variables. Other planned extensions include covariates for the prior probabilities and to allow to mix different distributions for components, e.g., to include a Poisson point process for background noise. \section*{Computational details} <>= SI <- sessionInfo() pkgs <- paste(sapply(c(SI$otherPkgs, SI$loadedOnly), function(x) paste("\\\\pkg{", x$Package, "} ", x$Version, sep = "")), collapse = ", ") @ All computations and graphics in this paper have been done using \proglang{R} version \Sexpr{getRversion()} with the packages \Sexpr{pkgs}. \section*{Acknowledgments} This research was supported by the Austrian Science Foundation (FWF) under grant SFB\#010 (`Adaptive Information Systems and Modeling in Economics and Management Science'). Bettina Gr\"un has modified the original version to include and reflect the changes of the package. \bibliography{flexmix} \end{document} %%% Local Variables: %%% mode: latex %%% TeX-master: t %%% End: flexmix/vignettes/myConcomitant.R0000644000176200001440000000130314404637307016712 0ustar liggesusersmyConcomitant <- function(formula = ~ 1) { z <- new("FLXP", name = "myConcomitant", formula = formula) z@fit <- function(x, y, w, ...) { if (missing(w) || is.null(w)) w <- rep(1, length(x)) f <- as.integer(factor(apply(x, 1, paste, collapse = ""))) AVG <- apply(w*y, 2, tapply, f, mean) (AVG/rowSums(AVG))[f,,drop=FALSE] } z@refit <- function(x, y, w, ...) 
    {
        if (missing(w) || is.null(w)) w <- rep(1, length(x))
        f <- as.integer(factor(apply(x, 1, paste, collapse = "")))
        AVG <- apply(w*y, 2, tapply, f, mean)
        (AVG/rowSums(AVG))
    }
    z
}
flexmix/vignettes/flexmix.png0000644000176200001440000022746114404637307016134 0ustar liggesusers
flexmix/R/0000755000176200001440000000000014404662042012131 5ustar liggesusersflexmix/R/flexmix.R0000644000176200001440000005224414404637304013742 0ustar liggesusers#
#  Copyright (C) 2004-2016 Friedrich Leisch and Bettina Gruen
#  $Id: flexmix.R 5184 2020-06-20 18:27:29Z gruen $
#

log_row_sums <- function(m) {
    M <- m[cbind(seq_len(nrow(m)), max.col(m, "first"))]
    M + log(rowSums(exp(m - M)))
}

## The following two methods only fill in and rearrange the model argument
setMethod("flexmix",
          signature(formula = "formula", model="missing"),
function(formula, data=list(), k=NULL, cluster=NULL,
         model=NULL, concomitant=NULL, control=NULL, weights=NULL)
{
    mycall = match.call()
    z <- flexmix(formula=formula, data=data, k=k, cluster=cluster,
                 model=list(FLXMRglm()), concomitant=concomitant,
                 control=control, weights = weights)
    z@call <- mycall
    z
})

setMethod("flexmix",
          signature(formula = "formula", model="FLXM"),
function(formula, data=list(), k=NULL, cluster=NULL,
         model=NULL, concomitant=NULL, control=NULL, weights=NULL)
{
    mycall = match.call()
    z <- flexmix(formula=formula, data=data, k=k, cluster=cluster,
                 model=list(model), concomitant=concomitant,
                 control=control, weights=weights)
    z@call <- mycall
    z
})

## This is the real thing
setMethod("flexmix",
          signature(formula = "formula", model="list"),
function(formula, data=list(), k=NULL, cluster=NULL,
         model=NULL, concomitant=NULL, control=NULL, weights=NULL)
{
    mycall = match.call()
    control = as(control, "FLXcontrol")
    if (!is(concomitant, "FLXP")) concomitant <- FLXPconstant()

    groups <- .FLXgetGrouping(formula, data)
    model <- lapply(model, FLXcheckComponent, k, cluster)
    k <- unique(unlist(sapply(model, FLXgetK, k)))
    if (length(k) > 1) stop("number of clusters not specified correctly")

    model <- lapply(model, FLXgetModelmatrix, data, formula)

    groups$groupfirst <-
        if (length(groups$group)) groupFirst(groups$group)
        else rep(TRUE, FLXgetObs(model[[1]]))

    if (is(weights, "formula")) {
        weights <- model.frame(weights, data = data, na.action = NULL)[,1]
    }
    ## check if the weights are integer
    ##
if non-integer weights are wanted modifications e.g. ## for classify != weighted and ## plot,flexmix,missing-method are needed if (!is.null(weights) & !identical(weights, as.integer(weights))) stop("only integer weights allowed") ## if weights and grouping is specified the weights within each ## group need to be the same if (!is.null(weights) & length(groups$group)>0) { unequal <- tapply(weights, groups$group, function(x) length(unique(x)) > 1) if (any(unequal)) stop("identical weights within groups needed") } postunscaled <- initPosteriors(k, cluster, FLXgetObs(model[[1]]), groups) if (ncol(postunscaled) == 1L) concomitant <- FLXPconstant() concomitant <- FLXgetModelmatrix(concomitant, data = data, groups = groups) z <- FLXfit(model=model, concomitant=concomitant, control=control, postunscaled=postunscaled, groups=groups, weights = weights) z@formula = formula z@call = mycall z@k0 = as.integer(k) z }) ###********************************************************** setMethod("FLXgetK", signature(model = "FLXM"), function(model, k, ...) k) setMethod("FLXgetObs", signature(model = "FLXM"), function(model) nrow(model@x)) setMethod("FLXcheckComponent", signature(model = "FLXM"), function(model, ...) model) setMethod("FLXremoveComponent", signature(model = "FLXM"), function(model, ...) model) setMethod("FLXmstep", signature(model = "FLXM"), function(model, weights, components, ...) { if ("component" %in% names(formals(model@fit))) sapply(seq_len(ncol(weights)), function(k) model@fit(model@x, model@y, weights[,k], component = components[[k]]@parameters)) else sapply(seq_len(ncol(weights)), function(k) model@fit(model@x, model@y, weights[,k])) }) setMethod("FLXdeterminePostunscaled", signature(model = "FLXM"), function(model, components, ...) { matrix(sapply(components, function(x) x@logLik(model@x, model@y)), nrow = nrow(model@y)) }) ###********************************************************** setMethod("FLXfit", signature(model="list"), function(model, concomitant, control, postunscaled=NULL, groups, weights) { ### initialize k <- ncol(postunscaled) N <- nrow(postunscaled) control <- allweighted(model, control, weights) if(control@verbose>0) cat("Classification:", control@classify, "\n") if (control@classify %in% c("SEM", "random")) iter.rm <- 0 group <- groups$group groupfirst <- groups$groupfirst if(length(group)>0) postunscaled <- groupPosteriors(postunscaled, group) logpostunscaled <- log(postunscaled) postscaled <- exp(logpostunscaled - log_row_sums(logpostunscaled)) llh <- -Inf if (control@classify %in% c("SEM", "random")) llh.max <- -Inf converged <- FALSE components <- rep(list(rep(list(new("FLXcomponent")), k)), length(model)) ### EM for(iter in seq_len(control@iter.max)) { ### M-Step postscaled = .FLXgetOK(postscaled, control, weights) prior <- if (is.null(weights)) ungroupPriors(concomitant@fit(concomitant@x, postscaled[groupfirst,,drop=FALSE]), group, groupfirst) else ungroupPriors(concomitant@fit(concomitant@x, (postscaled/weights)[groupfirst & weights > 0,,drop=FALSE], weights[groupfirst & weights > 0]), group, groupfirst) # Check min.prior nok <- if (nrow(prior) == 1) which(prior < control@minprior) else { if (is.null(weights)) which(colMeans(prior[groupfirst,,drop=FALSE]) < control@minprior) else which(colSums(prior[groupfirst,] * weights[groupfirst])/sum(weights[groupfirst]) < control@minprior) } if(length(nok)) { if(control@verbose>0) cat("*** Removing", length(nok), "component(s) ***\n") prior <- prior[,-nok,drop=FALSE] prior <- prior/rowSums(prior) postscaled <- 
postscaled[,-nok,drop=FALSE] postscaled[rowSums(postscaled) == 0,] <- if (nrow(prior) > 1) prior[rowSums(postscaled) == 0,] else prior[rep(1, sum(rowSums(postscaled) == 0)),] postscaled <- postscaled/rowSums(postscaled) if (!is.null(weights)) postscaled <- postscaled * weights k <- ncol(prior) if (k == 0) stop("all components removed") if (control@classify=="random") { llh.max <- -Inf iter.rm <- iter } model <- lapply(model, FLXremoveComponent, nok) components <- lapply(components, "[", -nok) } components <- lapply(seq_along(model), function(i) FLXmstep(model[[i]], postscaled, components[[i]])) postunscaled <- matrix(0, nrow = N, ncol = k) for (n in seq_along(model)) postunscaled <- postunscaled + FLXdeterminePostunscaled(model[[n]], components[[n]]) if(length(group)>0) postunscaled <- groupPosteriors(postunscaled, group) ### E-Step ## Code changed thanks to Nicolas Picard ## to avoid problems with small likelihoods postunscaled <- if (nrow(prior) > 1) postunscaled + log(prior) else sweep(postunscaled, 2, log(prior), "+") logpostunscaled <- postunscaled postunscaled <- exp(postunscaled) postscaled <- exp(logpostunscaled - log_row_sums(logpostunscaled)) ##: wenn eine beobachtung in allen Komonenten extrem ## kleine postunscaled-werte hat, ist exp(-postunscaled) ## numerisch Null, und damit postscaled NaN ## log(rowSums(postunscaled)) ist -Inf ## if (any(is.nan(postscaled))) { index <- which(as.logical(rowSums(is.nan(postscaled)))) postscaled[index,] <- if(nrow(prior)==1) rep(prior, each = length(index)) else prior[index,] postunscaled[index,] <- .Machine$double.xmin } ### check convergence llh.old <- llh llh <- if (is.null(weights)) sum(log_row_sums(logpostunscaled[groupfirst,,drop=FALSE])) else sum(log_row_sums(logpostunscaled[groupfirst,,drop=FALSE])*weights[groupfirst]) if(is.na(llh) | is.infinite(llh)) stop(paste(formatC(iter, width=4), "Log-likelihood:", llh)) if (abs(llh-llh.old)/(abs(llh)+0.1) < control@tolerance){ if(control@verbose>0){ printIter(iter, llh) cat("converged\n") } converged <- TRUE break } if (control@classify=="random") { if (llh.max < llh) { components.max <- components prior.max <- prior postscaled.max <- postscaled postunscaled.max <- postunscaled llh.max <- llh } } if(control@verbose && (iter%%control@verbose==0)) printIter(iter, llh) } ### Construct return object if (control@classify=="random") { components <- components.max prior <- prior.max postscaled <- postscaled.max postunscaled <- postunscaled.max llh <- llh.max iter <- control@iter.max - iter.rm } components <- lapply(seq_len(k), function(i) lapply(components, function(x) x[[i]])) names(components) <- paste("Comp", seq_len(k), sep=".") cluster <- max.col(postscaled) size <- if (is.null(weights)) tabulate(cluster, nbins=k) else tabulate(rep(cluster, weights), nbins=k) names(size) <- seq_len(k) concomitant <- FLXfillConcomitant(concomitant, postscaled[groupfirst,,drop=FALSE], weights[groupfirst]) df <- concomitant@df(concomitant@x, k) + sum(sapply(components, sapply, slot, "df")) control@nrep <- 1 prior <- if (is.null(weights)) colMeans(postscaled[groupfirst,,drop=FALSE]) else colSums(postscaled[groupfirst,,drop=FALSE] * weights[groupfirst])/sum(weights[groupfirst]) retval <- new("flexmix", model=model, prior=prior, posterior=list(scaled=postscaled, unscaled=postunscaled), weights = weights, iter=iter, cluster=cluster, size = size, logLik=llh, components=components, concomitant=concomitant, control=control, df=df, group=group, k=as(k, "integer"), converged=converged) retval }) 
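## The helper .FLXgetOK() below turns the matrix of scaled posteriors into
## the weights used in the next M-step, depending on control@classify:
## "weighted" keeps the posteriors (multiplied by the case weights if
## present), "CEM"/"hard" assigns each observation to its maximum-posterior
## component, and "SEM"/"random" draws the assignment at random from the
## posterior probabilities.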
###********************************************************** .FLXgetOK = function(p, control, weights){ n = ncol(p) N = seq_len(n) if (is.null(weights)) { if (control@classify == "weighted") return(p) else { z = matrix(FALSE, nrow = nrow(p), ncol = n) if(control@classify %in% c("CEM", "hard")) m = max.col(p) else if(control@classify %in% c("SEM", "random")) m = apply(p, 1, function(x) sample(N, size = 1, prob = x)) else stop("Unknown classification method") z[cbind(seq_len(nrow(p)), m)] = TRUE } }else { if(control@classify=="weighted") z <- p * weights else{ z = matrix(FALSE, nrow=nrow(p), ncol=n) if(control@classify %in% c("CEM", "hard")) { m = max.col(p) z[cbind(seq_len(nrow(p)), m)] = TRUE z <- z * weights } else if(control@classify %in% c("SEM", "random")) z = t(sapply(seq_len(nrow(p)), function(i) table(factor(sample(N, size=weights[i], prob=p[i,], replace=TRUE), N)))) else stop("Unknown classification method") } } z } ###********************************************************** RemoveGrouping <- function(formula) { lf <- length(formula) formula1 <- formula if(length(formula[[lf]])>1) { if (deparse(formula[[lf]][[1]]) == "|"){ formula1[[lf]] <- formula[[lf]][[2]] } else if (deparse(formula[[lf]][[1]]) == "("){ form <- formula[[lf]][[2]] if (length(form) == 3 && form[[1]] == "|") formula1[[lf]] <- form[[2]] } } formula1 } .FLXgetGroupingVar <- function(x) { lf <- length(x) while (lf > 1) { x <- x[[lf]] lf <- length(x) } x } .FLXgetGrouping <- function(formula, data) { group <- factor(integer(0)) formula1 <- RemoveGrouping(formula) if (!identical(formula1, formula)) group <- factor(eval(.FLXgetGroupingVar(formula), data)) return(list(group=group, formula=formula1)) } setMethod("FLXgetModelmatrix", signature(model="FLXM"), function(model, data, formula, lhs=TRUE, ...) { formula <- RemoveGrouping(formula) if (length(grep("\\|", deparse(model@formula)))) stop("no grouping variable allowed in the model") if(is.null(model@formula)) model@formula = formula ## model@fullformula = update.formula(formula, model@formula) ## : ist das der richtige weg, wenn ein punkt in beiden ## formeln ist? model@fullformula = update(terms(formula, data=data), model@formula) ## if (lhs) { mf <- if (is.null(model@terms)) model.frame(model@fullformula, data=data, na.action = NULL) else model.frame(model@terms, data=data, na.action = NULL, xlev = model@xlevels) model@terms <- attr(mf, "terms") response <- as.matrix(model.response(mf)) model@y <- model@preproc.y(response) } else { mt1 <- if (is.null(model@terms)) terms(model@fullformula, data=data) else model@terms mf <- model.frame(delete.response(mt1), data=data, na.action = NULL, xlev = model@xlevels) model@terms<- attr(mf, "terms") ## : warum war das da??? ## attr(mt, "intercept") <- attr(mt1, "intercept") ## } X <- model.matrix(model@terms, data=mf) model@contrasts <- attr(X, "contrasts") model@x <- model@preproc.x(X) model@xlevels <- .getXlevels(model@terms, mf) model }) ## groupfirst: for grouped observation we need to be able to use ## the posterior of each group, but for computational simplicity ## post(un)scaled has N rows (with mutiple identical rows for each ## group). postscaled[groupfirst,] extracts posteriors of each ## group ordered as the appear in the data set. 
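## Illustrative example: for group = factor(c("a", "a", "b", "b", "b")),
## groupFirst() flags observations 1 and 3, and groupPosteriors() replaces
## each row by the column-wise sum over its group, so that all members of a
## group share the same grouped value.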
groupFirst <- function(x) !duplicated(x) ## if we have a group variable, set the posterior to the product ## of all density values for that group (=sum in logarithm) groupPosteriors <- function(x, group) { if (length(group) > 0) { group <- as.integer(group) x.by.group <- matrix(unname(apply(x, 2, tapply, group, sum)), ncol = ncol(x)) x <- x.by.group[group,, drop = FALSE] } x } ungroupPriors <- function(x, group, groupfirst) { if (!length(group)) group <- seq_along(groupfirst) if (nrow(x) >= length(group[groupfirst])) { x <- x[order(as.integer(group[groupfirst])),,drop=FALSE] x <- x[as.integer(group),,drop=FALSE] } x } setMethod("allweighted", signature(model = "list", control = "ANY", weights = "ANY"), function(model, control, weights) { allweighted <- all(sapply(model, function(x) allweighted(x, control, weights))) if(allweighted){ if(control@classify=="auto") control@classify <- "weighted" } else{ if(control@classify=="auto") control@classify <- "hard" else if (control@classify=="weighted") { warning("only hard classification supported for the modeldrivers") control@classify <- "hard" } if(!is.null(weights)) stop("it is not possible to specify weights for models without weighted ML estimation") } control }) setMethod("allweighted", signature(model = "FLXM", control = "ANY", weights = "ANY"), function(model, control, weights) { model@weighted }) initPosteriors <- function(k, cluster, N, groups) { if(is(cluster, "matrix")){ postunscaled <- cluster if (!is.null(k)) if (k != ncol(postunscaled)) stop("specified k does not match the number of columns of cluster") } else{ if(is.null(cluster)){ if(is.null(k)) stop("either k or cluster must be specified") else cluster <- ungroupPriors(as.matrix(sample(seq_len(k), size = sum(groups$groupfirst), replace=TRUE)), groups$group, groups$groupfirst) } else{ cluster <- as(cluster, "integer") if (!is.null(k)) if (k != max(cluster)) stop("specified k does not match the values in cluster") k <- max(cluster) } postunscaled <- matrix(0.1, nrow=N, ncol=k) for(K in seq_len(k)){ postunscaled[cluster==K, K] <- 0.9 } } postunscaled } ###********************************************************** setMethod("predict", signature(object="FLXdist"), function(object, newdata=list(), aggregate=FALSE, ...){ if (missing(newdata)) return(fitted(object, aggregate=aggregate, drop=FALSE)) x = list() for(m in seq_along(object@model)) { comp <- lapply(object@components, "[[", m) x[[m]] <- predict(object@model[[m]], newdata, comp, ...) 
} if (aggregate) { prior_weights <- prior(object, newdata) z <- lapply(x, function(z) matrix(rowSums(do.call("cbind", z) * prior_weights), nrow = nrow(z[[1]]))) } else { z <- list() for (k in seq_len(object@k)) { z[[k]] <- do.call("cbind", lapply(x, "[[", k)) } names(z) <- paste("Comp", seq_len(object@k), sep=".") } z }) ###********************************************************** setMethod("parameters", signature(object="FLXdist"), function(object, component=NULL, model=NULL, which = c("model", "concomitant"), simplify=TRUE, drop=TRUE) { which <- match.arg(which) if (is.null(component)) component <- seq_len(object@k) if (is.null(model)) model <- seq_along(object@model) if (which == "model") { if (simplify) { parameters <- sapply(model, function(m) sapply(object@components[component], function(x) unlist(x[[m]]@parameters), simplify=TRUE), simplify = FALSE) } else { parameters <- sapply(model, function(m) sapply(object@components[component], function(x) x[[m]]@parameters, simplify=FALSE), simplify = FALSE) } if (drop) { if (length(component) == 1 && !simplify) parameters <- lapply(parameters, "[[", 1) if (length(model) == 1) parameters <- parameters[[1]] } } else { parameters <- object@concomitant@coef[,component,drop=FALSE] } parameters }) setMethod("prior", signature(object="FLXdist"), function(object, newdata, ...) { if (missing(newdata)) prior <- object@prior else { groups <- .FLXgetGrouping(object@formula, newdata) nobs <- if (is(newdata, "data.frame")) nrow(newdata) else min(sapply(newdata, function(x) { if (is.null(nrow(x))) length(x) else nrow(x) })) group <- if (length(groups$group)) groups$group else factor(seq_len(nobs)) object@concomitant <- FLXgetModelmatrix(object@concomitant, data = newdata, groups = list(group=group, groupfirst = groupFirst(group))) prior <- determinePrior(object@prior, object@concomitant, group)[as.integer(group),] } prior }) setMethod("posterior", signature(object="flexmix", newdata="missing"), function(object, newdata, unscaled = FALSE, ...) { if (unscaled) return(object@posterior$unscaled) else return(object@posterior$scaled) }) setMethod("posterior", signature(object="FLXdist", newdata="listOrdata.frame"), function(object, newdata, unscaled=FALSE,...) { comp <- lapply(object@components, "[[", 1) postunscaled <- posterior(object@model[[1]], newdata, comp, ...) for (m in seq_along(object@model)[-1]) { comp <- lapply(object@components, "[[", m) postunscaled <- postunscaled + posterior(object@model[[m]], newdata, comp, ...) } groups <- .FLXgetGrouping(object@formula, newdata) prior <- prior(object, newdata = newdata) if(length(groups$group)>0) postunscaled <- groupPosteriors(postunscaled, groups$group) postunscaled <- postunscaled + log(prior) if (unscaled) return(exp(postunscaled)) else return(exp(postunscaled - log_row_sums(postunscaled))) }) setMethod("posterior", signature(object="FLXM", newdata="listOrdata.frame"), function(object, newdata, components, ...) { object <- FLXgetModelmatrix(object, newdata, object@fullformula, lhs = TRUE) FLXdeterminePostunscaled(object, components, ...) }) setMethod("clusters", signature(object="flexmix", newdata="missing"), function(object, newdata, ...) { object@cluster }) setMethod("clusters", signature(object="FLXdist", newdata="ANY"), function(object, newdata, ...) 
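## Hard assignment: each observation in newdata is allocated to the
## component with the largest posterior probability.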
{ max.col(posterior(object, newdata, ...)) }) ###********************************************************** setMethod("summary", "flexmix", function(object, eps=1e-4, ...){ z <- new("summary.flexmix", call = object@call, AIC = AIC(object), BIC = BIC(object), logLik = logLik(object)) TAB <- data.frame(prior=object@prior, size=object@size) rownames(TAB) <- paste("Comp.", seq_len(nrow(TAB)), sep="") TAB[["post>0"]] <- if (is.null(object@weights)) colSums(object@posterior$scaled > eps) else colSums((object@posterior$scaled > eps) * object@weights) TAB[["ratio"]] <- TAB[["size"]]/TAB[["post>0"]] z@comptab = TAB z }) ###********************************************************** flexmix/R/flxmcsparse.R0000644000176200001440000000225214404637304014607 0ustar liggesuserssetClass("FLXMCsparse", contains = "FLXM") as.data.frame.simple_triplet_matrix <- function(x, ...) { as.data.frame.model.matrix(x, ...) } setMethod("FLXgetModelmatrix", signature(model = "FLXMCsparse"), function(model, data, formula, lhs=TRUE, ...) { formula <- RemoveGrouping(formula) if (length(grep("\\|", deparse(model@formula)))) stop("no grouping variable allowed in the model") if(is.null(model@formula)) model@formula <- formula model@fullformula <- update(terms(formula, data = data), model@formula) fullformula <- terms(model@fullformula, data = data) model@terms <- attr(fullformula, "terms") if (lhs) { env <- environment(fullformula) vars <- attr(fullformula, "variables") varnames <- vapply(vars, function(x) paste(deparse(x, backtick = FALSE), collapse = " "), " ")[-1L] variables <- eval(vars, data, env) resp <- attr(fullformula, "response") response <- variables[[resp]] model@y <- model@preproc.y(response) model@x <- matrix(nrow = nrow(model@y), ncol = 0) } else { model@x <- matrix(nrow = nrow(as.data.frame(data)), ncol = 0) } model }) flexmix/R/ziglm.R0000644000176200001440000000405314404637304013403 0ustar liggesusers# # Copyright (C) 2004-2016 Friedrich Leisch and Bettina Gruen # $Id: ziglm.R 5079 2016-01-31 12:21:12Z gruen $ # setClass("FLXMRziglm", contains = "FLXMRglm") FLXMRziglm <- function(formula = . ~ ., family = c("binomial", "poisson"), ...) { family <- match.arg(family) new("FLXMRziglm", FLXMRglm(formula, family, ...), name = paste("FLXMRziglm", family, sep=":")) } setMethod("FLXgetModelmatrix", signature(model="FLXMRziglm"), function(model, data, formula, lhs=TRUE, ...) { model <- callNextMethod(model, data, formula, lhs) if (attr(terms(model@fullformula), "intercept") == 0) stop("please include an intercept") model }) setMethod("FLXremoveComponent", signature(model = "FLXMRziglm"), function(model, nok, ...) if (1 %in% nok) as(model, "FLXMRglm") else model) setMethod("FLXmstep", signature(model = "FLXMRziglm"), function(model, weights, components, ...) { coef <- c(-Inf, rep(0, ncol(model@x)-1)) names(coef) <- colnames(model@x) comp.1 <- model@defineComponent( list(coef = coef, df = 0, offset = NULL, family = model@family)) c(list(comp.1), FLXmstep(as(model, "FLXMRglm"), weights[, -1, drop=FALSE], components[-1])) }) setMethod("FLXgetDesign", signature(object = "FLXMRziglm"), function(object, components) rbind(0, FLXgetDesign(as(object, "FLXMRglm"), components[-1]))) setMethod("FLXreplaceParameters", signature(object="FLXMRziglm"), function(object, components, parms) c(components[[1]], FLXreplaceParameters(as(object, "FLXMRglm"), components[-1], parms))) setMethod("FLXgradlogLikfun", signature(object="FLXMRziglm"), function(object, components, weights, ...) 
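## The zero-inflation component (component 1) has no free parameters, so the
## gradient is obtained from the FLXMRglm method for the remaining
## components only.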
FLXgradlogLikfun(as(object, "FLXMRglm"), components[-1], weights[,-1,drop=FALSE])) setMethod("refit_optim", signature(object = "FLXMRziglm"), function(object, components, ...) { x <- refit_optim(as(object, "FLXMRglm"), components[-1], ...) names(x) <- paste("Comp", 1 + seq_along(x), sep = ".") x }) flexmix/R/z.R0000644000176200001440000000063114404637304012530 0ustar liggesusers# # Copyright (C) 2004-2016 Friedrich Leisch and Bettina Gruen # $Id: z.R 5079 2016-01-31 12:21:12Z gruen $ # ###********************************************************** ## Backward compatibility ## component model driver FLXglm <- FLXMRglm FLXglmFix <- FLXMRglmfix FLXmclust <- FLXMCmvnorm FLXbclust <- FLXMCmvbinary ## concomitant model driver FLXmultinom <- FLXPmultinom FLXconstant <- FLXPconstant flexmix/R/models.R0000644000176200001440000002641214404637304013547 0ustar liggesusers# # Copyright (C) 2004-2016 Friedrich Leisch and Bettina Gruen # $Id: models.R 5079 2016-01-31 12:21:12Z gruen $ # FLXMRglm <- function(formula=.~., family=c("gaussian", "binomial", "poisson", "Gamma"), offset=NULL) { family <- match.arg(family) glmrefit <- function(x, y, w) { fit <- c(glm.fit(x, y, weights=w, offset=offset, family=get(family, mode="function")()), list(call = sys.call(), offset = offset, control = eval(formals(glm.fit)$control), method = "weighted.glm.fit")) fit$df.null <- sum(w) + fit$df.null - fit$df.residual - fit$rank fit$df.residual <- sum(w) - fit$rank fit$x <- x fit } z <- new("FLXMRglm", weighted=TRUE, formula=formula, name=paste("FLXMRglm", family, sep=":"), offset = offset, family=family, refit=glmrefit) z@preproc.y <- function(x){ if (ncol(x) > 1) stop(paste("for the", family, "family y must be univariate")) x } if(family=="gaussian"){ z@defineComponent <- function(para) { predict <- function(x, ...) { dotarg = list(...) if("offset" %in% names(dotarg)) offset <- dotarg$offset p <- x %*% para$coef if (!is.null(offset)) p <- p + offset p } logLik <- function(x, y, ...) dnorm(y, mean=predict(x, ...), sd=para$sigma, log=TRUE) new("FLXcomponent", parameters=list(coef=para$coef, sigma=para$sigma), logLik=logLik, predict=predict, df=para$df) } z@fit <- function(x, y, w, component){ fit <- lm.wfit(x, y, w=w, offset=offset) z@defineComponent(para = list(coef = coef(fit), df = ncol(x)+1, sigma = sqrt(sum(fit$weights * fit$residuals^2 / mean(fit$weights))/ (nrow(x)-fit$rank)))) } } else if(family=="binomial"){ z@preproc.y <- function(x){ if (ncol(x) != 2) stop("for the binomial family, y must be a 2 column matrix\n", "where col 1 is no. successes and col 2 is no. failures") if (any(x < 0)) stop("negative values are not allowed for the binomial family") x } z@defineComponent <- function(para) { predict <- function(x, ...) { dotarg = list(...) if("offset" %in% names(dotarg)) offset <- dotarg$offset p <- x %*% para$coef if (!is.null(offset)) p <- p + offset get(family, mode = "function")()$linkinv(p) } logLik <- function(x, y, ...) dbinom(y[,1], size=rowSums(y), prob=predict(x, ...), log=TRUE) new("FLXcomponent", parameters=list(coef=para$coef), logLik=logLik, predict=predict, df=para$df) } z@fit <- function(x, y, w, component){ fit <- glm.fit(x, y, weights=w, family=binomial(), offset=offset, start=component$coef) z@defineComponent(para = list(coef = coef(fit), df = ncol(x))) } } else if(family=="poisson"){ z@defineComponent <- function(para) { predict <- function(x, ...) { dotarg = list(...) 
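        ## an offset supplied at prediction time takes precedence over the
        ## offset stored in the model driver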
if("offset" %in% names(dotarg)) offset <- dotarg$offset p <- x %*% para$coef if (!is.null(offset)) p <- p + offset get(family, mode = "function")()$linkinv(p) } logLik <- function(x, y, ...) dpois(y, lambda=predict(x, ...), log=TRUE) new("FLXcomponent", parameters=list(coef=para$coef), logLik=logLik, predict=predict, df=para$df) } z@fit <- function(x, y, w, component){ fit <- glm.fit(x, y, weights=w, family=poisson(), offset=offset, start=component$coef) z@defineComponent(para = list(coef = coef(fit), df = ncol(x))) } } else if(family=="Gamma"){ z@defineComponent <- function(para) { predict <- function(x, ...) { dotarg = list(...) if("offset" %in% names(dotarg)) offset <- dotarg$offset p <- x %*% para$coef if (!is.null(offset)) p <- p + offset get(family, mode = "function")()$linkinv(p) } logLik <- function(x, y, ...) dgamma(y, shape = para$shape, scale=predict(x, ...)/para$shape, log=TRUE) new("FLXcomponent", parameters = list(coef = para$coef, shape = para$shape), predict = predict, logLik = logLik, df = para$df) } z@fit <- function(x, y, w, component){ fit <- glm.fit(x, y, weights=w, family=Gamma(), offset=offset, start=component$coef) z@defineComponent(para = list(coef = coef(fit), df = ncol(x)+1, shape = sum(fit$prior.weights)/fit$deviance)) } } else stop(paste("Unknown family", family)) z } ###********************************************************** FLXMCmvnorm <- function(formula=.~., diagonal=TRUE) { z <- new("FLXMC", weighted=TRUE, formula=formula, dist = "mvnorm", name="model-based Gaussian clustering") z@defineComponent <- function(para) { logLik <- function(x, y) mvtnorm::dmvnorm(y, mean=para$center, sigma=para$cov, log=TRUE) predict <- function(x, ...) matrix(para$center, nrow=nrow(x), ncol=length(para$center), byrow=TRUE) new("FLXcomponent", parameters=list(center = para$center, cov = para$cov), df=para$df, logLik=logLik, predict=predict) } z@fit <- function(x, y, w, ...){ para <- cov.wt(y, wt=w)[c("center","cov")] para$df <- (3*ncol(y) + ncol(y)^2)/2 if(diagonal){ para$cov <- diag(diag(para$cov)) para$df <- 2*ncol(y) } z@defineComponent(para) } z } FLXMCnorm1 <- function(formula=.~.) { z <- new("FLXMC", weighted=TRUE, formula=formula, dist = "mvnorm", name="model-based univariate Gaussian clustering") z@defineComponent <- function(para) { logLik <- function(x, y) dnorm(y, mean=para$center, sd=sqrt(para$cov), log=TRUE) predict <- function(x, ...) matrix(para$center, nrow=nrow(x), ncol=1, byrow=TRUE) new("FLXcomponent", parameters=list(mean = as.vector(para$center), sd = as.vector(sqrt(para$cov))), df=para$df, logLik=logLik, predict=predict) } z@fit <- function(x, y, w, ...){ para <- cov.wt(as.matrix(y), wt=w)[c("center","cov")] z@defineComponent(c(para, list(df = 2))) } z } ###********************************************************** FLXMCmvbinary <- function(formula=.~., truncated = FALSE) { if (truncated) return(MCmvbinary_truncated(formula)) else return(MCmvbinary(formula)) } MCmvbinary <- function(formula=.~.) 
{ z <- new("FLXMC", weighted=TRUE, formula=formula, dist = "mvbinary", name="model-based binary clustering") ## make sure that y is binary z@preproc.y <- function(x){ storage.mode(x) <- "logical" storage.mode(x) <- "integer" x } z@defineComponent <- function(para) { predict <- function(x, ...){ matrix(para$center, nrow=nrow(x), ncol=length(para$center), byrow=TRUE) } logLik <- function(x, y){ p <- matrix(para$center, nrow=nrow(x), ncol=length(para$center), byrow=TRUE) rowSums(log(y*p+(1-y)*(1-p))) } new("FLXcomponent", parameters=list(center=para$center), df=para$df, logLik=logLik, predict=predict) } z@fit <- function(x, y, w, ...) z@defineComponent(list(center = colSums(w*y)/sum(w), df = ncol(y))) z } ###********************************************************** binary_truncated <- function(y, w, maxit = 200, epsilon = .Machine$double.eps) { r_k <- colSums(y*w)/sum(w) r_0 <- 0 llh.old <- -Inf for (i in seq_len(maxit)) { p <- r_k/(1+r_0) llh <- sum((r_k*log(p))[r_k > 0])+ sum(((1 - r_k + r_0) * log(1-p))[(1-r_k+r_0) > 0]) if (abs(llh - llh.old)/(abs(llh) + 0.1) < epsilon) break llh.old <- llh prod_p <- prod(1-p) r_0 <- prod_p/(1-prod_p) } p } MCmvbinary_truncated <- function(formula=.~.) { z <- MCmvbinary(formula=formula) z@defineComponent <- function(para) { predict <- function(x, ...) { matrix(para$center, nrow = nrow(x), ncol = length(para$center), byrow = TRUE) } logLik <- function(x, y) { p <- matrix(para$center, nrow = nrow(x), ncol = length(para$center), byrow = TRUE) rowSums(log(y * p + (1 - y) * (1 - p))) - log(1 - prod(1-para$center)) } new("FLXcomponent", parameters = list(center = para$center), df = para$df, logLik = logLik, predict = predict) } z@fit <- function(x, y, w, ...){ z@defineComponent(list(center = binary_truncated(y, w), df = ncol(y))) } z } ###********************************************************** setClass("FLXMCmvcombi", representation(binary = "vector"), contains = "FLXMC") FLXMCmvcombi <- function(formula=.~.) { z <- new("FLXMCmvcombi", weighted=TRUE, formula=formula, dist = "mvcombi", name="model-based binary-Gaussian clustering") z@defineComponent <- function(para) { predict <- function(x, ...){ matrix(para$center, nrow=nrow(x), ncol=length(para$center), byrow=TRUE) } logLik <- function(x, y){ if(any(para$binary)){ p <- matrix(para$center[para$binary], nrow=nrow(x), ncol=sum(para$binary), byrow=TRUE) z <- rowSums(log(y[,para$binary,drop=FALSE]*p + (1-y[,para$binary,drop=FALSE])*(1-p))) } else z <- rep(0, nrow(x)) if(!all(para$binary)){ if(sum(!para$binary)==1) z <- z + dnorm(y[,!para$binary], mean=para$center[!para$binary], sd=sqrt(para$var), log=TRUE) else z <- z + mvtnorm::dmvnorm(y[,!para$binary,drop=FALSE], mean=para$center[!para$binary], sigma=diag(para$var), log=TRUE) } z } new("FLXcomponent", parameters=list(center=para$center, var=para$var), df=para$df, logLik=logLik, predict=predict) } z@fit <- function(x, y, w, binary, ...){ para <- cov.wt(y, wt=w)[c("center","cov")] para <- list(center = para$center, var = diag(para$cov)[!binary], df = ncol(y) + sum(!binary), binary = binary) z@defineComponent(para) } z } setMethod("FLXgetModelmatrix", signature(model="FLXMCmvcombi"), function(model, data, formula, lhs=TRUE, ...) 
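## In addition to the default model matrix set-up, record which response
## columns only take the values 0/1; these are treated as binary and the
## remaining columns as independent Gaussian variables.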
{ model <- callNextMethod(model, data, formula, lhs) model@binary <- apply(model@y, 2, function(z) all(unique(z) %in% c(0,1))) model }) setMethod("FLXmstep", signature(model = "FLXMCmvcombi"), function(model, weights, components) { return(sapply(seq_len(ncol(weights)), function(k) model@fit(model@x, model@y, weights[,k], model@binary))) }) flexmix/R/glmFix.R0000644000176200001440000001622614404637304013514 0ustar liggesusers# # Copyright (C) 2004-2016 Friedrich Leisch and Bettina Gruen # $Id: glmFix.R 5156 2019-02-12 08:11:16Z gruen $ # FLXMRglmfix <- function(formula=.~., fixed=~0, varFix = FALSE, nested = NULL, family=c("gaussian", "binomial", "poisson", "Gamma"), offset=NULL) { family <- match.arg(family) nested <- as(nested, "FLXnested") if (length(fixed) == 3) stop("no left hand side allowed for fixed") z <- new("FLXMRglmfix", FLXMRglm(formula, family, offset), fixed=fixed, name=paste("FLXMRglmfix", family, sep=":"), nestedformula=nested, variance = varFix) if(family=="gaussian"){ z@fit <- function(x, y, w, incidence, variance, ...){ fit <- lm.wfit(x, y, w=w, offset=offset) k <- nrow(incidence) n <- nrow(x)/k sigma <- vector(length=k) cumVar <- cumsum(c(0, variance)) for (i in seq_along(variance)) { ind <- cumVar[i]*n + seq_len(n*variance[i]) sigma[cumVar[i] + seq_len(variance[i])] <- sqrt(sum(fit$weights[ind] * fit$residuals[ind]^2 / mean(fit$weights[ind]))/ (length(ind) - sum(incidence[i,]))) } fit <- fit[c("coefficients")] coefs <- coef(fit) names(coefs) <- colnames(incidence) df <- rowSums(incidence/rep(colSums(incidence), each = nrow(incidence))) + rep(1/variance, variance) lapply(seq_len(k), function(K) z@defineComponent( list(coef = coefs[as.logical(incidence[K, ])], sigma = sigma[K], df = df[K]))) } } else if(family=="binomial"){ z@fit <- function(x, y, w, incidence, ...){ fit <- glm.fit(x, y, weights=w, family=binomial(), offset=offset) fit <- fit[c("coefficients","family")] k <- nrow(incidence) coefs <- coef(fit) names(coefs) <- colnames(incidence) df <- rowSums(incidence/rep(colSums(incidence), each = nrow(incidence))) lapply(seq_len(k), function(K) z@defineComponent( list(coef = coefs[as.logical(incidence[K, ])], df = df[K]))) } } else if(family=="poisson"){ z@fit <- function(x, y, w, incidence, ...){ fit <- glm.fit(x, y, weights=w, family=poisson(), offset=offset) fit <- fit[c("coefficients","family")] k <- nrow(incidence) coefs <- coef(fit) names(coefs) <- colnames(incidence) df <- rowSums(incidence/rep(colSums(incidence), each = nrow(incidence))) lapply(seq_len(k), function(K) z@defineComponent( list(coef = coefs[as.logical(incidence[K, ])], df = df[K]))) } } else if(family=="Gamma"){ z@fit <- function(x, y, w, incidence, ...){ fit <- glm.fit(x, y, weights=w, family=Gamma(), offset=offset) shape <- sum(fit$prior.weights)/fit$deviance fit <- fit[c("coefficients","family")] k <- nrow(incidence) coefs <- coef(fit) names(coefs) <- colnames(incidence) df <- rowSums(incidence/rep(colSums(incidence), each = nrow(incidence))) lapply(seq_len(k), function(K) z@defineComponent( list(coef = coefs[as.logical(incidence[K, ])], df = df[K], shape = shape))) } } else stop(paste("Unknown family", family)) z } ###********************************************************** setMethod("refit_mstep", signature(object="FLXMRglmfix", newdata="missing"), function(object, newdata, weights, ...) 
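## Refit the component-specific GLMs separately with posterior weights,
## using for each component only the columns of the design matrix which are
## active for that component.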
{ warning("Separate regression models are fitted using posterior weights.") lapply(seq_len(ncol(weights)), function(k) { x <- object@x[object@segment[, k], as.logical(object@design[k,]), drop = FALSE] colnames(x) <- colnames(object@design)[as.logical(object@design[k,])] y <- object@y[object@segment[, k],, drop = FALSE] fit <- object@refit(x, y, weights[,k], ...) fit <- c(fit, list(formula = object@fullformula, terms = object@terms, contrasts = object@contrasts, xlevels = object@xlevels)) class(fit) <- c("glm", "lm") fit }) }) ###********************************************************** setMethod("fitted", signature(object="FLXMRglmfix"), function(object, components, ...) { N <- nrow(object@x)/length(components) z <- list() for(n in seq_along(components)){ x <- object@x[(n-1)*N + seq_len(N), as.logical(object@design[n,]), drop=FALSE] z[[n]] <- list(components[[n]]@predict(x)) } z }) ###********************************************************** setMethod("predict", signature(object="FLXMRglmfix"), function(object, newdata, components, ...) { model <- FLXgetModelmatrix(object, newdata, object@fullformula, lhs=FALSE) k <- sum(object@nestedformula@k) N <- nrow(model@x)/k z <- list() for (m in seq_len(k)) { z[[m]] <- components[[m]]@predict(model@x[model@segment[,m], as.logical(model@design[m,]), drop=FALSE], ...) } z }) ###********************************************************** setMethod("FLXgetModelmatrix", signature(model="FLXMRfix"), function(model, data, formula, lhs=TRUE, ...) { formula <- RemoveGrouping(formula) if (length(grep("\\|", deparse(model@formula)))) stop("no grouping variable allowed in the model") if(is.null(model@formula)) model@formula <- formula model@fullformula <- update.formula(formula, model@formula) k <- model@nestedformula mm.all <- modelMatrix(model@fullformula, model@fixed, k@formula, data, lhs, model@xlevels) model@design <- modelDesign(mm.all, k) desNested <- if (sum(sapply(mm.all$nested, ncol))) { rbind(ncol(mm.all$fixed) + seq_len(sum(sapply(mm.all$nested, ncol))), unlist(lapply(seq_along(mm.all$nested), function(i) rep(i, ncol(mm.all$nested[[i]]))))) }else matrix(ncol=0, nrow=2) model@x <- cbind(kronecker(rep(1, sum(k@k)), mm.all$fixed), do.call("cbind", lapply(unique(desNested[2,]), function(i) { kronecker(model@design[,desNested[1, desNested[2, ] == i][1]], mm.all$nested[[i]])})), kronecker(diag(sum(k@k)), mm.all$random)) N <- nrow(model@x)/sum(k@k) model@segment <- matrix(FALSE, ncol = sum(k@k), nrow = nrow(model@x)) for (m in seq_len(sum(k@k))) model@segment[(m - 1) * N + seq_len(N), m] <- TRUE if (lhs) { y <- mm.all$response rownames(y) <- NULL response <- as.matrix(apply(y, 2, rep, sum(k@k))) model@y <- model@preproc.y(response) } model@x <- model@preproc.x(model@x) model@xlevels <- mm.all$xlevels model }) flexmix/R/allGenerics.R0000644000176200001440000000621014404637304014506 0ustar liggesusers# # Copyright (C) 2004-2016 Friedrich Leisch and Bettina Gruen # $Id: allGenerics.R 5079 2016-01-31 12:21:12Z gruen $ # setGeneric("flexmix", function(formula, data=list(), k=NULL, cluster=NULL, model=NULL, concomitant=NULL, control=NULL, weights = NULL) standardGeneric("flexmix")) setGeneric("FLXfit", function(model, concomitant, control, postunscaled=NULL, groups, weights) standardGeneric("FLXfit")) ###********************************************************** setGeneric("FLXgetModelmatrix", function(model, data, ...) standardGeneric("FLXgetModelmatrix")) setGeneric("FLXfillConcomitant", function(concomitant, ...) 
standardGeneric("FLXfillConcomitant")) ###********************************************************** setGeneric("logLik") setGeneric("clogLik", function(object, ...) standardGeneric("clogLik")) setGeneric("EIC", function(object, ...) standardGeneric("EIC")) ###********************************************************** setGeneric("FLXcheckComponent", function(model, ...) standardGeneric("FLXcheckComponent")) setGeneric("FLXgetK", function(model, ...) standardGeneric("FLXgetK")) setGeneric("FLXgetObs", function(model) standardGeneric("FLXgetObs")) setGeneric("FLXmstep", function(model, ...) standardGeneric("FLXmstep")) setGeneric("FLXremoveComponent", function(model, ...) standardGeneric("FLXremoveComponent")) setGeneric("FLXdeterminePostunscaled", function(model, ...) standardGeneric("FLXdeterminePostunscaled")) setGeneric("FLXgetDesign", function(object, ...) standardGeneric("FLXgetDesign")) setGeneric("FLXreplaceParameters", function(object, ...) standardGeneric("FLXreplaceParameters")) setGeneric("FLXlogLikfun", function(object, ...) standardGeneric("FLXlogLikfun")) setGeneric("FLXgradlogLikfun", function(object, ...) standardGeneric("FLXgradlogLikfun")) setGeneric("VarianceCovariance", function(object, ...) standardGeneric("VarianceCovariance")) setGeneric("FLXgetParameters", function(object, ...) standardGeneric("FLXgetParameters")) setGeneric("logLikfun_comp", function(object, ...) standardGeneric("logLikfun_comp")) setGeneric("getPriors", function(object, ...) standardGeneric("getPriors")) setGeneric("existGradient", function(object, ...) standardGeneric("existGradient")) setGeneric("refit_mstep", function(object, newdata, ...) standardGeneric("refit_mstep")) setGeneric("refit_optim", function(object, ...) standardGeneric("refit_optim")) ###********************************************************** setGeneric("group", function(object, ...) standardGeneric("group")) setGeneric("rflexmix", function(object, newdata, ...) standardGeneric("rflexmix")) setGeneric("rFLXM", function(model, components, ...) standardGeneric("rFLXM")) ## just to make sure that some S3 generics are available in S4 setGeneric("fitted", package = "stats") setGeneric("predict", package = "stats") setGeneric("simulate", package = "stats") setGeneric("summary", package = "base") setGeneric("unique", package = "base") setGeneric("allweighted", function(model, control, weights) standardGeneric("allweighted")) flexmix/R/boot.R0000644000176200001440000003401314404637304013223 0ustar liggesuserssetGeneric("boot", function(object, ...) standardGeneric("boot")) setGeneric("LR_test", function(object, ...) 
standardGeneric("LR_test")) setClass("FLXboot", representation(call="call", object="flexmix", parameters="list", concomitant="list", priors="list", logLik="matrix", k="matrix", converged="matrix", models="list", weights="list")) setMethod("show", "FLXboot", function(object) { cat("\nCall:", deparse(object@call,0.75*getOption("width")), sep="\n") }) generate_weights <- function(object) { if(is.null(object@weights) & is(object@model, "FLXMC")) { X <- do.call("cbind", lapply(object@model, function(z) z@y)) x <- apply(X, 1, paste, collapse = "") x <- as.integer(factor(x, unique(x))) object@weights <- as.vector(table(x)) indices_unique <- !duplicated(x) for (i in seq_along(object@model)) { object@model[[i]]@x <- object@model[[i]]@x[indices_unique,,drop=FALSE] object@model[[i]]@y <- object@model[[i]]@y[indices_unique,,drop=FALSE] } object@concomitant@x <- object@concomitant@x[indices_unique,,drop=FALSE] } object } setGeneric("FLXgetNewModelmatrix", function(object, ...) standardGeneric("FLXgetNewModelmatrix")) setMethod("FLXgetNewModelmatrix", "FLXM", function(object, model, indices, groups) { if (length(groups$group) > 0) { obs_groups <- lapply(groups$group[groups$groupfirst][indices], function(x) which(x == groups$group)) indices_grouped <- unlist(obs_groups) } else { indices_grouped <- indices } object@y <- model@y[indices_grouped,,drop=FALSE] object@x <- model@x[indices_grouped,,drop=FALSE] object }) setMethod("FLXgetNewModelmatrix", "FLXMRglmfix", function(object, model, indices, groups) { if (length(groups$group) > 0) { obs_groups <- lapply(groups$group[groups$groupfirst][indices], function(x) which(x == groups$group)) indices_grouped <- unlist(obs_groups) } else { indices_grouped <- indices } object@y <- do.call("rbind", rep(list(model@y[indices_grouped,,drop=FALSE]), sum(model@nestedformula@k))) object@x <- do.call("rbind", lapply(seq_len(sum(model@nestedformula@k)), function(K) model@x[model@segment[,K],,drop=FALSE][indices_grouped,,drop=FALSE])) N <- nrow(object@x)/sum(model@nestedformula@k) object@segment <- matrix(FALSE, ncol = sum(model@nestedformula@k), nrow = nrow(object@x)) for (m in seq_len(sum(model@nestedformula@k))) object@segment[(m - 1) * N + seq_len(N), m] <- TRUE object }) boot_flexmix <- function(object, R, sim = c("ordinary", "empirical", "parametric"), initialize_solution = FALSE, keep_weights = FALSE, keep_groups = TRUE, verbose = 0, control, k, model = FALSE, ...) 
{ sim <- match.arg(sim) if (missing(R)) stop("R needs to be specified") if (!missing(control)) object@control <- do.call("new", c(list(Class = "FLXcontrol", object@control), control)) if (missing(k)) k <- object@k m <- length(object@model) has_weights <- !keep_weights & !is.null(object@weights) if (has_weights) object <- undo_weights(object) if (!keep_groups & length(object@group)) { object@concomitant@x <- object@concomitant@x[as.integer(object@group),,drop = FALSE] object@group <- factor() } groups <- list() groups$group <- object@group groups$groupfirst <- if (length(groups$group) > 0) groupFirst(groups$group) else rep(TRUE, FLXgetObs(object@model[[1]])) concomitant <- parameters <- priors <- models <- weights <- vector("list", R) logLik <- ks <- converged <- matrix(nrow=R, ncol = length(k), dimnames = list(BS = seq_len(R), k = k)) for (iter in seq_len(R)) { new <- object newgroups <- groups if(verbose && !(iter%%verbose)) cat("* ") if (iter > 1) { if (sim == "parametric") { y <- rflexmix(object, ...)$y for (i in seq_len(m)) new@model[[i]]@y <- matrix(as.vector(t(y[[i]])), nrow = nrow(new@model[[i]]@x), ncol = ncol(y[[i]]), byrow = TRUE) } else { n <- sum(groups$groupfirst) indices <- sample(seq_len(n), n, replace = TRUE) if (length(groups$group) > 0) { obs_groups <- lapply(groups$group[groups$groupfirst][indices], function(x) which(x == groups$group)) newgroups$group <- factor(rep(seq_along(obs_groups), sapply(obs_groups, length))) newgroups$groupfirst <- !duplicated(newgroups$group) } for (i in seq_len(m)) { new@model[[i]] <- FLXgetNewModelmatrix(new@model[[i]], object@model[[i]], indices, groups) } new@concomitant@x <- new@concomitant@x[indices,,drop=FALSE] } } if (has_weights & !length(groups$group) > 0) { new <- generate_weights(new) newgroups$groupfirst <- rep(TRUE, FLXgetObs(new@model[[1]])) } parameters[[iter]] <- concomitant[[iter]] <- priors[[iter]] <- list() NREP <- rep(object@control@nrep, length(k)) if (initialize_solution & object@k %in% k) NREP[k == object@k] <- 1L for (K in seq_along(k)) { fit <- new("flexmix", logLik = -Inf) for (nrep in seq_len(NREP[K])) { if (k[K] != object@k | !initialize_solution) { postunscaled <- initPosteriors(k[K], NULL, FLXgetObs(new@model[[1]]), newgroups) } else { postunscaled <- matrix(0, nrow = FLXgetObs(new@model[[1]]), ncol = k[K]) for (i in seq_len(m)) postunscaled <- postunscaled + FLXdeterminePostunscaled(new@model[[i]], lapply(new@components, function(x) x[[i]])) if(length(newgroups$group)>0) postunscaled <- groupPosteriors(postunscaled, newgroups$group) prior <- evalPrior(new@prior, new@concomitant) postunscaled <- if (is(prior, "matrix")) postunscaled + log(prior) else sweep(postunscaled, 2, log(prior), "+") postunscaled <- exp(postunscaled - log_row_sums(postunscaled)) } x <- try(FLXfit(new@model, new@concomitant, new@control, postunscaled, newgroups, weights = new@weights)) if (!is(x, "try-error")) { if(logLik(x) > logLik(fit)) fit <- x } } if (is.finite(logLik(fit))) { parameters[[iter]][paste(k[K])] <- list(parameters(fit, simplify = FALSE, drop = FALSE)) concomitant[[iter]][paste(k[K])] <- list(parameters(fit, which = "concomitant")) priors[[iter]][[paste(k[K])]] <- prior(fit) logLik[iter, paste(k[K])] <- logLik(fit) ks[iter, paste(k[K])] <- fit@k converged[iter, paste(k[K])] <- fit@converged if (model) { models[[iter]] <- fit@model weights[[iter]] <- fit@weights } } else { parameters[[iter]][[paste(k[K])]] <- concomitant[[iter]][[paste(k[K])]] <- priors[[iter]][[paste(k[K])]] <- NULL } } } if(verbose) cat("\n") new("FLXboot", 
call = sys.call(-1), object = object, parameters = parameters, concomitant = concomitant, priors = priors, logLik = logLik, k = ks, converged = converged, models = models, weights = weights) } setMethod("boot", signature(object="flexmix"), boot_flexmix) setMethod("LR_test", signature(object="flexmix"), function(object, R, alternative = c("greater", "less"), control, ...) { alternative <- match.arg(alternative) if (missing(control)) control <- object@control if (object@k == 1 & alternative == "less") stop(paste("alternative", alternative, "only possible for a mixture\n", "with at least two components")) k <- object@k + switch(alternative, greater = 0:1, less = 0:-1) names(k) <- k boot <- boot(object, R, sim = "parametric", k = k, initialize_solution = TRUE, control = control, ...) ok <- apply(boot@k, 1, identical, k) lrts <- 2*apply(boot@logLik[ok,order(k)], 1, diff) STATISTIC <- lrts[1] names(STATISTIC) <- "LRTS" PARAMETER <- length(lrts) names(PARAMETER) <- "BS" RETURN <- list(parameter = PARAMETER, p.value = sum(lrts[1] <= lrts)/length(lrts), alternative = alternative, null.value = object@k, method = "Bootstrap likelihood ratio test", data.name = deparse(substitute(object)), bootstrap.results = boot) class(RETURN) <- "htest" RETURN }) setMethod("parameters", "FLXboot", function(object, k, ...) { if (missing(k)) k <- object@object@k Coefs <- lapply(seq_along(object@parameters), function(i) if (is.na(object@k[i])) NULL else do.call("cbind", c(lapply(seq_len(object@k[i]), function(j) unlist(sapply(seq_along(object@object@model), function(m) FLXgetParameters(as(object@object@model[[m]], "FLXMR"), if (is(object@object@model[[m]]@defineComponent, "expression")) list(eval(object@object@model[[m]]@defineComponent, c(object@parameters[[i]][[paste(k)]][[m]][[j]], list(df = object@object@components[[j]][[m]]@df)))) else { list(object@object@model[[m]]@defineComponent( c(object@parameters[[i]][[paste(k)]][[m]][[j]], list(df = object@object@components[[j]][[m]]@df)))) })))), as.list(rep(NA, k - object@k[i]))))) Coefs <- t(do.call("cbind", Coefs)) colnames(Coefs) <- gsub("Comp.1_", "", colnames(Coefs)) Prior <- t(do.call("cbind", lapply(object@concomitant, function(x) do.call("cbind", c(list(x[[paste(k)]]), as.list(rep(NA, k - ifelse(length(x), ncol(x[[paste(k)]]), k)))))))) cbind(Coefs, Prior) }) setMethod("clusters", signature(object = "FLXboot", newdata = "listOrdata.frame"), function(object, newdata, k, ...) { if (missing(k)) k <- object@object@k lapply(seq_len(length(object@priors)), function(i) { new <- object@object new@prior <- object@priors[[i]][[paste(k)]] new@k <- length(new@prior) new@components <- rep(list(vector("list", length(object@object@model))), length(new@prior)) for (m in seq_along(new@model)) { variables <- c("x", "y", "offset", "family") variables <- variables[variables %in% slotNames(new@model[[m]])] for (var in variables) assign(var, slot(new@model[[m]], var)) for (K in seq_len(object@k[i])) { new@components[[K]][[m]] <- if (is(object@object@model[[m]]@defineComponent, "expression")) eval(object@object@model[[m]]@defineComponent, c(object@parameters[[i]][[paste(k)]][[m]][[K]], list(df = object@object@components[[K]][[m]]@df))) else object@object@model[[m]]@defineComponent( c(object@parameters[[i]][[paste(k)]][[m]][[K]], list(df = object@object@components[[K]][[m]]@df))) } } clusters(new, newdata = newdata)}) }) setMethod("posterior", signature(object = "FLXboot", newdata = "listOrdata.frame"), function(object, newdata, k, ...) 
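## For every bootstrap replicate the component objects are rebuilt from the
## stored parameter estimates and the posterior probabilities are evaluated
## on newdata.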
{ if (missing(k)) k <- object@object@k lapply(seq_len(length(object@priors)), function(i) { new <- object@object new@prior <- object@priors[[i]][[paste(k)]] new@k <- length(new@prior) new@components <- rep(list(vector("list", length(object@object@model))), length(new@prior)) for (m in seq_along(new@model)) { variables <- c("x", "y", "offset", "family") variables <- variables[variables %in% slotNames(new@model[[m]])] for (var in variables) assign(var, slot(new@model[[m]], var)) for (K in seq_len(object@k[i])) { new@components[[K]][[m]] <- if (is(object@object@model[[m]]@defineComponent, "expression")) eval(object@object@model[[m]]@defineComponent, c(object@parameters[[i]][[paste(k)]][[m]][[K]], list(df = object@object@components[[K]][[m]]@df))) else object@object@model[[m]]@defineComponent( c(object@parameters[[i]][[paste(k)]][[m]][[K]], list(df = object@object@components[[K]][[m]]@df))) } } posterior(new, newdata = newdata)}) }) setMethod("predict", signature(object = "FLXboot"), function(object, newdata, k, ...) { if (missing(k)) k <- object@object@k lapply(seq_len(length(object@priors)), function(i) { new <- object@object new@components <- vector("list", object@k[i, paste(k)]) new@components <- lapply(new@components, function(x) vector("list", length(new@model))) for (m in seq_along(new@model)) { variables <- c("x", "y", "offset", "family") variables <- variables[variables %in% slotNames(new@model[[m]])] for (var in variables) assign(var, slot(new@model[[m]], var)) for (K in seq_len(object@k[i, paste(k)])) { new@components[[K]][[m]] <- if (is(object@object@model[[m]]@defineComponent, "expression")) eval(object@object@model[[m]]@defineComponent, c(object@parameters[[i]][[paste(k)]][[m]][[K]], list(df = object@object@components[[1]][[m]]@df))) else object@object@model[[m]]@defineComponent( c(object@parameters[[i]][[paste(k)]][[m]][[K]], list(df = object@object@components[[1]][[m]]@df))) } } predict(new, newdata = newdata, ...)}) }) flexmix/R/lattice.R0000644000176200001440000000746614404637304013721 0ustar liggesusers# # Copyright (C) Deepayan Sarkar # Internal code copied from package lattice for use in flexmix # hist.constructor <- function (x, breaks, include.lowest = TRUE, right = TRUE, ...) { if (is.numeric(breaks) && length(breaks) > 1) hist(as.numeric(x), breaks = breaks, plot = FALSE, include.lowest = include.lowest, right = right) else hist(as.numeric(x), breaks = breaks, right = right, plot = FALSE) } checkArgsAndCall <- function (FUN, args) { if (!("..." 
%in% names(formals(FUN)))) args <- args[intersect(names(args), names(formals(FUN)))] do.call(FUN, args) } formattedTicksAndLabels <- function (x, at = FALSE, used.at = NULL, labels = FALSE, logsc = FALSE, ..., num.limit = NULL, abbreviate = NULL, minlength = 4, format.posixt = NULL, equispaced.log = TRUE) { rng <- if (length(x) == 2) as.numeric(x) else range(as.numeric(x)) if (is.logical(logsc) && logsc) logsc <- 10 have.log <- !is.logical(logsc) if (have.log) logbase <- if (is.numeric(logsc)) logsc else if (logsc == "e") exp(1) else stop("Invalid value of 'log'") logpaste <- if (have.log) paste(as.character(logsc), "^", sep = "") else "" check.overlap <- if (is.logical(at) && is.logical(labels)) TRUE else FALSE if (is.logical(at)) { at <- if (have.log && !equispaced.log) checkArgsAndCall(axisTicks, list(usr = log10(logbase^rng), log = TRUE, axp = NULL, ...)) else checkArgsAndCall(pretty, list(x = x[is.finite(x)], ...)) } else if (have.log && (length(at) > 0)) { if (is.logical(labels)) labels <- as.character(at) at <- log(at, base = logbase) } if (is.logical(labels)) { if (have.log && !equispaced.log) { labels <- as.character(at) at <- log(at, logbase) } else labels <- paste(logpaste, format(at, trim = TRUE), sep = "") } list(at = at, labels = labels, check.overlap = check.overlap, num.limit = rng) } calculateAxisComponents <- function (x, ..., packet.number, packet.list, abbreviate = NULL, minlength = 4) { if (all(is.na(x))) return(list(at = numeric(0), labels = numeric(0), check.overlap = TRUE, num.limit = c(0, 1))) ans <- formattedTicksAndLabels(x, ...) rng <- range(ans$num.limit) ok <- ans$at >= min(rng) & ans$at <= max(rng) ans$at <- ans$at[ok] ans$labels <- ans$labels[ok] if (is.logical(abbreviate) && abbreviate) ans$labels <- abbreviate(ans$labels, minlength) ans } extend.limits <- function (lim, length = 1, axs = "r", prop = if (axs == "i") 0 else lattice.getOption("axis.padding")$numeric) { if (all(is.na(lim))) NA_real_ else if (is.character(lim)) { c(1, length(lim)) + c(-1, 1) * if (axs == "i") 0.5 else lattice.getOption("axis.padding")$factor } else if (length(lim) == 2) { if (lim[1] > lim[2]) { ccall <- match.call() ccall$lim <- rev(lim) ans <- eval.parent(ccall) return(rev(ans)) } if (!missing(length) && !missing(prop)) stop("'length' and 'prop' cannot both be specified") if (length <= 0) stop("'length' must be positive") if (!missing(length)) { prop <- (as.numeric(length) - as.numeric(diff(lim)))/(2 * as.numeric(diff(lim))) } if (lim[1] == lim[2]) lim + 0.5 * c(-length, length) else { d <- diff(as.numeric(lim)) lim + prop * d * c(-1, 1) } } else { print(lim) stop("improper length of 'lim'") } } flexmix/R/utils.R0000644000176200001440000000321714404637304013422 0ustar liggesusers# # Copyright (C) 2004-2016 Friedrich Leisch and Bettina Gruen # $Id: utils.R 5079 2016-01-31 12:21:12Z gruen $ # list2object = function(from, to){ n = names(from) s = slotNames(to) p = pmatch(n, s) if(any(is.na(p))) stop(paste("\nInvalid slot name(s) for class", to, ":", paste(n[is.na(p)], collapse=" "))) names(from) = s[p] do.call("new", c(from, Class=to)) } printIter = function(iter, logLik, label="Log-likelihood") cat(formatC(iter, width=4), label, ":", formatC(logLik, width=12, format="f"),"\n") ## library(colorspace) ## dput(x[c(1,3,5,7,2,4,6,8)]) ## x = hcl(seq(0, 360*7/8, length.out = 8), c=30) LightColors <- c("#F9C3CD", "#D0D4A8", "#9DDDD5", "#D1CCF5", "#EDCAB2", "#AFDCB8", "#ACD7ED", "#EFC4E8") ## x = hcl(seq(0, 360*7/8, length.out = 8), c=100, l=65) FullColors <- c("#FF648A", "#96A100", 
"#00BCA3", "#9885FF", "#DC8400", "#00B430", "#00AEEF", "#F45BE1") ###********************************************************** ## similar defaults to silhouette plots in flexclust unipolarCols <- function(n, hue=0, chr=50, lum = c(55, 90)) { lum <- seq(lum[1], lum[2], length=n) hcl(hue, chr, lum) } bipolarCols <- function(n, hue=c(10, 130), ...) { if(n%%2){ # n odd n2 <- (n-1)/2 c1 <- unipolarCols(n2, hue[1]) c2 <- rev(unipolarCols(n2, hue[2])) return(c(c1, "white", c2)) } else{ # n even n2 <- n/2 c1 <- unipolarCols(n2, hue[1]) c2 <- rev(unipolarCols(n2, hue[2])) return(c(c1, c2)) } } ###********************************************************** flexmix/R/factanal.R0000644000176200001440000000345214404637304014034 0ustar liggesuserssetClass("FLXMCfactanal", contains = "FLXMC") ###********************************************************** FLXMCfactanal <- function(formula=.~., factors = 1, ...) { z <- new("FLXMCfactanal", weighted=TRUE, formula=formula, dist = "mvnorm", name="mixtures of factor analyzers") z@fit <- function(x, y, w, ...){ cov.weighted <- cov.wt(y, wt = w)[c("center","cov")] cov <- cov.weighted$cov; center <- cov.weighted$center fa <- factanal(covmat = cov, factors = factors, ...) Sigma <- fa$loadings %*% t(fa$loadings) + diag(fa$uniquenesses) df <- (factors + 2) * ncol(y) predict <- function(x) matrix(center, nrow=nrow(x), ncol=length(center), byrow=TRUE) logLik <- function(x, y){ sds <- sqrt(diag(cov)) mvtnorm::dmvnorm(y, mean = center, sigma = Sigma * (sds %o% sds), log = TRUE) } new("FLXcomponent", parameters=list(mu = center, variance = diag(cov), loadings = fa$loadings, uniquenesses = fa$uniquenesses), df=df, logLik=logLik, predict=predict) } z } ###********************************************************** setMethod("rFLXM", signature(model = "FLXMCfactanal", components = "FLXcomponent"), function(model, components, class, ...) 
{ FUN <- paste("r", model@dist, sep = "") Sigma <- components@parameters$loadings %*% t(components@parameters$loadings) + diag(components@parameters$uniquenesses) sds <- sqrt(components@parameters$variance) args <- list(n = nrow(model@x), mean = components@parameters$mu, sigma = Sigma * (sds %o% sds)) return(do.call(FUN, args)) }) flexmix/R/initFlexmix.R0000644000176200001440000000355614404637304014570 0ustar liggesuserssetClass("initMethod", representation(step1 = "FLXcontrol", step2 = "FLXcontrol")) initMethod <- function(name = c("tol.em", "cem.em", "sem.em"), step1 = list(tolerance = 10^-2), step2 = list(), control = list(), nrep = 3L) { name <- match.arg(name) z <- new("initMethod", step1 = as(c(step1, control), "FLXcontrol"), step2 = as(c(step2, control), "FLXcontrol")) z@step1@nrep <- as.integer(nrep) z@step2@nrep <- 1L z@step1@classify <- switch(name, cem.em = "CEM", sem.em = "SEM", tol.em = "weighted") z } initFlexmix <- function(..., k, init = list(), control = list(), nrep = 3L, verbose = TRUE, drop = TRUE, unique = FALSE) { MYCALL <- match.call() if (missing(k)) stop("'k' is missing.") if (!missing(control) & is(init, "initMethod")) warning("'control' argument ignored.") init <- do.call("initMethod", c(init, list(control = control, nrep = nrep))) MYCALL1 <- lapply(k, function(K) { MYCALL[["k"]] <- as.numeric(K) MYCALL }) names(MYCALL1) <- paste(k) STEP1 <- stepFlexmix(..., k = k, verbose = verbose, drop = FALSE, unique = FALSE, nrep = init@step1@nrep, control = init@step1) models <- lapply(k, function(K) { if (length(k) > 1 && verbose) cat("* ") new("flexmix", flexmix(..., control = init@step2, cluster = posterior(getModel(STEP1, paste(K)))), k0 = as.integer(K), call = MYCALL1[[paste(K)]]) }) if (length(k) > 1 && verbose) cat("\n") names(models) <- paste(k) if (drop & length(models) == 1) { return(models[[1]]) } else { z <- new("stepFlexmix", models = models, k = as.integer(k), logLiks = STEP1@logLiks, nrep = STEP1@nrep, call = MYCALL) if (unique) z <- unique(z) return(z) } } flexmix/R/plot-FLXboot.R0000644000176200001440000001421014404637304014546 0ustar liggesusersprepanel.parallel.horizontal <- function (x, y, z, horizontal = TRUE, ...) { if (horizontal) list(xlim = extend.limits(c(1, ncol(as.data.frame(z))), prop = 0.03), ylim = c(0, 1), dx = 1, dy = 1) else list(xlim = c(0, 1), ylim = extend.limits(c(1, ncol(as.data.frame(z))), prop = 0.03), dx = 1, dy = 1) } panel.parallel.horizontal <- function (x, y, z, subscripts, groups = NULL, col = superpose.line$col, lwd = superpose.line$lwd, lty = superpose.line$lty, alpha = superpose.line$alpha, common.scale = FALSE, lower = sapply(z, function(x) min(as.numeric(x), na.rm = TRUE)), upper = sapply(z, function(x) max(as.numeric(x), na.rm = TRUE)), horizontal = TRUE, ...) 
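## Panel function for the parallel coordinate plot of bootstrap estimates:
## every row of z is drawn as a sequence of segments across the variables,
## each variable being rescaled to the unit interval between its lower and
## upper limits; 'horizontal' switches between horizontal and vertical
## layout.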
{ superpose.line <- trellis.par.get("superpose.line") reference.line <- trellis.par.get("reference.line") n.r <- ncol(z) n.c <- length(subscripts) if (is.null(groups)) { col <- rep(col, length = n.c) lty <- rep(lty, length = n.c) lwd <- rep(lwd, length = n.c) alpha <- rep(alpha, length = n.c) } else { groups <- as.factor(groups)[subscripts] n.g <- nlevels(groups) gnum <- as.numeric(groups) col <- rep(col, length = n.g)[gnum] lty <- rep(lty, length = n.g)[gnum] lwd <- rep(lwd, length = n.g)[gnum] alpha <- rep(alpha, length = n.g)[gnum] } if (is.function(lower)) lower <- sapply(z, lower) if (is.function(upper)) upper <- sapply(z, upper) if (common.scale) { lower <- min(lower) upper <- max(upper) } lower <- rep(lower, length = n.r) upper <- rep(upper, length = n.r) dif <- upper - lower if (n.r > 1) { if (horizontal) panel.segments(y0 = 0, y1 = 1, x0 = seq_len(n.r), x1 = seq_len(n.r), col = reference.line$col, lwd = reference.line$lwd, lty = reference.line$lty) else panel.segments(x0 = 0, x1 = 1, y0 = seq_len(n.r), y1 = seq_len(n.r), col = reference.line$col, lwd = reference.line$lwd, lty = reference.line$lty) }else return(invisible()) for (i in seq_len(n.r - 1)) { x0 <- (as.numeric(z[subscripts, i]) - lower[i])/dif[i] x1 <- (as.numeric(z[subscripts, i + 1]) - lower[i + 1])/dif[i + 1] if (horizontal) panel.segments(y0 = x0, x0 = i, y1 = x1, x1 = i + 1, col = col, lty = lty, lwd = lwd, alpha = alpha, ...) else panel.segments(x0 = x0, y0 = i, x1 = x1, y1 = i + 1, col = col, lty = lty, lwd = lwd, alpha = alpha, ...) } invisible() } confidence.panel.boot <- function(x, y, z, subscripts, lwd = 1, SD = NULL, ..., lower, upper, range = c(0, 1)) { nc <- ncol(z) if (missing(lower)) lower <- sapply(z, function(x) quantile(x, range[1])) if (missing(upper)) upper <- sapply(z, function(x) quantile(x, range[2])) dif <- upper - lower if (!is.null(SD)) { SD <- lapply(SD, function(x) (x - lower)/dif) for (l in seq_along(SD)) { grid.polygon(y = unit(c(SD[[l]][,1], rev(SD[[l]][,3])), "native"), x = unit(c(seq_len(nc),rev(seq_len(nc))), "native"), gp = gpar(fill = rgb(190/225, 190/225, 190/225, 0.5), col = "darkgrey")) } } panel.parallel.horizontal(x, y, z, subscripts, ..., lower = lower, upper = upper) if (!is.null(SD)) { for (l in seq_along(SD)) { llines(y = SD[[l]][,2], x = seq_len(nc), col="white", lwd=lwd, lty = 1) } } } setMethod("plot", signature(x = "FLXboot", y = "missing"), function(x, y, ordering = NULL, range = c(0, 1), ci = FALSE, varnames = colnames(pars), strip_name = NULL, ...) 
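## Parallel coordinate plot of the bootstrapped parameter estimates: if
## ci = TRUE, pointwise 95% confidence regions derived from refit() of the
## original model are added as grey bands and Gaussian sigma estimates are
## shown on the log scale; 'ordering' can name a parameter used to order
## (and colour) the component lines within each replicate.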
{ k <- x@object@k pars <- parameters(x) if (ci) { x_refit <- refit(x@object) sd <- sqrt(diag(x_refit@vcov)) CI <- x_refit@coef + qnorm(0.975) * cbind(-sd, 0, sd) indices_prior <- grep("alpha$", names(x_refit@coef)) if (length(indices_prior)) { z <- mvtnorm::rmvnorm(10000, x_refit@coef[indices_prior,drop=FALSE], x_refit@vcov[indices_prior,indices_prior,drop=FALSE]) Priors <- t(apply(cbind(1, exp(z))/rowSums(cbind(1, exp(z))), 2, quantile, c(0.025, 0.5, 0.975))) indices <- lapply(seq_len(k), function(i) grep(paste("_Comp.", i, sep = ""), names(x_refit@coef[-indices_prior]))) SD <- lapply(seq_len(k), function(i) rbind(CI[indices[[i]], ], prior = Priors[i,])) } else { indices <- lapply(seq_len(k), function(i) grep(paste("_Comp.", i, sep = ""), names(x_refit@coef))) SD <- lapply(seq_len(k), function(i) CI[indices[[i]], ]) mnrow <- max(sapply(SD, nrow)) SD <- lapply(SD, function(x) if (nrow(x) < mnrow) do.call("rbind", c(list(x), as.list(rep(0, mnrow - nrow(x))))) else x) } if (any("gaussian" %in% sapply(x@object@model, function(x) if (is(x, "FLXMRglm")) x@family else ""))) { i <- grep("sigma$", colnames(pars)) pars[,i] <- log(pars[,i]) colnames(pars)[i] <- "log(sigma)" } } else SD <- NULL range_name <- vector(mode = "character", length=2) range_name[1] <- if (range[1] == 0) "Min" else paste(round(range[1]*100), "%", sep = "") range_name[2] <- if (range[2] == 1) "Max" else paste(round(range[2]*100), "%", sep = "") Ordering <- if (is.null(ordering)) NULL else factor(as.vector(apply(matrix(pars[,ordering], nrow = k), 2, function(x) order(order(x))))) if(is.null(strip_name)) formula = ~ pars else { opt.old <- options(useFancyQuotes = FALSE) on.exit(options(opt.old)) formula <- as.formula(paste("~ pars | ", sQuote(strip_name))) } pars <- na.omit(pars) if (!is.null(attr(pars, "na.action"))) Ordering <- Ordering[-attr(na.omit(pars), "na.action")] parallel.plot <- parallelplot(formula, groups = Ordering, default.scales = list(y = list(at = c(0, 1), labels = range_name), x = list(alternating = FALSE, axs = "i", tck = 0, at = seq_len(ncol(pars)))), range = range, panel = confidence.panel.boot, prepanel = prepanel.parallel.horizontal, SD = SD, ...) 
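## Replace the default coordinate labels by the (possibly user-supplied)
## variable names before returning the trellis object.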
parallel.plot$x.scales$labels <- varnames parallel.plot }) flexmix/R/flxdist.R0000644000176200001440000000554214404637304013742 0ustar liggesusersFLXdist <- function(formula, k = NULL, model=FLXMRglm(), components, concomitant=FLXPconstant()) { mycall <- match.call() if(is(model, "FLXM")) model <- list(model) if (length(k)==1) prior <- rep(1/k, k) else { prior <- k/sum(k) } concomitant@x <- matrix(c(1, rep(0, ncol(concomitant@coef))[-1]), nrow = 1) prior <- as.vector(evalPrior(prior, concomitant)) lf <- length(formula) formula1 <- formula if(length(formula[[lf]])>1 && deparse(formula[[lf]][[1]]) == "|") formula1[[lf]] <- formula[[lf]][[2]] for(n in seq(along.with=model)) { if(is.null(model[[n]]@formula)) model[[n]]@formula <- formula1 else if(length(model[[n]]@formula) == 3 && model[[n]]@formula[[2]] == ".") model[[n]]@formula <- model[[n]]@formula[-2] model[[n]]@fullformula <- update.formula(formula1, model[[n]]@formula) } if (missing(components)) stop("no parameter values specified") if (length(components) != length(prior)) stop("components not specified correctly") comp <- list() for (k in seq(along.with=prior)) { comp[[k]] <- list() if (length(components[[k]]) != length(model)) stop("components not specified correctly") for (n in seq(along.with=model)) { comp[[k]][[n]] <- FLXcomponent(model[[n]], components[[k]][[n]]) } } new("FLXdist", formula=formula, call=mycall, concomitant=concomitant, prior=prior, k=length(prior), model=model, components=comp) } ###********************************************************** setGeneric("FLXcomponent", function(object, ...) standardGeneric("FLXcomponent")) setMethod("FLXcomponent", signature(object="FLXM"), function(object, components, ...) { components$df <- numeric() if (is(object@defineComponent, "expression")) eval(object@defineComponent, components) else object@defineComponent(components) }) #### setMethod("FLXcomponent", signature(object="FLXMRglm"), function(object, components, ...) { components$df <- numeric() offset <- NULL family <- object@family if (is(object@defineComponent, "expression")) eval(object@defineComponent, components) else object@defineComponent(components) }) ###********************************************************** setMethod("show", "FLXdist", function(object){ cat("\nCall:", deparse(object@call,0.75*getOption("width")), sep="\n") cat("\nPriors:\n") names(object@prior) <- paste("Comp.", seq_along(object@prior), sep="") print(object@prior) cat("\n") }) ###********************************************************** evalPrior <- function(prior, concomitant) prior setGeneric("evalPrior", function(prior, concomitant) standardGeneric("evalPrior")) setMethod("evalPrior", signature(concomitant="FLXPmultinom"), function(prior, concomitant) { exps <- exp(concomitant@x %*% concomitant@coef) exps/rowSums(exps) }) flexmix/R/robust.R0000644000176200001440000000421714404637304013601 0ustar liggesusers# # Copyright (C) 2004-2016 Friedrich Leisch and Bettina Gruen # $Id: robust.R 5079 2016-01-31 12:21:12Z gruen $ # ###********************************************************* setClass("FLXMRrobglm", representation(bgw="logical"), prototype(bgw=FALSE), contains = "FLXMRglm") FLXMRrobglm <- function(formula = . ~ ., family=c("gaussian", "poisson"), bgw=FALSE, ...) { family <- match.arg(family) new("FLXMRrobglm", FLXMRglm(formula, family, ...), name = paste("FLXMRrobglm", family, sep=":"), bgw = bgw) } setMethod("FLXgetModelmatrix", signature(model="FLXMRrobglm"), function(model, data, formula, lhs=TRUE, ...) 
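## The model matrix is set up by the inherited FLXMRglm method; an intercept
## is required because the additional component introduced by FLXMRrobglm is
## fitted with a constant mean only (see the FLXmstep method below).
## Illustrative call (hypothetical data frame `d` with columns y and x):
##   m <- flexmix(y ~ x, data = d, k = 3, model = FLXMRrobglm())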
{ model <- callNextMethod(model, data, formula, lhs) if (attr(terms(model@fullformula), "intercept")==0) stop("please include an intercept") new("FLXMRrobglm", model) }) setMethod("FLXremoveComponent", signature(model = "FLXMRrobglm"), function(model, nok, ...) { if (1 %in% nok) model <- as(model, "FLXMRglm") model }) setMethod("FLXmstep", signature(model = "FLXMRrobglm"), function(model, weights, ...) { if(model@bgw){ w <- weights[,1] } else{ w <- rep(1, nrow(weights)) } if(model@family=="gaussian") { cwt <- cov.wt(model@y, w) coef <- c(cwt$center, rep(0, ncol(model@x)-1)) names(coef) <- colnames(model@x) comp.1 <- model@defineComponent(list(coef = coef, df = 0, offset = NULL, sigma=sqrt(cwt$cov), family = model@family)) } else if(model@family=="poisson") { cwt <- cov.wt(model@y, w) coef <- c(log(3*cwt$center), rep(0, ncol(model@x)-1)) names(coef) <- colnames(model@x) comp.1 <- model@defineComponent(list(coef = coef, df = 0, offset = NULL, family = model@family)) } else{ stop("Other families not implemented yet!") } c(list(comp.1), FLXmstep(as(model, "FLXMRglm"), weights[, -1, drop=FALSE], ...)) }) flexmix/R/plot-refit.R0000644000176200001440000001125014404637304014343 0ustar liggesusers# # Copyright (C) 2004-2016 Friedrich Leisch and Bettina Gruen # $Id: plot-refit.R 5079 2016-01-31 12:21:12Z gruen $ # prepanel.default.coef <- function (x, y, subscripts, groups=NULL, horizontal = TRUE, nlevels, origin = NULL, ...) { if (any(!is.na(x) & !is.na(y))) { if (horizontal) { if (!is.factor(y)) { if (missing(nlevels)) nlevels <- length(unique(y)) y <- factor(y, levels = seq_len(nlevels)) } if (!is.null(groups)) { if (!is.numeric(x)) stop("x must be numeric") x <- rep(x, each = 2) + rep(groups[subscripts], each = 2) *c(-1,1) } list(xlim = if (is.numeric(x)) range(x, origin, finite = TRUE) else levels(x), ylim = levels(y), yat = sort(unique(as.numeric(y))), dx = 1, dy = 1) } else { if (!is.factor(x)) { if (missing(nlevels)) nlevels <- length(unique(x)) x <- factor(x, levels = seq_len(nlevels)) } if (!is.null(groups)) { if (!is.numeric(y)) stop("y must be numeric") y <- rep(as.numeric(y), each = 2) + rep(groups[subscripts], each = 2) *c(-1,1) } list(xlim = levels(x), xat = sort(unique(as.numeric(x))), ylim = if (is.numeric(y)) range(y, origin, finite = TRUE) else levels(y), dx = 1, dy = 1) } } else list(xlim = c(NA, NA), ylim = c(NA, NA), dx = 1, dy = 1) } panel.coef <- function(x, y, subscripts, groups, significant = NULL, horizontal = TRUE, lwd = 2, col, col.line = c("black", "grey"), ...) { col.sig <- rep(col.line[1], length(x)) if (!is.null(significant)) { if (missing(col)) col <- c("grey", "white") col.fill <- rep(col[1], length(x)) col.sig[!significant[subscripts]] <- col.line[2] col.fill[!significant[subscripts]] <- col[2] } else if (missing(col)) col.fill <- "grey" else col.fill <- col panel.barchart(x, y, border = col.sig, col = col.fill, horizontal = horizontal, ...) if (!missing(groups)) { if (horizontal) { z <- x + rep(c(-1,1), each = length(x)) * matrix(rep(groups[subscripts], 2), ncol = 2) for (i in seq_along(x)) { panel.xyplot(z[i,], rep(y[i], 2), type = "l", col = col.sig[i], lwd = lwd) } } else { z <- y + rep(c(-1,1), each = length(y)) * matrix(rep(groups[subscripts], 2), ncol = 2) for (i in seq_along(y)) { panel.xyplot(rep(x[i], 2), z[i,], type = "l", col = col.sig[i], lwd = lwd) } } } } getCoefs <- function(x, alpha = 0.05, components, ...) 
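## Collects the estimates of the selected components into one data frame,
## together with the confidence half-widths (standard error times the
## (1 - alpha/2) normal quantile) and a significance indicator used for
## shading the bars in the coefficient plot.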
{ names(x) <- sapply(names(x), function(z) strsplit(z, "Comp.")[[1]][2]) x <- x[names(x) %in% components] Comp <- lapply(names(x), function(n) data.frame(Value = x[[n]][,1], SD = x[[n]][,2] * qnorm(1-alpha/2), Variable = rownames(x[[n]]), Component = n, Significance = x[[n]][,4] <= alpha)) do.call("rbind", Comp) } setMethod("plot", signature(x="FLXRoptim", y="missing"), function(x, y, model = 1, which = c("model", "concomitant"), bycluster=TRUE, alpha=0.05, components, labels=NULL, significance = FALSE, xlab = NULL, ylab = NULL, ci = TRUE, scales = list(), as.table = TRUE, horizontal = TRUE, ...) { which <- match.arg(which) if (missing(components)) components <- seq_len(x@k) plot.data <- if (which == "model") getCoefs(x@components[[model]], alpha, components) else getCoefs(x@concomitant, alpha, components) if (!is.null(labels)) plot.data$Variable <- factor(plot.data$Variable, labels = labels) plot.data$Component <- with(plot.data, factor(Component, sort(unique(Component)), labels = paste("Comp.", sort(unique(Component))))) if (bycluster) { formula <- if (horizontal) Variable ~ Value | Component else Value ~ Variable | Component plot.data$Variable <- with(plot.data, factor(Variable, levels = rev(unique(Variable)))) } else { formula <- if (horizontal) Component ~ Value | Variable else Value ~ Component | Variable plot.data$Component <- with(plot.data, factor(Component, levels = rev(levels(Component)))) } groups <- if (ci) plot.data$SD else NULL significant <- if (significance) plot.data$Significance else NULL xyplot(formula, data = plot.data, xlab = xlab, ylab = ylab, origin = 0, horizontal = horizontal, scales = scales, as.table = as.table, significant = significant, groups = groups, prepanel = function(...) prepanel.default.coef(...), panel = function(x, y, subscripts, groups, ...) panel.coef(x, y, subscripts, groups, ...), ...) }) flexmix/R/multinom.R0000644000176200001440000000340114404637304014121 0ustar liggesuserssetClass("FLXMRmultinom", contains = "FLXMRglm") FLXMRmultinom <- function(formula=.~., ...) { z <- new("FLXMRmultinom", weighted=TRUE, formula=formula, family = "multinom", name=paste("FLXMRglm", "multinom", sep=":")) z@preproc.y <- function(x){ x <- as.integer(factor(x)) if (min(x) < 1 | length(unique(x)) != max(x)) stop("x needs to be coercible to an integer vector containing all numbers from 1 to max(x)") y <- matrix(0, nrow = length(x), ncol = max(x)) y[cbind(seq_along(x), x)] <- 1 y } z@defineComponent <- function(para) { predict <- function(x) { p <- tcrossprod(x, para$coef) eta <- cbind(1, exp(p)) eta/rowSums(eta) } logLik <- function(x, y) { log(predict(x))[cbind(seq_len(nrow(y)), max.col(y, "first"))] } new("FLXcomponent", parameters=list(coef=para$coef), logLik=logLik, predict=predict, df=para$df) } z@fit <- function(x, y, w, component){ r <- ncol(x) p <- ncol(y) if (p < 2) stop("Multinom requires at least two components.") mask <- c(rep(0, r + 1), rep(c(0, rep(1, r)), p - 1)) fit <- nnet.default(x, y, w, mask = mask, size = 0, skip = TRUE, softmax = TRUE, censored = FALSE, rang = 0, trace=FALSE, ...) 
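## Dress the nnet fit up as a "multinom" object so that coef() returns the
## coefficient matrix in the usual multinomial logit parameterization.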
fit$coefnames <- colnames(x) fit$weights <- w fit$vcoefnames <- fit$coefnames[seq_len(ncol(x))] fit$lab <- seq_len(ncol(y)) class(fit) <- c("multinom", "nnet") coef <- coef(fit) z@defineComponent(list(coef = coef, df = length(coef))) } z } setMethod("existGradient", signature(object = "FLXMRmultinom"), function(object) FALSE) flexmix/R/refit.R0000644000176200001440000005270514404637304013401 0ustar liggesusers# # Copyright (C) 2004-2016 Friedrich Leisch and Bettina Gruen # $Id: refit.R 5079 2016-01-31 12:21:12Z gruen $ # ###********************************************************* setMethod("FLXgetParameters", signature(object="FLXdist"), function(object, model) { if (missing(model)) model <- seq_along(object@model) coefficients <- unlist(lapply(model, function(m) { Model <- unlist(FLXgetParameters(object@model[[m]], lapply(object@components, "[[", m))) names(Model) <- paste("model.", m, "_", names(Model), sep = "") Model })) c(coefficients, FLXgetParameters(object@concomitant)) }) setMethod("FLXgetParameters", signature(object="FLXM"), function(object, components, ...) { lapply(components, function(x) unlist(slot(x, "parameters"))) }) setMethod("FLXgetParameters", signature(object="FLXMC"), function(object, components, ...) { if (object@dist == "mvnorm") { return(lapply(components, function(x) { pars <- x@parameters if (identical(pars$cov, diag(diag(pars$cov)))) return(c(pars$center, diag(pars$cov))) else return(c(pars$center, pars$cov[lower.tri(pars$cov, diag = TRUE)])) })) } else return(lapply(components, function(x) unlist(slot(x, "parameters")))) }) setMethod("FLXgetParameters", signature(object="FLXMRglm"), function(object, components, ...) { parms <- lapply(components, function(x) unlist(slot(x, "parameters"))) Design <- FLXgetDesign(object, components) if (object@family == "gaussian") { parms <- lapply(parms, function(x) { x["sigma"] <- log(x["sigma"]) x }) colnames(Design) <- gsub("sigma$", "log(sigma)", colnames(Design)) } parms_unique <- vector(length = ncol(Design)) names(parms_unique) <- colnames(Design) for (k in seq_along(parms)) parms_unique[as.logical(Design[k,])] <- parms[[k]] parms_unique }) setMethod("FLXgetParameters", signature(object="FLXP"), function(object, ...) { if (length(object@coef) == 1) return(NULL) alpha <- log(object@coef[-1]) - log(object@coef[1]) names(alpha) <- paste("concomitant", paste("Comp", seq_along(object@coef)[-1], "alpha", sep = "."), sep = "_") return(alpha) }) setMethod("FLXgetParameters", signature(object="FLXPmultinom"), function(object, ...) { coefficients <- object@coef[,-1,drop=FALSE] if (ncol(coefficients) > 0) { Names <- paste("Comp", rep(seq_len(ncol(coefficients)+1)[-1], each = nrow(coefficients)), rownames(coefficients), sep = ".") coefficients <- as.vector(coefficients) names(coefficients) <- paste("concomitant", Names, sep = "_") return(coefficients) }else return(NULL) }) setMethod("VarianceCovariance", signature(object="flexmix"), function(object, model = TRUE, gradient, optim_control = list(), ...) { if (object@control@classify != "weighted") stop("Only for weighted ML estimation possible.") if (length(FLXgetParameters(object)) != object@df) stop("not implemented yet for restricted parameters.") if (missing(gradient)) gradient <- FLXgradlogLikfun(object) optim_control$fnscale <- -1 fit <- optim(fn = FLXlogLikfun(object), par = FLXgetParameters(object), gr = gradient, hessian = TRUE, method = "BFGS", control = optim_control, ...) 
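## The covariance matrix of the estimates is obtained below as the negative
## inverse of the Hessian of the log-likelihood at the optimum.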
list(coef = fit$par, vcov = -solve(as.matrix(fit$hessian))) }) setMethod("logLikfun_comp", signature(object="flexmix"), function(object) { postunscaled <- matrix(0, nrow = FLXgetObs(object@model[[1]]), ncol = object@k) for (m in seq_along(object@model)) postunscaled <- postunscaled + FLXdeterminePostunscaled(object@model[[m]], lapply(object@components, "[[", m)) if(length(object@group)>0) postunscaled <- groupPosteriors(postunscaled, object@group) postunscaled }) setMethod("FLXlogLikfun", signature(object="flexmix"), function(object, ...) function(parms) { object <- FLXreplaceParameters(object, parms) groupfirst <- if (length(object@group) > 1) groupFirst(object@group) else rep(TRUE, FLXgetObs(object@model[[1]])) logpostunscaled <- logLikfun_comp(object) + log(getPriors(object@concomitant, object@group, groupfirst)) if (is.null(object@weights)) sum(log_row_sums(logpostunscaled[groupfirst,,drop=FALSE])) else sum(log_row_sums(logpostunscaled[groupfirst,,drop=FALSE])*object@weights[groupfirst]) }) setMethod("getPriors", signature(object="FLXP"), function(object, group, groupfirst) { priors <- matrix(apply(object@coef, 2, function(x) object@x %*% x), nrow = nrow(object@x)) ungroupPriors(priors/rowSums(priors), group, groupfirst) }) setMethod("getPriors", signature(object="FLXPmultinom"), function(object, group, groupfirst) { priors <- matrix(apply(object@coef, 2, function(x) exp(object@x %*% x)), nrow = nrow(object@x)) ungroupPriors(priors/rowSums(priors), group, groupfirst) }) setMethod("FLXreplaceParameters", signature(object="FLXdist"), function(object, parms) { comp_names <- names(object@components) components <- list() for (m in seq_along(object@model)) { indices <- grep(paste("^model.", m, sep = ""), names(parms)) components[[m]] <- FLXreplaceParameters(object@model[[m]], lapply(object@components, "[[", m), parms[indices]) } object@components <- lapply(seq_along(object@components), function(k) lapply(components, "[[", k)) names(object@components) <- comp_names if (object@k > 1) { indices <- grep("^concomitant_", names(parms)) object@concomitant <- FLXreplaceParameters(object@concomitant, parms[indices]) } object }) setMethod("FLXreplaceParameters", signature(object="FLXM"), function(object, components, parms) { Design <- FLXgetDesign(object, components) lapply(seq_along(components), function(k) { Parameters <- list() parms_k <- parms[as.logical(Design[k,])] for (i in seq_along(components[[k]]@parameters)) { Parameters[[i]] <- parms_k[seq_along(components[[k]]@parameters[[i]])] attributes(Parameters[[i]]) <- attributes(components[[k]]@parameters[[i]]) parms_k <- parms_k[-seq_along(components[[k]]@parameters[[i]])] } names(Parameters) <- names(components[[k]]@parameters) Parameters$df <- components[[k]]@df variables <- c("x", "y") for (var in variables) assign(var, slot(object, var)) if (is(object@defineComponent, "expression")) eval(object@defineComponent, Parameters) else object@defineComponent(Parameters) }) }) setMethod("FLXreplaceParameters", signature(object="FLXMC"), function(object, components, parms) { Design <- FLXgetDesign(object, components) if (object@dist == "mvnorm") { p <- sqrt(1/4+ncol(Design)/nrow(Design)) - 1/2 diagonal <- get("diagonal", environment(object@fit)) if (diagonal) { cov <- diag(seq_len(p)) parms_comp <- as.vector(sapply(seq_len(nrow(Design)), function(i) c(parms[(i-1) * 2 * p + seq_len(p)], as.vector(diag(diag(parms[(i-1) * 2 * p + p + seq_len(p)])))))) parms <- c(parms_comp, parms[(nrow(Design) * 2 * p + 1):length(parms)]) } else { cov <- matrix(NA, nrow = 
p, ncol = p) cov[lower.tri(cov, diag = TRUE)] <- seq_len(sum(lower.tri(cov, diag = TRUE))) cov[upper.tri(cov)] <- t(cov)[upper.tri(cov)] parms <- parms[c(as.vector(sapply(seq_len(nrow(Design)), function(i) (i-1)*(max(cov)+p) + c(seq_len(p), as.vector(cov) + p))), (nrow(Design) * (max(cov) + p)+1):length(parms))] } } callNextMethod(object = object, components = components, parms = parms) }) setMethod("FLXreplaceParameters", signature(object="FLXMRglm"), function(object, components, parms) { Design <- FLXgetDesign(object, components) lapply(seq_along(components), function(k) { Parameters <- list() parms_k <- parms[as.logical(Design[k,])] for (i in seq_along(components[[k]]@parameters)) { Parameters[[i]] <- parms_k[seq_along(components[[k]]@parameters[[i]])] attributes(Parameters[[i]]) <- attributes(components[[k]]@parameters[[i]]) parms_k <- parms_k[-seq_along(components[[k]]@parameters[[i]])] } names(Parameters) <- names(components[[k]]@parameters) if (object@family == "gaussian") { Parameters[["sigma"]] <- exp(Parameters[["sigma"]]) } Parameters$df <- components[[k]]@df variables <- c("x", "y", "offset", "family") for (var in variables) { assign(var, slot(object, var)) } if (is(object@defineComponent, "expression")) eval(object@defineComponent, Parameters) else object@defineComponent(Parameters) }) }) setMethod("FLXreplaceParameters", signature(object="FLXP"), function(object, parms) { parms <- exp(c(0, parms)) parms <- parms/sum(parms) attributes(parms) <- attributes(object@coef) object@coef <- parms object }) setMethod("FLXreplaceParameters", signature(object="FLXPmultinom"), function(object, parms) { parms <- cbind(0, matrix(parms, nrow = nrow(object@coef))) attributes(parms) <- attributes(object@coef) object@coef <- parms object }) setMethod("FLXgradlogLikfun", signature(object="flexmix"), function(object, ...) 
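## Builds the gradient (score) function of the log-likelihood with respect
## to all component and concomitant parameters; returns NULL if an
## analytical gradient is unavailable for any of the model drivers or for
## the concomitant model.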
{ existFunction <- all(sapply(object@model, existGradient)) if (object@k > 1) existFunction <- c(existFunction, existGradient(object@concomitant)) if (any(!existFunction)) return(NULL) function(parms) { object <- FLXreplaceParameters(object, parms) groupfirst <- if (length(object@group) > 1) groupFirst(object@group) else rep(TRUE, FLXgetObs(object@model[[1]])) logLik_comp <- logLikfun_comp(object) Priors <- getPriors(object@concomitant, object@group, groupfirst) Priors_Lik_comp <- logLik_comp + log(Priors) weights <- exp(Priors_Lik_comp - log_row_sums(Priors_Lik_comp)) if (object@k > 1) { ConcomitantScores <- FLXgradlogLikfun(object@concomitant, Priors[groupfirst,,drop=FALSE], weights[groupfirst,,drop=FALSE]) if (!is.null(object@weights)) ConcomitantScores <- lapply(ConcomitantScores, "*", object@weights[groupfirst]) } else ConcomitantScores <- list() ModelScores <- lapply(seq_along(object@model), function(m) FLXgradlogLikfun(object@model[[m]], lapply(object@components, "[[", m), weights)) ModelScores <- lapply(ModelScores, lapply, groupPosteriors, object@group) if (!is.null(object@weights)) ModelScores <- lapply(ModelScores, lapply, "*", object@weights) colSums(cbind(do.call("cbind", lapply(ModelScores, function(x) do.call("cbind", x)))[groupfirst,,drop=FALSE], do.call("cbind", ConcomitantScores))) } }) setMethod("existGradient", signature(object = "FLXM"), function(object) FALSE) setMethod("existGradient", signature(object = "FLXMRglm"), function(object) { if (object@family == "Gamma") return(FALSE) TRUE }) setMethod("existGradient", signature(object = "FLXMRglmfix"), function(object) FALSE) setMethod("existGradient", signature(object = "FLXP"), function(object) TRUE) setMethod("FLXgradlogLikfun", signature(object="FLXMRglm"), function(object, components, weights, ...) { lapply(seq_along(components), function(k) { res <- if (object@family == "binomial") as.vector(object@y[,1] - rowSums(object@y)*components[[k]]@predict(object@x)) else as.vector(object@y - components[[k]]@predict(object@x)) Scores <- weights[,k] * res * object@x if (object@family == "gaussian") { Scores <- cbind(Scores/components[[k]]@parameters$sigma^2, weights[,k] * (-1 + res^2/components[[k]]@parameters$sigma^2)) } Scores }) }) setMethod("FLXgradlogLikfun", signature(object="FLXP"), function(object, fitted, weights, ...) { Pi <- lapply(seq_len(ncol(fitted))[-1], function(i) - fitted[,i] + weights[,i]) lapply(Pi, function(p) apply(object@x, 2, "*", p)) }) setMethod("refit", signature(object = "flexmix"), function(object, newdata, method = c("optim", "mstep"), ...) { method <- match.arg(method) if (method == "optim") { VarCov <- VarianceCovariance(object, ...) 
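## Optim-based refitting: the coefficient vector and its covariance matrix
## are stored in an "FLXRoptim" object, and component-specific coefficient
## tables are then derived from them via refit_optim().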
z <- new("FLXRoptim", call=sys.call(-1), k = object@k, coef = VarCov$coef, vcov = VarCov$vcov) z@components <- lapply(seq_along(object@model), function(m) { begin_name <- paste("^model", m, sep = ".") indices <- grep(begin_name, names(z@coef)) refit_optim(object@model[[m]], components = lapply(object@components, "[[", m), coef = z@coef[indices], se = sqrt(diag(z@vcov)[indices])) }) z@concomitant <- if (object@k > 1) { indices <- grep("^concomitant_", names(z@coef)) refit_optim(object@concomitant, coef = z@coef[indices], se = sqrt(diag(z@vcov)[indices])) } else NULL } else { z <- new("FLXRmstep", call=sys.call(-1), k = object@k) z@components <- lapply(object@model, function(x) { x <- refit_mstep(x, weights=object@posterior$scaled) names(x) <- paste("Comp", seq_len(object@k), sep=".") x }) z@concomitant <- if (object@k > 1) refit_mstep(object@concomitant, posterior = object@posterior$scaled, group = object@group, w = object@weights) else NULL } z }) setMethod("refit_optim", signature(object = "FLXM"), function(object, components, coef, se) { Design <- FLXgetDesign(object, components) x <- lapply(seq_len(nrow(Design)), function(k) { rval <- cbind(Estimate = coef[as.logical(Design[k,])], "Std. Error" = se[as.logical(Design[k,])]) pars <- components[[k]]@parameters[[1]] rval <- rval[seq_along(pars),,drop=FALSE] rownames(rval) <- names(pars) zval <- rval[,1]/rval[,2] new("Coefmat", cbind(rval, "z value" = zval, "Pr(>|z|)" = 2 * pnorm(abs(zval), lower.tail = FALSE))) }) names(x) <- paste("Comp", seq_along(x), sep = ".") x }) setMethod("refit_optim", signature(object = "FLXMC"), function(object, components, coef, se) { Design <- FLXgetDesign(object, components) if (object@dist == "mvnorm") { p <- length(grep("Comp.1_center", colnames(Design), fixed = TRUE)) diagonal <- get("diagonal", environment(object@fit)) if (diagonal) { cov <- diag(seq_len(p)) coef_comp <- as.vector(sapply(seq_len(nrow(Design)), function(i) c(coef[(i-1) * 2 * p + seq_len(p)], as.vector(diag(diag(coef[(i-1) * 2 * p + p + seq_len(p)])))))) coef <- c(coef_comp, coef[(nrow(Design) * 2 * p + 1):length(coef)]) se_comp <- as.vector(sapply(seq_len(nrow(Design)), function(i) c(se[(i-1) * 2 * p + seq_len(p)], as.vector(diag(diag(se[(i-1) * 2 * p + p + seq_len(p)])))))) se <- c(se_comp, se[(nrow(Design) * 2 * p + 1):length(se)]) } else { cov <- matrix(NA, nrow = p, ncol = p) cov[lower.tri(cov, diag = TRUE)] <- seq_len(sum(lower.tri(cov, diag = TRUE))) cov[upper.tri(cov)] <- t(cov)[upper.tri(cov)] coef <- coef[c(as.vector(sapply(seq_len(nrow(Design)), function(i) (i-1)*(max(cov)+p) + c(seq_len(p), as.vector(cov) + p))), (nrow(Design) * (max(cov) + p)+1):length(coef))] se <- se[c(as.vector(sapply(seq_len(nrow(Design)), function(i) (i-1)*(max(cov)+p) + c(seq_len(p), as.vector(cov) + p))), (nrow(Design) * (max(cov) + p)+1):length(se))] } } callNextMethod(object = object, components = components, coef = coef, se = se) }) setMethod("refit_optim", signature(object = "FLXP"), function(object, coef, se) { x <- lapply(seq_len(ncol(object@coef))[-1], function(k) { indices <- grep(paste("Comp", k, sep = "."), names(coef)) rval <- cbind(Estimate = coef[indices], "Std. 
Error" = se[indices]) rval <- rval[seq_len(nrow(object@coef)),,drop=FALSE] rownames(rval) <- rownames(object@coef) zval <- rval[,1]/rval[,2] new("Coefmat", cbind(rval, "z value" = zval, "Pr(>|z|)" = 2 * pnorm(abs(zval), lower.tail = FALSE))) }) names(x) <- paste("Comp", 1 + seq_along(x), sep = ".") x }) setMethod("FLXgetDesign", signature(object = "FLXM"), function(object, components, ...) { parms <- lapply(components, function(x) unlist(slot(x, "parameters"))) nr_parms <- sapply(parms, length) cumSum <- cumsum(c(0, nr_parms)) Design <- t(sapply(seq_len(length(cumSum)-1), function(i) rep(c(0, 1, 0), c(cumSum[i], nr_parms[i], max(cumSum) - cumSum[i] - nr_parms[i])))) colnames(Design) <- paste(rep(paste("Comp", seq_len(nrow(Design)), sep = "."), nr_parms), unlist(lapply(parms, names)), sep = "_") Design }) setMethod("FLXgetDesign", signature(object = "FLXMRglmfix"), function(object, components, ...) { if (length(components) == 1) return(callNextMethod(object, components, ...)) Design <- object@design if (object@family == "gaussian") { cumSum <- cumsum(c(0, object@variance)) variance <- matrix(sapply(seq_len(length(cumSum)-1), function(i) rep(c(0, 1, 0), c(cumSum[i], object@variance[i], length(components) - cumSum[i] - object@variance[i]))), nrow = length(components)) colnames(variance) <- paste("Comp", apply(variance, 2, function(x) which(x == 1)[1]), "sigma", sep= ".") Design <- cbind(Design, variance) } Design }) ###********************************************************* setMethod("refit_mstep", signature(object="FLXM"), function(object, newdata, weights, ...) { lapply(seq_len(ncol(weights)), function(k) object@fit(object@x, object@y, weights[,k], ...)@parameters) }) setMethod("refit_mstep", signature(object="FLXMRglm"), function(object, newdata, weights, ...) { lapply(seq_len(ncol(weights)), function(k) { fit <- object@refit(object@x, object@y, weights[,k], ...) fit <- c(fit, list(formula = object@fullformula, terms = object@terms, contrasts = object@contrasts, xlevels = object@xlevels)) class(fit) <- c("glm", "lm") fit }) }) ###********************************************************** setMethod("fitted", signature(object="flexmix"), function(object, drop=TRUE, aggregate = FALSE, ...) { x<- list() for(m in seq_along(object@model)) { comp <- lapply(object@components, "[[", m) x[[m]] <- fitted(object@model[[m]], comp, ...) } if (aggregate) { group <- group(object) prior_weights <- determinePrior(object@prior, object@concomitant, group)[as.integer(group),] z <- lapply(x, function(z) matrix(rowSums(do.call("cbind", z) * prior_weights), nrow = nrow(z[[1]]))) if(drop && all(lapply(z, ncol)==1)){ z <- sapply(z, unlist) } } else { z <- list() for (k in seq_len(object@k)) { z[[k]] <- do.call("cbind", lapply(x, "[[", k)) } names(z) <- paste("Comp", seq_len(object@k), sep=".") if(drop && all(lapply(z, ncol)==1)){ z <- sapply(z, unlist) } } z }) setMethod("fitted", signature(object="FLXM"), function(object, components, ...) { lapply(components, function(z) z@predict(object@x)) }) setMethod("predict", signature(object="FLXM"), function(object, newdata, components, ...) { object <- FLXgetModelmatrix(object, newdata, formula = object@fullformula, lhs = FALSE) z <- list() for(k in seq_along(components)) z[[k]] <- components[[k]]@predict(object@x, ...) z }) ###********************************************************** setMethod("Lapply", signature(object="FLXRmstep"), function(object, FUN, model = 1, component = TRUE, ...) { X <- object@components[[model]] lapply(X[component], FUN, ...) 
}) ###********************************************************* setMethod("refit_mstep", signature(object="flexmix", newdata="listOrdata.frame"), function(object, newdata, ...) { z <- new("FLXR", call=sys.call(-1), k = object@k) z@components <- lapply(object@model, function(x) { x <- refit_mstep(x, newdata = newdata, weights=posterior(object, newdata = newdata)) names(x) <- paste("Comp", seq_len(object@k), sep=".") x }) z@concomitant <- if (object@k > 1) refit_mstep(object@concomitant, newdata, object@posterior$scaled, object@group, w = object@weights) else NULL z }) setMethod("refit_mstep", signature(object="FLXMRglm", newdata="listOrdata.frame"), function(object, newdata, weights, ...) { w <- weights lapply(seq_len(ncol(w)), function(k) { newdata$weights <- weights <- w[,k] weighted.glm(formula = object@fullformula, data = newdata, family = object@family, weights = weights, ...) }) }) weighted.glm <- function(weights, ...) { fit <- eval(as.call(c(as.symbol("glm"), c(list(...), list(weights = weights, x = TRUE))))) fit$df.null <- sum(weights) + fit$df.null - fit$df.residual - fit$rank fit$df.residual <- sum(weights) - fit$rank fit$method <- "weighted.glm.fit" fit } weighted.glm.fit <- function(x, y, weights, offset = NULL, family = "gaussian", ...) { if (!is.function(family) & !is(family, "family")) family <- get(family, mode = "function", envir = parent.frame()) fit <- c(glm.fit(x, y, weights = weights, offset=offset, family=family), list(call = sys.call(), offset = offset, control = eval(formals(glm.fit)$control), method = "weighted.glm.fit")) fit$df.null <- sum(weights) + fit$df.null - fit$df.residual - fit$rank fit$df.residual <- sum(weights) - fit$rank fit$x <- x fit } flexmix/R/flxmcmvpois.R0000644000176200001440000000153714404637304014634 0ustar liggesusers# # Copyright (C) 2004-2016 Friedrich Leisch and Bettina Gruen # $Id: flxmcmvpois.R 5079 2016-01-31 12:21:12Z gruen $ # FLXMCmvpois <- function(formula=.~.) 
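## Model driver for clustering multivariate count data with conditionally
## independent Poisson distributions within each component.
## Illustrative call (hypothetical count matrix `y`):
##   flexmix(y ~ 1, k = 2, model = FLXMCmvpois())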
{ z <- new("FLXMC", weighted=TRUE, formula=formula, dist="mvpois", name="model-based Poisson clustering") z@preproc.y <- function(x){ storage.mode(x) <- "integer" x } z@defineComponent <- function(para) { logLik <- function(x, y){ colSums(dpois(t(y), para$lambda, log=TRUE)) } predict <- function(x, ...){ matrix(para$lambda, nrow = nrow(x), ncol=length(para$lambda), byrow=TRUE) } new("FLXcomponent", parameters=list(lambda=para$lambda), df=para$df, logLik=logLik, predict=predict) } z@fit <- function(x, y, w, ...){ z@defineComponent(list(lambda = colSums(w*y)/sum(w), df = ncol(y))) } z } flexmix/R/allClasses.R0000644000176200001440000002277114404637304014356 0ustar liggesusers# # Copyright (C) 2004-2016 Friedrich Leisch and Bettina Gruen # $Id: allClasses.R 5185 2020-06-23 13:24:06Z gruen $ # setClass("FLXcontrol", representation(iter.max="numeric", minprior="numeric", tolerance="numeric", verbose="numeric", classify="character", nrep="numeric"), prototype(iter.max=200, minprior=0.05, tolerance=10e-7, verbose=0, classify="auto", nrep=1), validity=function(object) { (object@iter.max > 0) }) setAs("list", "FLXcontrol", function(from, to){ z = list2object(from, to) z@classify = match.arg(z@classify, c("auto", "weighted", "hard", "random", "SEM", "CEM")) z }) setAs("NULL", "FLXcontrol", function(from, to){ new(to) }) ###********************************************************** setClassUnion("expressionOrfunction", c("expression", "function")) setClass("FLXM", representation(fit="function", defineComponent="expressionOrfunction", weighted="logical", name="character", formula="formula", fullformula="formula", x="matrix", y="ANY", terms="ANY", xlevels="ANY", contrasts="ANY", preproc.x="function", preproc.y="function", "VIRTUAL"), prototype(formula=.~., fullformula=.~., preproc.x = function(x) x, preproc.y = function(x) x)) ## model-based clustering setClass("FLXMC", representation(y="matrix", dist="character"), contains = "FLXM") ## regression setClass("FLXMR", representation(y="matrix", offset="ANY"), contains = "FLXM") setMethod("show", "FLXM", function(object){ cat("FlexMix model of type", object@name,"\n\nformula: ") print(object@formula) cat("Weighted likelihood possible:", object@weighted,"\n\n") if(!is.null(object@x) && nrow(object@x)>0){ cat("Regressors:\n") print(summary(object@x)) } if(!is.null(object@y) && nrow(object@y)>0){ cat("Response:\n") print(summary(object@y)) } cat("\n") }) setClass("FLXcomponent", representation(df="numeric", logLik="function", parameters="list", predict="function")) setMethod("show", "FLXcomponent", function(object){ if(length(object@parameters)>0) print(object@parameters) }) ###********************************************************** setClass("FLXP", representation(name="character", formula="formula", x="matrix", fit="function", refit="function", coef="matrix", df="function"), prototype(formula=~1, df = function(x, k, ...) (k-1)*ncol(x))) setMethod("initialize", signature(.Object="FLXP"), function(.Object, ...) { .Object <- callNextMethod(.Object=.Object, ...) 
if (is.null(formals(.Object@refit))) .Object@refit <- .Object@fit .Object }) setClass("FLXPmultinom", contains="FLXP") setMethod("show", "FLXP", function(object){ cat("FlexMix concomitant model of type", object@name,"\n\nformula: ") print(object@formula) if(!is.null(object@x) && nrow(object@x)>0){ cat("\nRegressors:\n") print(summary(object@x)) } cat("\n") }) ###********************************************************** setClass("FLXdist", representation(model="list", prior="numeric", components="list", concomitant="FLXP", formula="formula", call="call", k="integer"), validity=function(object) { (object@k == length(object@prior)) }, prototype(formula=.~.)) setClass("flexmix", representation(posterior="ANY", weights="ANY", iter="numeric", cluster="integer", logLik="numeric", df="numeric", control="FLXcontrol", group="factor", size="integer", converged="logical", k0="integer"), prototype(group=(factor(integer(0))), formula=.~.), contains="FLXdist") setMethod("show", "flexmix", function(object){ cat("\nCall:", deparse(object@call,0.75*getOption("width")), sep="\n") cat("\nCluster sizes:\n") print(object@size) cat("\n") if(!object@converged) cat("no ") cat("convergence after", object@iter, "iterations\n") }) ###********************************************************** setClass("summary.flexmix", representation(call="call", AIC="numeric", BIC="numeric", logLik="logLik", comptab="ANY")) setMethod("show", "summary.flexmix", function(object){ cat("\nCall:", deparse(object@call,0.75*getOption("width")), sep="\n") cat("\n") print(object@comptab, digits=3) cat("\n") print(object@logLik) cat("AIC:", object@AIC, " BIC:", object@BIC, "\n") cat("\n") }) ###********************************************************** setClass("FLXMRglm", representation(family="character", refit="function"), contains="FLXMR") setClass("FLXR", representation(k="integer", components = "list", concomitant = "ANY", call="call", "VIRTUAL")) setClass("FLXRoptim", representation(coef="vector", vcov="matrix"), contains="FLXR") setClass("FLXRmstep", contains="FLXR") setMethod("show", signature(object = "FLXR"), function(object) { cat("\nCall:", deparse(object@call,0.75*getOption("width")), sep="\n") cat("\nNumber of components:", object@k, "\n\n") }) setMethod("summary", signature(object = "FLXRoptim"), function(object, model = 1, which = c("model", "concomitant"), ...) { which <- match.arg(which) z <- if (which == "model") object@components[[model]] else object@concomitant show(z) invisible(object) }) setMethod("summary", signature(object = "FLXRmstep"), function(object, model = 1, which = c("model", "concomitant"), ...) { which <- match.arg(which) if (which == "model") { z <- object@components[[model]] if (!is.null(z)) lapply(seq_along(z), function(k) { cat(paste("$", names(z)[k], "\n", sep = "")) printCoefmat(coef(summary(z[[k]]))) cat("\n") }) } else { z <- object@concomitant fitted.summary <- summary(z) k <- nrow(coef(fitted.summary)) + 1 coefs <- lapply(2:k, function(n) { coef.p <- fitted.summary$coefficients[n - 1, , drop = FALSE] s.err <- fitted.summary$standard.errors[n - 1, , drop = FALSE] tvalue <- coef.p/s.err pvalue <- 2 * pnorm(-abs(tvalue)) coef.table <- t(rbind(coef.p, s.err, tvalue, pvalue)) dimnames(coef.table) <- list(colnames(coef.p), c("Estimate", "Std. 
Error", "z value", "Pr(>|z|)")) new("Coefmat", coef.table) }) names(coefs) <- paste("Comp", 2:k, sep = ".") print(coefs) } invisible(object) }) setClass("Coefmat", contains = "matrix") setMethod("show", signature(object="Coefmat"), function(object) { printCoefmat(object, signif.stars = getOption("show.signif.stars")) }) ###********************************************************** setClass("FLXnested", representation(formula = "list", k = "numeric"), validity = function(object) { length(object@formula) == length(object@k) }) setAs("numeric", "FLXnested", function(from, to) { new("FLXnested", formula = rep(list(~0), length(from)), k = from) }) setAs("list", "FLXnested", function(from, to) { z <- list2object(from, to) }) setAs("NULL", "FLXnested", function(from, to) { new(to) }) setMethod("initialize", "FLXnested", function(.Object, formula = list(), k = numeric(0), ...) { if (is(formula, "formula")) formula <- rep(list(formula), length(k)) .Object <- callNextMethod(.Object, formula = formula, k = k, ...) .Object }) ###********************************************************** setClass("FLXMRfix", representation(design = "matrix", nestedformula = "FLXnested", fixed = "formula", segment = "matrix", variance = "vector"), contains="FLXMR") setClass("FLXMRglmfix", contains=c("FLXMRfix", "FLXMRglm")) ###********************************************************** setClassUnion("listOrdata.frame", c("list", "data.frame")) ###********************************************************** flexmix/R/condlogit.R0000644000176200001440000000553214404637304014246 0ustar liggesuserssetClass("FLXMRcondlogit", representation(strata="ANY", strata_formula="ANY"), contains = "FLXMRglm") FLXMRcondlogit <- function(formula=.~., strata) { z <- new("FLXMRcondlogit", weighted=TRUE, formula=formula, strata_formula=strata, family="multinomial", name=paste("FLXMRcondlogit")) z@defineComponent <- function(para) { predict <- function(x, ...) tcrossprod(x, t(para$coef)) logLik <- function(x, y, strata) { llh_all <- vector("numeric", length = length(y)) eta <- predict(x) llh_all[as.logical(y)] <- eta[as.logical(y)] ((tapply(llh_all, strata, sum) - tapply(exp(eta), strata, function(z) log(sum(z))))/tabulate(strata))[strata] } new("FLXcomponent", parameters=list(coef=para$coef), logLik=logLik, predict=predict, df=para$df) } z@fit <- function(x, y, w, strata){ index <- w > 0 fit <- survival::coxph.fit(x[index,,drop=FALSE], survival::Surv(1-y, y)[index], strata[index], weights=w[index], control = survival::coxph.control(), method = "exact", rownames = seq_len(nrow(y))[index]) coef <- coef(fit) df <- length(coef) z@defineComponent(list(coef = coef, df = df)) } z } setMethod("FLXgetModelmatrix", signature(model="FLXMRcondlogit"), function(model, data, formula, lhs=TRUE, ...) { formula <- RemoveGrouping(formula) if(is.null(model@formula)) model@formula = formula model@fullformula = update(terms(formula, data=data), model@formula) ## Ensure that an intercept is included model@fullformula <- update(model@fullformula, ~ . + 1) if (lhs) { mf <- model.frame(model@fullformula, data=data, na.action = NULL) model@x <- model.matrix(attr(mf, "terms"), data=mf) response <- as.matrix(model.response(mf)) model@y <- model@preproc.y(response) } else { mt1 <- terms(model@fullformula, data=data) mf <- model.frame(delete.response(mt1), data=data, na.action = NULL) mt <- attr(mf, "terms") model@x <- model.matrix(mt, data=mf) } strata <- update(model@strata_formula, ~ . 
+ 0) mf <- model.frame(strata, data=data, na.action=NULL) model@strata <- as.integer(model.matrix(attr(mf, "terms"), data=mf)) ## Omit the intercept for identifiability model@x <- model@x[,attr(model@x, "assign") != 0, drop=FALSE] model@x <- model@preproc.x(model@x) model }) setMethod("FLXmstep", signature(model = "FLXMRcondlogit"), function(model, weights, ...) { apply(weights, 2, function(w) model@fit(model@x, model@y, w, model@strata)) }) setMethod("FLXdeterminePostunscaled", signature(model = "FLXMRcondlogit"), function(model, components, ...) { sapply(components, function(x) x@logLik(model@x, model@y, model@strata)) }) setMethod("existGradient", signature(object = "FLXMRcondlogit"), function(object) FALSE) flexmix/R/lmmc.R0000644000176200001440000006522614404661473013226 0ustar liggesuserssetClass("FLXMRlmc", representation(family = "character", group = "factor", censored = "formula", C = "matrix"), contains = "FLXMR") setClass("FLXMRlmcfix", contains = "FLXMRlmc") setClass("FLXMRlmmc", representation(random = "formula", z = "matrix", which = "ANY"), contains = "FLXMRlmc") setClass("FLXMRlmmcfix", contains = "FLXMRlmmc") setMethod("allweighted", signature(model = "FLXMRlmc", control = "ANY", weights = "ANY"), function(model, control, weights) { if (!control@classify %in% c("auto", "weighted")) stop("Model class only supports weighted ML estimation.") model@weighted }) update_Residual <- function(fit, w, z, C, which, random, censored) { index <- lapply(C, function(x) x == 1) W <- rep(w, sapply(which, function(x) nrow(z[[x]]))) ZGammaZ <- sapply(seq_along(which), function(i) sum(diag(crossprod(z[[which[i]]]) %*% random$Gamma[[i]]))) WHICH <- which(sapply(C, sum) > 0) Residual <- if (length(WHICH) > 0) sum(sapply(WHICH, function(i) w[i] * sum(diag(censored$Sigma[[i]]) - 2 * z[[which[i]]][index[[i]],,drop=FALSE] * censored$psi[[i]]))) else 0 (sum(W * residuals(fit)^2) + Residual + sum(w * ZGammaZ))/sum(W) } update_latent <- function(x, y, C, fit) { AnyMissing <- which(sapply(C, sum) > 0) index <- lapply(C, function(x) x == 1) Sig <- lapply(seq_along(x), function(i) fit$sigma2 * diag(nrow = nrow(x[[i]]))) SIGMA <- rep(list(matrix(nrow = 0, ncol = 0)), length(x)) if (length(AnyMissing) > 0) { SIGMA[AnyMissing] <- lapply(AnyMissing, function(i) { S <- Sig[[i]] SIG <- S[index[[i]], index[[i]]] if (sum(!index[[i]]) > 0) SIG <- SIG - S[index[[i]],!index[[i]]] %*% solve(S[!index[[i]],!index[[i]]]) %*% S[!index[[i]],index[[i]]] SIG }) } Sigma <- MU <- rep(list(vector("numeric", length = 0)), length(x)) if (length(AnyMissing) > 0) { MU[AnyMissing] <- lapply(AnyMissing, function(i) { S <- Sig[[i]] Mu <- x[[i]][index[[i]],,drop=FALSE] %*% fit$coef if (sum(!index[[i]]) > 0) Mu <- Mu + S[index[[i]],!index[[i]]] %*% solve(S[!index[[i]],!index[[i]]]) %*% (y[[i]][!index[[i]]] - x[[i]][!index[[i]],,drop=FALSE] %*% fit$coef) Mu }) } moments <- lapply(seq_along(x), function(i) { if (sum(index[[i]]) > 0) moments_truncated(MU[[i]], SIGMA[[i]], y[[i]][C[[i]] == 1]) }) Sigma <- lapply(moments, "[[", "variance") censored <- list(mu = lapply(moments, "[[", "mean"), Sigma = Sigma) list(censored = censored) } update_latent_random <- function(x, y, z, C, which, fit) { index <- lapply(C, function(x) x == 1) AnyMissing <- which(sapply(C, sum) > 0) Residual <- fit$sigma2$Residual Psi <- fit$sigma2$Random EVbeta <- lapply(seq_along(z), function(i) solve(1/Residual * crossprod(z[[i]]) + solve(Psi))) Sig <- lapply(seq_along(z), function(i) z[[i]] %*% Psi %*% t(z[[i]]) + Residual * diag(nrow = nrow(z[[i]]))) SIGMA <- 
rep(list(matrix(nrow = 0, ncol = 0)), length(x)) if (length(AnyMissing) > 0) { SIGMA[AnyMissing] <- lapply(AnyMissing, function(i) { S <- Sig[[which[i]]] SIG <- S[index[[i]], index[[i]]] if (sum(!index[[i]]) > 0) SIG <- SIG - S[index[[i]],!index[[i]]] %*% solve(S[!index[[i]],!index[[i]]]) %*% S[!index[[i]],index[[i]]] SIG }) } Sigma <- MU <- rep(list(vector("numeric", length = 0)), length(x)) if (length(AnyMissing) > 0) { MU[AnyMissing] <- lapply(AnyMissing, function(i) { S <- Sig[[which[i]]] Mu <- x[[i]][index[[i]],,drop=FALSE] %*% fit$coef if (sum(!index[[i]]) > 0) { Mu <- Mu + S[index[[i]],!index[[i]]] %*% solve(S[!index[[i]],!index[[i]]]) %*% (y[[i]][!index[[i]]] - x[[i]][!index[[i]],,drop=FALSE] %*% fit$coef) } Mu }) } moments <- lapply(seq_along(x), function(i) { if (sum(index[[i]]) > 0) moments_truncated(MU[[i]], SIGMA[[i]], y[[i]][C[[i]] == 1]) }) Sigma <- lapply(moments, "[[", "variance") censored <- list(mu = lapply(moments, "[[", "mean"), Sigma = Sigma, psi = lapply(seq_along(x), function(i) { if (sum(index[[i]]) > 0) return(diag(Sigma[[i]] %*% z[[which[i]]][index[[i]],,drop=FALSE] %*% EVbeta[[which[i]]])/Residual) else return(vector("numeric", length = 0)) })) ybar <- lapply(seq_along(y), function(i) { Y <- y[[i]] Y[index[[i]]] <- censored$mu[[i]] Y }) random <- list(beta = lapply(seq_along(x), function(i) EVbeta[[which[i]]] %*% t(z[[which[i]]]) %*% (ybar[[i]] - x[[i]] %*% fit$coef)/Residual), Gamma = lapply(seq_along(x), function(i) { if (sum(index[[i]]) > 0) { return(EVbeta[[which[i]]] + (EVbeta[[which[i]]] %*% (t(z[[which[i]]][index[[i]],,drop=FALSE]) %*% censored$Sigma[[i]] %*% z[[which[i]]][index[[i]],,drop=FALSE]) %*% t(EVbeta[[which[i]]]))/Residual^2) } else return(EVbeta[[which[i]]]) })) list(random = random, censored = censored) } moments_truncated <- function(mu, Sigma, T, ...) { Sigma <- as.matrix(Sigma) mu <- as.vector(mu) T <- as.vector(T) S <- 1/sqrt(diag(Sigma)) T1 <- S * (T - mu) if (length(mu) == 1) { alpha <- pnorm(T1) dT1 <- dnorm(T1) Ex <- - dT1 / alpha Ex2 <- 1 - T1 * dT1 / alpha } else { R <- S * Sigma * rep(S, each = ncol(Sigma)) diag(R) <- 1L alpha <- mvtnorm::pmvnorm(upper = T1, sigma = R, ...) 
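## The code below assembles the first and second moments of the normal
## distribution truncated from above at T1, combining univariate and
## bivariate normal densities with lower-dimensional c.d.f. terms; alpha is
## the total probability mass below the truncation point.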
rq <- lapply(seq_along(T1), function(q) (R - tcrossprod(R[,q]))) R2 <- R^2 Vq <- 1 - R2 Sq <- sqrt(Vq) Rq <- lapply(seq_along(T1), function(q) rq[[q]]/(tcrossprod(Sq[,q]))) Tq <- lapply(seq_along(T1), function(q) (T1 - R[,q] * T1[q])/Sq[,q]) Phiq <- if (length(mu) == 1) 1 else sapply(seq_along(Rq), function(q) mvtnorm::pmvnorm(upper = Tq[[q]][-q], sigma = Rq[[q]][-q,-q], ...)) phi_Phiq <- dnorm(T1) * Phiq Ex <- - (R %*% phi_Phiq)/alpha T2_entries <- lapply(seq_along(T1), function(j) sapply(lapply(seq_along(T1)[seq_len(j)], function(i) R[,i] * T1 * phi_Phiq), function(z) sum(z * R[j,]))) T2 <- diag(length(T1)) T2[upper.tri(T2, diag = TRUE)] <- unlist(T2_entries) T2[lower.tri(T2)] <- t(T2)[lower.tri(T2)] phiqr <- lapply(seq_along(T1), function(q) sapply(seq_along(T1), function(r) { if (r == q) return(0) else return(mvtnorm::dmvnorm(T1[c(q, r)], mean = rep(0, length.out = length(c(q,r))), sigma = R[c(q,r), c(q,r)]))})) if (length(mu) == 2) { Ex2 <- R - T2 / alpha + Reduce("+", lapply(seq_along(Tq), function(q) tcrossprod(R[q,], rowSums(sapply(seq_along(Tq)[-q], function(r) phiqr[[q]][r] * (R[,r] - R[q,r] * R[q,])))))) / alpha } else { betaq <- lapply(seq_along(T1), function(q) sweep(rq[[q]], 2, Vq[,q], "/")) Rqr <- lapply(seq_along(T1), function(q) lapply(seq_along(T1), function(r) if (r == q) return(0) else return((Rq[[q]][-c(q,r),-c(q,r)] - tcrossprod(Rq[[q]][-c(q,r),r]))/tcrossprod(sqrt(1 - Rq[[q]][-c(q,r),r]^2))))) Tqr <- lapply(seq_along(T1), function(q) { lapply(seq_along(T1), function(r) if (r == q) return(0) else return((T1[-c(q,r)] - betaq[[r]][-c(r,q),q] * T1[q] - betaq[[q]][-c(r,q),r] * T1[r])/ (Sq[-c(r,q),q] * sqrt(1 - Rq[[q]][-c(r,q),r]^2))))}) T3 <- Reduce("+", lapply(seq_along(Tq), function(q) tcrossprod(R[q,], rowSums(sapply(seq_along(Tq)[-q], function(r) phiqr[[q]][r] * (R[,r] - R[q,r] * R[q,]) * mvtnorm::pmvnorm(upper = Tqr[[q]][[r]], sigma = Rqr[[q]][[r]], ...)))))) / alpha Ex2 <- R - T2 / alpha + 1/2 * (T3 + t(T3)) } } moments <- list(mean = 1/S * Ex + mu, variance = diag(1/S, nrow = length(T)) %*% (Ex2 - tcrossprod(Ex)) %*% diag(1/S, nrow = length(T))) if (!all(is.finite(unlist(moments))) || any(moments$mean > T) || any(eigen(moments$variance)$values < 0)) { moments <- list(mean = T - abs(diag(Sigma)), variance = Sigma) } moments } FLXMRlmmc <- function(formula = . ~ ., random, censored, varFix, eps = 10^-6, ...) { family <- "gaussian" censored <- if (length(censored) == 3) censored else formula(paste(".", paste(deparse(censored), collapse = ""))) if (missing(random)) { if (missing(varFix)) varFix <- FALSE else if ((length(varFix) > 1) || (is.na(as.logical(varFix)))) stop("varFix has to be a logical vector of length one") object <- new("FLXMRlmc", formula = formula, censored = censored, weighted = TRUE, family = family, name = "FLXMRlmc:gaussian") if (varFix) object <- new("FLXMRlmcfix", object) lmc.wfit <- function(x, y, w, C, censored) { W <- rep(w, sapply(x, nrow)) X <- do.call("rbind", x) AnyMissing <- which(sapply(C, sum) > 0) ybar <- lapply(seq_along(y), function(i) { Y <- y[[i]] Y[C[[i]] == 1] <- censored$mu[[i]] Y }) Y <- do.call("rbind", ybar) fit <- lm.wfit(X, Y, W, ...) fit$sigma2 <- if (length(AnyMissing) > 0) (sum(W * residuals(fit)^2) + sum(sapply(AnyMissing, function(i) w[i] * sum(diag(censored$Sigma[[i]])))))/sum(W) else sum(W * residuals(fit)^2)/sum(W) fit$df <- ncol(X) fit } object@defineComponent <- function(para) { predict <- function(x, ...) lapply(x, function(X) X %*% para$coef) logLik <- function(x, y, C, group, censored, ...) 
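## Group-wise log-likelihood: censored observations contribute through the
## multivariate normal c.d.f. of their conditional distribution given the
## uncensored ones, uncensored observations through the normal density;
## each group's contribution is divided by its size and replicated for all
## of its rows.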
{ AnyMissing <- which(sapply(C, sum) > 0) index <- lapply(C, function(x) x == 1) V <- lapply(x, function(X) diag(nrow(X)) * para$sigma2) mu <- predict(x, ...) SIGMA <- rep(list(matrix(nrow = 0, ncol = 0)), length(x)) if (length(AnyMissing) > 0) { SIGMA[AnyMissing] <- lapply(AnyMissing, function(i) { S <- V[[i]] SIG <- S[index[[i]], index[[i]]] if (sum(!index[[i]]) > 0) SIG <- SIG - S[index[[i]],!index[[i]]] %*% solve(S[!index[[i]],!index[[i]]]) %*% S[!index[[i]],index[[i]]] SIG }) } MU <- rep(list(vector("numeric", length = 0)), length(x)) if (length(AnyMissing) > 0) { MU[AnyMissing] <- lapply(AnyMissing, function(i) { S <- V[[i]] Mu <- mu[[i]][index[[i]]] if (sum(!index[[i]]) > 0) Mu <- Mu + S[index[[i]],!index[[i]]] %*% solve(S[!index[[i]],!index[[i]]]) %*% (y[[i]][!index[[i]]] - mu[[i]][!index[[i]]]) Mu }) } llh <- sapply(seq_along(x), function(i) { LLH <- 0 if (sum(index[[i]]) > 0) LLH <- log(mvtnorm::pmvnorm(upper = y[[i]][index[[i]]], mean = as.vector(MU[[i]]), sigma = SIGMA[[i]])) if (sum(!index[[i]]) > 0) LLH <- LLH + mvtnorm::dmvnorm(t(y[[i]][!index[[i]]]), mean = mu[[i]][!index[[i]]], sigma = V[[i]][!index[[i]], !index[[i]], drop = FALSE], log=TRUE) LLH/nrow(V[[i]]) }) as.vector(ungroupPriors(matrix(llh), group, !duplicated(group))) } new("FLXcomponent", parameters = list(coef = para$coef, sigma2 = para$sigma2, censored = para$censored), logLik = logLik, predict = predict, df = para$df) } object@fit <- if (varFix) { function(x, y, w, C, fit) { any_removed <- any(w <= eps) if (any_removed) { ok <- apply(w, 2, function(x) x > eps) W <- lapply(seq_len(ncol(ok)), function(i) w[ok[,i],i]) X <- lapply(seq_len(ncol(ok)), function(i) x[ok[,i],,drop = FALSE]) y <- lapply(seq_len(ncol(ok)), function(i) y[ok[,i]]) C <- lapply(seq_len(ncol(ok)), function(i) C[ok[,i]]) } else { X <- rep(list(x), ncol(w)) y <- rep(list(y), ncol(w)) C <- rep(list(C), ncol(w)) W <- lapply(seq_len(ncol(w)), function(i) w[,i]) } if ("coef" %in% names(fit[[1]])) fit <- lapply(seq_len(ncol(w)), function(k) update_latent(X[[k]], y[[k]], C[[k]], fit[[k]])) else { fit <- lapply(seq_len(ncol(w)), function(k) list(censored = list(mu = lapply(seq_along(y[[k]]), function(i) y[[k]][[i]][C[[k]][[i]] == 1]), Sigma = lapply(C[[k]], function(x) diag(1, nrow = sum(x)) * var(unlist(y[[k]])))))) } fit <- lapply(seq_len(ncol(w)), function(k) c(lmc.wfit(X[[k]], y[[k]], W[[k]], C[[k]], fit[[k]]$censored), censored = list(fit[[k]]$censored))) sigma2 <- sum(sapply(fit, function(x) x$sigma2) * colMeans(w)) for (k in seq_len(ncol(w))) fit[[k]]$sigma2 <- sigma2 lapply(fit, function(Z) object@defineComponent(list(coef = coef(Z), df = Z$df + 1/ncol(w), sigma2 = Z$sigma2, censored = Z$censored))) } } else { function(x, y, w, C, fit){ any_removed <- any(w <= eps) if (any_removed) { ok <- w > eps w <- w[ok] x <- x[ok,,drop = FALSE] y <- y[ok] C <- C[ok] } if ("coef" %in% names(fit)) { fit <- update_latent(x, y, C, fit) } else { fit$censored <- list(mu = lapply(seq_along(y), function(i) y[[i]][C[[i]] == 1]), Sigma = lapply(C, function(x) diag(1, nrow = sum(x)) * var(unlist(y)))) } fit <- c(lmc.wfit(x, y, w, C, fit$censored), censored = list(fit$censored)) object@defineComponent( list(coef = coef(fit), df = fit$df + 1, sigma2 = fit$sigma2, censored = fit$censored)) } } } else { if (missing(varFix)) varFix <- c(Random = FALSE, Residual = FALSE) else if (length(varFix) != 2 || is.null(names(varFix)) || any(is.na(pmatch(names(varFix), c("Random", "Residual"))))) stop("varFix has to be a named vector of length two") else names(varFix) <- c("Random", 
"Residual")[pmatch(names(varFix), c("Random", "Residual"))] random <- if (length(random) == 3) random else formula(paste(".", paste(deparse(random), collapse = ""))) object <- new("FLXMRlmmc", formula = formula, random = random, censored = censored, weighted = TRUE, family = family, name = "FLXMRlmmc:gaussian") if (any(varFix)) object <- new("FLXMRlmmcfix", object) add <- function(x) Reduce("+", x) lmmc.wfit <- function(x, y, w, z, C, which, random, censored) { effect <- lapply(seq_along(which), function(i) z[[which[i]]] %*% random$beta[[i]]) Effect <- do.call("rbind", effect) W <- rep(w, sapply(x, nrow)) X <- do.call("rbind", x) ybar <- lapply(seq_along(y), function(i) { Y <- y[[i]] Y[C[[i]] == 1] <- censored$mu[[i]] Y }) Y <- do.call("rbind", ybar) fit <- lm.wfit(X, Y - Effect, W, ...) wGamma <- add(lapply(seq_along(which), function(i) w[i] * random$Gamma[[i]])) bb <- add(lapply(seq_along(which), function(i) tcrossprod(random$beta[[i]]) * w[i])) fit$sigma2 <- list(Random = (wGamma + bb)/sum(w)) fit$df <- ncol(X) fit } object@defineComponent <- function(para) { predict <- function(x, ...) lapply(x, function(X) X %*% para$coef) logLik <- function(x, y, z, C, which, group, censored, ...) { AnyMissing <- which(sapply(C, sum) > 0) index <- lapply(C, function(x) x == 1) V <- lapply(z, function(Z) tcrossprod(tcrossprod(Z, para$sigma2$Random), Z) + diag(nrow(Z)) * para$sigma2$Residual) mu <- predict(x, ...) SIGMA <- rep(list(matrix(nrow = 0, ncol = 0)), length(x)) if (length(AnyMissing) > 0) { SIGMA[AnyMissing] <- lapply(AnyMissing, function(i) { S <- V[[which[i]]] SIG <- S[index[[i]], index[[i]]] if (sum(!index[[i]]) > 0) SIG <- SIG - S[index[[i]],!index[[i]]] %*% solve(S[!index[[i]],!index[[i]]]) %*% S[!index[[i]],index[[i]]] SIG }) } MU <- rep(list(vector("numeric", length = 0)), length(x)) if (length(AnyMissing) > 0) { MU[AnyMissing] <- lapply(AnyMissing, function(i) { S <- V[[which[i]]] Mu <- mu[[i]][index[[i]]] if (sum(!index[[i]]) > 0) Mu <- Mu + S[index[[i]],!index[[i]]] %*% solve(S[!index[[i]],!index[[i]]]) %*% (y[[i]][!index[[i]]] - mu[[i]][!index[[i]]]) Mu }) } llh <- sapply(seq_along(x), function(i) { LLH <- 0 if (sum(index[[i]]) > 0) LLH <- log(mvtnorm::pmvnorm(upper = y[[i]][index[[i]]], mean = as.vector(MU[[i]]), sigma = SIGMA[[i]])) if (sum(!index[[i]]) > 0) LLH <- LLH + mvtnorm::dmvnorm(t(y[[i]][!index[[i]]]), mean = mu[[i]][!index[[i]]], sigma = V[[which[i]]][!index[[i]], !index[[i]], drop = FALSE], log=TRUE) LLH/nrow(V[[which[i]]]) }) as.vector(ungroupPriors(matrix(llh), group, !duplicated(group))) } new("FLXcomponent", parameters = list(coef = para$coef, sigma2 = para$sigma2, censored = para$censored, random = para$random), logLik = logLik, predict = predict, df = para$df) } object@fit <- if (any(varFix)) { function(x, y, w, z, C, which, fit) { any_removed <- any(w <= eps) if (any_removed) { ok <- apply(w, 2, function(x) x > eps) W <- lapply(seq_len(ncol(ok)), function(i) w[ok[,i],i]) X <- lapply(seq_len(ncol(ok)), function(i) x[ok[,i],,drop = FALSE]) y <- lapply(seq_len(ncol(ok)), function(i) y[ok[,i]]) C <- lapply(seq_len(ncol(ok)), function(i) C[ok[,i]]) which <- lapply(seq_len(ncol(ok)), function(i) which[ok[,i]]) } else { X <- rep(list(x), ncol(w)) y <- rep(list(y), ncol(w)) C <- rep(list(C), ncol(w)) which <- rep(list(which), ncol(w)) W <- lapply(seq_len(ncol(w)), function(i) w[,i]) } if ("coef" %in% names(fit[[1]])) fit <- lapply(seq_len(ncol(w)), function(k) update_latent_random(X[[k]], y[[k]], z, C[[k]], which[[k]], fit[[k]])) else { fit <- lapply(seq_len(ncol(w)), 
function(k) list(random = list(beta = lapply(seq_along(W[[k]]), function(i) rep(0, ncol(z[[which[[k]][i]]]))), Gamma = lapply(seq_along(W[[k]]), function(i) diag(ncol(z[[which[[k]][i]]])))), censored = list(mu = lapply(seq_along(y[[k]]), function(i) y[[k]][[i]][C[[k]][[i]] == 1]), Sigma = lapply(C[[k]], function(x) diag(1, nrow = sum(x)) * var(unlist(y[[k]]))), psi = lapply(C[[k]], function(x) rep(0, sum(x)))))) } fit <- lapply(seq_len(ncol(w)), function(k) c(lmmc.wfit(X[[k]], y[[k]], W[[k]], z, C[[k]], which[[k]], fit[[k]]$random, fit[[k]]$censored), random = list(fit[[k]]$random), censored = list(fit[[k]]$censored))) if (varFix["Random"]) { prior_w <- apply(w, 2, weighted.mean, w = sapply(x, length)) Psi <- add(lapply(seq_len(ncol(w)), function(k) fit[[k]]$sigma2$Random * prior_w[k])) for (k in seq_len(ncol(w))) fit[[k]]$sigma2$Random <- Psi } for (k in seq_len(ncol(w))) fit[[k]]$sigma2$Residual <- update_Residual(fit[[k]], W[[k]], z, C[[k]], which[[k]], fit[[k]]$random, fit[[k]]$censored) if (varFix["Residual"]) { prior <- colMeans(w) Residual <- sum(sapply(fit[[k]]$sigma2$Residual, function(x) x) * prior) for (k in seq_len(ncol(w))) fit[[k]]$sigma2$Residual <- Residual } n <- nrow(fit[[1]]$sigma2$Random) lapply(fit, function(Z) object@defineComponent( list(coef = coef(Z), df = Z$df + n*(n+1)/(2*ifelse(varFix["Random"], ncol(w), 1)) + ifelse(varFix["Residual"], 1/ncol(w), 1), sigma2 = Z$sigma2, random = Z$random, censored = Z$censored))) } } else { function(x, y, w, z, C, which, fit){ any_removed <- any(w <= eps) if (any_removed) { ok <- w > eps w <- w[ok] x <- x[ok,,drop = FALSE] y <- y[ok] C <- C[ok] which <- which[ok] } if ("coef" %in% names(fit)) fit <- update_latent_random(x, y, z, C, which, fit) else { fit <- list(random = list(beta = lapply(which, function(i) rep(0, ncol(z[[i]]))), Gamma = lapply(which, function(i) diag(ncol(z[[i]])))), censored = list(mu = lapply(seq_along(y), function(i) y[[i]][C[[i]] == 1]), Sigma = lapply(C, function(x) diag(1, nrow = sum(x)) * var(unlist(y))), psi = lapply(C, function(x) rep(0, sum(x))))) } fit <- c(lmmc.wfit(x, y, w, z, C, which, fit$random, fit$censored), random = list(fit$random), censored = list(fit$censored)) fit$sigma2$Residual <- update_Residual(fit, w, z, C, which, fit$random, fit$censored) n <- nrow(fit$sigma2$Random) object@defineComponent( list(coef = coef(fit), df = fit$df + n*(n+1)/2 + 1, sigma2 = fit$sigma2, random = fit$random, censored = fit$censored)) } } } object } setMethod("FLXmstep", signature(model = "FLXMRlmc"), function(model, weights, components) { weights <- weights[!duplicated(model@group),,drop=FALSE] return(sapply(1:ncol(weights), function(k) model@fit(model@x, model@y, weights[,k], model@C, components[[k]]@parameters))) }) setMethod("FLXmstep", signature(model = "FLXMRlmcfix"), function(model, weights, components) { weights <- weights[!duplicated(model@group),,drop=FALSE] return(model@fit(model@x, model@y, weights, model@C, lapply(components, function(x) x@parameters))) }) setMethod("FLXmstep", signature(model = "FLXMRlmmc"), function(model, weights, components) { weights <- weights[!duplicated(model@group),,drop=FALSE] return(sapply(1:ncol(weights), function(k) model@fit(model@x, model@y, weights[,k], model@z, model@C, model@which, components[[k]]@parameters))) }) setMethod("FLXmstep", signature(model = "FLXMRlmmcfix"), function(model, weights, components) { weights <- weights[!duplicated(model@group),,drop=FALSE] return(model@fit(model@x, model@y, weights, model@z, model@C, model@which, lapply(components, 
function(x) x@parameters))) }) setMethod("FLXgetModelmatrix", signature(model="FLXMRlmc"), function(model, data, formula, lhs=TRUE, ...) { formula_nogrouping <- RemoveGrouping(formula) if (formula_nogrouping == formula) stop("please specify a grouping variable") model <- callNextMethod(model, data, formula, lhs) model@fullformula <- update(model@fullformula, paste(".~. |", .FLXgetGroupingVar(formula))) mt2 <- terms(model@censored, data=data) mf2 <- model.frame(delete.response(mt2), data=data, na.action = NULL) model@C <- model.matrix(attr(mf2, "terms"), data) model@group <- grouping <- .FLXgetGrouping(formula, data)$group model@x <- matrix(lapply(unique(grouping), function(g) model@x[grouping == g, , drop = FALSE]), ncol = 1) if (lhs) model@y <- matrix(lapply(unique(grouping), function(g) model@y[grouping == g, , drop = FALSE]), ncol = 1) model@C <- matrix(lapply(unique(grouping), function(g) model@C[grouping == g, , drop = FALSE]), ncol = 1) model }) setMethod("FLXgetModelmatrix", signature(model="FLXMRlmmc"), function(model, data, formula, lhs=TRUE, ...) { model <- callNextMethod(model, data, formula, lhs) mt1 <- terms(model@random, data=data) mf1 <- model.frame(delete.response(mt1), data=data, na.action = NULL) model@z <- model.matrix(attr(mf1, "terms"), data) rownames(model@z) <- NULL grouping <- .FLXgetGrouping(formula, data)$group z <- matrix(lapply(unique(grouping), function(g) model@z[grouping == g, , drop = FALSE]), ncol = 1) model@z <- unique(z) model@which <- match(z, model@z) model }) setMethod("FLXgetObs", "FLXMRlmc", function(model) sum(sapply(model@x, nrow))) setMethod("FLXdeterminePostunscaled", signature(model = "FLXMRlmc"), function(model, components, ...) { sapply(components, function(x) x@logLik(model@x, model@y, model@C, model@group, x@parameters$censored)) }) setMethod("FLXdeterminePostunscaled", signature(model = "FLXMRlmmc"), function(model, components, ...) { sapply(components, function(x) x@logLik(model@x, model@y, model@z, model@C, model@which, model@group, x@parameters$censored)) }) setMethod("predict", signature(object="FLXMRlmc"), function(object, newdata, components, ...) { object <- FLXgetModelmatrix(object, newdata, formula = object@fullformula, lhs = FALSE) lapply(components, function(comp) unlist(comp@predict(object@x, ...))) }) setMethod("rFLXM", signature(model = "FLXMRlmc", components = "FLXcomponent"), function(model, components, ...) { stop("This model driver is not implemented yet.") }) flexmix/R/lmer.R0000644000176200001440000001506414404637304013224 0ustar liggesuserssigmaMethod <- getExportedValue(if(getRversion() >= "3.3.0") "stats" else "lme4", "sigma") setClass("FLXMRlmer", representation(random = "formula", lmod = "list", control = "ANY", preproc.z = "function"), prototype(preproc.z = function(x, ...) x), contains = "FLXMRglm") defineComponent_lmer <- function(para) { predict <- function(x, ...) x%*%para$coef logLik <- function(x, y, lmod, ...) 
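  ## Informal note: the log-likelihood below integrates the random effects out.
  ## For each level of the grouping factor the marginal covariance is
  ## V = Z Psi Z' + sigma^2 I (Psi = para$sigma2$Random, sigma^2 =
  ## para$sigma2$Residual), and the joint log-density of the group is divided
  ## by the group size so that every observation carries an equal share.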
{ z <- as.matrix(lmod$reTrms$Zt) grouping <- lmod$reTrms$flist[[1]] llh <- vector(length=nrow(x)) for (i in seq_len(nlevels(grouping))) { index1 <- which(grouping == levels(grouping)[i]) index2 <- rownames(z) %in% levels(grouping)[i] V <- crossprod(z[index2,index1,drop=FALSE], para$sigma2$Random) %*% z[index2, index1, drop=FALSE] + diag(length(index1)) * para$sigma2$Residual llh[index1] <- mvtnorm::dmvnorm(y[index1,], mean=predict(x[index1,,drop=FALSE], ...), sigma = V, log=TRUE)/length(index1) } llh } new("FLXcomponent", parameters=list(coef=para$coef, sigma2=para$sigma2), logLik=logLik, predict=predict, df=para$df) } FLXMRlmer <- function(formula = . ~ ., random, weighted = TRUE, control = list(), eps = .Machine$double.eps) { random <- if (length(random) == 3) random else formula(paste(".", paste(deparse(random), collapse = ""))) missCtrl <- missing(control) if (missCtrl || !inherits(control, "lmerControl")) { if (!is.list(control)) stop("'control' is not a list; use lmerControl()") control <- do.call(lme4::lmerControl, control) } object <- new("FLXMRlmer", formula = formula, random = random, control = control, family = "gaussian", weighted = weighted, name = "FLXMRlmer:gaussian") if (weighted) object@preproc.z <- function(lmod) { if (length(unique(names(lmod[["reTrms"]][["flist"]]))) != 1) stop("only a single variable for random effects is allowed") for (i in seq_along(lmod[["reTrms"]][["flist"]])) { DIFF <- t(sapply(levels(lmod[["reTrms"]]$flist[[i]]), function(id) { index1 <- which(lmod[["reTrms"]]$flist[[i]] == id) index2 <- rownames(lmod[["reTrms"]]$Zt) == id sort(apply(lmod[["reTrms"]]$Zt[index2, index1, drop=FALSE], 1, paste, collapse = "")) })) if (length(unique(table(lmod[["reTrms"]][["flist"]][[i]]))) != 1 || nrow(unique(DIFF)) != 1) stop("FLXMRlmer does only work correctly if the covariates of the random effects are the same for all observations") } lmod } lmer.wfit <- function(x, y, w, lmod) { zero.weights <- any(w < eps) if (zero.weights) { ok <- w >= eps w <- w[ok] lmod[["fr"]] <- lmod[["fr"]][ok, , drop = FALSE] lmod[["X"]] <- lmod[["X"]][ok, , drop = FALSE] lmod[["reTrms"]][["Zt"]] <- lmod[["reTrms"]][["Zt"]][, ok, drop = FALSE] for (i in seq_along(lmod[["reTrms"]][["flist"]])) { lmod[["reTrms"]][["flist"]][[i]] <- lmod[["reTrms"]][["flist"]][[i]][ok] } } wts <- sqrt(w) lmod$X <- lmod$X * wts lmod$fr[[1]] <- lmod$fr[[1]] * wts devfun <- do.call(lme4::mkLmerDevfun, c(lmod, list(start = NULL, verbose = FALSE, control = control))) opt <- lme4::optimizeLmer(devfun, optimizer = control$optimizer, restart_edge = control$restart_edge, control = control$optCtrl, verbose = FALSE, start = NULL) mer <- lme4::mkMerMod(environment(devfun), opt, lmod$reTrms, fr = lmod$fr) sigma_res <- sigmaMethod(mer) / sqrt(mean(w)) vc <- lme4::VarCorr(mer) n <- c(0, cumsum(sapply(vc, ncol))) Random <- matrix(0, max(n), max(n)) for (i in seq_along(vc)) { index <- (n[i]+1):n[i+1] Random[index, index] <- vc[[i]] } Random <- Random / mean(w) list(coefficients = lme4::fixef(mer), sigma2 = list(Random = Random, Residual = sigma_res^2), df = length(lme4::fixef(mer)) + 1 + length(mer@theta)) } object@defineComponent <- defineComponent_lmer object@fit <- function(x, y, w, lmod){ fit <- lmer.wfit(x, y, w, lmod) object@defineComponent( list(coef = coef(fit), df = fit$df, sigma2 = fit$sigma2)) } object } setMethod("FLXgetModelmatrix", signature(model="FLXMRlmer"), function(model, data, formula, lhs=TRUE, contrasts = NULL, ...) 
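## Informal summary of the method below: the grouping factor has to be given
## via `|` in the flexmix formula; it is appended to the random-effects
## formula and lme4::lFormula() is then used to construct the combined fixed
## and random-effects structures (stored in @lmod), which preproc.z() checks
## for identical random-effects covariates across all groups.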
{ formula_nogrouping <- RemoveGrouping(formula) if (identical(paste(deparse(formula_nogrouping), collapse = ""), paste(deparse(formula), collapse = ""))) stop("please specify a grouping variable") model <- callNextMethod(model, data, formula, lhs) random_formula <- update(model@random, paste(".~. |", .FLXgetGroupingVar(formula))) fullformula <- model@fullformula if (!lhs) fullformula <- fullformula[c(1,3)] fullformula <- update(fullformula, paste(ifelse(lhs, ".", ""), "~. + ", paste(deparse(random_formula[[3]]), collapse = ""))) model@fullformula <- update(model@fullformula, paste(ifelse(lhs, ".", ""), "~. |", .FLXgetGroupingVar(formula))) model@lmod <- lme4::lFormula(fullformula, data, REML = FALSE, control = model@control) model@lmod <- model@preproc.z(model@lmod) model }) setMethod("FLXmstep", signature(model = "FLXMRlmer"), function(model, weights, ...) { apply(weights, 2, function(w) model@fit(model@x, model@y, w, model@lmod)) }) setMethod("FLXdeterminePostunscaled", signature(model = "FLXMRlmer"), function(model, components, ...) { sapply(components, function(x) x@logLik(model@x, model@y, model@lmod)) }) setMethod("rFLXM", signature(model = "FLXMRlmer", components="FLXcomponent"), function(model, components, ...) { sigma2 <- components@parameters$sigma2 z <- as.matrix(model@lmod$reTrms$Zt) grouping <- model@lmod$reTrms$flist[[1]] y <- matrix(0, nrow=nrow(model@x), ncol = 1) for (i in seq_len(nlevels(grouping))) { index1 <- which(grouping == levels(grouping)[i]) index2 <- rownames(z) %in% levels(grouping)[i] V <- crossprod(z[index2,index1,drop=FALSE], sigma2$Random) %*% z[index2, index1, drop=FALSE] + diag(length(index1)) * sigma2$Residual y[index1, 1] <- mvtnorm::rmvnorm(1, mean=components@predict(model@x[index1,,drop=FALSE], ...), sigma = V) } y }) flexmix/R/concomitant.R0000644000176200001440000000647414404637304014610 0ustar liggesusers# # Copyright (C) 2004-2016 Friedrich Leisch and Bettina Gruen # $Id: concomitant.R 5079 2016-01-31 12:21:12Z gruen $ # FLXPmultinom <- function(formula=~1) { z <- new("FLXPmultinom", name="FLXPmultinom", formula=formula) multinom.fit <- function(x, y, w, ...) { r <- ncol(x) p <- ncol(y) if (p < 2) stop("Multinom requires at least two components.") mask <- c(rep(0, r + 1), rep(c(0, rep(1, r)), p - 1)) nnet.default(x, y, w, mask = mask, size = 0, skip = TRUE, softmax = TRUE, censored = FALSE, rang = 0, trace=FALSE,...) } z@fit <- function(x, y, w, ...) multinom.fit(x, y, w, ...)$fitted.values z@refit <- function(x, y, w, ...) { if (missing(w) || is.null(w)) w <- rep(1, nrow(y)) rownames(y) <- rownames(x) <- NULL fit <- multinom(y ~ 0 + x, weights = w, data = list(y = y, x = x), Hess = TRUE, trace = FALSE) fit$coefnames <- colnames(x) fit$vcoefnames <- fit$coefnames[seq_along(fit$coefnames)] dimnames(fit$Hessian) <- lapply(dim(fit$Hessian) / ncol(x), function(i) paste(rep(seq_len(i) + 1, each = ncol(x)), colnames(x), sep = ":")) fit } z } FLXPconstant <- function() { new("FLXP", name="FLXPconstant", formula = ~1, fit = function(x, y, w, ...){ if (missing(w) || is.null(w)) return(matrix(colMeans(y), ncol=ncol(y), dimnames = list("prior", seq_len(ncol(y))))) else return(matrix(colMeans(w*y)/mean(w), ncol=ncol(y), dimnames = list("prior", seq_len(ncol(y))))) }) } ###********************************************************** setMethod("FLXgetModelmatrix", signature(model="FLXP"), function(model, data, groups, lhs, ...) 
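## Informal note on the concomitant model matrix built below: the covariates
## entering the concomitant (prior) model must be constant within each group
## (verified via checkGroup()), and only one row per group is kept; without
## covariates an intercept-only column is used.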
{ mt <- terms(model@formula, data=data) mf <- model.frame(delete.response(mt), data=data, na.action = NULL) X <- model.matrix(mt, data=mf) if (nrow(X)){ if (!checkGroup(X, groups$group)) stop("model variables have to be constant for grouping variable") model@x <- X[groups$groupfirst,,drop=FALSE] } else{ model@x <- matrix(1, nrow=sum(groups$groupfirst)) } model }) checkGroup <- function(x, group) { check <- TRUE for(g in levels(group)){ gok <- group==g if(any(gok)){ check <- all(c(check, apply(x[gok,,drop=FALSE], 2, function(z) length(unique(z)) == 1))) } } check } ###********************************************************** setMethod("refit_mstep", signature(object="FLXP", newdata="missing"), function(object, newdata, posterior, group, ...) NULL) setMethod("refit_mstep", signature(object="FLXPmultinom", newdata="missing"), function(object, newdata, posterior, group, ...) { groupfirst <- if (length(group)) groupFirst(group) else rep(TRUE, nrow(posterior)) object@refit(object@x, posterior[groupfirst,,drop=FALSE], ...) }) ###********************************************************** setMethod("FLXfillConcomitant", signature(concomitant="FLXP"), function(concomitant, posterior, weights) { concomitant@coef <- concomitant@refit(concomitant@x, posterior, weights) concomitant }) setMethod("FLXfillConcomitant", signature(concomitant="FLXPmultinom"), function(concomitant, posterior, weights) { concomitant@coef <- cbind("1" = 0, t(coef(concomitant@refit(concomitant@x, posterior, weights)))) concomitant }) ###********************************************************** flexmix/R/infocrit.R0000644000176200001440000000277614404637304014110 0ustar liggesusers# # Copyright (C) 2004-2016 Friedrich Leisch and Bettina Gruen # $Id: infocrit.R 5079 2016-01-31 12:21:12Z gruen $ # setMethod("nobs", signature(object="flexmix"), function(object, ...) { if (is.null(object@weights)) nrow(object@posterior$scaled) else sum(object@weights) }) setMethod("logLik", signature(object="flexmix"), function(object, newdata, ...){ if (missing(newdata)) { z <- object@logLik attr(z, "df") <- object@df attr(z, "nobs") <- nobs(object) class(z) <- "logLik" } else { z <- sum(log(rowSums(posterior(object, newdata = newdata, unscaled = TRUE)))) attr(z, "df") <- object@df attr(z, "nobs") <- nrow(newdata) class(z) <- "logLik" } z }) setMethod("ICL", signature(object="flexmix"), function(object, ...){ -2 * clogLik(object) + object@df * log(nobs(object)) }) setMethod("clogLik", signature(object="flexmix"), function(object, ...){ first <- if (length(object@group)) groupFirst(object@group) else TRUE post <- object@posterior$unscaled[first,,drop=FALSE] n <- nrow(post) sum(log(post[seq_len(n) + (clusters(object)[first] - 1)*n])) }) setMethod("EIC", signature(object="flexmix"), function(object, ...) { first <- if (length(object@group)) groupFirst(object@group) else TRUE post <- object@posterior$scaled[first,,drop=FALSE] n <- nrow(post) lpost <- log(post) if (any(is.infinite(lpost))) lpost[is.infinite(lpost)] <- -10^3 1 + sum(post * lpost)/(n * log(object@k)) }) flexmix/R/mgcv.R0000644000176200001440000001373114404637304013220 0ustar liggesuserssetOldClass("gam.prefit") setClassUnion("listOrgam.prefit", c("list", "gam.prefit")) setClass("FLXMRmgcv", representation(G = "listOrgam.prefit", control = "list"), contains="FLXMRglm") FLXMRmgcv <- function(formula = .~., family = c("gaussian", "binomial", "poisson"), offset = NULL, control = NULL, optimizer = c("outer", "newton"), in.out = NULL, eps = .Machine$double.eps, ...) 
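## Informal overview of the driver constructed below: each M-step fits a
## weighted additive model via mgcv::gam() with method = "ML", dropping
## observations whose posterior weight falls below `eps` and adjusting the
## paraPen terms accordingly; the scale parameter is fixed at 1 for the
## binomial and Poisson families and estimated for the Gaussian family.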
{ if (is.null(control)) control <- mgcv::gam.control() family <- match.arg(family) am <- if (family == "gaussian" && get(family)()$link == "identity") TRUE else FALSE z <- new("FLXMRmgcv", FLXMRglm(formula = formula, family = family, offset = offset), name=paste("FLXMRmgcv", family, sep=":"), control = control) scale <- if (family %in% c("binomial", "poisson")) 1 else -1 gam_fit <- function(G, w) { G$family <- get(family)() G$am <- am G$w <- w G$conv.tol <- control$mgcv.tol G$max.half <- control$mgcv.half zero_weights <- any(w < eps) if (zero_weights) { ok <- w >= eps w <- w[ok] G$X <- G$X[ok,,drop=FALSE] if (is.matrix(G$y)) G$y <- G$y[ok,,drop=FALSE] else G$y <- G$y[ok] G$mf <- G$mf[ok,,drop=FALSE] G$w <- G$w[ok] G$offset <- G$offset[ok] if (G$n.paraPen > 0) { OMIT <- which(colSums(abs(G$X)) == 0) if (length(OMIT) > 0) { Ncol <- ncol(G$X) Assign <- unique(G$assign[OMIT]) G$assign <- G$assign[-OMIT] G$nsdf <- G$nsdf - length(OMIT) G$X <- G$X[,-OMIT,drop=FALSE] G$mf$Grouping <- G$mf$Grouping[,-which(colSums(abs(G$mf$Grouping))==0),drop=FALSE] if (length(G$off) > 1) G$off[2] <- G$off[2] - length(OMIT) for (i in seq_along(G$smooth)) { G$smooth[[i]]$first.para <- G$smooth[[i]]$first.para - length(OMIT) G$smooth[[i]]$last.para <- G$smooth[[i]]$last.para - length(OMIT) } G$S[[1]] <- G$S[[1]][-c(OMIT-sum(G$assign != Assign)), -c(OMIT-sum(G$assign != Assign))] } } } z <- mgcv::gam(G = G, method = "ML", optimizer = optimizer, control = control, scale = scale, in.out = in.out, ...) if (zero_weights) { residuals <- z$residuals z$residuals <- rep(0, length(ok)) z$residuals[ok] <- residuals if (G$n.paraPen > 0 && length(OMIT) > 0) { coefficients <- z$coefficients z$coefficients <- rep(0, Ncol) z$coefficients[-OMIT] <- coefficients } } z } if (family=="gaussian"){ z@fit <- function(x, y, w, G){ gam.fit <- gam_fit(G, w) z@defineComponent(list(coef = gam.fit$coefficients, df = sum(gam.fit$edf)+1, sigma = sqrt(sum(w * gam.fit$residuals^2 / mean(w))/ (nrow(x)-sum(gam.fit$edf))))) } } else if(family %in% c("binomial", "poisson")){ z@fit <- function(x, y, w, G){ gam.fit <- gam_fit(G, w) z@defineComponent( list(coef = gam.fit$coefficients, df = sum(gam.fit$edf))) } } else stop(paste("Unknown family", family)) z } setMethod("FLXmstep", signature(model = "FLXMRmgcv"), function(model, weights, ...) { apply(weights, 2, function(w) model@fit(model@x, model@y, w, model@G)) }) setMethod("FLXgetModelmatrix", signature(model="FLXMRmgcv"), function(model, data, formula, lhs=TRUE, paraPen = list(), ...) 
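## Informal note: the method below relies on mgcv::interpret.gam() to obtain
## the `fake.formula` (smooth terms expanded to their covariates) when
## building the model frame, and stores the unfitted gam structure returned
## by mgcv::gam(..., fit = FALSE) in @G for the later weighted refits.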
{ formula <- RemoveGrouping(formula) if (length(grep("\\|", deparse(model@formula)))) stop("no grouping variable allowed in the model") if(is.null(model@formula)) model@formula <- formula model@fullformula <- update(terms(formula, data=data), model@formula) gp <- mgcv::interpret.gam(model@fullformula) if (lhs) { model@terms <- terms(gp$fake.formula, data = data) mf <- model.frame(model@terms, data=data, na.action = NULL, drop.unused.levels = TRUE) response <- as.matrix(model.response(mf, "numeric")) model@y <- model@preproc.y(response) } else { model@terms <- terms(gp$fake.formula, data = data) mf <- model.frame(delete.response(model@terms), data=data, na.action = NULL, drop.unused.levels = TRUE) } model@G <- mgcv::gam(model@fullformula, data = data, fit = FALSE) model@x <- model@G$X model@contrasts <- attr(model@x, "contrasts") model@x <- model@preproc.x(model@x) model@xlevels <- .getXlevels(delete.response(model@terms), mf) model }) setMethod("predict", signature(object="FLXMRmgcv"), function(object, newdata, components, ...) { predict_gam <- function (object, newdata, ...) { nn <- names(newdata) mn <- colnames(object$model) for (i in 1:length(newdata)) if (nn[i] %in% mn && is.factor(object$model[, nn[i]])) { newdata[[i]] <- factor(newdata[[i]], levels = levels(object$model[, nn[i]])) } if (length(newdata) == 1) newdata[[2]] <- newdata[[1]] n.smooth <- length(object$smooth) Terms <- delete.response(object$pterms) X <- matrix(0, nrow(newdata), length(object$coefficients)) Xoff <- matrix(0, nrow(newdata), n.smooth) mf <- model.frame(Terms, newdata, xlev = object$xlevels) if (!is.null(cl <- attr(object$pterms, "dataClasses"))) .checkMFClasses(cl, mf) Xp <- model.matrix(Terms, mf, contrasts = object$contrasts) if (object$nsdf) X[, 1:object$nsdf] <- Xp if (n.smooth) for (k in 1:n.smooth) { Xfrag <- mgcv::PredictMat(object$smooth[[k]], newdata) X[, object$smooth[[k]]$first.para:object$smooth[[k]]$last.para] <- Xfrag Xfrag.off <- attr(Xfrag, "offset") if (!is.null(Xfrag.off)) { Xoff[, k] <- Xfrag.off } } X } object@G$model <- object@G$mf z <- list() for(k in seq_along(components)) { object@G$coefficients <- components[[k]]@parameters$coef X <- predict_gam(object@G, newdata) z[[k]] <- components[[k]]@predict(X, ...) } z }) flexmix/R/rFLXmodel.R0000644000176200001440000000566714404637304014131 0ustar liggesuserssetMethod("rFLXM", signature(model="FLXM", components="list"), function(model, components, class, ...) { y <- NULL for (l in seq_along(components)) { yl <- as.matrix(rFLXM(model, components[[l]], ...)) if (is.null(y)) y <- matrix(NA, nrow = length(class), ncol = ncol(yl)) y[class == l,] <- yl[class==l,] } y }) setMethod("rFLXM", signature(model = "FLXMRglm", components="FLXcomponent"), function(model, components, ...) { family <- model@family n <- nrow(model@x) if(family == "gaussian") { sigma <- components@parameters$sigma y <- rnorm(n, mean = components@predict(model@x, ...), sd = sigma) } else if (family == "binomial") { dotarg = list(...) 
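      ## If a `size` argument is supplied via `...` it gives the number of
      ## binomial trials; otherwise the trials are recovered from the stored
      ## two-column (successes, failures) response, and the simulated values
      ## are returned in the same two-column format.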
if ("size" %in% names(dotarg)) size <- dotarg$size else { if (nrow(model@y)!=n) stop("no y values - specify a size argument") size <- rowSums(model@y) } parms <- components@parameters y <- rbinom(n, prob = components@predict(model@x, ...), size=size) y <- cbind(y, size - y) } else if (family == "poisson") { y <- rpois(n, lambda = components@predict(model@x, ...)) } else if (family == "Gamma") { shape <- components@parameters$shape y <- rgamma(n, shape = shape, scale = components@predict(model@x, ...)/shape) } else stop("family not supported") y }) setMethod("rFLXM", signature(model = "FLXMRglmfix", components="list"), function(model, components, class, ...) { k <- sum(model@nestedformula@k) n <- nrow(model@x)/k y <- matrix(NA, nrow = length(class), ncol = ncol(model@y)) model.sub <- as(model, "FLXMRglm") for (l in seq_len(k)) { rok <- (l-1)*n + seq_len(n) model.sub@x <- model@x[rok, as.logical(model@design[l,]), drop=FALSE] model.sub@y <- model@y[rok,,drop=FALSE] yl <- as.matrix(rFLXM(model.sub, components[[l]], ...)) y[class==l,] <- yl[class==l,] } y }) rmvbinom <- function(n, size, prob) sapply(prob, function(p) rbinom(n, size, p)) rmvbinary <- function(n, center) sapply(center, function(p) rbinom(n, 1, p)) setMethod("rFLXM", signature(model = "FLXMC", components = "FLXcomponent"), function(model, components, class, ...) { rmvnorm <- function(n, center, cov) mvtnorm::rmvnorm(n = n, mean = center, sigma = cov) dots <- list(...) FUN <- paste("r", model@dist, sep = "") args <- c(n = nrow(model@x), dots, components@parameters) return(do.call(FUN, args)) }) flexmix/R/relabel.R0000644000176200001440000000555414404637304013676 0ustar liggesusers# # Copyright (C) 2004-2016 Friedrich Leisch and Bettina Gruen # $Id$ # setGeneric("dorelabel", function(object, perm, ...) standardGeneric("dorelabel")) setMethod("dorelabel", signature(object="flexmix", perm="vector"), function(object, perm, ...) { object <- callNextMethod(object, perm) object@posterior$scaled <- object@posterior$scaled[,perm,drop=FALSE] object@posterior$unscaled <- object@posterior$unscaled[,perm,drop=FALSE] object@cluster <- order(perm)[object@cluster] object@size <- object@size[perm] names(object@size) <- seq_along(perm) object }) setMethod("dorelabel", signature(object="FLXdist", perm="vector"), function(object, perm, ...) { if (length(perm) != object@k) stop("length of order argument does not match number of components") if (any(sort(perm) != seq_len(object@k))) stop("order argument not specified correctly") object@prior <- object@prior[perm] object@components <- object@components[perm] names(object@components) <- sapply(seq_along(object@components), function(k) gsub("[0-9]+", k, names(object@components)[k])) object@concomitant <- dorelabel(object@concomitant, perm, ...) object }) setMethod("dorelabel", signature(object="FLXP", perm="vector"), function(object, perm, ...) { object@coef <- object@coef[,perm,drop=FALSE] colnames(object@coef) <- sapply(seq_len(ncol(object@coef)), function(k) gsub("[0-9]+", k, colnames(object@coef)[k])) object }) setMethod("dorelabel", signature(object="FLXPmultinom", perm="vector"), function(object, perm, ...) { object@coef <- object@coef[,perm,drop=FALSE] object@coef <- sweep(object@coef, 1, object@coef[,1], "-") colnames(object@coef) <- sapply(seq_len(ncol(object@coef)), function(k) gsub("[0-9]+", k, colnames(object@coef)[k])) object }) setMethod("relabel", signature(object="FLXdist", by="character"), function(object, by, which=NULL, ...) 
{ by <- match.arg(by, c("prior", "model", "concomitant")) if(by=="prior"){ perm <- order(prior(object), ...) } else if(by %in% c("model", "concomitant")) { pars <- parameters(object, which = by) index <- grep(which, rownames(pars)) if (length(index) != 1) stop("no suitable ordering variable given in 'which'") perm <- order(pars[index,], ...) } object <- dorelabel(object, perm=perm) object }) setMethod("relabel", signature(object="FLXdist", by="missing"), function(object, by, ...) { object <- relabel(object, by="prior", ...) object }) setMethod("relabel", signature(object="FLXdist", by="integer"), function(object, by, ...) { if(!all(sort(by) == seq_len(object@k))) stop("if integer, ", sQuote("by"), " must be a permutation of the numbers 1 to ", object@k) object <- dorelabel(object, by) object }) flexmix/R/plot.R0000644000176200001440000000656514404637304013251 0ustar liggesusers# # Copyright (C) 2004-2016 Friedrich Leisch and Bettina Gruen # $Id: plot.R 5079 2016-01-31 12:21:12Z gruen $ # ###********************************************************** plotEll <- function(object, data, which=1:2, model = 1, project=NULL, points=TRUE, eqscale=TRUE, col=NULL, number = TRUE, cex=1.5, numcol="black", pch=NULL, ...) { if(is.null(col)) col <- rep(FullColors, length.out = object@k) if (!is.list(data)) { response <- data data <- list() data[[deparse(object@model[[model]]@fullformula[[2]])]] <- response } else { mf <- model.frame(object@model[[model]]@fullformula, data=data, na.action = NULL) response <- as.matrix(model.response(mf)) response <- object@model[[model]]@preproc.y(response) } clustering <- clusters(object, newdata = data) if(!is.null(project)) response <- predict(project, response) type=ifelse(points, "p", "n") if(is.null(pch)){ pch <- (clustering %% 10) pch[pch==0] <- 10 } else if(length(pch)!=nrow(response)){ pch <- rep(pch, length.out = object@k) pch <- pch[clustering] } if(eqscale) plot(response[,which], asp = 1, col=col[clustering], pch=pch, type=type, ...) else plot(response[,which], col=col[clustering], pch=pch, type=type, ...) 
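  ## For each component, 50% (solid) and 95% (dashed) confidence ellipses are
  ## drawn from the component centre and covariance; the centres and the
  ## component numbers are plotted again afterwards so that they stay on top.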
for(k in seq_along(object@components)){ p = parameters(object, k, model, simplify=FALSE) if(!is.null(project)){ p <- projCentCov(project, p) } lines(ellipse::ellipse(p$cov[which,which], centre=p$center[which], level=0.5), col=col[k], lwd=2) lines(ellipse::ellipse(p$cov[which,which], centre=p$center[which], level=0.95), col=col[k], lty=2) } ## und nochmal fuer die zentren und nummern (damit die immer oben sind) for(k in seq_along(object@components)){ p = parameters(object, k, model, simplify=FALSE) if(!is.null(project)){ p <- projCentCov(project, p) } if(number){ rad <- ceiling(log10(object@k)) + 1.5 points(p$center[which[1]], p$center[which[2]], col=col[k], pch=21, cex=rad*cex, lwd=cex, bg="white") text(p$center[which[1]], p$center[which[2]], k, cex=cex, col=numcol) } else{ points(p$center[which[1]], p$center[which[2]], pch=16, cex=cex, col=col[k]) } } } projCentCov <- function(object, p) UseMethod("projCentCov") projCentCov.default <- function(object, p) stop(paste("Cannot handle projection objects of class", sQuote(class(object)))) projCentCov.prcomp <- function(object, p) { cent <- matrix(p$center, ncol=length(p$center)) cent <- scale(cent, object$center, object$scale) %*% object$rotation cov <- p$cov if(length(object$scale)>1) cov <- cov/outer(object$scale, object$scale, "*") cov <- t(object$rotation) %*% cov %*% object$rotation list(center=cent, cov=cov) } flexmix/R/rflexmix.R0000644000176200001440000000742114404637304014121 0ustar liggesuserssetMethod("rflexmix", signature(object = "FLXdist", newdata="numeric"), function(object, newdata, ...) { newdata <- data.frame(matrix(nrow = as.integer(newdata), ncol = 0)) rflexmix(object, newdata = newdata, ...) }) setMethod("rflexmix", signature(object = "FLXdist", newdata="listOrdata.frame"), function(object, newdata, ...) { groups <- .FLXgetGrouping(object@formula, newdata) object@model <- lapply(object@model, FLXgetModelmatrix, newdata, object@formula, lhs=FALSE) group <- if (length(groups$group)) groups$group else factor(seq_len(FLXgetObs(object@model[[1]]))) object@concomitant <- FLXgetModelmatrix(object@concomitant, data = newdata, groups = list(group=group, groupfirst = groupFirst(group))) rflexmix(new("flexmix", object, group=group, weights = NULL), ...) }) setMethod("rflexmix", signature(object = "flexmix", newdata="missing"), function(object, newdata, ...) { N <- length(object@model) object <- undo_weights(object) group <- group(object) prior <- determinePrior(object@prior, object@concomitant, group) class <- apply(prior, 1, function(x) rmultinom(1, size = 1, prob = x)) class <- if (is.matrix(class)) t(class) else as.matrix(class) class <- max.col(class)[group] y <- vector("list", N) for (i in seq_len(N)) { comp <- lapply(object@components, function(x) x[[i]]) yi <- rFLXM(object@model[[i]], comp, class, group, ...) 
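      ## The column names of the simulated responses are derived from the left
      ## hand side of the model formula (expanded with model and column index
      ## where necessary); models without a left hand side get names of the
      ## form y.<model>.<column>.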
form <- object@model[[i]]@fullformula names <- if(length(form) == 3) form[[2]] else paste("y", i, seq_len(ncol(yi)), sep = ".") if (ncol(yi) > 1) { if (inherits(names, "call")) names <- as.character(names[-1]) if (length(names) != ncol(yi)) { if (length(names) == 1) names <- paste(as.character(names)[1], i, seq_len(ncol(yi)), sep = ".") else stop("left hand side not specified correctly") } } else if (inherits(names, "call")) names <- deparse(names) colnames(yi) <- as.character(names) y[[i]] <- yi } list(y = y, group=group, class = class) }) ###********************************************************** determinePrior <- function(prior, concomitant, group) { matrix(prior, nrow = length(unique(group)), ncol = length(prior), byrow=TRUE) } setGeneric("determinePrior", function(prior, concomitant, group) standardGeneric("determinePrior")) setMethod("determinePrior", signature(concomitant="FLXPmultinom"), function(prior, concomitant, group) { exps <- exp(concomitant@x %*% concomitant@coef) exps/rowSums(exps) }) undo_weights <- function(object) { if (!is.null(object@weights)) { for (i in seq_along(object@model)) { object@model[[i]]@x <- apply(object@model[[i]]@x, 2, rep, object@weights) object@model[[i]]@y <- apply(object@model[[i]]@y, 2, rep, object@weights) object@concomitant@x <- apply(object@concomitant@x, 2, rep, object@weights) } if (length(object@group) > 0) object@group <- rep(object@group, object@weights) object@weights <- NULL } object } ###********************************************************** setMethod("simulate", signature("FLXdist"), function(object, nsim, seed = NULL, ...) { if (!exists(".Random.seed", envir = .GlobalEnv, inherits = FALSE)) runif(1) if (is.null(seed)) RNGstate <- get(".Random.seed", envir = .GlobalEnv, inherits = FALSE) else { R.seed <- get(".Random.seed", envir = .GlobalEnv, inherits = FALSE) set.seed(seed) RNGstate <- structure(seed, kind = as.list(RNGkind())) on.exit(assign(".Random.seed", R.seed, envir = .GlobalEnv, inherits = FALSE)) } ans <- lapply(seq_len(nsim), function(i) rflexmix(object, ...)$y) if (all(sapply(ans, ncol) == 1)) ans <- as.data.frame(ans) attr(ans, "seed") <- RNGstate ans }) flexmix/R/flexmixFix.R0000644000176200001440000001415214404637304014405 0ustar liggesusers# # Copyright (C) 2004-2016 Friedrich Leisch and Bettina Gruen # $Id: flexmixFix.R 5079 2016-01-31 12:21:12Z gruen $ # setMethod("FLXcheckComponent", signature(model = "FLXMRfix"), function(model, k, cluster, ...) { if (sum(model@nestedformula@k)) { if (!is.null(k)) { if (k != sum(model@nestedformula@k)) stop("specified k does not match the nestedformula in the model") } else k <- sum(model@nestedformula@k) } else { if (is(cluster, "matrix")) { if (is.null(k)) k <- ncol(cluster) } else if (!is.null(cluster)) { if (is.null(k)) { cluster <- as(cluster, "integer") k <- max(cluster) } } if (is.null(k)) stop("either k, cluster or the nestedformula of the model must be specified") else model@nestedformula <- as(k, "FLXnested") } if (length(model@variance) > 1) { if (sum(model@variance) != k) stop("specified k does not match the specified varFix argument in the model") } else if (model@variance) model@variance <- k else model@variance <- rep(1, k) model }) setMethod("FLXgetObs", signature(model = "FLXMRfix"), function(model) nrow(model@y)/sum(model@nestedformula@k)) setMethod("FLXgetK", signature(model = "FLXMRfix"), function(model, ...) sum(model@nestedformula@k)) setMethod("FLXremoveComponent", signature(model = "FLXMRfix"), function(model, nok, ...) 
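## Informal summary of the method below: when components are dropped during
## EM, the counts in the nested formula and in the variance grouping are
## decremented accordingly, and the rows and columns of the stacked design,
## response and segment matrices belonging to the removed components are
## deleted.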
{ if (!length(nok)) return(model) K <- model@nestedformula wnok <- sapply(nok, function(i) which(apply(rbind(i > c(0, cumsum(K@k[-length(K@k)])), i <= c(cumsum(K@k))), 2, all))) wnok <- table(wnok) if (length(wnok) > 0) { K@k[as.integer(names(wnok))] <- K@k[as.integer(names(wnok))] - wnok if (any(K@k == 0)) { keep <- K@k != 0 K@k <- K@k[keep] K@formula <- K@formula[keep] } k <- sum(K@k) model@nestedformula <- K } varnok <- sapply(nok, function(i) which(apply(rbind(i > c(0, cumsum(model@variance[-length(model@variance)])), i <= c(cumsum(model@variance))), 2, all))) varnok <- table(varnok) if (length(varnok) > 0) { model@variance[as.integer(names(varnok))] <- model@variance[as.integer(names(varnok))] - varnok if (any(model@variance == 0)) model@variance <- model@variance[model@variance != 0] } rok <- which(!apply(model@segment[,nok,drop=FALSE], 1, function(x) any(x))) model@x <- model@x[rok, which(colSums(model@design[-nok,,drop=FALSE]) > 0), drop=FALSE] model@y <- model@y[rok,, drop=FALSE] model@design <- model@design[-nok,,drop=FALSE] cok <- colSums(model@design) > 0 model@design <- model@design[,cok,drop=FALSE] model@segment <- model@segment[rok,-nok, drop=FALSE] model }) ###********************************************************** setMethod("FLXmstep", signature(model = "FLXMRfix"), function(model, weights, ...) { model@fit(model@x, model@y, as.vector(weights), model@design, model@variance) }) ###********************************************************** setMethod("FLXdeterminePostunscaled", signature(model = "FLXMRfix"), function(model, components, ...) { sapply(seq_along(components), function(m) components[[m]]@logLik(model@x[model@segment[,m], as.logical(model@design[m,]), drop=FALSE], model@y[model@segment[,m],,drop=FALSE])) }) ###********************************************************** modelMatrix <- function(random, fixed, nested, data=list(), lhs, xlevels = NULL) { if (!lhs) random <- random[-2] mf.random <- model.frame(random, data=data, na.action = NULL) response <- if (lhs) as.matrix(model.response(mf.random)) else NULL xlev <- xlevels[names(.getXlevels(terms(mf.random), mf.random))] mm.random <- if (is.null(xlev)) model.matrix(terms(mf.random), data=mf.random) else model.matrix(terms(mf.random), data=data, xlev = xlev) xlevels.random <- .getXlevels(terms(mf.random), mf.random) randomfixed <- if(identical(paste(deparse(fixed), collapse = ""), "~0")) random else update(random, paste("~.+", paste(deparse(fixed[[length(fixed)]]), collapse = ""))) mf.randomfixed <- model.frame(randomfixed, data=data) mm.randomfixed <- model.matrix(terms(mf.randomfixed), data=mf.randomfixed, xlev = xlevels[names(.getXlevels(terms(mf.randomfixed), mf))]) mm.fixed <- mm.randomfixed[,!colnames(mm.randomfixed) %in% colnames(mm.random), drop=FALSE] xlevels.fixed <- .getXlevels(terms(mf.randomfixed), mf.randomfixed) all <- mm.all <- mm.nested <- xlevels.nested <- list() for (l in seq_along(nested)) { all[[l]] <- if (identical(paste(deparse(nested[[l]]), collapse = ""), "~0")) randomfixed else update(randomfixed, paste("~.+", paste(deparse(nested[[l]][[length(nested[[l]])]]), collapse = ""))) mf <- model.frame(all[[l]], data=data) mm.all[[l]] <- model.matrix(terms(mf), data=mf, xlev = xlevels[names(.getXlevels(terms(mf), mf))]) mm.nested[[l]] <- mm.all[[l]][,!colnames(mm.all[[l]]) %in% colnames(mm.randomfixed),drop=FALSE] xlevels.nested[[l]] <- .getXlevels(terms(mf), mf) } return(list(random=mm.random, fixed=mm.fixed, nested=mm.nested, response=response, xlevels=c(xlevels.random, xlevels.fixed, 
unlist(xlevels.nested)))) } ###********************************************************** modelDesign <- function(mm.all, k) { design <- matrix(1, nrow=sum(k@k), ncol=ncol(mm.all$fixed)) col.names <- colnames(mm.all$fixed) nested <- matrix(0, nrow=sum(k@k), ncol=sum(sapply(mm.all$nested, ncol))) cumK <- c(0, cumsum(k@k)) i <- 0 for (l in seq_along(mm.all$nested)) { if (ncol(mm.all$nested[[l]])) { nested[(cumK[l] + 1):cumK[l+1], i+seq_len(ncol(mm.all$nested[[l]]))] <- 1 i <- i+ncol(mm.all$nested[[l]]) col.names <- c(col.names, colnames(mm.all$nested[[l]])) } } design <- cbind(design, nested) if (ncol(mm.all$random)) design <- cbind(design, kronecker(diag(sum(k@k)), matrix(1, ncol=ncol(mm.all$random)))) colnames(design) <- c(col.names, rep(colnames(mm.all$random), sum(k@k))) design } ###********************************************************** flexmix/R/plot-flexmix.R0000644000176200001440000001353014404637304014711 0ustar liggesusers# # Copyright (C) 2004-2016 Friedrich Leisch and Bettina Gruen # $Id: plot-flexmix.R 5079 2016-01-31 12:21:12Z gruen $ # determine_y <- function(h, root) { y <- h$counts if (root) y <- sqrt(y) return(y) } panel.rootogram <- function (x, breaks, equal.widths = TRUE, nint = max(round(log2(length(x)) + 1), 3), alpha = plot.polygon$alpha, col = plot.polygon$col, border = plot.polygon$border, lty = plot.polygon$lty, lwd = plot.polygon$lwd, subscripts, groups, mark, root = TRUE, markcol, ...) { x <- as.numeric(x) plot.polygon <- trellis.par.get("plot.polygon") grid.lines(x = c(0.05, 0.95), y = unit(c(0, 0), "native"), gp = gpar(col = border, lty = lty, lwd = lwd, alpha = alpha), default.units = "npc") if (length(x) > 0) { if (is.null(breaks)) { breaks <- if (equal.widths) do.breaks(range(x, finite = TRUE), nint) else quantile(x, 0:nint/nint, na.rm = TRUE) } h <- hist.constructor(x, breaks = breaks, plot = FALSE, ...) y <- determine_y(h, root) if (!is.null(mark)) { h1 <- hist.constructor(x[groups[subscripts] == mark], breaks = h$breaks, plot = FALSE, ...) y1 <- determine_y(h1, root) } nb <- length(breaks) if (length(y) != nb - 1) warning("problem with hist computations") if (nb > 1) { panel.rect(x = breaks[-nb], y = 0, height = y, width = diff(breaks), col = col, alpha = alpha, border = border, lty = lty, lwd = lwd, just = c("left", "bottom")) if (!is.null(mark)) panel.rect(x = breaks[-nb], y = 0, height = y1, width = diff(breaks), col = markcol, alpha = alpha, border = border, lty = lty, lwd = lwd, just = c("left", "bottom")) } } } prepanel.rootogram <- function (x, breaks, equal.widths = TRUE, nint = max(round(log2(length(x)) + 1), 3), root = TRUE, ...) { if (length(x) < 1) list(xlim = NA, ylim = NA, dx = NA, dy = NA) else { if (is.factor(x)) { isFactor <- TRUE xlimits <- levels(x) } else isFactor <- FALSE if (!is.numeric(x)) x <- as.numeric(x) if (is.null(breaks)) { breaks <- if (equal.widths) do.breaks(range(x, finite = TRUE), nint) else quantile(x, 0:nint/nint, na.rm = TRUE) } h <- hist.constructor(x, breaks = breaks, plot = FALSE, ...) 
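    ## determine_y() returns the raw bin counts for a histogram and their
    ## square roots for a rootogram (root = TRUE), so the prepanel limits are
    ## computed on the same scale as the one used by the panel function.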
y <- determine_y(h, root) list(xlim = if (isFactor) xlimits else range(x, breaks, finite = TRUE), ylim = range(0, y, finite = TRUE), dx = 1, dy = 1) } } setMethod("plot", signature(x="flexmix", y="missing"), function(x, y, mark=NULL, markcol=NULL, col=NULL, eps=1e-4, root=TRUE, ylim=TRUE, main=NULL, xlab = "", ylab = "", as.table = TRUE, endpoints = c(-0.04, 1.04), ...){ k <- length(x@prior) if(is.null(markcol)) markcol <- FullColors[5] if(is.null(col)) col <- LightColors[4] if(is.null(main)){ main <- ifelse(root, "Rootogram of posterior probabilities", "Histogram of posterior probabilities") main <- paste(main, ">", eps) } groupfirst <- if (length(x@group)) !duplicated(x@group) else TRUE if (is.null(x@weights)) z <- data.frame(posterior = as.vector(x@posterior$scaled[groupfirst,,drop=FALSE]), component = factor(rep(seq_len(x@k), each = nrow(x@posterior$scaled[groupfirst,,drop=FALSE])), levels = seq_len(x@k), labels = paste("Comp.", seq_len(x@k))), cluster = rep(as.vector(x@cluster[groupfirst]), k)) else z <- data.frame(posterior = rep(as.vector(x@posterior$scaled[groupfirst,,drop=FALSE]), rep(x@weights[groupfirst], k)), component = factor(rep(seq_len(x@k), each = sum(x@weights[groupfirst])), seq_len(x@k), paste("Comp.", seq_len(x@k))), cluster = rep(rep(as.vector(x@cluster[groupfirst]), x@weights[groupfirst]), k)) panel <- function(x, subscripts, groups, ...) panel.rootogram(x, root = root, mark = mark, col = col, markcol = markcol, subscripts = subscripts, groups = groups, ...) prepanel <- function(x, ...) prepanel.rootogram(x, root = root, ...) z <- subset(z, posterior > eps) cluster <- NULL # make codetools happy if (is.logical(ylim)) { scales <- if (ylim) list() else list(y = list(relation = "free")) hh <- histogram(~ posterior | component, data = z, main = main, ylab = ylab, xlab = xlab, groups = cluster, panel = panel, prepanel = prepanel, scales = scales, as.table = as.table, endpoints = endpoints, ...) } else hh <- histogram(~ posterior | component, data = z, main = main, ylab = ylab, xlab = xlab, groups = cluster, ylim = ylim, panel = panel, prepanel = prepanel, as.table = as.table, endpoints = endpoints, ...) if (root) { hh$yscale.components <- function (lim, packet.number = 0, packet.list = NULL, right = TRUE, ...) { comps <- calculateAxisComponents(lim, packet.list = packet.list, packet.number = packet.number, ...) 
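          ## The default tick positions are respaced on the square-root scale
          ## and labelled with their squared values, so the axis of the
          ## rootogram still reads in counts.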
comps$at <- sqrt(seq(min(comps$at)^2, max(comps$at)^2, length.out = length(comps$at))) comps$labels <- format(comps$at^2, trim = TRUE) list(num.limit = comps$num.limit, left = list(ticks = list(at = comps$at, tck = 1), labels = list(at = comps$at, labels = comps$labels, cex = 1, check.overlap = comps$check.overlap)), right = right) } } hh }) flexmix/R/examples.R0000644000176200001440000000411314404637304014074 0ustar liggesusers# # Copyright (C) 2004-2016 Friedrich Leisch and Bettina Gruen # $Id: examples.R 5079 2016-01-31 12:21:12Z gruen $ # ExNPreg = function(n) { if(n %% 2 != 0) stop("n must be even") x <- runif(2*n, 0, 10) mp <- exp(c(2-0.2*x[1:n], 1+0.1*x[(n+1):(2*n)])) mb <- binomial()$linkinv(c(x[1:n]-5, 5-x[(n+1):(2*n)])) data.frame(x=x, yn=c(5*x[1:n], 40-(x[(n+1):(2*n)]-5)^2)+3*rnorm(n), yp=rpois(2*n, mp), yb=rbinom(2*n, size=1, prob=mb), class = rep(1:2, c(n,n)), id1 = factor(rep(1:n, rep(2, n))), id2 = factor(rep(1:(n/2), rep(4, n/2)))) } ExNclus = function(n=100) { if(n %% 2 != 0) stop("n must be even") rbind(mvtnorm::rmvnorm(n, mean=rep(0,2)), mvtnorm::rmvnorm(n, mean=c(8,0), sigma=diag(1:2)), mvtnorm::rmvnorm(1.5*n, mean=c(-2,6), sigma=diag(2:1)), mvtnorm::rmvnorm(2*n, mean=c(4,4), sigma=matrix(c(1,.9,.9,1), 2))) } ExLinear <- function(beta, n, xdist="runif", xdist.args=NULL, family=c("gaussian", "poisson"), sd=1, ...) { family <- match.arg(family) X <- NULL y <- NULL k <- ncol(beta) d <- nrow(beta)-1 n <- rep(n, length.out=k) if(family=="gaussian") sd <- rep(sd, length.out=k) xdist <- rep(xdist, length.out=d) if(is.null(xdist.args)){ xdist.args <- list(list(...)) } if(!is.list(xdist.args[[1]])) xdist.args <- list(xdist.args) xdist.args <- rep(xdist.args, length.out=d) for(i in 1:k) { X1 <- 1 for(j in 1:d){ xdist.args[[j]]$n <- n[i] X1 <- cbind(X1, do.call(xdist[j], xdist.args[[j]])) } X <- rbind(X, X1) xb <- X1 %*% beta[,i,drop=FALSE] if(family=="gaussian") y1 <- xb + rnorm(n[i], sd=sd[i]) else y1 <- rpois(n[i], exp(xb)) y <- c(y, y1) } X <- X[,-1,drop=FALSE] colnames(X) <- paste("x", 1:d, sep="") z <- data.frame(y=y, X) attr(z, "clusters") <- rep(1:k, n) z } flexmix/R/kldiv.R0000644000176200001440000001006514404637304013372 0ustar liggesusers# # Copyright (C) 2004-2016 Friedrich Leisch and Bettina Gruen # $Id: kldiv.R 5079 2016-01-31 12:21:12Z gruen $ # setMethod("KLdiv", "matrix", function(object, eps=10^-4, overlap=TRUE,...) { if(!is.numeric(object)) stop("object must be a numeric matrix\n") z <- matrix(NA, nrow=ncol(object), ncol=ncol(object)) colnames(z) <- rownames(z) <- colnames(object) w <- object < eps if (any(w)) object[w] <- eps object <- sweep(object, 2, colSums(object) , "/") for(k in seq_len(ncol(object)-1)){ for(l in 2:ncol(object)){ ok <- (object[, k] > eps) & (object[, l] > eps) if (!overlap | any(ok)) { z[k,l] <- sum(object[,k] * (log(object[,k]) - log(object[,l]))) z[l,k] <- sum(object[,l] * (log(object[,l]) - log(object[,k]))) } } } diag(z)<-0 z }) setMethod("KLdiv", "flexmix", function(object, method = c("continuous", "discrete"), ...) { method <- match.arg(method) if (method == "discrete") z <- KLdiv(object@posterior$scaled, ...) else { z <- matrix(0, object@k, object@k) for (i in seq_along(object@model)) { comp <- lapply(object@components, "[[", i) z <- z + KLdiv(object@model[[i]], comp) } } z }) setMethod("KLdiv", "FLXMRglm", function(object, components, ...) 
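## Informal note on the method below: for each pair of components the
## Kullback-Leibler divergence is computed in closed form for the gaussian,
## binomial, poisson and gamma families, summed over all observations using
## the component-specific linear predictors, and returned as an asymmetric
## matrix with zero diagonal.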
{ z <- matrix(NA, length(components), length(components)) mu <- lapply(components, function(x) x@predict(object@x)) if (object@family == "gaussian") { sigma <- lapply(components, function(x) x@parameters$sigma) for (k in seq_len(ncol(z)-1)) { for (l in seq_len(ncol(z))[-1]) { z[k,l] <- sum(log(sigma[[l]]) - log(sigma[[k]]) + 1/2 * (-1 + ((sigma[[k]]^2 + (mu[[k]] - mu[[l]])^2))/sigma[[l]]^2)) z[l,k] <- sum(log(sigma[[k]]) - log(sigma[[l]]) + 1/2 * (-1 + ((sigma[[l]]^2 + (mu[[l]] - mu[[k]])^2))/sigma[[k]]^2)) } } } else if (object@family == "binomial") { for (k in seq_len(ncol(z)-1)) { for (l in seq_len(ncol(z))[-1]) { z[k,l] <- sum(mu[[k]] * log(mu[[k]]/mu[[l]]) + (1-mu[[k]]) * log((1-mu[[k]])/(1-mu[[l]]))) z[l,k] <- sum(mu[[l]] * log(mu[[l]]/mu[[k]]) + (1-mu[[l]]) * log((1-mu[[l]])/(1-mu[[k]]))) } } } else if (object@family == "poisson") { for (k in seq_len(ncol(z)-1)) { for (l in seq_len(ncol(z))[-1]) { z[k,l] <- sum(mu[[k]] * log(mu[[k]]/mu[[l]]) + mu[[l]] - mu[[k]]) z[l,k] <- sum(mu[[l]] * log(mu[[l]]/mu[[k]]) + mu[[k]] - mu[[l]]) } } } else if (object@family == "gamma") { shape <- lapply(components, function(x) x@parameters$shape) for (k in seq_len(ncol(z)-1)) { for (l in seq_len(ncol(z))[-1]) { X <- mu[[k]]*shape[[l]]/mu[[l]]/shape[[k]] z[k,l] <- sum(log(gamma(shape[[l]])/gamma(shape[[k]])) + shape[[l]] * log(X) - shape[[k]] * (1 - 1/X) + (shape[[k]] - shape[[l]])*digamma(shape[[k]])) z[l,k] <- sum(log(gamma(shape[[k]])/gamma(shape[[l]])) - shape[[k]] * log(X) - shape[[l]] * (1 - X) + (shape[[l]] - shape[[k]])*digamma(shape[[l]])) } } } else stop(paste("Unknown family", object@family)) diag(z) <- 0 z }) setMethod("KLdiv", "FLXMC", function(object, components, ...) { z <- matrix(NA, length(components), length(components)) if (object@dist == "mvnorm") { center <- lapply(components, function(x) x@parameters$center) cov <- lapply(components, function(x) x@parameters$cov) for (k in seq_len(ncol(z)-1)) { for (l in seq_len(ncol(z))[-1]) { z[k,l] <- 1/2 * (log(det(cov[[l]])) - log(det(cov[[k]])) - length(center[[k]]) + sum(diag(solve(cov[[l]]) %*% (cov[[k]] + tcrossprod(center[[k]] - center[[l]]))))) z[l,k] <- 1/2 * (log(det(cov[[k]])) - log(det(cov[[l]])) - length(center[[l]]) + sum(diag(solve(cov[[k]]) %*% (cov[[l]] + tcrossprod(center[[l]] - center[[k]]))))) } } } else stop(paste("Unknown distribution", object@dist)) diag(z) <- 0 z }) ###********************************************************** flexmix/R/group.R0000644000176200001440000000061514404637304013415 0ustar liggesuserssetMethod("group", signature(object="flexmix"), function(object) { group <- object@group if (!length(group)) group <- group(object@model[[1]]) group }) setMethod("group", signature(object="FLXM"), function(object) { factor(seq_len(nrow(object@x))) }) setMethod("group", signature(object="FLXMRglmfix"), function(object) { factor(seq_len(nrow(object@x)/sum(object@nestedformula@k))) }) flexmix/R/FLXMCdist1.R0000644000176200001440000002123114404637304014074 0ustar liggesusers## Note that the implementation of the weighted ML estimation is ## influenced and inspired by the function fitdistr from package MASS ## and function fitdist from package fitdistrplus. FLXMCdist1 <- function(formula=.~., dist, ...) { foo <- paste("FLXMC", dist, sep = "") if (!exists(foo)) stop("This distribution has not been implemented yet.") get(foo)(formula, ...) 
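  ## Usage sketch (not run): the constructor simply dispatches on `dist`, e.g.
  ##   FLXMCdist1(dist = "lnorm")    ## equivalent to FLXMClnorm()
  ##   FLXMCdist1(dist = "invGauss") ## equivalent to FLXMCinvGauss()
  ## Any `dist` without a matching FLXMC<dist> constructor raises an error.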
} prepoc.y.pos.1 <- function(x) { if (ncol(x) > 1) stop("for the inverse gaussian family y must be univariate") if (any(x < 0)) stop("values must be >= 0") x } FLXMClnorm <- function(formula=.~., ...) { z <- new("FLXMC", weighted=TRUE, formula=formula, dist = "lnorm", name="model-based log-normal clustering") z@preproc.y <- prepoc.y.pos.1 z@defineComponent <- function(para) { predict <- function(x, ...) matrix(para$meanlog, nrow = nrow(x), ncol = 1, byrow = TRUE) logLik <- function(x, y) dlnorm(y, meanlog = predict(x, ...), sdlog = para$sdlog, log = TRUE) new("FLXcomponent", parameters = list(meanlog = para$meanlog, sdlog = para$sdlog), predict = predict, logLik = logLik, df = para$df) } z@fit <- function(x, y, w, ...) { logy <- log(y); meanw <- mean(w) meanlog <- mean(w * logy) / meanw sdlog <- sqrt(mean(w * (logy - meanlog)^2) / meanw) z@defineComponent(list(meanlog = meanlog, sdlog = sdlog, df = 2)) } z } FLXMCinvGauss <- function(formula=.~., ...) { z <- new("FLXMC", weighted=TRUE, formula=formula, name = "model-based inverse Gaussian clustering", dist = "invGauss") z@preproc.y <- prepoc.y.pos.1 z@defineComponent <- function(para) { predict <- function(x, ...) matrix(para$nu, nrow = nrow(x), ncol = length(para$nu), byrow = TRUE) logLik <- function(x, y, ...) SuppDists::dinvGauss(y, nu = predict(x, ...), lambda = para$lambda, log = TRUE) new("FLXcomponent", parameters = list(nu = para$nu, lambda = para$lambda), predict = predict, logLik = logLik, df = para$df) } z@fit <- function(x, y, w, ...){ nu <- mean(w * y) / mean(w) lambda <- mean(w) / mean(w * (1 / y - 1 / nu)) z@defineComponent(list(nu = nu, lambda = lambda, df = 2)) } z } FLXMCgamma <- function(formula=.~., method = "Nelder-Mead", warn = -1, ...) { z <- new("FLXMC", weighted=TRUE, formula=formula, name = "model-based gamma clustering", dist = "gamma") z@preproc.y <- prepoc.y.pos.1 z@defineComponent <- function(para) { predict <- function(x, ...) matrix(para$shape, nrow = nrow(x), ncol = length(para$shape), byrow = TRUE) logLik <- function(x, y, ...) dgamma(y, shape = predict(x, ...), rate = para$rate, log = TRUE) new("FLXcomponent", parameters = list(shape = para$shape, rate = para$rate), predict = predict, logLik = logLik, df = para$df) } z@fit <- function(x, y, w, component){ if (!length(component)) { sw <- sum(w) mean <- sum(y * w) / sw var <- (sum(y^2 * w) / sw - mean^2) * sw / (sw - 1) start <- c(mean^2/var, mean/var) } else start <- unname(unlist(component)) control <- list(parscale = c(1, start[2])) f <- function(parms) -sum(dgamma(y, shape = parms[1], rate = parms[2], log = TRUE) * w) oop <- options(warn = warn) on.exit(oop) parms <- optim(start, f, method = method, control = control)$par z@defineComponent(list(shape = parms[1], rate = parms[2], df = 2)) } z } FLXMCexp <- function(formula=.~., ...) { z <- new("FLXMC", weighted=TRUE, formula=formula, name = "model-based exponential clustering", dist = "exp") z@preproc.y <- prepoc.y.pos.1 z@defineComponent <- function(para) { predict <- function(x, ...) matrix(para$rate, nrow = nrow(x), ncol = length(para$rate), byrow = TRUE) logLik <- function(x, y, ...) dexp(y, rate = predict(x, ...), log = TRUE) new("FLXcomponent", parameters = list(rate = para$rate), predict = predict, logLik = logLik, df = para$df) } z@fit <- function(x, y, w, component) z@defineComponent(list(rate = mean(w) / mean(w * y), df = 1)) z } FLXMCweibull <- function(formula=.~., method = "Nelder-Mead", warn = -1, ...) 
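## Informal note: as for the gamma driver above, the Weibull component is
## fitted by weighted maximum likelihood: moment-based starting values are
## derived from the weighted log-observations, optim() then minimises the
## weighted negative log-likelihood (Nelder-Mead by default), and warnings
## during the search are silenced via the `warn` option.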
{ z <- new("FLXMC", weighted=TRUE, formula=formula, name = "model-based Weibull clustering", dist = "weibull") z@preproc.y <- prepoc.y.pos.1 z@defineComponent <- function(para) { predict <- function(x, ...) matrix(para$shape, nrow = nrow(x), ncol = length(para$shape), byrow = TRUE) logLik <- function(x, y, ...) dweibull(y, shape = predict(x, ...), scale = para$scale, log = TRUE) new("FLXcomponent", parameters = list(shape = para$shape, scale = para$scale), predict = predict, logLik = logLik, df = para$df) } z@fit <- function(x, y, w, component){ if (!length(component)) { ly <- log(y) sw <- sum(w) mean <- sum(ly * w) / sw var <- (sum(ly^2 * w) / sw - mean^2) * sw / (sw - 1) shape <- 1.2/sqrt(var) scale <- exp(mean + 0.572/shape) start <- c(shape, scale) } else start <- unname(unlist(component)) f <- function(parms) -sum(dweibull(y, shape = parms[1], scale = parms[2], log = TRUE) * w) oop <- options(warn = warn) on.exit(oop) parms <- optim(start, f, method = method)$par z@defineComponent(list(shape = parms[1], scale = parms[2], df = 2)) } z } FLXMCburr <- function(formula=.~., start = NULL, method = "Nelder-Mead", warn = -1, ...) { z <- new("FLXMC", weighted=TRUE, formula=formula, name = "model-based Burr clustering", dist = "burr") z@preproc.y <- prepoc.y.pos.1 z@defineComponent <- function(para) { predict <- function(x, ...) matrix(para$shape1, nrow = nrow(x), ncol = length(para$shape1), byrow = TRUE) logLik <- function(x, y, ...) actuar::dburr(y, shape1 = predict(x, ...), shape2 = para$shape2, scale = para$scale, log = TRUE) new("FLXcomponent", parameters = list(shape1 = para$shape1, shape2 = para$shape2, scale = para$scale), predict = predict, logLik = logLik, df = para$df) } z@fit <- function(x, y, w, component){ if (!length(component)) { if (is.null(start)) start <- c(1, 1) } else start <- unname(unlist(component[2:3])) f <- function(parms) { shape1 <- sum(w) / sum(w * log(1 + (y/parms[2])^parms[1])) -sum(actuar::dburr(y, shape1 = shape1, shape2 = parms[1], scale = parms[2], log = TRUE) * w) } oop <- options(warn = warn) on.exit(oop) parms <- optim(start, f, method = method)$par z@defineComponent(list(shape1 = sum(w) / sum(w * log(1 + (y/parms[2])^parms[1])), shape2 = parms[1], scale = parms[2], df = 3)) } z } FLXMCinvburr <- function(formula=.~., start = NULL, warn = -1, ...) { z <- new("FLXMC", weighted=TRUE, formula=formula, name = "model-based Inverse Burr clustering", dist = "invburr") z@preproc.y <- prepoc.y.pos.1 z@defineComponent <- function(para) { predict <- function(x, ...) matrix(para$shape1, nrow = nrow(x), ncol = length(para$shape1), byrow = TRUE) logLik <- function(x, y, ...) 
actuar::dinvburr(y, shape1 = predict(x, ...), shape2 = para$shape2, scale = para$scale, log = TRUE) new("FLXcomponent", parameters = list(shape1 = para$shape1, shape2 = para$shape2, scale = para$scale), predict = predict, logLik = logLik, df = para$df) } z@fit <- function(x, y, w, component){ if (!length(component)) { if (is.null(start)) start <- c(1, 1) } else start <- unname(unlist(component[2:3])) f <- function(parms) { shape1 <- sum(w) / sum(w * log(1 + (parms[2]/y)^parms[1])) -sum(actuar::dinvburr(y, shape1 = shape1, shape2 = parms[1], scale = parms[2], log = TRUE) * w) } oop <- options(warn = warn) on.exit(oop) parms <- optim(start, f, method = "Nelder-Mead")$par z@defineComponent(list(shape1 = sum(w) / sum(w * log(1 + (parms[2]/y)^parms[1])), shape2 = parms[1], scale = parms[2], df = 3)) } z } flexmix/R/lmm.R0000644000176200001440000002413214404637304013046 0ustar liggesuserssetClass("FLXcomponentlmm", representation(random="list"), contains = "FLXcomponent") setClass("FLXMRlmm", representation(family = "character", random = "formula", group = "factor", z = "matrix", which = "ANY"), contains = "FLXMR") setClass("FLXMRlmmfix", contains = "FLXMRlmm") setMethod("allweighted", signature(model = "FLXMRlmm", control = "ANY", weights = "ANY"), function(model, control, weights) { if (!control@classify %in% c("auto", "weighted")) stop("Model class only supports weighted ML estimation.") model@weighted }) FLXMRlmm <- function(formula = . ~ ., random, lm.fit = c("lm.wfit", "smooth.spline"), varFix = c(Random = FALSE, Residual = FALSE), ...) { family <- "gaussian" lm.fit <- match.arg(lm.fit) if (length(varFix) != 2 || is.null(names(varFix)) || any(is.na(pmatch(names(varFix), c("Random", "Residual"))))) stop("varFix has to be a named vector of length two") else names(varFix) <- c("Random", "Residual")[pmatch(names(varFix), c("Random", "Residual"))] random <- if (length(random) == 3) random else formula(paste(".", paste(deparse(random), collapse = ""))) object <- new("FLXMRlmm", formula = formula, random = random, weighted = TRUE, family = family, name = "FLXMRlmm:gaussian") if (any(varFix)) object <- new("FLXMRlmmfix", object) object@preproc.y <- function(x){ if (ncol(x) > 1) stop(paste("y must be univariate")) x } if (lm.fit == "smooth.spline") { object@preproc.x <- function(x){ if (ncol(x) > 1) stop(paste("x must be univariate")) x } } add <- function(x) Reduce("+", x) lmm.wfit <- function(x, y, w, z, which, random) { effect <- lapply(seq_along(which), function(i) z[[which[i]]] %*% random$beta[[i]]) W <- rep(w, sapply(x, nrow)) X <- do.call("rbind", x) Y <- do.call("rbind", y) Effect <- do.call("rbind", effect) fit <- get(lm.fit)(X, Y - Effect, W, ...) XSigmaX <- sapply(seq_along(z), function(i) sum(diag(crossprod(z[[i]]) %*% random$Sigma[[i]]))) wSum <- tapply(w, which, sum) sigma2 <- (sum(W*residuals(fit)^2) + sum(wSum*XSigmaX))/sum(W) wSigma <- add(lapply(seq_along(z), function(i) wSum[i]*random$Sigma[[i]])) bb <- add(lapply(seq_along(which), function(i) tcrossprod(random$beta[[i]])*w[i])) psi <- (wSigma + bb)/sum(w) list(coefficients = if (is(fit, "smooth.spline")) fit$fit else coef(fit), sigma2 = list(Random = psi, Residual = sigma2), df = if (is(fit, "smooth.spline")) fit$df else ncol(x[[1]])) } object@defineComponent <- function(para) { predict <- function(x, ...) { if (is(para$coef, "smooth.spline.fit")) lapply(x, function(X) getS3method("predict", "smooth.spline.fit")(para$coef, X)$y) else lapply(x, function(X) X %*% para$coef) } logLik <- function(x, y, z, which, group, ...) 
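      ## Informal note: the component log-likelihood below uses the marginal
      ## covariance V = Z Psi Z' + sigma^2 I for each random-effects design
      ## pattern, evaluates the multivariate normal log-density per group,
      ## scales it by the group size and finally expands it back to the
      ## observation level via ungroupPriors().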
{ V <- lapply(z, function(Z) tcrossprod(tcrossprod(Z, para$sigma2$Random), Z) + diag(nrow(Z)) * para$sigma2$Residual) mu <- predict(x, ...) llh <- sapply(seq_along(x), function(i) mvtnorm::dmvnorm(t(y[[i]]), mean = mu[[i]], sigma = V[[which[i]]], log=TRUE)/nrow(V[[which[i]]])) as.vector(ungroupPriors(matrix(llh), group, !duplicated(group))) } new("FLXcomponentlmm", parameters = list(coef = para$coef, sigma2 = para$sigma2), random = list(), logLik = logLik, predict = predict, df = para$df) } determineRandom <- function(mu, y, z, which, sigma2) { Sigma <- lapply(z, function(Z) solve(crossprod(Z) / sigma2$Residual + solve(sigma2$Random))) Sigma_tilde <- lapply(seq_along(z), function(i) (tcrossprod(Sigma[[i]], z[[i]])/sigma2$Residual)) beta <- lapply(seq_along(which), function(i) Sigma_tilde[[which[i]]] %*% (y[[i]] - mu[[i]])) list(beta = beta, Sigma = Sigma) } object@fit <- if (any(varFix)) { function(x, y, w, z, which, random) { fit <- lapply(seq_len(ncol(w)), function(k) lmm.wfit(x, y, w[,k], z, which, random[[k]])) if (varFix["Random"]) { prior_w <- apply(w, 2, weighted.mean, w = sapply(x, length)) Random <- add(lapply(seq_along(fit), function(i) fit[[i]]$sigma2$Random * prior_w[i])) for (i in seq_along(fit)) fit[[i]]$sigma2$Random <- Random } if (varFix["Residual"]) { prior <- colMeans(w) Residual <- sum(sapply(fit, function(x) x$sigma2$Residual) * prior) for (i in seq_along(fit)) fit[[i]]$sigma2$Residual <- Residual } n <- nrow(fit[[1]]$sigma2$Random) lapply(fit, function(Z) { comp <- object@defineComponent(list(coef = coef(Z), sigma2 = Z$sigma2, df = Z$df + n*(n+1)/(2*ifelse(varFix["Random"], ncol(w), 1)) + ifelse(varFix["Residual"], 1/ncol(w), 1))) comp@random <- determineRandom(comp@predict(x), y, z, which, comp@parameters$sigma2) comp }) } } else { function(x, y, w, z, which, random){ fit <- lmm.wfit(x, y, w, z, which, random) n <- nrow(fit$sigma2$Random) comp <- object@defineComponent( list(coef = coef(fit), df = fit$df + n*(n+1)/2 + 1, sigma2 = fit$sigma2)) comp@random <- determineRandom(comp@predict(x), y, z, which, comp@parameters$sigma2) comp } } object } setMethod("FLXmstep", signature(model = "FLXMRlmm"), function(model, weights, components) { weights <- weights[!duplicated(model@group),,drop=FALSE] if (!is(components[[1]], "FLXcomponentlmm")) { random <- list(beta = lapply(model@which, function(i) rep(0, ncol(model@z[[i]]))), Sigma = lapply(model@z, function(x) diag(ncol(x)))) return(sapply(seq_len(ncol(weights)), function(k) model@fit(model@x, model@y, weights[,k], model@z, model@which, random))) }else { return(sapply(seq_len(ncol(weights)), function(k) model@fit(model@x, model@y, weights[,k], model@z, model@which, components[[k]]@random))) } }) setMethod("FLXmstep", signature(model = "FLXMRlmmfix"), function(model, weights, components) { weights <- weights[!duplicated(model@group),,drop=FALSE] if (!is(components[[1]], "FLXcomponentlmm")) { random <- rep(list(list(beta = lapply(model@which, function(i) rep(0, ncol(model@z[[i]]))), Sigma = lapply(model@z, function(x) diag(ncol(x))))), ncol(weights)) return(model@fit(model@x, model@y, weights, model@z, model@which, random)) }else return(model@fit(model@x, model@y, weights, model@z, model@which, lapply(components, function(x) x@random))) }) setMethod("FLXgetModelmatrix", signature(model="FLXMRlmm"), function(model, data, formula, lhs=TRUE, ...) 
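  # Build the model matrices for the mixed-model driver: observations are split
  # by the grouping variable into per-group blocks of x, y and the random-effects
  # design z; duplicated z blocks are stored only once in @z, and @which maps
  # each group to its design matrix.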
{ formula_nogrouping <- RemoveGrouping(formula) if (identical(paste(deparse(formula_nogrouping), collapse = ""), paste(deparse(formula), collapse = ""))) stop("please specify a grouping variable") model <- callNextMethod(model, data, formula, lhs) model@fullformula <- update(model@fullformula, paste(".~. |", .FLXgetGroupingVar(formula))) mt1 <- terms(model@random, data=data) mf <- model.frame(delete.response(mt1), data=data, na.action = NULL) model@z <- model.matrix(attr(mf, "terms"), data) model@group <- grouping <- .FLXgetGrouping(formula, data)$group rownames(model@z) <- rownames(model@x) <- rownames(model@y) <- NULL model@x <- matrix(lapply(unique(grouping), function(g) model@x[grouping == g, , drop = FALSE]), ncol = 1) if (lhs) model@y <- matrix(lapply(unique(grouping), function(g) model@y[grouping == g, , drop = FALSE]), ncol = 1) z <- lapply(unique(grouping), function(g) model@z[grouping == g, , drop = FALSE]) z1 <- unique(z) if (length(z) == length(z1)) { model@which <- seq_along(z) } else { model@which <- sapply(z, function(y) which(sapply(z1, function(x) isTRUE(all.equal(x, y))))) } model@z <- matrix(z1, ncol = 1) model }) setMethod("FLXgetObs", "FLXMRlmm", function(model) sum(sapply(model@x, nrow))) setMethod("FLXdeterminePostunscaled", signature(model = "FLXMRlmm"), function(model, components, ...) { sapply(components, function(x) x@logLik(model@x, model@y, model@z, model@which, model@group)) }) setMethod("predict", signature(object="FLXMRlmm"), function(object, newdata, components, ...) { object <- FLXgetModelmatrix(object, newdata, formula = object@fullformula, lhs = FALSE) lapply(components, function(comp) unlist(comp@predict(object@x, ...))) }) setMethod("rFLXM", signature(model="FLXMRlmm", components="list"), function(model, components, class, group, ...) { class <- class[!duplicated(group)] y <- NULL for (l in seq_along(components)) { yl <- as.matrix(rFLXM(model, components[[l]], ...)) if (is.null(y)) y <- matrix(NA, nrow = length(class), ncol = ncol(yl)) y[class == l,] <- yl[class==l,,drop=FALSE] y <- matrix(y, ncol = ncol(yl)) } y }) setMethod("rFLXM", signature(model = "FLXMRlmm", components = "FLXcomponent"), function(model, components, ...) { sigma2 <- components@parameters$sigma2 V <- lapply(model@z, function(Z) tcrossprod(tcrossprod(Z, sigma2$Random), Z) + diag(nrow(Z)) * sigma2$Residual) mu <- components@predict(model@x) matrix(lapply(seq_along(model@x), function(i) t(mvtnorm::rmvnorm(1, mean = mu[[i]], sigma = V[[model@which[i]]]))), ncol = 1) }) setMethod("FLXgetNewModelmatrix", "FLXMRlmm", function(object, model, indices, groups) { object@y <- model@y[indices,,drop=FALSE] object@x <- model@x[indices,,drop=FALSE] object@which <- model@which[indices] if (length(unique(object@which)) < length(object@z)) { object@z <- model@z[sort(unique(object@which)),,drop=FALSE] object@which <- match(object@which, sort(unique(object@which))) } object }) flexmix/R/stepFlexmix.R0000644000176200001440000001351314404637304014572 0ustar liggesusers# # Copyright (C) 2004-2016 Friedrich Leisch and Bettina Gruen # $Id: stepFlexmix.R 5079 2016-01-31 12:21:12Z gruen $ # setClass("stepFlexmix", representation(models="list", k="integer", nrep="integer", logLiks="matrix", call="call")) stepFlexmix <- function(..., k=NULL, nrep=3, verbose=TRUE, drop=TRUE, unique=FALSE) { MYCALL <- match.call() MYCALL1 <- MYCALL bestFlexmix <- function(...) 
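  # Fit the mixture 'nrep' times and keep the run with the largest log-likelihood;
  # runs that fail are caught via try() and skipped. Repeating the fit guards
  # against the EM algorithm getting stuck in poor local maxima.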
{ z = new("flexmix", logLik=-Inf) logLiks = rep(NA, length.out = nrep) for(m in seq_len(nrep)){ if(verbose) cat(" *") x = try(flexmix(...)) if (!is(x, "try-error")) { logLiks[m] <- logLik(x) if(logLik(x) > logLik(z)) z = x } } return(list(z = z, logLiks = logLiks)) } z = list() if(is.null(k)){ RET = bestFlexmix(...) z[[1]] <- RET$z logLiks <- as.matrix(RET$logLiks) z[[1]]@call <- MYCALL z[[1]]@control@nrep <- nrep names(z) <- as.character(z[[1]]@k) if(verbose) cat("\n") } else{ k = as.integer(k) logLiks <- matrix(nrow = length(k), ncol = nrep) for(n in seq_along(k)){ ns <- as.character(k[n]) if(verbose) cat(k[n], ":") RET <- bestFlexmix(..., k=k[n]) z[[ns]] = RET$z logLiks[n,] <- RET$logLiks MYCALL1[["k"]] <- as.numeric(k[n]) z[[ns]]@call <- MYCALL1 z[[ns]]@control@nrep <- nrep if(verbose) cat("\n") } } logLiks <- logLiks[is.finite(sapply(z, logLik)),,drop=FALSE] z <- z[is.finite(sapply(z, logLik))] rownames(logLiks) <- names(z) if (!length(z)) stop("no convergence to a suitable mixture") if(drop & (length(z)==1)){ return(z[[1]]) } else{ z <- return(new("stepFlexmix", models=z, k=as.integer(names(z)), nrep=as.integer(nrep), logLiks=logLiks, call=MYCALL)) if(unique) z <- unique(z) return(z) } } ###********************************************************** setMethod("unique", "stepFlexmix", function(x, incomparables=FALSE, ...) { z <- list() K <- sapply(x@models, function(x) x@k) logLiks <- x@logLiks keep <- rep(TRUE, nrow(logLiks)) for(k in sort(unique(K))){ n <- which(k==K) if(length(n)>1){ l <- sapply(x@models[n], logLik) z[as.character(k)] <- x@models[n][which.max(l)] keep[n[-which.max(l)]] <- FALSE } else z[as.character(k)] <- x@models[n] } logLiks <- logLiks[keep,,drop=FALSE] rownames(logLiks) <- names(z) attr(logLiks, "na.action") <- NULL mycall <- x@call mycall["unique"] <- TRUE return(new("stepFlexmix", models=z, k=as.integer(names(z)), nrep=x@nrep, logLiks=logLiks, call=mycall)) }) ###********************************************************** setMethod("show", "stepFlexmix", function(object) { cat("\nCall:", deparse(object@call,0.75*getOption("width")), sep="\n") cat("\n") z <- data.frame(iter = sapply(object@models, function(x) x@iter), converged = sapply(object@models, function(x) x@converged), k = sapply(object@models, function(x) x@k), k0 = sapply(object@models, function(x) x@k0), logLik = sapply(object@models, function(x) logLik(x)), AIC = AIC(object), BIC = BIC(object), ICL = ICL(object)) print(z, na.string="") }) setMethod("nobs", signature(object="stepFlexmix"), function(object, ...) { sapply(object@models, nobs) }) setMethod("logLik", "stepFlexmix", function(object, ..., k = 2) { ll <- lapply(object@models, function(x) logLik(x)) df <- sapply(ll, attr, "df") nobs <- sapply(ll, attr, "nobs") ll <- unlist(ll) attr(ll, "df") <- df attr(ll, "nobs") <- nobs class(ll) <- "logLik" ll }) setMethod("ICL", "stepFlexmix", function(object, ...) { sapply(object@models, function(x) ICL(x, ...)) }) setMethod("EIC", "stepFlexmix", function(object, ...) 
{
  sapply(object@models, function(x) EIC(x, ...))
})

###**********************************************************

setMethod("getModel", "stepFlexmix",
function(object, which="BIC")
{
  if(which=="AIC")
    which <- which.min(sapply(object@models, function(x) AIC(x)))
  if(which=="BIC")
    which <- which.min(sapply(object@models, function(x) BIC(x)))
  if(which=="ICL")
    which <- which.min(sapply(object@models, function(x) ICL(x)))
  object@models[[which]]
}
)

###**********************************************************

setMethod("plot", signature(x="stepFlexmix", y="missing"),
function(x, y, what=c("AIC", "BIC", "ICL"), xlab=NULL, ylab=NULL,
         legend="topright", ...)
{
  X <- x@k
  Y <- NULL
  for(w in what){
    Y <- cbind(Y, do.call(w, list(object=x)))
  }
  if(is.null(xlab)) xlab <- "number of components"
  if(is.null(ylab)){
    if(length(what)==1)
      ylab <- what
    else
      ylab <- ""
  }
  matplot(X, Y, xlab=xlab, ylab=ylab, type="b", lty=1,
          pch=seq_along(what), ...)
  if(legend!=FALSE && length(what)>1)
    legend(x=legend, legend=what, pch=seq_along(what),
           col=seq_along(what))
  for(n in seq_len(ncol(Y))){
    m <- which.min(Y[,n])
    points(X[m], Y[m,n], pch=16, cex=1.5, col=n)
  }
})
flexmix/R/glmnet.R0000644000176200001440000000724114404637304013551 0ustar liggesusers#' @title flexmix model driver for adaptive lasso (elastic-net) with GLMs
#' @author F. Mortier (fmortier@cirad.fr) and N. Picard (nicolas.picard@cirad.fr)
#' @param formula A symbolic description of the model to be fit.
#' The general form is y~x|g where y is the response, x the set of predictors and g an
#' optional grouping factor for repeated measurements.
#' @param family a description of the error distribution and link function to be used in the model.
#' "gaussian", "poisson" and "binomial" are allowed.
#' @param adaptive boolean indicating if the algorithm should perform adaptive lasso or not
#' @param select boolean vector indicating which covariates will be included in the selection process.
#' Others will be included in the model.
#' @details Some care is needed to ensure convergence of the
#' algorithm, which is computationally more challenging than a standard EM.
#' In the proposed method, not only are cluster allocations identified
#' and component parameters estimated as commonly done in mixture models,
#' but there is also variable selection via penalized regression using
#' $k$-fold cross-validation to choose the penalty parameter.
#' For the algorithm to converge, it is necessary that the same cross-validation
#' partitioning be used across the EM iterations, i.e.,
#' the subsamples for cross-validation must be defined at the beginning.
#' This is accomplished using the {\tt foldid} option
#' as an additional parameter to be passed to \code{\link{cv.glmnet}} (see \link{glmnet} package documentation).
FLXMRglmnet <- function(formula = .~., family = c("gaussian", "binomial", "poisson"),
                        adaptive = TRUE, select = TRUE, offset = NULL, ...)
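  # Usage sketch (illustrative only; 'mydata', 'y' and 'x1'..'x3' are placeholder
  # names, not part of the package): to keep the cross-validation partition fixed
  # across EM iterations as described above, generate the fold assignment once and
  # hand it on to glmnet::cv.glmnet() through '...', e.g.
  #   foldid <- sample(rep(seq_len(10), length.out = nrow(mydata)))
  #   fit <- flexmix(y ~ x1 + x2 + x3, data = mydata, k = 2,
  #                  model = FLXMRglmnet(foldid = foldid))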
{ family <- match.arg(family) z <- FLXMRglm(formula = formula, family = family) z@preproc.x <- function(x) { if (!isTRUE(all.equal(x[, 1], rep(1, nrow(x)), check.attributes = FALSE))) stop("The model needs to include an intercept in the first column.") x } z@fit <- function(x, y, w) { if (all(!select)) { coef <- if (family == "gaussian") lm.wfit(x, y, w = w)$coef else if (family == "binomial") glm.fit(x, y, family = binomial(), weights = w)$coef else if (family == "poisson") glm.fit(x, y, family=poisson(), weights = w)$coef } else { if (adaptive) { coef <- if (family == "gaussian") lm.wfit(x, y, w = w)$coef[-1] else if(family == "binomial") glm.fit(x, y, family = binomial(), weights = w)$coef[-1] else if (family == "poisson") glm.fit(x, y, family = poisson(), weights = w)$coef[-1] penalty <- mean(w) / abs(coef) } else penalty <- rep(1, ncol(x) - 1) if (any(!select)){ select <- which(!select) penalty[select] <- 0 } m <- glmnet::cv.glmnet(x[, -1, drop = FALSE], y, family = family, weights = w, penalty.factor = penalty, ...) coef <- as.vector(coef(m, s = "lambda.min")) } df <- sum(coef != 0) sigma <- if (family == "gaussian") sqrt(sum(w * (y - x %*% coef)^2/mean(w))/(nrow(x) - df)) else NULL z@defineComponent( list(coef = coef, sigma = sigma, df = df + ifelse(family == "gaussian", 1, 0))) } z } flexmix/MD50000644000176200001440000001670314404700401012237 0ustar liggesusers8d99ac72ee27296d504c6139db53e6d6 *DESCRIPTION b54919342a017b1dd43d2f7bba67eeef *NAMESPACE 88a1e6330ac4ebbddc726522a46dfc34 *R/FLXMCdist1.R 0964dce38a7f3b491c9eef5ecedbcd68 *R/allClasses.R ebb73a41e988babd784db3f85a6b966e *R/allGenerics.R b03125231df0214b1395c7ef7f9bf25d *R/boot.R 889f52aae8df7342a12c941702964959 *R/concomitant.R ffe49b2c72bb7df7681bfd5f450468de *R/condlogit.R 3495256b72a55590ac6bd775f3782028 *R/examples.R 3ebb141d9c261a101e884f66f81a9120 *R/factanal.R c4cbbe2b096216ad85ac881bb13713a1 *R/flexmix.R ac7d510575121174e050def18e1d8f68 *R/flexmixFix.R 3eba12e52c7117b1dc31709f780bace7 *R/flxdist.R e46cc7864775903c177d0fbebf62562f *R/flxmcmvpois.R 5c565a257540fde3a2c29cde0181421b *R/flxmcsparse.R 94cecec609f15f14b8eed8002bb5b792 *R/glmFix.R d49134e08be219e48ebaa680b76e647e *R/glmnet.R c5c5f8a325c65778e15792e456459ff2 *R/group.R f929b19e094c157d3556617cafd46a31 *R/infocrit.R 296476baa3f3e730cc906d1d1663c0a0 *R/initFlexmix.R d509a6bcd55ec42162950011f0e3fedb *R/kldiv.R 2ad7b23c815889ec724736b3b5b0f017 *R/lattice.R 49e5b8eef79f6f8884e4e0323329b78f *R/lmer.R 5e9c1a083238f5e9310c257036db61cc *R/lmm.R 223499c1d0b23482e2f62049b4749e25 *R/lmmc.R 5a7ccfa40918a1075c31b79b9b713c64 *R/mgcv.R 2d40a708cbab9a061aa4f2ab7e8c978a *R/models.R a1543ac30b4c4faca4ebdd23e5fca5f3 *R/multinom.R 7a1d8c2b82999b1bd1e0419b00117498 *R/plot-FLXboot.R 621a422936c28bdebd8fbbe1d9fe7970 *R/plot-flexmix.R 4a0e5ec24c4b8b048afd7473bbbf0cb4 *R/plot-refit.R 2f3409e7007c2e61fc5ee28aaef440d5 *R/plot.R ce8c2d2018c456648e26145ea1cbc038 *R/rFLXmodel.R 2dce928976d164087b04d9edba0771eb *R/refit.R 89f17e9ffcdd456af71b16580a64ab7c *R/relabel.R 942a774a6195a4cf5da06bfefc18adb6 *R/rflexmix.R aee09f6209b75aaf4f4aaea863fb9352 *R/robust.R 6c4c0f1663db73d9c2a8fd48ab78eb2e *R/stepFlexmix.R 06a4c9417ad23fc25ba52d1be38dee42 *R/utils.R 90d0c43f34243c3adbb022664bd0ddf5 *R/z.R 16c7cb035f39de0d66d086ecdb22fbe5 *R/ziglm.R eb5b418d76ebf2c2508a5d87e3c10a05 *build/vignette.rds 52f6bcc1bca4a592e3351b49431d9065 *data/BregFix.RData 0b3a641a3f24a16d16e1d48446cf73cc *data/Mehta.RData 90b92021bfed03931e116746bd331d76 *data/NPreg.RData 33ab45d7bc0754237ecd01ce0254a604 *data/Nclus.RData 
cce71227ec0d604bcb2c78fcb6249384 *data/NregFix.RData b30604eae02b77396b87d0defb8d5c98 *data/betablocker.RData 45576be271f3423ba91c02ebfaca00b9 *data/bioChemists.RData 43669f632f40def80fbb61ca25c657fd *data/candy.RData a8bd5b28da6a6a50d872c0ff711d70f5 *data/dmft.RData 6556322b93e01104ed3485ee02993d6d *data/fabricfault.RData ca4de9fc56b9223d9612d44d3df1420e *data/patent.RData f53cbfa94cbb22c8ba6a8c5d54895e88 *data/salmonellaTA98.txt.gz aadfe3a142210960be9cfaa393a420ab *data/seizure.RData 309d62c6fbd66165dda517923e184109 *data/tribolium.RData c444a25f85d0973c69e2063fa5349b7d *data/trypanosome.RData 84ee26fbac9ffa0d64a1ed89ad17f751 *data/whiskey.RData 165509cd86d080d103cd33fb52bef045 *inst/CITATION 27b7ef427f26bad318ad6f094b145fff *inst/NEWS.Rd 66abb9674dc55bc9dda69ea64ec37fcb *inst/doc/bootstrapping.R e2057aa3aa3ee107d93fbe11a6f356d6 *inst/doc/bootstrapping.Rnw 1da42053ed472396d19ae621f6f91719 *inst/doc/bootstrapping.pdf 6c97ed30becc0159537caec0aa6f7835 *inst/doc/flexmix-intro.R c5a3e4b887e0caf2b8e1383071e699fd *inst/doc/flexmix-intro.Rnw 6049cd14ec3ec21a6d496d7fa4ece28c *inst/doc/flexmix-intro.pdf be0a42d594f1a80d36f502d9f5504e0f *inst/doc/mixture-regressions.R c6745ee38eba2a694d6df5d497adb7b9 *inst/doc/mixture-regressions.Rnw 824194420d5655eadf552ec71e233d41 *inst/doc/mixture-regressions.pdf 6dca73aa5a96343db67a43c9835bb4bf *inst/doc/myConcomitant.R fb9ae81acbfcada421bbd89681834714 *inst/doc/mymclust.R 76f412c774347259ff904027864b759c *inst/doc/regression-examples.R 94b27d5378111289be0df2afaf4bdfab *inst/doc/regression-examples.Rnw e1debbb1eb816667a069e840e121cb57 *inst/doc/regression-examples.pdf 76a28867c7670eb60951e3b1d2d77d33 *inst/doc/ziglm.R 1d39a7829ea8ce6aa54c11a0a06780eb *man/AIC-methods.Rd 00c18dd702febe081d951aa73ebb95d0 *man/BIC-methods.Rd cad198606d9e736399f5c5fc540786a1 *man/BregFix.Rd f66f6ead6d7d0350c6a722f53193094e *man/EIC.Rd 85d7f2bae2833443d209050759a2e525 *man/ExLinear.Rd 7ed1926717bdc73c8e6b117c5075eb0e *man/ExNPreg.Rd cf4a61f3fbf44d92894525cbff4ef003 *man/ExNclus.Rd 78e72f66ed96403287e0eb63e33f4ab1 *man/FLXMCdist1.Rd 4c28bd39db43ab9080cbfb2b2d495054 *man/FLXMCfactanal.Rd 692b3facd60fa4125aff97003555a621 *man/FLXMCmvcombi.Rd ba41986c3ff634c81b8f5b44955efe92 *man/FLXMCmvpois.Rd 421d04a7a1c7c967877e1ed3882965f1 *man/FLXMRcondlogit.Rd 4628f562cd45f409dd16ea2cc7d3eca4 *man/FLXMRglmnet.Rd ec245ed6ad4ca2e7d3efee4256b0bdca *man/FLXMRlmer.Rd 9cad098f9190aacb6862d10a2118ffda *man/FLXMRlmmc.Rd 1f2a4972f51646f42278445d3ee41933 *man/FLXMRmgcv.Rd d5fb9e73fdba6d6586e186d93a61a385 *man/FLXMRmultinom.Rd 9a268e3bbf0b5c8ee42fcfaacbdd793e *man/FLXMRrobglm.Rd b2d3a3fdb9370376403175e23e8acea3 *man/FLXMRziglm.Rd 1be70d781f7affbf5b51663f75fa87e0 *man/FLXP-class.Rd e5014d435eb288059e6daa8b9163d8e7 *man/FLXbclust.Rd c87d4220e578ac71869feed74a3d7c47 *man/FLXcomponent-class.Rd 1348b9444773021d770f7463579961ff *man/FLXconcomitant.Rd eff1590dbfbd4e3f1783fe61b20037d6 *man/FLXcontrol-class.Rd f8a1b4148b65b193d78ca1159e888a0d *man/FLXdist-class.Rd 28a563506d19c0836af092f0c3f15775 *man/FLXdist.Rd 47b0ea1622e710755d45534208ad95ea *man/FLXfit.Rd 35115ac86c9cb6a8aa266a755f26cf16 *man/FLXglm.Rd 8403da6e3cb9d44b66a22e98ef944422 *man/FLXglmFix.Rd 24a39ce85331b9ddbb500d499932fee1 *man/FLXmclust.Rd 8a86639f3d09af06293225779bedfd05 *man/FLXmodel-class.Rd afbfbeb61e91eda57fbaa568a39b38e2 *man/FLXnested-class.Rd 0768bed00f7a8554c33329522dfaa169 *man/ICL.Rd 028fc57ce85d724790cc7da759a24dd7 *man/KLdiv.Rd f6736b5d249d5d684f1464180b8a3b59 *man/Lapply-methods.Rd edab8de219b6630db7a71d22366f70d3 *man/Mehta.Rd 
8f55051592bec6c88e23c7d81f57d38b *man/NregFix.Rd fb40ad5bde9b66ddc38d3572ff7429cb *man/betablocker.Rd d67b3b5b0eed42a593c0268b9e6aab21 *man/bioChemists.Rd fdb60dd3f31d2281e7fa21e38a3c8b76 *man/boot.Rd b4db12be29b542f9bea083d3bd0c980e *man/candy.Rd 5a69cda63c913ef33f104c6d5adb3729 *man/dmft.Rd 78db5196994b9a665e4c17fb6653c93f *man/fabricfault.Rd cd0b0ac588e0bd163a930ddbd0d8ae4f *man/fitted.Rd b53ba6892c3f8d98ea7ee048c854a47d *man/flexmix-class.Rd 022f7bae58eaccd9203737c47cf2473d *man/flexmix-internal.Rd 57ab6985b4c34528329c50c5bd67b30c *man/flexmix.Rd 83812639242cedc8bf5bc226f2747c88 *man/group.Rd a187b88ae5b3121c29579aceef270951 *man/logLik-methods.Rd 2ac267bede3099e4807c4b3cc4ef2d86 *man/patent.Rd 3aae6929d9c9dbe26b4be487330edada *man/plot-methods.Rd b3773bddc048e6fbe106eed18469702d *man/plotEll.Rd 2195e1c45f80a83a7b3baf8d449e5b05 *man/posterior.Rd f8ae3162f10350e983e30a5926fa6025 *man/refit.Rd d627472edfd9f5b2c3ec8e0f56296ae4 *man/relabel.Rd cf67fee780ab0ea895b55f07989aa76c *man/rflexmix.Rd 2aa3384e40b6d464a252378e17f08b1c *man/salmonellaTA98.Rd 4d165b442083a5ba599fa8e1c5e5f28a *man/seizure.Rd 4d457ddbe589788191b3ee5021e0ac6d *man/stepFlexmix.Rd 168fda1db5cdaf8e9d0a5410c145e825 *man/tribolium.Rd b5fad3c3973f6658af8b3663f20f4ac3 *man/trypanosome.Rd 4381e5944c15dc03c92a2151c049fca3 *man/whiskey.Rd e2057aa3aa3ee107d93fbe11a6f356d6 *vignettes/bootstrapping.Rnw c5a3e4b887e0caf2b8e1383071e699fd *vignettes/flexmix-intro.Rnw b7d9b60485cfd57d1bfc13e98ff25f48 *vignettes/flexmix.bib 0a928bdb28d680f31098902b157e9204 *vignettes/flexmix.png c6745ee38eba2a694d6df5d497adb7b9 *vignettes/mixture-regressions.Rnw 5a4252ff70a91d81feef9be2ff79c5bf *vignettes/mixture.bib 6dca73aa5a96343db67a43c9835bb4bf *vignettes/myConcomitant.R fb9ae81acbfcada421bbd89681834714 *vignettes/mymclust.R 94b27d5378111289be0df2afaf4bdfab *vignettes/regression-examples.Rnw 76a28867c7670eb60951e3b1d2d77d33 *vignettes/ziglm.R flexmix/inst/0000755000176200001440000000000014404662037012711 5ustar liggesusersflexmix/inst/doc/0000755000176200001440000000000014404662037013456 5ustar liggesusersflexmix/inst/doc/regression-examples.Rnw0000644000176200001440000012407214404637307020152 0ustar liggesusers\documentclass[nojss]{jss} \usepackage{amsfonts,bm,amsmath,amssymb} %%\usepackage{Sweave} %% already provided by jss.cls %%%\VignetteIndexEntry{Applications of finite mixtures of regression models} %%\VignetteDepends{flexmix} %%\VignetteKeywords{R, finite mixture model, generalized linear model, latent class regression} %%\VignettePackage{flexmix} \title{Applications of finite mixtures of regression models} <>= library("stats") library("graphics") library("flexmix") @ \author{Bettina Gr{\"u}n\\ Wirtschaftsuniversit{\"a}t Wien \And Friedrich Leisch\\ Universit\"at f\"ur Bodenkultur Wien} \Plainauthor{Bettina Gr{\"u}n, Friedrich Leisch} \Address{ Bettina Gr\"un\\ Institute for Statistics and Mathematics\\ Wirtschaftsuniversit{\"a}t Wien\\ Welthandelsplatz 1\\ 1020 Wien, Austria\\ E-mail: \email{Bettina.Gruen@R-project.org}\\ Friedrich Leisch\\ Institut f\"ur Angewandte Statistik und EDV\\ Universit\"at f\"ur Bodenkultur Wien\\ Peter Jordan Stra\ss{}e 82\\ 1190 Wien, Austria\\ E-mail: \email{Friedrich.Leisch@boku.ac.at} } \Abstract{ Package \pkg{flexmix} provides functionality for fitting finite mixtures of regression models. The available model class includes generalized linear models with varying and fixed effects for the component specific models and multinomial logit models for the concomitant variable models. 
This model class includes random intercept models where the random part is modelled by a finite mixture instead of a-priori selecting a suitable distribution. The application of the package is illustrated on various datasets which have been previously used in the literature to fit finite mixtures of Gaussian, binomial or Poisson regression models. The \proglang{R} commands are given to fit the proposed models and additional insights are gained by visualizing the data and the fitted models as well as by fitting slightly modified models. } \Keywords{\proglang{R}, finite mixture models, generalized linear models, concomitant variables} \Plainkeywords{R, finite mixture models, generalized linear models, concomitant variables} %%------------------------------------------------------------------------- %%------------------------------------------------------------------------- \begin{document} \SweaveOpts{engine=R, echo=true, height=5, width=8, eps=FALSE, keep.source=TRUE} \setkeys{Gin}{width=0.8\textwidth} <>= options(width=70, prompt = "R> ", continue = "+ ", useFancyQuotes = FALSE) suppressWarnings(RNGversion("3.5.0")) set.seed(1802) library("lattice") ltheme <- canonical.theme("postscript", FALSE) lattice.options(default.theme=ltheme) @ %%------------------------------------------------------------------------- %%------------------------------------------------------------------------- \section{Introduction} Package \pkg{flexmix} provides infrastructure for flexible fitting of finite mixtures models. The design principles of the package allow easy extensibility and rapid prototyping. In addition, the main focus of the available functionality is on fitting finite mixtures of regression models, as other packages in \proglang{R} exist which have specialized functionality for model-based clustering, such as e.g.~\pkg{mclust} \citep{flexmix:Fraley+Raftery:2002a} for finite mixtures of Gaussian distributions. \cite{flexmix:Leisch:2004a} gives a general introduction into the package outlining the main implementational principles and illustrating the use of the package. The paper is also contained as a vignette in the package. An example for fitting mixtures of Gaussian regression models is given in \cite{flexmix:Gruen+Leisch:2006}. This paper focuses on examples of finite mixtures of binomial logit and Poisson regression models. Several datasets which have been previously used in the literature to demonstrate the use of finite mixtures of regression models have been selected to illustrate the application of the package. The model class covered are finite mixtures of generalized linear model with focus on binomial logit and Poisson regressions. The regression coefficients as well as the dispersion parameters of the component specific models are assumed to vary for all components, vary between groups of components, i.e.~to have a nesting, or to be fixed over all components. In addition it is possible to specify concomitant variable models in order to be able to characterize the components. Random intercept models are a special case of finite mixtures with varying and fixed effects as fixed effects are assumed for the coefficients of all covariates and varying effects for the intercept. These models are often used to capture overdispersion in the data which can occur for example if important covariates are omitted in the regression. It is then assumed that the influence of these covariates can be captured by allowing a random distribution for the intercept. 
This illustration does not only show how the package \pkg{flexmix} can be used for fitting finite mixtures of regression models but also indicates the advantages of using an extension package of an environment for statistical computing and graphics instead of a stand-alone package as available visualization techniques can be used for inspecting the data and the fitted models. In addition users already familiar with \proglang{R} and its formula interface should find the model specification and a lot of commands for exploring the fitted model intuitive. %%------------------------------------------------------------------------- %%------------------------------------------------------------------------- \section{Model specification} Finite mixtures of Gaussian regressions with concomitant variable models are given by: \begin{align*} H(y\,|\,\bm{x}, \bm{w}, \bm{\Theta}) &= \sum_{s = 1}^S \pi_s(\bm{w}, \bm{\alpha}) \textrm{N}(y\,|\, \mu_s(\bm{x}), \sigma^2_s), \end{align*} where $\textrm{N}(\cdot\,|\, \mu_s(\bm{x}), \sigma^2_s)$ is the Gaussian distribution with mean $\mu_s(\bm{x}) = \bm{x}' \bm{\beta}^s$ and variance $\sigma^2_s$. $\Theta$ denotes the vector of all parameters of the mixture distribution and the dependent variables are $y$, the independent $\bm{x}$ and the concomitant $\bm{w}$. Finite mixtures of binomial regressions with concomitant variable models are given by: \begin{align*} H(y\,|\,T, \bm{x}, \bm{w}, \bm{\Theta}) &= \sum_{s = 1}^S \pi_s(\bm{w}, \bm{\alpha}) \textrm{Bi}(y\,|\,T, \theta_s(\bm{x})), \end{align*} where $\textrm{Bi}(\cdot\,|\,T, \theta_s(\bm{x}))$ is the binomial distribution with number of trials equal to $T$ and success probability $\theta_s(\bm{x}) \in (0,1)$ given by $\textrm{logit}(\theta_s(\bm{x})) = \bm{x}' \bm{\beta}^s$. Finite mixtures of Poisson regressions are given by: \begin{align*} H(y \,|\, \bm{x}, \bm{w}, \bm{\Theta}) &= \sum_{s = 1}^S \pi_s(\bm{w}, \bm{\alpha}) \textrm{Poi} (y \,|\, \lambda_s(\bm{x})), \end{align*} where $\textrm{Poi}(\cdot\,|\,\lambda_s(\bm{x}))$ denotes the Poisson distribution and $\log(\lambda_s(\bm{x})) = \bm{x}'\bm{\beta}^s$. For all these mixture distributions the coefficients are split into three different groups depending on if fixed, nested or varying effects are specified: \begin{align*} \bm{\beta}^s &= (\bm{\beta}_1, \bm{\beta}^{c(s)}_{2}, \bm{\beta}^{s}_3) \end{align*} where the first group represents the fixed, the second the nested and the third the varying effects. For the nested effects a partition $\mathcal{C} = \{c_s \,|\, s = 1,\ldots S\}$ of the $S$ components is determined where $c_s = \{s^* = 1,\ldots,S \,|\, c(s^*) = c(s)\}$. A similar splitting is possible for the variance of mixtures of Gaussian regression models. The function for maximum likelihood (ML) estimation with the Expectation-Maximization (EM) algorithm is \code{flexmix()} which is described in detail in \cite{flexmix:Leisch:2004a}. It takes as arguments a specification of the component specific model and of the concomitant variable model. The component specific model with varying, nested and fixed effects can be specified with the M-step driver \code{FLXMRglmfix()} which has arguments \code{formula} for the varying, \code{nested} for the nested and \code{fixed} for the fixed effects. 
\code{formula} and \code{fixed} take an argument of class \code{"formula"}, whereas \code{nested} expects an object of class \code{"FLXnested"} or a named list specifying the nested structure with a component \code{k} which is a vector of the number of components in each group of the partition and a component \code{formula} which is a vector of formulas for each group of the partition. In addition there is an argument \code{family} which has to be one of \code{gaussian}, \code{binomial}, \code{poisson} or \code{Gamma} and determines the component specific distribution function as well as an \code{offset} argument. The argument \code{varFix} can be used to determine the structure of the dispersion parameters. If only varying effects are specified the M-step driver \code{FLXMRglm()} can be used which only has an argument \code{formula} for the varying effects and also a \code{family} and an \code{offset} argument. This driver has the advantage that in the M-step the weighted ML estimation is made separately for each component which signifies that smaller model matrices are used. If a mixture model with a lot of components $S$ is fitted to a large data set with $N$ observations and the model matrix used in the M-step of \code{FLXMRglm()} has $N$ rows and $K$ columns, the model matrix used in the M-step of \code{FLXMRglmfix()} has $S N$ rows and up to $S K$ columns. In general the concomitant variable model is assumed to be a multinomial logit model, i.e.~: \begin{align*} \pi_s(\bm{w},\bm{\alpha}) &= \frac{e^{\bm{w}'\bm{\alpha}_s}}{\sum_{u = 1}^S e^{\bm{w}'\bm{\alpha}_u}} \quad \forall s, \end{align*} with $\bm{\alpha} = (\bm{\alpha}'_s)_{s=1,\ldots,S}$ and $\bm{\alpha}_1 \equiv \bm{0}$. This model can be fitted in \pkg{flexmix} with \code{FLXPmultinom()} which takes as argument \code{formula} the formula specification of the multinomial logit part. For fitting the function \code{nnet()} is used from package \pkg{MASS} \citep{flexmix:Venables+Ripley:2002} with the independent variables specified by the formula argument and the dependent variables are given by the a-posteriori probability estimates. %%------------------------------------------------------------------------- %%------------------------------------------------------------------------- \section[Using package flexmix]{Using package \pkg{flexmix}} In the following datasets from different areas such as medicine, biology and economics are used. There are three subsections: for finite mixtures of Gaussian regressions, for finite mixtures of binomial regression models and for finite mixtures of Poisson regression models. %%------------------------------------------------------------------------- \subsection{Finite mixtures of Gaussian regressions} This artificial dataset with 200 observations is given in \cite{flexmix:Gruen+Leisch:2006}. The data is generated from a mixture of Gaussian regression models with three components. There is an intercept with varying effects, an independent variable $x1$, which is a numeric variable, with fixed effects and another independent variable $x2$, which is a categorical variable with two levels, with nested effects. The prior probabilities depend on a concomitant variable $w$, which is also a categorical variable with two levels. Fixed effects are also assumed for the variance. 
The data is illustrated in Figure~\ref{fig:artificialData} and the true underlying model is given by: \begin{align*} H(y\,|\,(x1, x2), w, \bm{\Theta}) &= \sum_{s = 1}^S \pi_s(w, \bm{\alpha}) \textrm{N}(y\,|\, \mu_s, \sigma^2), \end{align*} with $\bm{\beta}^s = (\beta^s_{\textrm{Intercept}}, \beta^{c(s)}_{\textrm{x1}}, \beta_{\textrm{x2}})$. The nesting signifies that $c(1) = c(2)$ and $\beta^{c(3)}_{\textrm{x1}} = 0$. The mixture model is fitted by first loading the package and the dataset and then specifying the component specific model. In a first step a component specific model with only varying effects is specified. Then the fitting function \code{flexmix()} is called repeatedly using \code{stepFlexmix()}. Finally, we order the components such that they are in ascending order with respect to the coefficients of the variable \code{x1}. <>= set.seed(2807) library("flexmix") data("NregFix", package = "flexmix") Model <- FLXMRglm(~ x2 + x1) fittedModel <- stepFlexmix(y ~ 1, model = Model, nrep = 3, k = 3, data = NregFix, concomitant = FLXPmultinom(~ w)) fittedModel <- relabel(fittedModel, "model", "x1") summary(refit(fittedModel)) @ The estimated coefficients indicate that the components differ for the intercept, but that they are not significantly different for the coefficients of $x2$. For $x1$ the coefficient of the first component is not significantly different from zero and the confidence intervals for the other two components overlap. Therefore we fit a modified model, which is equivalent to the true underlying model. The previously fitted model is used for initializing the EM algorithm: <>= Model2 <- FLXMRglmfix(fixed = ~ x2, nested = list(k = c(1, 2), formula = c(~ 0, ~ x1)), varFix = TRUE) fittedModel2 <- flexmix(y ~ 1, model = Model2, cluster = posterior(fittedModel), data = NregFix, concomitant = FLXPmultinom(~ w)) BIC(fittedModel) BIC(fittedModel2) @ The BIC suggests that the restricted model should be preferred. \begin{figure}[tb] \centering \setkeys{Gin}{width=0.95\textwidth} <>= plotNregFix <- NregFix plotNregFix$w <- factor(NregFix$w, levels = 0:1, labels = paste("w =", 0:1)) plotNregFix$x2 <- factor(NregFix$x2, levels = 0:1, labels = paste("x2 =", 0:1)) plotNregFix$class <- factor(NregFix$class, levels = 1:3, labels = paste("Class", 1:3)) print(xyplot(y ~ x1 | x2*w, groups = class, data = plotNregFix, cex = 0.6, auto.key = list(space="right"), layout = c(2,2))) @ \setkeys{Gin}{width=0.8\textwidth} \caption{Sample with 200 observations from the artificial example.} \label{fig:artificialData} \end{figure} <>= summary(refit(fittedModel2)) @ The coefficients are ordered such that the fixed coefficients are first, the nested varying coefficients second and the varying coefficients last. %%------------------------------------------------------------------------- \subsection{Finite mixtures of binomial logit regressions} %%------------------------------------------------------------------------- \subsubsection{Beta blockers} The dataset is analyzed in \cite{flexmix:Aitkin:1999, flexmix:Aitkin:1999a} using a finite mixture of binomial regression models. Furthermore, it is described in \cite{flexmix:McLachlan+Peel:2000} on page 165. The dataset is from a 22-center clinical trial of beta-blockers for reducing mortality after myocardial infarction. A two-level model is assumed to represent the data, where centers are at the upper level and patients at the lower level. 
The data is illustrated in Figure~\ref{fig:beta} and the model is given by: \begin{align*} H(\textrm{Deaths} \,|\, \textrm{Total}, \textrm{Treatment}, \textrm{Center}, \bm{\Theta}) &= \sum_{s = 1}^S \pi_s \textrm{Bi}( \textrm{Deaths} \,|\, \textrm{Total}, \theta_s). \end{align*} First, the center classification is ignored and a binomial logit regression model with treatment as covariate is fitted using \code{glm}, i.e.~$S=1$: <>= data("betablocker", package = "flexmix") betaGlm <- glm(cbind(Deaths, Total - Deaths) ~ Treatment, family = "binomial", data = betablocker) betaGlm @ In the next step the center classification is included by allowing a random effect for the intercept given the centers, i.e.~the coefficients $\bm{\beta}^s$ are given by $(\beta^s_{\textrm{Intercept|Center}}, \beta_{\textrm{Treatment}})$. This signifies that the component membership is fixed for each center. In order to determine the suitable number of components, the mixture is fitted with different numbers of components and the BIC information criterion is used to select an appropriate model. In this case a model with three components is selected. The fitted values for the model with three components are given in Figure~\ref{fig:beta}. <>= betaMixFix <- stepFlexmix(cbind(Deaths, Total - Deaths) ~ 1 | Center, model = FLXMRglmfix(family = "binomial", fixed = ~ Treatment), k = 2:4, nrep = 3, data = betablocker) betaMixFix @ \begin{figure} \centering <>= library("grid") betaMixFix_3 <- getModel(betaMixFix, "3") betaMixFix_3 <- relabel(betaMixFix_3, "model", "Intercept") betablocker$Center <- with(betablocker, factor(Center, levels = Center[order((Deaths/Total)[1:22])])) clusters <- factor(clusters(betaMixFix_3), labels = paste("Cluster", 1:3)) print(dotplot(Deaths/Total ~ Center | clusters, groups = Treatment, as.table = TRUE, data = betablocker, xlab = "Center", layout = c(3, 1), scales = list(x = list(draw = FALSE)), key = simpleKey(levels(betablocker$Treatment), lines = TRUE, corner = c(1,0)))) betaMixFix.fitted <- fitted(betaMixFix_3) for (i in 1:3) { seekViewport(trellis.vpname("panel", i, 1)) grid.lines(unit(1:22, "native"), unit(betaMixFix.fitted[1:22, i], "native"), gp = gpar(lty = 1)) grid.lines(unit(1:22, "native"), unit(betaMixFix.fitted[23:44, i], "native"), gp = gpar(lty = 2)) } @ \caption{Relative number of deaths for the treatment and the control group for each center in the beta blocker dataset. The centers are sorted by the relative number of deaths in the control group. The lines indicate the fitted values for each component of the 3-component mixture model with random intercept and fixed effect for treatment.} \label{fig:beta} \end{figure} In addition the treatment effect can also be included in the random part of the model. As then all coefficients for the covariates and the intercept follow a mixture distribution the component specific model can be specified using \code{FLXMRglm()}. The coefficients are $\bm{\beta}^s=(\beta^s_{\textrm{Intercept|Center}}, \beta^s_{\textrm{Treatment|Center}})$, i.e.~it is assumed that the heterogeneity is only between centers and therefore the aggregated data for each center can be used. <>= betaMix <- stepFlexmix(cbind(Deaths, Total - Deaths) ~ Treatment | Center, model = FLXMRglm(family = "binomial"), k = 3, nrep = 3, data = betablocker) summary(betaMix) @ The full model with a random effect for treatment has a higher BIC and therefore the smaller would be preferred. 
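The BIC values of the restricted and the full model can also be compared directly, where \code{getModel()} is used to extract the three-component solution with a fixed treatment effect from \code{betaMixFix}:
<<>>=
BIC(getModel(betaMixFix, "3"))
BIC(betaMix)
@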
The default plot of the returned \code{flexmix} object is a rootogramm of the a-posteriori probabilities where observations with a-posteriori probabilities smaller than \code{eps} are omitted. With argument \code{mark} the component is specified to have those observations marked which are assigned to this component based on the maximum a-posteriori probabilities. This indicates which components overlap. <>= print(plot(betaMixFix_3, mark = 1, col = "grey", markcol = 1)) @ The default plot of the fitted model indicates that the components are well separated. In addition component 1 has a slight overlap with component 2 but none with component 3. The fitted parameters of the component specific models can be accessed with: <>= parameters(betaMix) @ The cluster assignments using the maximum a-posteriori probabilities are obtained with: <>= table(clusters(betaMix)) @ The estimated probabilities for each component for the treated patients and those in the control group can be obtained with: <>= predict(betaMix, newdata = data.frame(Treatment = c("Control", "Treated"))) @ or <>= fitted(betaMix)[c(1, 23), ] @ A further analysis of the model is possible with function \code{refit()} which returns the estimated coefficients together with the standard deviations, z-values and corresponding p-values: <>= summary(refit(getModel(betaMixFix, "3"))) @ The printed coefficients are ordered to have the fixed effects before the varying effects. %%----------------------------------------------------------------------- \subsubsection{Mehta et al. trial} This dataset is similar to the beta blocker dataset and is also analyzed in \cite{flexmix:Aitkin:1999a}. The dataset is visualized in Figure~\ref{fig:mehta}. The observation for the control group in center 15 is slightly conspicuous and might classify as an outlier. The model is given by: \begin{align*} H(\textrm{Response} \,|\, \textrm{Total}, \bm{\Theta}) &= \sum_{s = 1}^S \pi_s \textrm{Bi}( \textrm{Response} \,|\, \textrm{Total}, \theta_s), \end{align*} with $\bm{\beta}^s = (\beta^s_{\textrm{Intercept|Site}}, \beta_{\textrm{Drug}})$. This model is fitted with: <>= data("Mehta", package = "flexmix") mehtaMix <- stepFlexmix(cbind(Response, Total - Response)~ 1 | Site, model = FLXMRglmfix(family = "binomial", fixed = ~ Drug), control = list(minprior = 0.04), nrep = 3, k = 3, data = Mehta) summary(mehtaMix) @ One component only contains the observations for center 15 and in order to be able to fit a mixture with such a small component it is necessary to modify the default argument for \code{minprior} which is 0.05. The fitted values for this model are given separately for each component in Figure~\ref{fig:mehta}. \begin{figure} \centering <>= Mehta$Site <- with(Mehta, factor(Site, levels = Site[order((Response/Total)[1:22])])) clusters <- factor(clusters(mehtaMix), labels = paste("Cluster", 1:3)) print(dotplot(Response/Total ~ Site | clusters, groups = Drug, layout = c(3,1), data = Mehta, xlab = "Site", scales = list(x = list(draw = FALSE)), key = simpleKey(levels(Mehta$Drug), lines = TRUE, corner = c(1,0)))) mehtaMix.fitted <- fitted(mehtaMix) for (i in 1:3) { seekViewport(trellis.vpname("panel", i, 1)) sapply(1:nlevels(Mehta$Drug), function(j) grid.lines(unit(1:22, "native"), unit(mehtaMix.fitted[Mehta$Drug == levels(Mehta$Drug)[j], i], "native"), gp = gpar(lty = j))) } @ \caption{Relative number of responses for the treatment and the control group for each site in the Mehta et al.~trial dataset together with the fitted values. 
The sites are sorted by the relative number of responses in the control group.} \label{fig:mehta} \end{figure} If also a random effect for the coefficient of $\textrm{Drug}$ is fitted, i.e.~$\bm{\beta}^s = (\beta^s_{\textrm{Intercept|Site}}, \beta^s_{\textrm{Drug|Site}})$, this is estimated by: <>= mehtaMix <- stepFlexmix(cbind(Response, Total - Response) ~ Drug | Site, model = FLXMRglm(family = "binomial"), k = 3, data = Mehta, nrep = 3, control = list(minprior = 0.04)) summary(mehtaMix) @ The BIC is smaller for the larger model and this indicates that the assumption of an equal drug effect for all centers is not confirmed by the data. Given Figure~\ref{fig:mehta} a two-component model with fixed treatment is also fitted to the data where site 15 is omitted: <>= Mehta.sub <- subset(Mehta, Site != 15) mehtaMix <- stepFlexmix(cbind(Response, Total - Response) ~ 1 | Site, model = FLXMRglmfix(family = "binomial", fixed = ~ Drug), data = Mehta.sub, k = 2, nrep = 3) summary(mehtaMix) @ %%----------------------------------------------------------------------- \subsubsection{Tribolium} A finite mixture of binomial regressions is fitted to the Tribolium dataset given in \cite{flexmix:Wang+Puterman:1998}. The data was collected to investigate whether the adult Tribolium species Castaneum has developed an evolutionary advantage to recognize and avoid eggs of its own species while foraging, as beetles of the genus Tribolium are cannibalistic in the sense that adults eat the eggs of their own species as well as those of closely related species. The experiment isolated a number of adult beetles of the same species and presented them with a vial of 150 eggs (50 of each type), the eggs being thoroughly mixed to ensure uniformity throughout the vial. The data gives the consumption data for adult Castaneum species. It reports the number of Castaneum, Confusum and Madens eggs, respectively, that remain uneaten after two day exposure to the adult beetles. Replicates 1, 2, and 3 correspond to different occasions on which the experiment was conducted. The data is visualized in Figure~\ref{fig:tribolium} and the model is given by: \begin{align*} H(\textrm{Remaining} \,|\, \textrm{Total}, \bm{\Theta}) &= \sum_{s = 1}^S \pi_s(\textrm{Replicate}, \bm{\alpha}) \textrm{Bi}( \textrm{Remaining} \,|\, \textrm{Total}, \theta_s), \end{align*} with $\bm{\beta}^s = (\beta^s_{\textrm{Intercept}}, \bm{\beta}_{\textrm{Species}})$. This model is fitted with: <>= data("tribolium", package = "flexmix") TribMix <- stepFlexmix(cbind(Remaining, Total - Remaining) ~ 1, k = 2:3, model = FLXMRglmfix(fixed = ~ Species, family = "binomial"), concomitant = FLXPmultinom(~ Replicate), data = tribolium) @ The model which is selected as the best in \cite{flexmix:Wang+Puterman:1998} can be estimated with: <>= modelWang <- FLXMRglmfix(fixed = ~ I(Species == "Confusum"), family = "binomial") concomitantWang <- FLXPmultinom(~ I(Replicate == 3)) TribMixWang <- stepFlexmix(cbind(Remaining, Total - Remaining) ~ 1, data = tribolium, model = modelWang, concomitant = concomitantWang, k = 2) summary(refit(TribMixWang)) @ \begin{figure} \centering <>= clusters <- factor(clusters(TribMixWang), labels = paste("Cluster", 1:TribMixWang@k)) print(dotplot(Remaining/Total ~ factor(Replicate) | clusters, groups = Species, data = tribolium[rep(1:9, each = 3) + c(0:2)*9,], xlab = "Replicate", auto.key = list(corner = c(1,0)))) @ \caption{Relative number of remaining beetles for the number of replicate. 
The different panels are according to the cluster assignemnts based on the a-posteriori probabilities of the model suggested in \cite{flexmix:Wang+Puterman:1998}.} \label{fig:tribolium} \end{figure} \cite{flexmix:Wang+Puterman:1998} also considered a model where they omit one conspicuous observation. This model can be estimated with: <>= TribMixWangSub <- stepFlexmix(cbind(Remaining, Total - Remaining) ~ 1, k = 2, data = tribolium[-7,], model = modelWang, concomitant = concomitantWang) @ %%----------------------------------------------------------------------- \subsubsection{Trypanosome} The data is used in \cite{flexmix:Follmann+Lambert:1989}. It is from a dosage-response analysis where the proportion of organisms belonging to different populations shall be assessed. It is assumed that organisms belonging to different populations are indistinguishable other than in terms of their reaction to the stimulus. The experimental technique involved inspection under the microscope of a representative aliquot of a suspension, all organisms appearing within two fields of view being classified either alive or dead. Hence the total numbers of organisms present at each dose and the number showing the quantal response were both random variables. The data is illustrated in Figure~\ref{fig:trypanosome}. The model which is proposed in \cite{flexmix:Follmann+Lambert:1989} is given by: \begin{align*} H(\textrm{Dead} \,|\,\bm{\Theta}) &= \sum_{s = 1}^S \pi_s \textrm{Bi}( \textrm{Dead} \,|\, \theta_s), \end{align*} where $\textrm{Dead} \in \{0,1\}$ and with $\bm{\beta}^s = (\beta^s_{\textrm{Intercept}}, \bm{\beta}_{\textrm{log(Dose)}})$. This model is fitted with: <>= data("trypanosome", package = "flexmix") TrypMix <- stepFlexmix(cbind(Dead, 1-Dead) ~ 1, k = 2, nrep = 3, data = trypanosome, model = FLXMRglmfix(family = "binomial", fixed = ~ log(Dose))) summary(refit(TrypMix)) @ The fitted values are given in Figure~\ref{fig:trypanosome} together with the fitted values of a generalized linear model in order to facilitate comparison of the two models. \begin{figure} \centering <>= tab <- with(trypanosome, table(Dead, Dose)) Tryp.dat <- data.frame(Dead = tab["1",], Alive = tab["0",], Dose = as.numeric(colnames(tab))) plot(Dead/(Dead+Alive) ~ Dose, data = Tryp.dat) Tryp.pred <- predict(glm(cbind(Dead, 1-Dead) ~ log(Dose), family = "binomial", data = trypanosome), newdata=Tryp.dat, type = "response") TrypMix.pred <- predict(TrypMix, newdata = Tryp.dat, aggregate = TRUE)[[1]] lines(Tryp.dat$Dose, Tryp.pred, lty = 2) lines(Tryp.dat$Dose, TrypMix.pred, lty = 3) legend(4.7, 1, c("GLM", "Mixture model"), lty=c(2, 3), xjust=0, yjust=1) @ \caption{Relative number of deaths for each dose level together with the fitted values for the generalized linear model (``GLM'') and the random intercept model (``Mixture model'').} \label{fig:trypanosome} \end{figure} %%------------------------------------------------------------------------- \subsection{Finite mixtures of Poisson regressions} % %%----------------------------------------------------------------------- \subsubsection{Fabric faults} The dataset is analyzed using a finite mixture of Poisson regression models in \cite{flexmix:Aitkin:1996}. Furthermore, it is described in \cite{flexmix:McLachlan+Peel:2000} on page 155. It contains 32 observations on the number of faults in rolls of a textile fabric. 
A random intercept model is used where a fixed effect is assumed for the logarithm of length: <>= data("fabricfault", package = "flexmix") fabricMix <- stepFlexmix(Faults ~ 1, model = FLXMRglmfix(family="poisson", fixed = ~ log(Length)), data = fabricfault, k = 2, nrep = 3) summary(fabricMix) summary(refit(fabricMix)) Lnew <- seq(0, 1000, by = 50) fabricMix.pred <- predict(fabricMix, newdata = data.frame(Length = Lnew)) @ The intercept of the first component is not significantly different from zero for a signficance level of 0.05. We therefore also fit a modified model where the intercept is a-priori set to zero for the first component. This nested structure is given as part of the model specification with argument \code{nested}. <>= fabricMix2 <- flexmix(Faults ~ 0, data = fabricfault, cluster = posterior(fabricMix), model = FLXMRglmfix(family = "poisson", fixed = ~ log(Length), nested = list(k=c(1,1), formula=list(~0,~1)))) summary(refit(fabricMix2)) fabricMix2.pred <- predict(fabricMix2, newdata = data.frame(Length = Lnew)) @ The data and the fitted values for each of the components for both models are given in Figure~\ref{fig:fabric}. \begin{figure} \centering <>= plot(Faults ~ Length, data = fabricfault) sapply(fabricMix.pred, function(y) lines(Lnew, y, lty = 1)) sapply(fabricMix2.pred, function(y) lines(Lnew, y, lty = 2)) legend(190, 25, paste("Model", 1:2), lty=c(1, 2), xjust=0, yjust=1) @ \caption{Observed values of the fabric faults dataset together with the fitted values for the components of each of the two fitted models.} \label{fig:fabric} \end{figure} %%----------------------------------------------------------------------- \subsubsection{Patent} The patent data given in \cite{flexmix:Wang+Cockburn+Puterman:1998} consist of 70 observations on patent applications, R\&D spending and sales in millions of dollar from pharmaceutical and biomedical companies in 1976 taken from the National Bureau of Economic Research R\&D Masterfile. The observations are displayed in Figure~\ref{fig:patent}. The model which is chosen as the best in \cite{flexmix:Wang+Cockburn+Puterman:1998} is given by: \begin{align*} H(\textrm{Patents} \,|\, \textrm{lgRD}, \textrm{RDS}, \bm{\Theta}) &= \sum_{s = 1}^S \pi_s(\textrm{RDS}, \bm{\alpha}) \textrm{Poi} ( \textrm{Patents} \,|\, \lambda_s), \end{align*} and $\bm{\beta}^s = (\beta^s_{\textrm{Intercept}}, \beta^s_{\textrm{lgRD}})$. The model is fitted with: <>= data("patent", package = "flexmix") ModelPat <- FLXMRglm(family = "poisson") FittedPat <- stepFlexmix(Patents ~ lgRD, k = 3, nrep = 3, model = ModelPat, data = patent, concomitant = FLXPmultinom(~ RDS)) summary(FittedPat) @ The fitted values for the component specific models and the concomitant variable model are given in Figure~\ref{fig:patent}. The plotting symbol of the observations corresponds to the induced clustering given by \code{clusters(FittedPat)}. This model is modified to have fixed effects for the logarithmized R\&D spendings, i.e.~$\bm(\beta)^s = (\beta^s_{\textrm{Intercept}}, \beta_{\textrm{lgRD}})$. The already fitted model is used for initialization, i.e.~the EM algorithm is started with an M-step given the a-posteriori probabilities. 
<>= ModelFixed <- FLXMRglmfix(family = "poisson", fixed = ~ lgRD) FittedPatFixed <- flexmix(Patents ~ 1, model = ModelFixed, cluster = posterior(FittedPat), concomitant = FLXPmultinom(~ RDS), data = patent) summary(FittedPatFixed) @ The fitted values for the component specific models and the concomitant variable model of this model are also given in Figure~\ref{fig:patent}. \begin{figure} \centering \setkeys{Gin}{width=0.95\textwidth} <>= lgRDv <- seq(-3, 5, by = 0.05) newdata <- data.frame(lgRD = lgRDv) plotData <- function(fitted) { with(patent, data.frame(Patents = c(Patents, unlist(predict(fitted, newdata = newdata))), lgRD = c(lgRD, rep(lgRDv, 3)), class = c(clusters(fitted), rep(1:3, each = nrow(newdata))), type = rep(c("data", "fit"), c(nrow(patent), nrow(newdata)*3)))) } plotPatents <- cbind(plotData(FittedPat), which = "Wang et al.") plotPatentsFixed <- cbind(plotData(FittedPatFixed), which = "Fixed effects") plotP <- rbind(plotPatents, plotPatentsFixed) rds <- seq(0, 3, by = 0.02) x <- model.matrix(FittedPat@concomitant@formula, data = data.frame(RDS = rds)) plotConc <- function(fitted) { E <- exp(x%*%fitted@concomitant@coef) data.frame(Probability = as.vector(E/rowSums(E)), class = rep(1:3, each = nrow(x)), RDS = rep(rds, 3)) } plotConc1 <- cbind(plotConc(FittedPat), which = "Wang et al.") plotConc2 <- cbind(plotConc(FittedPatFixed), which = "Fixed effects") plotC <- rbind(plotConc1, plotConc2) print(xyplot(Patents ~ lgRD | which, data = plotP, groups=class, xlab = "log(R&D)", panel = "panel.superpose", type = plotP$type, panel.groups = function(x, y, type = "p", subscripts, ...) { ind <- plotP$type[subscripts] == "data" panel.xyplot(x[ind], y[ind], ...) panel.xyplot(x[!ind], y[!ind], type = "l", ...) }, scales = list(alternating=FALSE), layout=c(1,2), as.table=TRUE), more=TRUE, position=c(0,0,0.6, 1)) print(xyplot(Probability ~ RDS | which, groups = class, data = plotC, type = "l", scales = list(alternating=FALSE), layout=c(1,2), as.table=TRUE), position=c(0.6, 0.01, 1, 0.99)) @ \caption{Patent data with the fitted values of the component specific models (left) and the concomitant variable model (right) for the model in \citeauthor{flexmix:Wang+Cockburn+Puterman:1998} and with fixed effects for $\log(\textrm{R\&D})$. The plotting symbol for each observation is determined by the component with the maximum a-posteriori probability.} \label{fig:patent} \end{figure} \setkeys{Gin}{width=0.8\textwidth} With respect to the BIC the full model is better than the model with the fixed effects. However, fixed effects have the advantage that the different components differ only in their baseline and the relation between the components in return of investment for each additional unit of R\&D spending is constant. Due to a-priori domain knowledge this model might seem more plausible. The fitted values for the constrained model are also given in Figure~\ref{fig:patent}. %%----------------------------------------------------------------------- \subsubsection{Seizure} The data is used in \cite{flexmix:Wang+Puterman+Cockburn:1996} and is from a clinical trial where the effect of intravenous gamma-globulin on suppression of epileptic seizures is studied. There are daily observations for a period of 140 days on one patient, where the first 27 days are a baseline period without treatment, the remaining 113 days are the treatment period. 
%%-----------------------------------------------------------------------
\subsubsection{Seizure}

The data is used in \cite{flexmix:Wang+Puterman+Cockburn:1996} and is from a clinical trial where the effect of intravenous gamma-globulin on the suppression of epileptic seizures is studied. There are daily observations for a period of 140 days on one patient, where the first 27 days are a baseline period without treatment and the remaining 113 days are the treatment period.

The model proposed in \cite{flexmix:Wang+Puterman+Cockburn:1996} is given by:
\begin{align*}
  H(\textrm{Seizures} \,|\, (\textrm{Treatment}, \textrm{log(Day)}, \textrm{log(Hours)}), \bm{\Theta}) &=
  \sum_{s = 1}^S \pi_s \textrm{Poi}(\textrm{Seizures} \,|\, \lambda_s),
\end{align*}
where $\bm{\beta}^s = (\beta^s_{\textrm{Intercept}}, \beta^s_{\textrm{Treatment}}, \beta^s_{\textrm{log(Day)}}, \beta^s_{\textrm{Treatment:log(Day)}})$ and $\textrm{log(Hours)}$ is used as offset. This model is fitted with:
<<>>=
data("seizure", package = "flexmix")
seizMix <- stepFlexmix(Seizures ~ Treatment * log(Day),
  data = seizure, k = 2, nrep = 3,
  model = FLXMRglm(family = "poisson", offset = log(seizure$Hours)))
summary(seizMix)
summary(refit(seizMix))
@

A model with different contrasts, which directly estimates the coefficients for the jump when changing between the baseline and the treatment period, is given by:
<<>>=
seizMix2 <- flexmix(Seizures ~ Treatment * log(Day/27),
  data = seizure, cluster = posterior(seizMix),
  model = FLXMRglm(family = "poisson", offset = log(seizure$Hours)))
summary(seizMix2)
summary(refit(seizMix2))
@

A third model, which allows no jump at the change between the baseline and the treatment period, is fitted with:
<<>>=
seizMix3 <- flexmix(Seizures ~ log(Day/27)/Treatment,
  data = seizure, cluster = posterior(seizMix),
  model = FLXMRglm(family = "poisson", offset = log(seizure$Hours)))
summary(seizMix3)
summary(refit(seizMix3))
@

With respect to the BIC criterion the smaller model with no jump is preferred. This is also the more intuitive model from a practitioner's point of view, as it does not seem plausible that starting the treatment gives an immediate significant improvement; rather, the improvement develops over time. The data points together with the fitted values for each component of the two models are given in Figure~\ref{fig:seizure}. It can clearly be seen that the fitted values are nearly equal, which also supports the smaller model.

\begin{figure}
\centering
<<>>=
plot(Seizures/Hours ~ Day, pch = c(1, 3)[as.integer(Treatment)], data = seizure)
abline(v = 27.5, lty = 2, col = "grey")
legend(140, 9, c("Baseline", "Treatment"), pch = c(1, 3), xjust = 1, yjust = 1)
matplot(seizure$Day, fitted(seizMix)/seizure$Hours, type = "l", add = TRUE, lty = 1, col = 1)
matplot(seizure$Day, fitted(seizMix3)/seizure$Hours, type = "l", add = TRUE, lty = 3, col = 1)
legend(140, 7, paste("Model", c(1, 3)), lty = c(1, 3), xjust = 1, yjust = 1)
@
\caption{Observed values for the seizure dataset together with the fitted values for the components of the two different models.}
\label{fig:seizure}
\end{figure}
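The visual comparison in Figure~\ref{fig:seizure} can also be complemented by a numerical check. The following chunk is a minimal sketch (not evaluated here) which contrasts the BIC values and the component-wise fitted values of the two models; it assumes that the component labels of the two fits correspond to each other, which is plausible here because \code{seizMix3} was initialized with the posterior probabilities of \code{seizMix}.
<<eval=FALSE>>=
## Sketch (not evaluated): BIC of the model with and without a jump at the
## start of the treatment period.
BIC(seizMix)
BIC(seizMix3)
## Differences of the component-wise fitted values, assuming that the
## component labels of the two fits match.
summary(as.vector(fitted(seizMix) - fitted(seizMix3)))
@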
%%-----------------------------------------------------------------------
\subsubsection{Ames salmonella assay data}

The Ames salmonella assay dataset was used in \cite{flexmix:Wang+Puterman+Cockburn:1996}. They propose a model given by:
\begin{align*}
  H(\textrm{y} \,|\, \textrm{x}, \bm{\Theta}) &= \sum_{s = 1}^S \pi_s \textrm{Poi}(\textrm{y} \,|\, \lambda_s),
\end{align*}
where $\bm{\beta}^s = (\beta^s_{\textrm{Intercept}}, \beta_{\textrm{x}}, \beta_{\textrm{log(x+10)}})$. The model is fitted with:
<<>>=
data("salmonellaTA98", package = "flexmix")
salmonMix <- stepFlexmix(y ~ 1, data = salmonellaTA98, k = 2, nrep = 3,
  model = FLXMRglmfix(family = "poisson", fixed = ~ x + log(x + 10)))
@

\begin{figure}
\centering
<<>>=
salmonMix.pr <- predict(salmonMix, newdata = salmonellaTA98)
plot(y ~ x, data = salmonellaTA98, pch = as.character(clusters(salmonMix)),
     xlab = "Dose of quinoline",
     ylab = "Number of revertant colonies of salmonella",
     ylim = range(c(salmonellaTA98$y, unlist(salmonMix.pr))))
for (i in 1:2) lines(salmonellaTA98$x, salmonMix.pr[[i]], lty = i)
@
\caption{Means and classification for the assay data according to the estimated posterior probabilities based on the fitted model.}
\label{fig:almes}
\end{figure}

%%-----------------------------------------------------------------------
\section{Conclusions and future work}

Package \pkg{flexmix} can be used to fit finite mixtures of regressions to datasets used in the literature to illustrate these models. The results can be reproduced and additional insights can be gained using the visualization methods available in \proglang{R}. The fitted model is an object in \proglang{R} which can be explored using \code{show()}, \code{summary()} or \code{plot()}, as suitable methods have been implemented for objects of class \code{"flexmix"} which are returned by \code{flexmix()}.

In the future it would be desirable to have more diagnostic tools available to analyze the model fit and to compare different models. The use of resampling methods would be convenient as they can be applied to all kinds of mixture models and would therefore suit the purpose of the package well, which is the flexible modelling of various finite mixture models. Furthermore, an additional visualization method for the fitted coefficients of the mixture would facilitate the comparison of the components.

%%-----------------------------------------------------------------------
\section*{Computational details}

<<>>=
SI <- sessionInfo()
pkgs <- paste(sapply(c(SI$otherPkgs, SI$loadedOnly),
  function(x) paste("\\\\pkg{", x$Package, "} ", x$Version, sep = "")),
  collapse = ", ")
@

All computations and graphics in this paper have been done using \proglang{R} version \Sexpr{getRversion()} with the packages \Sexpr{pkgs}.

%%-----------------------------------------------------------------------
\section*{Acknowledgments}

This research was supported by the Austrian Science Foundation (FWF) under grant P17382 and the Austrian Academy of Sciences ({\"O}AW) through a DOC-FFORTE scholarship for Bettina Gr{\"u}n.

%%-----------------------------------------------------------------------
\bibliography{flexmix}

\end{document}
}/U2$8:zFY"HF *Q(cN¶SՐ|0u.L1Ev#/ T>@]l!:2%ځ~RQ R?{!RCCߑY2W3S%-p>T]"y99&?({mO8f`F)!U|S%a))goOl^J6=6~}ͮQs4n-34 @c034<(v@G0 >na… \$FIh;asdύ1,Y+*|Ƅy(P1CIQy TTqVޏ%,P)aዟ%aa _؄ _*|/xc0 T-3Pf9Ew 'o"*9&*l!^PaC{C+9aa 60qk XoFAaQ6-pJ,Zos@a^w>w/>"(|%^ 6(ʲ'PIR ە$ JK@9 XK Oݙ$Qk-P؀)ynz†BWG->Lؕs]0aCq KPʋ _kȈ [e/  0lz{sݔvRBy ]L'§4lnK>̼%1@u}زjv0WqL8>̶bû)+Öq}Ne%Y:FǼ?lݷW>lS3 >l=ye]YyGma6 6gy B h5h-v ,M#f1~:1^81^qcnr~~цR Ր0f+} %c^Jߘ/k w_rP9J(y} )TCR܏Ҋ@X@ _RqMVTPZQ+ UJ+)TCR܏IQ46n4␝I+>T1)F8hĩB5$8Yw MVr {r_r~7QA_EmwQt c&+g8%mE Qڠ0468(MDhmȠ Qs/ԍt [ܬmZ_of5, !h));  ʆK ՠd0w"$i(w=GY9-ܼAnj#XHL*~H~v,悉۳-ܯ|2Y rܯގx{Vk!)aǃKmhp=I|$))vO)) ))D ) B"))BzU}eJ`f`0SR3%pgڅΔLI$ęBI͉3%E8\grLA$7儙0\f rLr8NI 3ɽ8a&9'$t>p:S8)· NCݭ>LއtMt><|(t>{.ub·:HC>aن00p>gr>3x{hwřO~|8F>;ZuL1a&'0p|Lzmg:p@Dt!+N.,0<3 (S-dhQRZ/.}g-QZv_}ZLl?l)SX;װ9ɱs5y05즗*AԊo7Z V@7Z VQ+A*I%Yv.mX1Em`<1Al/ՠ\~bNN6[\IO|-Y'('{3hC!ٛ]̥#NLlV'RFmO; X|dOtl, Yw͎3`I7lswGb  6N=;'bYz7P<,Olű>ܸ2bp*S('ShLb5Q钢A[~R3xD^׍r1bPCRRgSf ݚnk1SNf ʰ?QsF:R%^g2\3ub[8`JlJ&YbS;ɌA鍙 A9:2%dB90({MYrwŲ,3b.͠Q?uE3`Y'eU(;Uir25ePjBJImڪPrK[2t\TŠVI –ڊJ@YY[ Q[ٍ೶2bVvͬ|}yTVvάkVV+=*?*+C=feeyfe2YYQY22+~=*>f1w~TVv@;+cyTV,tVVp͔Gee;ʊDyTVʊLʊ6DyTV ʠu&<*+K7++uLxTVhsVVv߬9<'QYŬ Rյ.sQY-#ۃ))%Q&'6~٣Ň=h$bxǴG}@أݽst:G2Aݢ9BXa%/k=nw[b^F݆OktP"!kYY݇5W.|[h!+^ ~_ŔnWkc#5:(VĭѾ-;EktP>:(yVktP k۠sH\h7ϙ^{@k{$݆WctPx5nCآ}!lѾ) 0EhO4dJA[Qh L<9G^E&l٢£&٢m@MmVD 4Efav;CS[27E'_2Ef'hvj-<"):8MnS퍷MGXDZLZr>\TDlDe%:(;EZݦ?4&@JhtPDS=t? AQ'`F!ڙc= Am)4Du0 nIJ7 nA~C A8i oG![_ nZ? 0 n.CtPC Q7T `Ό{ @Dʇ娖M0 7 54C};PSo{TigeI/;{kt/73Sn6'jmvXmAlnGwEOaT&@)Tc :( \dcev!aCa=D [x_A-H,n0햎`vKFg(]ę] ^=i.؟hoX}("jW4ڹ4j"EmS"urDDmbi"jWVМ#"j`,D DFDDmbUt]>D-*kQ  jSQ$@_L@RE0/ zC5Af:Z{@Զ_܊*D-+!QuCmkzMx%8xxu0H7P hڪ[.XI[x%0҉P\/WCvXC04zv]YOGCr!zt 0/elb|--Pq,0CvPO1 5Mvm h ]E`y|SPK{UtkC=-P3Z5@ezbH E>B$"/B$|\sMH-X]n7)@B=PP&$HMr0Y ,7e%B݂CvP - \ eAXz_8& tA Q T Nn.]A/T& zOC.PP PP](w[qQ X bA^@C0N/'Ԕw2@oopn3`.W_seE`Ipo,}o~g+g[?#JqcHjy)هgeLCL^ILwȕZ>fdУ>Wk]t*-_KZvhs j4joWEQd=*:(zBozfWHw?ZtuC:*{pvƓuWuRfSk7^=\d YUQ ^<&ĨouLfBydkly-[.>Kq:&3?#{xLf)YOlcJxJѷWG-GwqoBd`C2N:!rq !̓⼋dQdv7C2s`:C2s`6k(QW:'3&nsbQm!duM6)^m:9{`M恵M\xǃf9#pZŃ2q2!XtPf>X{uR'œ2 G[:)3?,:f-vquP.WpA^Ām<(^M~Z~~LV!z< e"mNZv\X]<@B)8=72BO^Fy)>@F55..qz+B5PK#dؠw- 6ݞMLН7T2AMm4m̹6LJaePmdZ)\B j7q`q j^Ud.7"^,K-jo^F}C EO]}]2 %$f)@@E/6/́ E}xf /9Ԗ@QA=AQ# p/%@Q§DSEhm5ocEM!`(jc;-O\Ё 8ýq&zoE݅U-̙>DM_6() n͍zD-f Q 8'H?0cD-6Av%YWק$jWbS"E ^KC/]X]GNXzﶰPs',jC)KYPP AZ , Zvx %A Bm+nczm+7B/ejA,((ON-((sH "ɓ$iqPl5fկ?jߐ 휄?!ƽpP R&L)r&jc]h3z>/ 椈@PZ~ "T@V AjxAٱ @aHkɟ!'B].Fk:\UA(  <~'eŬSo3:Ë~'v0JG,'m-'/?~3J;_nYdiiϭ|Ol~~ǖ巅ݷ_N߾|g+%;}Zտ+䯣V00^O4OܷuLbk{~鳹ѻ~??Q3 :מZ/^>~c~Nr 'v|ok72G/5qleǛ}z9ss~u4g`/g!|oSX/| _~ /lϻ-G=3.<۶aߧP?%cK cssU34ֽUF:~u~1|nzS^oES[=1Gj޳wck\حoa_R<27ƛ9*gПcH zw>#0x/woN Jcz۰q^?}Bv/<F_<F=~sX_~xWw_oUL<c>2mIm_<>ؽQ6g=_dz:Zng:j0fX_-9GԘUY J$X?|b=~X㷟)/?3f~BKƏ7BKno.2*?Q*endstream endobj 268 0 obj << /Filter /FlateDecode /Length 1987 >> stream xY[~R"Kvmb v6ZR[ƺc 5CzO]Mz[H :дByЏיxѣwM۾)pn@&Xe5y(`rsro첿nqfqWVl߯g10}l5ӼOk(]{Taߖ_i,6}ߵ"ݍey?/.}t-rzF:-$:lZi'2a޴Wr! 
flexmix/inst/doc/flexmix-intro.pdf0000644000176200001440000061023014404662040016752 0ustar liggesusers[binary PDF vignette; compressed stream data omitted. Recoverable embedded metadata: Title "FlexMix: A General Framework for Finite Mixture Models and Latent Class Regression in R"; Author Friedrich Leisch; Keywords "R, finite mixture models, model based clustering, latent class regression"; produced with LaTeX (hyperref) and GPL Ghostscript 10.00.0; dated 2023-03-16.]
d$*"z䨟e)!pT2~'Q|J!YetAj8} ^z 膖=LMX+'a;b sp7M>]%c ,~'TuEkRTkuOX#إa6V H娹\81+PYM,"akh u {.qlZ źYOw+"[-!8uՄYR0תqq i-D| :^ _u[wcL(-$9~}keBnkxGzJ5hZR+=gKAQ_rY)%3HJ-uFPv6r(:>d^X/6GzcL@8=;7,nX.p,!`k1ߪqo/+a@6\u3OR SU ^ 7u1q y #Hwӎn1ЎjޢWr:V)h%쭡A15wqSnlw P fٍww#GF9ĞpwTNdSi0Urեd*jaLOΥ 2E@hŷ~׏Ca[0LXݰ]X=Y{gꚑg\FB膤>E UJ( =iWb7C(WO(%@)O5n&&wkxGQl=^?5<; dT$1DMogP +3^}Ězyw9ȬPe9:d0MH7; (Q=(ni|@CHXd;<fjaov?E\z˧$8)0s6}쓭{X3$PJX`s18G! e\KCt> 6 3#N;_oj tWldmϲo k^ǣ9vH1"`ZzttjoT"ʅT<kܦ:z"o6|[$2%KY:]U)UcIhfeVh|:`w1E!2|d_~E2 E< [֓Iu~U^me ,H99hLþCDp1 勎˧vĖKq~یE`UY7o>ӨOW}ruD{~NIs4$|M5u0b n>]NܳL,;b3f䘇1J.o ӊ$ucc3 $0uV| —tH_W0zX`A>Kx0 \ElaF֒!h8!ٍݹ`TY@r"fNp2(漫x_@K71!5P=79gxذEYsTZIlD?QDM[z)򡨚-3Wrgq}#W ީh.:!K[^eh8~DwT!zp-`+N6'ܗ!H U39?FSTk8Za?Qyd9LHWy xyV%ļEspv t)|"}HkHkt{ʼnxl ~O"W"x^qɧ v9cɅ.1/l~Yi&E7fa )4IaTϩ#qr'zWCLA9 '.?> )'н>]{Ѹz^E|Q[@ ]8[+>4JTmx.0lG:`Qum]׵W3pg+o`jw%>d8N2Ji),YMl^ygp |}%ٟ>HF^5 ژׂ-!LX5 y *As?RIi ҤUX bQ ku LDoj(NL{4MbF& qjs3rW^ l+1H}nY##z ḿ%AB@+Qґ,Hܒ9߅ɩ[2&o@Tj 8S 35NON; 3K]zY4gI>\)0Ct Q4R<HJ@dp6:;Ⱥ/@꙳.ksm(#j%k-M&ZE)w/.5\ 9(kvEQI]kFf31mX=`FǍgln̻đg2v;HHr ŏSA=OLjxquMo:7N^3:t|Z0Hse^LW `l7E'DJ :bbL沞˾v2TN]Hp85v1l wT%HPd\\;ax= !,\e׈g`#wXЍ{Z`MB؈24A895v.")[Sjr|J ZMĎ'UuiPgu(!`͛1sL+`M\S;֡)ި It>lA]7:#ez͡_ǛT(wr.~B̒iBk.͵k4<H ojs)n' !Ruq-=(@筜6Hj5ƜgÛl6K S؛@z5`Nkcqsʁu $fZhd Ry#-Qi5Hgb{K<<*=R-MBĆO64=c3T]_mg &; (hW }%?e6CJlݛYJ(dRkV#oV֛#ab*3,]]rFi [*=bX%ro@(t[d[ͭ=<¶O1W!n qPٖفUw7Л.mVU>OP~͊ =[b20`ֆds#y3zJmnWݮ[j)W0^yX4gi:M4,MEREIzc!g9K׼fr66-4IUjn)dQN/lDQ1id2| ) MM:CHT@m;9A?t\dPE+0T0d .Cx , l;mF6s 2kY$WSpla:>=n2m^6-6.EڙxM=I1"i gd:2ɷ@1H|3 4؅Jda!t&46V9'~UA::USLiz J:a7ݳ9980oðj' YVau4-JR)'OxZS1\JoQ+}d¾/'ض̦Nm[{z[•.z*=ͱ^t]E֝VHͪڑAnXn"s)Ș`U O;zZ{왐-PUzfMeE|y_N-/b%rʽ7 |FKQN˜o_ ɽMwFjqJLq~y*]B6e"AO*Yޕ5w A/&YݺHLTNLNp8+mSLūaQ#%PZ.ۍ'pVA8z 0ȚWw5Z}x05So>S @TYklu%]cJ.Mݏxc>"S[QT&S1wqdXo:^,60[y&^34N3'=UQ|üT'j%eݹT!" q U\lm̵q-1C6⮀F?딺(Bऋ;4w͚.~/)'reh" z=&,VS"0Kq{0?D`l.Ud (f 栴 2\h&X/Gzcи*!rH7|K-,PzJG11!Y5FiՄz#x1`\ I߁5DMd@s0 yoe31Ia\(<OKYZEIiu'( " ]5hƯ6XŪ^7klnM<%|A'u)V H6,0:G ^fE@t=z hzcDU886Y oJu))fjmtlRY=u1ޔQ5x聡 b?_dh&:IU8 RG;j-cmE,U65q6 :<_d]W<#x1pL-ԥWLT\*(8gwἆ( L*8fcDY(6sFNo8 payWΰvD9ĜP%+H ф A]@~~ mZud2>>"Tp^ۛPwkV(Ď"NCQc"!#z.kUA5`G]]M ʻn0^(fGBKՂe7ϢIP}4 F~!T ./Eck K\Eh\)" ޛ ː6ZwNG U Ƀ5$xS]ml_} G!Άgh$'žŶi$y"'\^AB҆GQԐPMB>8+l!_ jV%m0=P]`)30 / |N\5tw}pjx䇢7*LA6J>*MXDH[W)&Xp 5`>.8w+xOX߽ [vsz0Ogknϯ8s@ÓxZUZvkKB pcG2\d0/^Kp?tRJ9?=D[f(~4:zt_.%t|3e0vb&#u6ǩ誴7]6t v!8r#|S8$t^P*ҐC{+pEzl>`J6HJW@M΃ 1%>+)\'RG5mlt`Rhp86{vO36AYR=ɻ<8<;lStU 烢XWBBn.&uQdO?C {l;dE1!5x?RJͤlImL-3!AsjhfGp7uwmg20nу agAC1:x Ivى?*JJco,VyO-^mjK7@zɣ7&\s^s@m BpGX+>)[(". fEMxшm(K6zvM{)=\,Cm {"8bw"K`ݹ X{+Gm:vNC织lpEwߡ6EpxG7\tP|R1Es̃1gLZI]bb7ޡǷ{޿CZY+Tߖ8Upq1!O-jJ1NsAf;-AJ9݃bVwEp(1T0A9-]SV?XՅ!G0:x.Ls|PZӵØmfƶz8:wKzXPxqﰍ@A0047$mU;h29G+ ~|R\ "aM%X!]mSH JJCj_QG2zxKWF>H R8wJ^WX%a@&+Ԧ< #=} vSmC1nbKI O r)818A Rmvfj|AC(VCN ĎYP+<`OI( hv-<ysTjf;DR jۋQz1LjYkW4eK{ejI>gQ0~7+{͑s_'ٰ暱A 852b;92{v^GNDYc/bvߑ ,AYGy'HkUP% =d{F\tR??.Pj(#8_!r9II:M5r+G8zH 3~^m9J;l>yS$}!6B ف~0e3':@2)i#F\vC$"\}8/ Lb}w_ld^;q޿B_3~(w(ri5m֙]9C;|IizІ~xڏHeWgAl›e]wt5CÖmcE{<0(viQJ]0'x1*)xAQE|}{/8ڥ(9 M.!zǷ9dwQG=޻hD|PnfC6{M\!zǷ)2zPmjz6˹T=^:0sR{?ӊWۧj` ~S(J7en [Cɳh -rкCG2zxKduؽ)6ačTrȩ4 z\()v<`QH9OA5<ѝ673'eѾfAoȻY!8b|4/ia sՏ?ƞt:5Hpk'0;NLp+r9:k-enp|UKNmWrD#>A6*4tp%a -nzT˂! 
utu.n "94#]WD|R0sݝtc/Fl0:Q>._M<`;Oἀ 8)KWQl9v8*!%AC<jjQ#G|G2A%:iS_v1($G?{ȦwB/wP5#F=Ǜ3D$KXBn`iW4o{lGKRP;zǷ802nR 9fw #f0(rJģ ş⌡b!wmP@g#{M1?ݣeVkIT+INxkv`U_dt|*;( y ۬1+ yb܆<sًJ߉/D5o7IpvmBrG >mYY6@iEp?Mq+Kh>ErE11qV3#}&(Yeˍ1.HYB{O%x!wLPQS1;&F1;_"o%|W13'0}ǖ.{VNjӺ!G=<^mU@ʐ *&ɟ ]YPzl/+䨲D'FԦ0xva1F_Mu#?=(4*Rh)^7}C?Df(h | (俽_Eo{x j7_q9=8L32/Ua&RkU8MԀٰ&^ _ia]JCƚBǻo|1Hw>6[VTRT>JHΥnS[@t`+ FxھAXVCˤHm3z`4& s8{Nt;VG|9rw8jR@K ^Q2ܟI].jqF[ҖA/Ԏx?8< |YyPx0oBڗhO!4+mRKG[*%K'#jdh-^BՎK'zRW22ĵ_USO&LpαoوQ(+MGL=gT3Ӂ-V}G8O.ў\ŵBawv8`ǹ!d ypŜhVNx2xKH^EG@u_D{r1A`&އ↖}ɥ4)LV谄m-ɥ4jV׸X  y8ץ) zg> #HpJӈ+&D{r9sąn.~<$=X?ſe,)R7W1KQ/+ ޺k=D{r9rZrJyK%ssPR u"ړk{}|)e4k<.\~΂_ŁCw!k\)%7,);v&i\qGF"ED Ap]_o.==*P@{vܓK'5kr }ɥS($Ϛqؘ8lKω%GqIܯ& ssѤDғGO]6W p-reڃO X]aJUUIAxPGIG\%_\UYKV6 M#k*SRF r;Tۈsol˵LO. rV5L8߳gkP8Ks~IcA0fk/c1H!201D?7EbRIET^(D q ")isYቯg`Q L`q4ePY̳tyO{M fZ. |P-yrae5n}vM-ґ}j2zp ;OVAA .4q0OUPܗJJoq){m3j ©&"vl+y) ~/Hw *}x |@>Q*Wk\ WPzژ F!Y`+!2JC?,kXI4E Ah"b<0u\'ր$M _iL$G' [(K͈e۠" ׽lWG&eLG&_8#߾onżonaqQxյed<1gN)7ŋMa9Cʩcav6kXmTtP֕.f|SAete;RylDފ W(*:Ȅ BcȦ (:\RH\;N`Yv+do+]AῘZtSIuAfʍCM2)KIa/؊LI-wi\tK%ԒR>uV$xIb+S ku\L tcgAlVޔyl9 :CU/.@vr)3ۃ@fk!Eu̖p1(|0VDjUFe+ 8&R6mJƘЋ+(m^*eON00IY=bרC1|z^gW@  mKELNr#o-Ezm!=]q&}FD;~NhKM%4f7n!]!,2ǭ `bY)_,DVVbVSp-uGȿֶ^|Z+;;b5YUjpqo4+_q'fFKL(Zc VCpcu2N2k& #VP7㰜b@b!= e [Yјȁqk҃j0C1UDS l !A 5K[U`AO!~}PZCȜg• y\q)(wBWIS<*K?)٤ъ72C\2yGкqث3L "1rc+@R`mlʐJ)Hʙ)Z!r5x\>W{j9a:7oU?^lz8Tg5aTt 0.iftJ#b$A,Pڐ<&@Keؘ2Cd)IULR4)떢 ]ص駰EϣHY׼'++M;g9!*\ >L ys a7Ŗ GX`CZ5);-'P?-h\uK[ݢZGb XsH>mf4N8#׽귰m0 ^_nq׼{%]p&/*SSP,,\C3gud!%w| !H86x8EaHQu{?(Ꭾi[G{>J#)roM8 ,vb!SvFrCN)\Z>ŵ7;"u͙BAo--ǒ)LG>[a#.4;luE)k4^5zuCRVƋ22L񄖐sA3*]K%\bk\L*U{vEC¾)(qb尐ELp8Bɔ2fFqя:$.M/p~96i~1ſcqԩmIe4OKlw4N+iQ+.W|ELP%:Tk6ҀNӺb& 4vC)o|~Ȍo]ȕeC #ޕc?E<2KA'%%v8M(LhC(^6s1OEJs`UKYG} AmGѡȓčk*A[B7Op- q΃(5W")- ގ4EOn"d*~^}fͤb-0LЩ]HE,I0ՙՁF 1ZА-NZ#&nM$nʥ#HKZRN./%PL$m<-\B fן\K3()p^)҅_\>lm窀sQ)QSH ۤ%["G4n/M Rc,'q*7s R %At*K^5T It=p^ AURQx,Sb 0׺Mm#)u0}R% z4R!UXǒ >/TzSJ&?i~wEV%ǀ>GvC(8$3JM H+.@ EO*Etb3*K${SRv`#QLV;=dsby%l'Js(R{ZD ;)Ue(`1n9*O/ץv䰊j~hMU;'Oj>  VEh0JE͊N0x7 XG*XEvo' `S@?ZzUyIpRU2rJxR<' @4!&%.E_6 jt!9Bl]uZw6,r ]Gaσw~⻆5T"ʑ gsv6ŏtx21}; )E"~d2O57Aqw\&`Gott]&m2HRobpH GU ., R-xyCK%?OڕJq5II sB8,1bǞDۃ'&R*+)yQ7nK`= 6o"h˕g\.U rڒhmoݿUI 51)Op1.RZD9)Gkf4#ݽ"M~#$a:Svm]p\1']2-lKApu%7AV:˚Yba m>()+a#IաjPyTZ`yo,Y3)xqai[v k*%(üx>Pqp`-J Hb7l%V% y!X:Z0du;Z0d=i:nUq>53BEmzRF$LڴQEhz \{+ZC8?u(r.LKFؤ-zkȂˋt4;Kȹ!i1JJx$&0 ښ护xbmUSHߧM3<'ޅ`<䮅zHU5ԇsUx1hޢȾ޴Fdʱ}Av|ŋ}k7x@˳#NnOkzU;6(\RR[>h!zXPi7!EitEe+F3ά$c+ =,A[Ȱ>ҬoX( yM [ ;M05ZV?G5)-+% [jq>: K%ş)-;S}7ka&v A.!ƭ)=alNrj)AWhr.תh[JP%cp[S5:- K[[`LN1Me2-K2|ſ%>(^a2K E @ jRF@ 8/r5°9-og']x#?!*b=AgV7j54 U`#CThtgTJyK`?'* eȩ}vB[Io~̦h6wAI#1kkX}4 6kSU{'=UAOyє4v+px;B7(i c,o]q]F1!@.<[j<҉oΥ"R][ִQȄ}Mr‡ii- dLg/⤜ -)R>ZC9IɳX޷%ǦP_Ɛj {>(V_E6䥪1SBf/y^^"\M{P|\yĨ@z[v@R( yzSRitż?(nse *\ 8#loo%QNvP⎷ Ց%y*L<]T咾)^;NJ~uuiCȨ:΃bK7ؕdUw<2R=yu[h6wiN׺Kd27Œu[M*[r{S$ nk>p5񪰇4t AyZaޓ JJU+#5@NS%9(/WV`k3 RkG%7%&TG0[;g>c?ޛD^ĉCk푠USw"`ӥKZt'.ub.e$cJM\ycG",E2?)MYE7/i Z6%Ќ^K< s|}R*FeѨ%":iḳ}YuA ?vTލ맰'e:˝*AM)zn;6,I (PW??}|o ^b_(+WRo ʓ(zp] oxNz- {keH-w?~3~KoV?H뗿5 Q&48/yP% VSx^1Ox /c:M5tLӺXiO~x{ooKbue[~kz[??~ߕUPo=U_?Z~4_ :t}+R@ʡFFlFFEQӧ^RIi:EIMAVo׶]}hKO1k4dTR :mǘ/ف|Ґaһ(lHe+Z}CFgRpȯT7m=eE` KNIb_@7}:2GJ+`oEBKQL|w`8J(1ǣHh,@"La5KNPxfA9BNNkp?ϠY1ּ-Кʚr㫪*N݆: Y)\', %q=s.AuVTduGw_ﰕך1UEzsP2^ ipY@weN9^K[@'V3\p{aL$onT8+rѐ{:qЪe'Ήٝo}&h 3cJ˪W#~GٮDkC-5teHO16)A=!4i?k^q<BI@D2z6ZXO^E%Ȁ =eWFY&4+SENv'vj@8DbU}Lךva:R𮺦U-gi):%y8x9kR9*]E`ҙKu<6\v GAuAYqo<.c%=G;7Pa،V U$tIq +y㤄WP$,2Ll8ʵ|ZbJ+t]Wԑw5Dj[q!sE%;szh kj}Caj~If4 O'k]y0RKi0=۔\&xp"C;͵Ɩ ,k\ Nzi(b'1}On2'아PNRX/ܵ鐯n IeڗտEF(?exVggOj(zB ]}nm;DEpF<øTf{tLuK^(Qi6Q dИ. 
flexmix/inst/doc/ziglm.R0000644000176200001440000000220514404637307014724 0ustar liggesuserssetClass("FLXMRziglm", contains = "FLXMRglm")

FLXMRziglm <- function(formula = . ~ .,
                       family = c("binomial", "poisson"), ...)
{
  family <- match.arg(family)
  new("FLXMRziglm", FLXMRglm(formula, family, ...),
      name = paste("FLXMRziglm", family, sep = ":"))
}

setMethod("FLXgetModelmatrix", signature(model = "FLXMRziglm"),
          function(model, data, formula, lhs = TRUE, ...)
{
  model <- callNextMethod(model, data, formula, lhs)
  if (attr(terms(model@fullformula), "intercept") == 0)
    stop("please include an intercept")
  model
})

setMethod("FLXremoveComponent", signature(model = "FLXMRziglm"),
          function(model, nok, ...)
{
  if (1 %in% nok) as(model, "FLXMRglm") else model
})

setMethod("FLXmstep", signature(model = "FLXMRziglm"),
          function(model, weights, components, ...)
{
  coef <- c(-Inf, rep(0, ncol(model@x) - 1))
  names(coef) <- colnames(model@x)
  comp.1 <- with(list(coef = coef, df = 0, offset = NULL,
                      family = model@family),
                 eval(model@defineComponent))
  c(list(comp.1),
    FLXmstep(as(model, "FLXMRglm"), weights[, -1, drop = FALSE],
             components[-1]))
})
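
## Usage sketch (not part of the extracted vignette code above): once the
## class and methods are sourced, the driver can be handed to flexmix()
## like any other FLXMR driver.  The call below is an illustration under
## assumptions: it uses the dmft data set shipped with flexmix and assumes
## the column names End, Begin, Gender and Ethnic documented in ?dmft.
## library("flexmix")
## data("dmft", package = "flexmix")
## set.seed(123)
## ziMix <- flexmix(End ~ log(Begin + 0.5) + Gender + Ethnic, data = dmft,
##                  k = 2, model = FLXMRziglm(family = "poisson"))
## summary(ziMix)
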
flexmix/inst/doc/bootstrapping.R0000644000176200001440000001652714404661743016505 0ustar liggesusers### R code from vignette source 'bootstrapping.Rnw'

###################################################
### code chunk number 1: bootstrapping.Rnw:11-34
###################################################
options(useFancyQuotes = FALSE)
digits <- 3
Colors <- c("#E495A5", "#39BEB1")
critical_values <- function(n, p = "0.95") {
  data("qDiptab", package = "diptest")
  if (n %in% rownames(qDiptab)) {
    return(qDiptab[as.character(n), p])
  } else {
    n.approx <- as.numeric(rownames(qDiptab)[which.min(abs(n - as.numeric(rownames(qDiptab))))])
    return(sqrt(n.approx)/sqrt(n) * qDiptab[as.character(n.approx), p])
  }
}
library("graphics")
library("flexmix")
combine <- function(x, sep, width) {
  cs <- cumsum(nchar(x))
  remaining <- if (any(cs[-1] > width)) combine(x[c(FALSE, cs[-1] > width)], sep, width)
  c(paste(x[c(TRUE, cs[-1] <= width)], collapse = sep), remaining)
}
prettyPrint <- function(x, sep = " ", linebreak = "\n\t", width = getOption("width")) {
  x <- strsplit(x, sep)[[1]]
  paste(combine(x, sep, width), collapse = paste(sep, linebreak, collapse = ""))
}


###################################################
### code chunk number 2: bootstrapping.Rnw:94-99
###################################################
cat(prettyPrint(gsub("boot_flexmix", "boot",
                     prompt(flexmix:::boot_flexmix, filename = NA)$usage[[2]]),
                sep = ", ",
                linebreak = paste("\n", paste(rep(" ", 2), collapse = ""), sep = ""),
                width = 70))


###################################################
### code chunk number 3: bootstrapping.Rnw:194-199
###################################################
library("flexmix")
Component_1 <- list(Model_1 = list(coef = c(1, -2), sigma = sqrt(0.1)))
Component_2 <- list(Model_1 = list(coef = c(2, 2), sigma = sqrt(0.1)))
ArtEx.mix <- FLXdist(y ~ x, k = rep(0.5, 2),
                     components = list(Component_1, Component_2))


###################################################
### code chunk number 4: bootstrapping.Rnw:210-216
###################################################
ArtEx.data <- data.frame(x = rep(0:1, each = 100/2))
suppressWarnings(RNGversion("3.5.0"))
set.seed(123)
ArtEx.sim <- rflexmix(ArtEx.mix, newdata = ArtEx.data)
ArtEx.data$y <- ArtEx.sim$y[[1]]
ArtEx.data$class <- ArtEx.sim$class


###################################################
### code chunk number 5: bootstrapping.Rnw:225-230
###################################################
par(mar = c(5, 4, 2, 0) + 0.1)
plot(y ~ x, data = ArtEx.data, pch = with(ArtEx.data, 2*class + x))
pars <- list(matrix(c(1, -2, 2, 2), ncol = 2),
             matrix(c(1, 3, 2, -3), ncol = 2))
for (i in 1:2) apply(pars[[i]], 2, abline, col = Colors[i])


###################################################
### code chunk number 6: bootstrapping.Rnw:238-241
###################################################
set.seed(123)
ArtEx.fit <- stepFlexmix(y ~ x, data = ArtEx.data, k = 2, nrep = 5,
                         control = list(iter = 1000, tol = 1e-8, verbose = 0))


###################################################
### code chunk number 7: bootstrapping.Rnw:246-248
###################################################
summary(ArtEx.fit)
parameters(ArtEx.fit)


###################################################
### code chunk number 8: bootstrapping.Rnw:256-258
###################################################
ArtEx.refit <- refit(ArtEx.fit)
summary(ArtEx.refit)


###################################################
### code chunk number 9: bootstrapping.Rnw:274-277 (eval = FALSE)
###################################################
## set.seed(123)
## ArtEx.bs <- boot(ArtEx.fit, R = 200, sim = "parametric")
## ArtEx.bs


###################################################
### code chunk number 10: bootstrapping.Rnw:279-287
###################################################
if (file.exists("ArtEx.bs.rda")) {
  load("ArtEx.bs.rda")
} else {
  set.seed(123)
  ArtEx.bs <- boot(ArtEx.fit, R = 200, sim = "parametric")
  save(ArtEx.bs, file = "ArtEx.bs.rda")
}
ArtEx.bs


###################################################
### code chunk number 11: bootstrapping.Rnw:303-304
###################################################
print(plot(ArtEx.bs, ordering = "coef.x", col = Colors))


###################################################
### code chunk number 12: bootstrapping.Rnw:321-334
###################################################
require("diptest")
parameters <- parameters(ArtEx.bs)
Ordering <- factor(as.vector(apply(matrix(parameters[,"coef.x"],
                                          nrow = 2), 2, order)))
Comp1 <- parameters[Ordering == 1,]
Comp2 <- parameters[Ordering == 2,]
dip.values.art <- matrix(nrow = ncol(parameters), ncol = 3,
                         dimnames = list(colnames(parameters),
                                         c("Aggregated", "Comp 1", "Comp 2")))
dip.values.art[,"Aggregated"] <- apply(parameters, 2, dip)
dip.values.art[,"Comp 1"] <- apply(Comp1, 2, dip)
dip.values.art[,"Comp 2"] <- apply(Comp2, 2, dip)
dip.values.art
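
## Illustration only (not part of the extracted vignette code): the
## critical_values() helper defined in chunk 1 looks up approximate 95%
## critical values of Hartigan's dip statistic from diptest's qDiptab, so
## the bootstrap dip values above can be checked against it.  With R = 200
## parametric replicates there are 200 values per component and 2 * 200 in
## the aggregated column; crit is an illustrative name, not an object used
## elsewhere in this script.
## crit <- c(Aggregated = critical_values(2 * 200),
##           `Comp 1` = critical_values(200),
##           `Comp 2` = critical_values(200))
## sweep(dip.values.art, 2, crit, ">")
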
###################################################
### code chunk number 13: bootstrapping.Rnw:376-382
###################################################
data("seizure", package = "flexmix")
model <- FLXMRglm(family = "poisson", offset = log(seizure$Hours))
control <- list(iter = 1000, tol = 1e-10, verbose = 0)
set.seed(123)
seizMix <- stepFlexmix(Seizures ~ Treatment * log(Day), data = seizure,
                       k = 2, nrep = 5, model = model, control = control)


###################################################
### code chunk number 14: bootstrapping.Rnw:390-395
###################################################
par(mar = c(5, 4, 2, 0) + 0.1)
plot(Seizures/Hours ~ Day, data = seizure, pch = as.integer(seizure$Treatment))
abline(v = 27.5, lty = 2, col = "grey")
matplot(seizure$Day, fitted(seizMix)/seizure$Hours, type = "l",
        add = TRUE, col = 1, lty = 1, lwd = 2)


###################################################
### code chunk number 15: bootstrapping.Rnw:415-418 (eval = FALSE)
###################################################
## set.seed(123)
## seizMix.bs <- boot(seizMix, R = 200, sim = "parametric")
## seizMix.bs


###################################################
### code chunk number 16: bootstrapping.Rnw:420-428
###################################################
if (file.exists("seizMix.bs.rda")) {
  load("seizMix.bs.rda")
} else {
  set.seed(123)
  seizMix.bs <- boot(seizMix, R = 200, sim = "parametric")
  save(seizMix.bs, file = "seizMix.bs.rda")
}
seizMix.bs


###################################################
### code chunk number 17: bootstrapping.Rnw:433-434
###################################################
print(plot(seizMix.bs, ordering = "coef.(Intercept)", col = Colors))


###################################################
### code chunk number 18: bootstrapping.Rnw:441-446
###################################################
parameters <- parameters(seizMix.bs)
Ordering <- factor(as.vector(apply(matrix(parameters[,"coef.(Intercept)"],
                                          nrow = 2), 2, order)))
Comp1 <- parameters[Ordering == 1,]
Comp2 <- parameters[Ordering == 2,]


###################################################
### code chunk number 19: bootstrapping.Rnw:455-462
###################################################
dip.values.art <- matrix(nrow = ncol(parameters), ncol = 3,
                         dimnames = list(colnames(parameters),
                                         c("Aggregated", "Comp 1", "Comp 2")))
dip.values.art[,"Aggregated"] <- apply(parameters, 2, dip)
dip.values.art[,"Comp 1"] <- apply(Comp1, 2, dip)
dip.values.art[,"Comp 2"] <- apply(Comp2, 2, dip)
dip.values.art
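
## Illustration only (not part of the extracted vignette code): simple
## percentile bootstrap confidence intervals for the component-specific
## parameters can be read off the ordered bootstrap draws computed in
## chunk 18, e.g.
## apply(Comp1, 2, quantile, probs = c(0.025, 0.975))
## apply(Comp2, 2, quantile, probs = c(0.025, 0.975))
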
flexmix/inst/doc/flexmix-intro.R0000644000176200001440000001333414404661762016416 0ustar liggesusers### R code from vignette source 'flexmix-intro.Rnw'

###################################################
### code chunk number 1: flexmix-intro.Rnw:30-37
###################################################
suppressWarnings(RNGversion("3.5.0"))
set.seed(1504)
options(width = 70, prompt = "R> ", continue = "+ ", useFancyQuotes = FALSE)
grDevices::ps.options(family = "Times")
library("graphics")
library("flexmix")
data("NPreg")


###################################################
### code chunk number 2: flexmix-intro.Rnw:323-327
###################################################
library("flexmix")
data("NPreg")
m1 <- flexmix(yn ~ x + I(x^2), data = NPreg, k = 2)
m1


###################################################
### code chunk number 3: flexmix-intro.Rnw:330-331
###################################################
parameters(m1, component = 1)


###################################################
### code chunk number 4: flexmix-intro.Rnw:334-335
###################################################
parameters(m1, component = 2)


###################################################
### code chunk number 5: flexmix-intro.Rnw:340-341
###################################################
table(NPreg$class, clusters(m1))


###################################################
### code chunk number 6: flexmix-intro.Rnw:344-345
###################################################
summary(m1)


###################################################
### code chunk number 7: flexmix-intro.Rnw:361-364
###################################################
par(mfrow = c(1, 2))
plot(yn ~ x, col = class, pch = class, data = NPreg)
plot(yp ~ x, col = class, pch = class, data = NPreg)


###################################################
### code chunk number 8: flexmix-intro.Rnw:382-383
###################################################
print(plot(m1))


###################################################
### code chunk number 9: flexmix-intro.Rnw:403-405
###################################################
rm1 <- refit(m1)
summary(rm1)


###################################################
### code chunk number 10: flexmix-intro.Rnw:426-427
###################################################
options(width = 55)


###################################################
### code chunk number 11: flexmix-intro.Rnw:429-432
###################################################
m2 <- flexmix(yp ~ x, data = NPreg, k = 2,
              model = FLXMRglm(family = "poisson"))
summary(m2)


###################################################
### code chunk number 12: flexmix-intro.Rnw:434-435
###################################################
options(width = 65)


###################################################
### code chunk number 13: flexmix-intro.Rnw:439-440
###################################################
print(plot(m2))


###################################################
### code chunk number 14: flexmix-intro.Rnw:483-486
###################################################
m3 <- flexmix(~ x, data = NPreg, k = 2,
              model = list(FLXMRglm(yn ~ . + I(x^2)),
                           FLXMRglm(yp ~ ., family = "poisson")))


###################################################
### code chunk number 15: flexmix-intro.Rnw:501-502
###################################################
print(plot(m3))


###################################################
### code chunk number 16: flexmix-intro.Rnw:531-533
###################################################
m4 <- flexmix(yn ~ x + I(x^2) | id2, data = NPreg, k = 2)
summary(m4)


###################################################
### code chunk number 17: flexmix-intro.Rnw:549-551
###################################################
m5 <- flexmix(yn ~ x + I(x^2), data = NPreg, k = 2,
              control = list(iter.max = 15, verbose = 3, classify = "hard"))


###################################################
### code chunk number 18: flexmix-intro.Rnw:568-572
###################################################
m6 <- flexmix(yp ~ x + I(x^2), data = NPreg, k = 4,
              control = list(minprior = 0.2))
m6


###################################################
### code chunk number 19: flexmix-intro.Rnw:582-585
###################################################
m7 <- stepFlexmix(yp ~ x + I(x^2), data = NPreg,
                  control = list(verbose = 0), k = 1:5, nrep = 5)


###################################################
### code chunk number 20: flexmix-intro.Rnw:591-592
###################################################
getModel(m7, "BIC")
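
## Illustration only (not part of the extracted vignette code): the model
## selected by BIC in the chunk above can be stored and then inspected with
## the same accessors used for m1 earlier in this script, e.g.
## m7.best <- getModel(m7, "BIC")
## table(NPreg$class, clusters(m7.best))
## summary(refit(m7.best))
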
###################################################
### code chunk number 21: flexmix-intro.Rnw:727-734
###################################################
library("flexmix")
set.seed(1504)
options(width = 60)
grDevices::ps.options(family = "Times")
suppressMessages(require("ellipse"))
suppressMessages(require("mvtnorm"))
source("mymclust.R")


###################################################
### code chunk number 22: flexmix-intro.Rnw:740-743
###################################################
data("Nclus")
m1 <- flexmix(Nclus ~ 1, k = 4, model = mymclust())
summary(m1)


###################################################
### code chunk number 23: flexmix-intro.Rnw:754-756
###################################################
m2 <- flexmix(Nclus ~ 1, k = 4, model = mymclust(diagonal = FALSE))
summary(m2)


###################################################
### code chunk number 24: flexmix-intro.Rnw:761-764
###################################################
par(mfrow = 1:2)
plotEll(m1, Nclus)
plotEll(m2, Nclus)


###################################################
### code chunk number 25: flexmix-intro.Rnw:803-807
###################################################
SI <- sessionInfo()
pkgs <- paste(sapply(c(SI$otherPkgs, SI$loadedOnly),
                     function(x) paste("\\\\pkg{", x$Package, "} ", x$Version, sep = "")),
              collapse = ", ")
flexmix/inst/doc/mixture-regressions.pdf0000644000176200001440000113720314404662041020211 0ustar liggesusers%PDF-1.5
[binary PDF stream data omitted: vignette "FlexMix Version 2: Finite Mixtures with Concomitant Variables and Varying and Constant Parameters", Bettina Grün and Friedrich Leisch; keywords: R, finite mixture models, generalized linear models, concomitant variables; produced 2023-03-16 with LaTeX/hyperref and GPL Ghostscript 10.00.0]
jjendstream endobj 355 0 obj << /Filter /FlateDecode /Length 5169 >> stream x\ߏ9r~}ʽYzVnM=w:&C=oWwyHUlle6G-6Ui6lMxEfϷ7Ƶ-nW²QqB-n77\Ӵ sUXYm*ˬǗe-hVZBcgF}}V_Tǁj;WvM :dv0 o%] M0ul.^e6k,k)^ݾK^aW%Ve[ߪFYN;Z{/$2-ԍZԬ1F;7e5U݇;j"yzl@R\S;(\4ZZK,Bi7oAV JG-[pGs V:BE)olwA0TSalSGY+@~-TvʨEa*nZ&d;Tp^vJJ뮿VwNyÙ ~*)Vu֛U'pǝ m~,w9}75Hf='N>/%Q>+e!I0?nbeՍ56 ~JzHٻԖ ԶearI;[ޏldv ӈ WK_;,:Tɽ-T?O ڊSYl|Y33S~6x|~l>+- 0{iKrRІa薕MX:%$)-p=4|]kZH[>U& ܫQWK [~Ghovo2?pxCqlJ]m߆m,+-jy6K ;?I𳚣i8DOV:}zYƟt'c7PUvVV~uֽ l>H)TVp㡮Z.\Fpmk4 wշM&-lF*8'd;XC8Kg),} 0A^/%=_ܥt!y>e=\9l kejoqIAkAd8=0/?Xy|Eh&Mx7'@NjwW@S+phDNhzˊWXj8}7VLu!D;=!SM~Ҍ ȡk45JӦme+xhw,vu؇)k(g0'w+kyP6K#-$!>K̜/8̙F-Ϲ_8F=r_Y(.{h.Czti]mC_[ЪԦU}OH}19Ú^(+$H>\]^'͕T d4}O(JV6^/Y8DkvޒVO7V]j QRP:znU] {D2O dܨ[7-ҿ-sI dLf9\1JY`M RUzC|t ?=Y+t(0"i\ ]H2&Cz4=?Éc"ޔ :o,x 9<6| 9هUqD-iՖ:D):mU.채1Y<'0 pkƅAb㢁%'4Y֫J<0EVZ*j0EȺ`oB<õ3*m5 OɉNy1qc6tlyZLHu$5P`ax`n4-?;8K5"% GKE$+c᫩(B+mE'KD7aY>qw1kTvb@;bM058Y645`nO{iP.*XWhٳgXFNn36&8ǝ5‡>TPVVTV-]-"sը=c ŽQ xE-F @ zd(To=H 8`͊Zp֐To.BcEaӺ}exb,aˎ@]k08Ҫ FF6j@upިAe{*['`)mXo0_L\ׇ^vZhGi `€Xn(̱].\D߇(M[䁅>vZ!3Ժnv? ru"P6Z{Sm (q~1V˙"{U@du9:ѡ`V>;Xv#41*_hC1*gdB]cya|'ի/ 7D8q%g"ڿLS8>fæ flh@&lĔWi5.Wln=v*Ӝ!vv!Ox`[ M `RCOnF.仃_E d}w)iϽVǕW:)|; ͒[B$Prg}_ ۪?C! &!VƄam;يSȜr<2zP5YJ)|&FxHX`ִHZtiVaƿ8n;t%L4ηg]a_YoJz9Hepz0n).yN0쇲`{yyIQ '8f2sn=@g 6G0ll BtCP_ fPA8H3@Gb6R峁 bj} 㕗qR HYnΊζ+½o Tg; su`BpSd,QVVѭC7е"-G%a7b!3vD#ԁ~+F=[v+ _U~I-M(ܲMKɞQTaT[3rg(?q&&N=;Wy,T F*D6ÓvY:Rn-3yaSfQQ}jĮ]ZZ ci o1@) 1^QA'׊ !T2>S9~=_GoqQtE, Ed#- 7oRCDk,D0|0ko YmI?6}Q3 0|VX0-Җs0Si\X)x,}j 4w'sgjLi ـЈTS) `:B;#`ZѨ"mҰS%'t XlUmK+*jt#&$烦0CD%%:ION00@6-W8W+NARSTK o a}Qex >S9,tԟ6I)#9 `0& ޺Mr{cvUꇜJ+RFi`&w}x(¹F F']96L[&*;M[,>&4aF[sB1 *ae9X`/pCeJ> UI)R)'x/sxq:+FYP%eEcRp+KM~$!O\V|WٷI_-k6 i5W>#mn2"V%~ ÜU/W1})^?3ԾVI< {%~ʻݢ?GƄpIH»83_D(NS|42Fsҹ rl4>!g_vs6ܕGcmQ2Xv>^[YIZ4k/!>ŏf?_G*z9҇Lb)9񇂐PU_sU H~a__S%pr^L͍ȟAefڢg}Rԝ҆?ĩ endstream endobj 356 0 obj << /Filter /FlateDecode /Length 4095 >> stream x[K6_,IH+-&A6v ${=㉜IK}[U$%Rz|pZ$"MSMo FO77W Ԯqlsam'L\.~QYmanUָux%夵gFWn-~`:uټWSfAL_ Yt}8 n;Q`Ww\^pF8mkhQFVl_qk7?KF2(zY Fn~O{\Ś/;)e휪~|"M}5Ҭ~Lm;ڜ=hOζ sՑ d-QDJ )mt6BjHd_I. \ dp^?tq_Kc;+4ך?o8Վ9P0O)N5 np}jhx-9)Y[6;ǹ]zH C_YuQ)PWܟ}M׿ K 0WDc_}:ys?jq"%>,iˁF?gNkXR#6jr$JaQX6!|<0{Z.&ތ@<b9 X:.HDNXz HQ&Mt74)!P[3So(B|q)7i,⍃=sS6.rv'$,@iPU{FHXR}>>ITijz.QTfZ Rw/^axHw!#>ުdK)[c]BQgGmKARRVܧ=IcX2ԇR}4/84 f_l0 IyBDY 6E(i6=٨ OrZ4nwC9QXʺѦIUuN ף~B+qN#a7ؾ}08-'{˸ژl1`sXe4cա mϖPoy*1|S_O 0^k9 fUֈ?[ W?GOQax:dJflÓf1 8CLq*> K1C,HQcrdOg=A|/tj[UܿY@+U3x3*:挙η/֖Wk|c@ \ZO"VXȂktG}'o<Y8 D&љQ*e 2_A&rg2UӦ@U3rDiPbU0fJ>xAo9gBQRbDF:ڬI'N:OV [ȪI2шS 6S)`W~4POVw(+REM4Yoзzzh 뿞^Mg[߿}ȵ@~",ËOW ȜqxZviŵwi=bG(ޑ 1Imkm,PCM|'tF`-0sm^7aG?/Hԃ խ>tsk%؀V)HB@I [}#4&o' 2z5׫6׷eX[v45w ˺jy#94YPȌ/nV^r%JYK0lgBgI($\zcR"-Q )f~g^Q}eR_pLFCSjH:lxp7KӢW9B<2Ǵ 0 WԼ)!KwkZ:OGy.h͟TЏtJxjdj[,-jA|GNij9Q/|Mˌ*1 g&^@Y3v'H!GJ `Eq .n&!9$e ).~ck:V)?w6Yey*)ړRĵzmLb18 шp}"#?Lr̚9.!sʀH2s ֱ [ݕTd)5ƠNTTj#vx=T bcXAVQQHInjb#1&MhSe=/d_,P3b"} Κ03Y>~á >۵ MڭċDLf} Є^6^8|IZ^z2?d<9epE?*{*ڹ2+eSsϜ > ytׄߓSvvn[q6̔;Ra742 K.X2EG!P#] CЈy94{d{ ؔ*,B~E&Z$_C̰&{ $ckC.8`bUYb| S*- _z9]O #+̮y@9sZ8Nm 2=舿g!x& }fZ [}Ȳu hiF "JbM>5Opk|ryQn̚aT@0)ǸHrZ '[^ C;vIU%;e,tzp-]d}0gO]sm!o6+'G?&m։̌!οw68&NTxחnŋZy?pQkޜNmrpz $w8ø {L8G~?o 6Lk+XZ5me/W9 ̀?چ_KGƇ:Trl$觋4>[iGpu:"~LY|ʝD.f2"tX1 Ϩ젡S:vt#;a-+ û'9`ŽvKRr&4@Q%-J&cY;\{M--OlpTCSk[M;ɑBUZ?;lOB,hEA^ e\M&l꺷;򓆞-,Ҝz0!{xEI#/P]8܉P`)侏eOTZKq lH#H䡛AȔoǸyZ:* 2ȑ9 I~X&,?.7L11.'c|1>W> stream x\Ysu~_bŘg^-v]'NVWEDf80z RR>\ پ4wE}ywÅo./7՗ ".[q٘f)syu\ q.W7Tn6hm[|^6[Ԯ7uRUhVUrW-j'*k醇 [uM<\ Ytam%hDϪuWcEO:#t=z78od?mW\k!$ @3 S/ OWke<-42 ;cM#\u:6[ ZS]ݧ3XbAjgt'/xqTe8 uD(M ;/y`?m5=bei[8 ]ec~DTaѡ#4!ﰃ6i\'z6AE! 
Ln`wP?n W'`H '%.A'QUI  I״ñ ?&sGg;FA/$>q$Gymտn~FEF؝oA&Ij'mR?=$<$EE,[T2ZtHM7 hr{--$irgB1ϫc4>I ơ;/ۗmtFdÉXV?Mؼp' UfK$EO]nٮnv}.m E0^yI]C@.3i:% 5xd;fMoܘ X$hk289x V71[z@η3<:ƽ-3*[όb  #omqZxX8eܑ3ѯ&q#:;6<_-IS-X}4/+PT)5 .@B˜4;ԅ-!w 2&AkM[~ɩ M3+Fx q(d}xJ2INjv]LHaH/u("7, l 6x' ~k\>gеa {AVaSRBO q"n 0+&h_o&A 9`zZQN#!<שM|poVo`zsYxmtY|zM3ɦ7"vx9Q%iM8ҹznvf/ ` yt(?G]{ _`Se`o-oЀ Ot c1Lfb-:?&[?0SˠSNʮI2פO˃*r" _'bFl/R2} %JFTQK=hG&z_41aE !s`cGR',XX~7K-Cj pq.jaDlR(f`Y^ݜ ? ;*2; 4MByHSwCxn0 pZƆ(T/џIbL^Cv"kN i<?܎$+Cgt Ea^$P)$|*99D%ņz p#} `sx9z@'* &7m! `'F U,Ff)$`Je֪b8;J* (P%FΙFfa-&}8A' GmpquI AaXGqRW`8k3\,?!SˎJf .dBmrxm'gA^mn6 劫1Nny 4/̓`v\ur ю_#j<G}0ȴw7NI:Io1y_D*8K!D&}d8q t"*,g#BD\ccCxOw.mkp'.&A|'$"zRKv Nw`Y9ª4ni TȜd[n0U&x`҇,fAy~ )!Uxշ#(/W F-8diYřbu-cu͸!tlCoB!:֧'PH"BW q=0I-T51ְu433{ەGtŮQ]t.Ӝ(^gv`MKvg;۽3av] u (yQ+dY;ηq^ vvS?-z3gl}Oy亱7W)N6006b6-%{񌯲(Nȱ}9㉇Ep $HlP @B&6<Գ͈KԔ89KEvb&7*:'wcU"g(BN2Q?* mT&XٳӦv~ZAH+4E6^4DXEx]kd f)հy k ~Nu64Eh۟aM@s[r1&$*E&5*#t淓@*^]BKBסc#i!N?Ƭ8($pr;֦rgE: Y%2Ҧ*擥@:ļ7U{ֆ=VȄ:31(31 6 &׽RF\ЃQy8AZ`cHP_1 qқ )JlV#GTLcǴC7yӯP>Z!<)S 5ki 1ʊ; 7hΖM8순 {F:B ~>֐.-a̫Mvt:8G 1CX 㾵;vcCP@ۈSNٟxlƠ‹HD\`k2U֧|03̎((wJW^Ima7oi@lȋ (&>5**a:4GZp:)!V1d SfTy/09cy>>7y%3ˣAo\'& TE"r$B[6*qykv^r[㝗).x[OM_|U4aUl%Ia0 ]d Pg>}S^ - VH]a>+Q_ ŏa]S8;eX&.͌ғ.Xb+z\G  +,1pNO]iQi8x\U(򯎳oʢY;z|}|ʂk|Qf?0\qXc=Sr8LRoB8H ^a=ëՕ'ACT_ .znlIm\2@\fH/1L_ԙ[An8"DȃkXBYwzfIRR ?76MCkDeqGr!g=@r\wCK)CEG&W%]*q\@-#H<.yaBgo]J^.`Fs)D \Q¹ g6̅ y_oES?̲4r"ͺ4J 3m} ^clbx]~rPe [-z6h&ϓ{;l.VN̈́ϡw&*Ex01ל3:k܆ZB8\t'2T) ]JlhVR̨9Rx DEqj^=$أPtACjS];h xG-M\h5~"X?ӱ:Nw;" B6xyVjm,, bL_l.@//G,8-=n*Gp 墨^@X,ᄏ5\QBYhXJhBQ֭^.BaЯm-ydϢRYZтH)?[J*T_.NL}#c|L9R߸J8uie)D3)ջQ`x-mc!ܖ!EɪGzB6pڵY7MV3eo;fW]Do*N.~ݾ켁sJ،ϹQYQ% `4,WQ ZG 1XZL)2u rDNA[T~ΤT+ wux n.'zƏGrY.QG-x,wֹe$y}D۹`n肂=WFni⅖rP \)ַ Y*㤢p5"b1#Y+Fr8J7pr<]ZSZqÑjZ&ϊCzC']ê&@Rh JZRzgSPMpS7\ ^ wl|fj69D+(][)~=Rsjd*/1a*)PQL"" E&)Y%h36v,[]Fqx|'UU̦a9Lΐ _qtYgS]b] /Nqwch ?TiD.&?fܥ(fz}Ir)/<2^قH1e/?<谖UIx.8W7Hg񄇀mO fgu9k? c-8 M1,B@"$>cr\%69 t^,kQCF&vϕyI28dSN23XI,U0FR&jR L ~QꤤY#@I 5m =<Z%rV:qlP23E7#i вolqJh!L˘Ek:1z$&tڷ)> stream x{PSwoī2ڢٚm]Xխq(u[myB IB$@ b-N]}nSθvv7sgn9s8ج<3v_^nxӫ&S>Z$O}=u05k(S4Z¦?y v'T!ܟvc{ v[ƶ`OaGEl[bF8|Nm?=;睛l9esw=?otG}݈0? /NpN5hX'ɊK)Omt4 }h<{t ے  干] V+a rL/'71F-@(kD/Zg~4[k+ Df",*A'd}vr>v?qq\m:!,a-}#}% "IEKE'<-~{$dP[-{H4攢OzXIV3;\/f h/VfouKc  B 4(jYKL5zro> ÇySlǛ'5w\H WJ5)=ۙ9)+㞿hT*8$pMW&| =ٍNy9h. '5Qr4u>%C|DջWS rr# -3XVUJr<WYE5ԑx[^JBߞzg~OxƸ)HK(v χڙo>djj#Е\ &Py}$hւɦVg1˵33Vq].C1DFo8|ݸ艶&{_.h xhowVaҗa͹ w*b~֢4H-lʊ%^ࠠ `+Nb٦}1 >3e;`[jo;zO9{16j;\k&c7'֏S(~G-.,(,,kl)")ʼnR^<^w:EDPuOCFji()eΆC1l:aY=?Sx1 PހBn;蕜 pFInmD&u!Չg\)fw;O5#]Ҏ6&E8m#4=I&GرKJcԲs084nT2^Id*4z1ډVMM09MPVD[ؼ SVSVсJJ̷mGxui̚bIy>J Ӽ̣wzBz^r=d/f[{Qms^-E$} "Q:UZDi7;{,EA x3=>`LmnFWc_1 &F{W Mn7<91n+vI[koJ֊4>k?9?3[ &7uoUzRͫK?X00>mp41pIB %?NzX~~Gh_{hPIX:CND'M@0= *2@!Fe0(˔ђdeL̚1B6! 
XYKOhuvZO9PCH<bV :o˝9Te dZ )/((JvCv b",j(%zGkh4:1(Ƌ@^.-{GnKVZaj]E'g5@*!B/ۃCYWxL) z*AZSʙ[*boY2H#*Bt詧[8$Jd@|h S-2kN,7olۜOiENe1WgPdc7PND7_0X-`8ovиZ~XLP_R 'nybmqZ8&ݨ Z6t`:x (qѯQ4}쮅Uxs=Kӂ@h Jcdy E.a+2Ό怛悔vsp\9]S?󂽱\B}9ΗH5; ֑ˀ2Fl$w0楟8Y;D.*x7扲st{1Bu~endstream endobj 359 0 obj << /Filter /FlateDecode /Length 389 >> stream x]R1n0 %%]2(~CI<ЁgR> stream xX xSe>P(MPPGeEdG-PЖto,M=9ɗ[.iKtZ٭ 0q88߹3̨sϽI};/_"K&Y**69ks(*K\q0)&S䘊nF3|su@:a򔩷Y9RI^i^iyLB~!o KR58}e 6Uʜ5 ӊ̿S(j5AfRtj6ʠS稇%/ԋcrj8LMB+8U`.pP3I^X[M)t@ e}sw⬉oMrL^47TL𦢛%7TsުzvƿI_Hic91vtJ-tЯNȗ俔ol5blݑ~4Non[47cLŘogB&(uʼ`uAs3v]ί5Sd`+`"ws5jG>_n |kV/i@Koڃ27=e ..u``^[yU7Do ִi}u4@(B&r@AntqŭPUjϊG!pGbo(u YLkrvX4wXn2l&8#mce:Wm(P{'qz9Rw;z{Z!R B/WlXk@EkdT^A BKz^~4H9(eAfm.OB%Z,Vz<27r'2S_FҚkx-pbm0canƑzP@8Tl x|ݮntUg>dGEp{{g[.q ٖZdrjC2~ l5X}lkijs>>р g_?z)LolcuH!pԺS;zͦ2=g^) x"O ymzin'dtOKKkm;$Y:ߠ!HYuT $硱_IU(4>lA6bus4xHHm5,$\vElmG%=BȐ_cs>ǥVLvEw%En迦~AW~5%!I£҈\e hyH 鬕Xǧ˺oD]a݁srL;".˽m@{PP+5*͘΄-4.W'ЇN9?o?4Z}5ex Xi6 L@, W|4]&X~w~[\[SkKFȪ%)nfލvz)lzz 'cvo7 /b+(ӖU$ Lȡ~Sr11;mjΡKh 8MYz€V{OݍCd-f@mM;DWuޤNcQA9h\g*PLi}o5>[9,H Ϣ[ӶĬ DBT"j=_~yǂIDZ5u硏Q.v2I[`! ]AYWB1AۧOSj1ZKʉfWvtv;?=b4ACuY^te@;:F&0eg_}Z,,K6;|뛳]@u^`T]h[vxEe҂C}gwsL0wW_@$>YPJBY [ &f,, KClıǠ vWK+%UUj tyƞ,\gtχDiP0}SXB鲸 z1)AJw'/vV鹣'ۚw8kkUof6TTC9hjQE>g20&r;غv/4@ЧNؾEʹ YKEp3-~}sq(O[Zx Χ$MR1]orxQH\4#z14!TQpUX)nDFx|;KXoVwHGtW)f%X*旷VX4q6&d3'=DqÏ K8gy'l Um#M:C.#MK92.\@&ٯۦ*ωMZHF^G7l:ׯk5C cw$J4&t1ȣ>dLOj+hO$KchAL0v$k^gC ]Aޗy^4_\ Vfkj/uN%oHr4f^ |"sp&`fּYEӕm{6Vu @(jy.AxGΦ}{8Ո&=Cz5PL Y' .ZN:fNjZV۝÷ľb3K5 %2|m#o1X&+SxMu=k>SZBUEDTǫ GJG? k`{¿@bbFj{MԣBWOZ>]/BLti4G3FZ/?MMGhf'IZJH-_6 "b{[{LxCǑTƥY-ԛ0]P[)gx(55 ڥ\ƞu W$DB_s>^8{B_,#DO ٝ7hn٬J }9=pVLyMw=џX:?kΘHuev RPK[d֪4N+Oz,x jn֛ Wga_=9ߕ&t!&?Φ$6$8Y%bgb?rNNݍf>$cw qNjt}['=^6Ð^>rIdl:~ɝ_$eqtO\b Ozf&=S(Uf}nn>Ȣ'R# k1R~w'@Sa5Pos 9s/޸Hq BE؄̄/L&1'('MIS)S endstream endobj 361 0 obj << /Filter /FlateDecode /Length 5895 >> stream x<ˎGrxoo1Ե~]^xȇ֐CuH̬ǐڅT#2"2Q?,X o. NO񿛻ů.~ˋ0/pWVK~rzwu%)-g[].=aʹszLK?bRUK}]LXA8񄗽xW_,⋥Խ3^wWa݋x,-%.,x̛KxmV]y8!u/)xK S.ñ.Eo GpWx_}y1U57vaP\1?ul>FV"[$c. ?\rc̾{4pd%:z"Y^J:^"{2L eƢ7wxFs.E)Rqi[[- 扞ˈ?-d6VFpQ=-4!IW8tYXZum2Th}=! v'mp<rB o?{5dk 3G/E2@Q8 )͠`Zx%*ÇUFv2iY  4hƦt#-ܨ&MJw VzyL%&TiL~ $P,VMnnWwq` .2~ \n~cni(ڭd?:!N kU$8l0V9#4Y  6!b6 8Rժx60,g;ߊ3_exvGֺi+dD=908XxMQZP"orpeJ9_10u )H.)G W3z,+*3tNuť*nm(WѴU}Muk_8@)o.[:tu+۹DE݌VJ'Ջ++tD*Za@Wel"uAE &+1Ha>FytӥN'&,6&zR߽ *jr )xq}] fiH0/A-?%ڀ\ TBۊBm1]>uqa˥0,WgL޿>Bͮ|*ASm va8X]*bxXm "*rs D_]ӆ?p5ց!u{:#ݕăǓ;kHh 4@`mY˄\W?KZgvh!fOd ȍ K MWƋ 03<8u NgWbdW]YK<|i |"{x {\i mߖL 'S{=1ȀA/A1~jE>g`wen@:l"/݀60HV=Hg[ُB9"GXY)fJ >^JKHmJZ`"|߈,u5>$AqqU`XbҦVN8C&G#1xFM{mtN=5q\ዚ6s8tA$赐b)`ANnUX띗*1[Z Ndy4&+UO,e+%4HT4׫:> F*ilѨɯ DBșFBrxi풦Ff~uShAHy&GtNq'4M0 !ӈtE w8:hR@n/2EV`7w9c?V }`zy2[c%ñ$ؽXPP;ٍ zNAzp֛#IÍ YP1B*+΅ucH7cp{&JXvV v4$<|sjxӛi6E|2+]hw䘮uif(*CqFpK92"$P]km ` n]+Œü>#J`m &?Z88_@VS S8`be@.F=N1N#g J9b!{E"(Tc!2> ~/e,&{[=f ;9dڪ#Od2ES]2>㎳R⬰% |3%q? 8p)y{ϲ<GI,o9Kx>1E7Q4f&PbMvn .KMCr7?t9a{9<8]Lϙޒ%S$mEDKw𮤆2A%&mLQ^*0RT2T(hlzOt_(vbENjT=RlLqC6?Ep}1n҅pA%U>fo@V=%)42>0okCyxد/zL#2^.7Z 0"ð~!0;j%jQ~wcfX}E/b?u~ht`y(D{e6Ќqx  kNM/!<\X <׊` bk~jUl=k/Z03S@3gӦ\EEi\}^u_bpf+ cYo?)adKlx?Vò*.16eU Q_c% C M :m$oֿ͡XUP"0,RP4AFc[ U0E'Jh>J37vqiB"mx&)mh.(.@D8 MiO:/=>?gP9AqF]#Y>8uh;LTt4-HbM F=l CmQt[ (yiPce] @'l<,UsFmL `sO=V!1ȕłM 61\ͺ$T )tNKr, amΥGm10ZEҲ*Rf BmWTjy&4ϓ_K? %XVKp7#W 8]ZNObpie㹘;z:s0* uOYAUs\`w56S@f|H O3Z5@ҡ%!> RzBeį!p  VY0M³B~Q>6W ;a]4kK PN~s"@$˃?68Ixti..b-XWk6&5Z~Wˀ>vi:itV UZF|;YxR|=XAs`/ (wT8[8yHqo/QP<6Oi-X^Dʹ?1+!סFE>۲7 GD;^w%HeӒzġ>hu!e(ڳ$\6EE+)lz,U Y"r60s׽.lyH5< pYJpɓϳū~}O^!-%ATYs!q` ,/Q7E 1۰BE_?j(cZzOA4 bmv[Ü/șo`Z('ͼ΍٩3Fgc?R#w&Oai^fæp1k $RD8#>ǬM ">4׮FKpM(  D4f,0G3 D7! 
PNlS5ˡ=ufeԩƐ )IkuOf D"!LZj iˣTǡ}|ZR1t e8:^}]?X՝;-HiywDb9xp>EUUo=ʬe>Ga} wt(Xu?'X b#vqP5t=5Z&@k0Nq^ZÝHVLvIe|jM¢)&3|d .sp[K$W$PG'vUD.jh.s۝:IGPwzm1GN>~dlS8wh}0q&,+` U~}u T霋*@F#>d@YX}Mf*  Chd0đBy_x`۔ `U뗡$)Gve4EC WԝBaZO dIak`PL:ܯ?ШGؓ1egcZ!s'통}S,?vRExg1bq endstream endobj 362 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 2003 >> stream xU{TWbU1)#ހ;$.w24ƳqC'&m٪J;oQOF!J"x&# OBFxbMl'"E{ 1#n &D"h@#n)uҫҟ夑MZ<g pr&C>E7q}ZE pr,PX룓G$݇ȋe,%tĨH$1#A$tpE+HΟ+cp/NI<8оk"NhCx @Иw/P+Q5 )ptu6:᰺>>B!o(爽ڎk!_^4T%yFQ!%&Ƭg-֑nC*cLe4l޺<[sndo AE~$ Pv@AC P.\)vmۮt`6 uKeVV)3x)&lTf%mOpްuejG#6%N3sZza.D ?3CH0$ {hE?!‡.~ X`'tP GJGf9+v=<("W\~"?~S32V[Szfz+ۀ:xR[M&jH1So!ҕMÓqlkT[51U oB»356uI6Fu/y[/z8~6r4z}LΊ H-蛽؋ ,=0@Ac&!"K曋}jg&}>Gdzi*$5&um]Ndl,n829qH8o:#;|>m!;twˋr0ٜڨhfCTZ PiH|;$#C3#Lβ&3JcSPHɾ6(uG`D]0k] ]항pZZ("*%+"_z9C(iHpcH9ySj]bbjjbb]jSS]]/k۟ޝ5L4*sbѤ4Cpjxֈlw ]rIF ﲙI#}nn7wxڶӴdYj2Ϻ Q\endstream endobj 363 0 obj << /Filter /FlateDecode /Length 245 >> stream x]N0 yA!U @SK;CWk';Oϧw=-~\Rvk\:<%T5_B)kp2(n 5DlQ~^W'Xk'ib kXRc`hX0r9xZI %p`05qyibFj2}7w_ְ^Yo-u\ Uzendstream endobj 364 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1456 >> stream xLSWߣ>K?C6Ș_(aL8T&70)B V@3jptQLteq0sͽ e}}=|ν$$#vn-Q\xHɫhQmSi>cD >);mRfs$gG/|'9}]"_k5U2M-#l &`ʵyaRW KI>.)Tj+j!D]TTi}VJᮢ BT>wI AmD.1HfB6-$aLtW\{#'*] .҃\[9u$)$w#Ɏsa V8N7%)6g.܂ mۿ࡛vGN3AC-i4ΔBS^חe{[X ˏK c8e3R5!$6U'deX>$jI=BO=Vͷ`@BB/3]bY0LqDffIaXΌk Xdh++jk*M-f34R7?m9 tu۩V uIl5p8"0q 2f*alC*l5e2*Q 4B6ۭV*0:-NPtx/[+ր5@w@݊>GOc{Q888QGki<3A'L]Dj,Hq{{C}1FCJͻm01޼TX-ԝ+ R] Haς|ZMeb<QzW?' JjqDI ZRb0P}%8y~C:+,@FC٨=[f[С !hWghQ#} ~jn%?)#fMGӎЇ:R 3G1G44r$JGc8m=懖Vm#@a*XE7(ө9RU]D"Rmsҡc' K~Vμmzw|vj9\րy16F O'&Z-tSLZ:Ȃé3/ܣjAkoU8[M@ԽaNMd\pE(mPhӽȬ#(/PofY3+Ceq"*o괸ӭhP";m.PteF 'HHćhdB42FE 88QxF!%ZR|JaJƦ𓚡]E̐w0ƺ߼SW ofL PYݭNO&< \>endstream endobj 365 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 2697 >> stream xVytuRP` } "Wa)G mI4i4$iIh6IChEAT-^ ]u}Nt;z}o~3|a iS1gMVWXRȭ(~Ҳjn]@U|n|T`BFЯ2>Lյ\AYV/jX-޹dlFr-"d+,C^FGV!k,d-22=4$ 9bܜ"b.h*6>du|&ZP!4X1@e@Lrw[ iCd^#&Q2/]f%y FT,0#_D(I67!W@#܈̄`wtXZOOh&AAĮ&-^ɅdjC0e͙ɬcGdҰ"{z@+6{lwg`86BPzuW(h8eG^ie?-E1Ryaz6_:}ye |u^A'T- h:_}m4h߾= q rp=d%* J- G!:,"17oO#gvҾb{fc ^%ig :Ǩ4(`Wï^0FkЀD^ۈ̙yF_tIRy;e%K2N6sJn]l$.M>y[=ϨNQ& F,pv0==/lNj%!7ГE0Xt?bӴh_GxchIR&r@$DӮbnM8~AםmY#U‰P3O-m88տn?_rK-.*WVr J7.՜˃r5V]lYm Ѹ3h_[}@+l*wR5;AimOh@6xAH&\cUY`ayE~X%@Q:<_&$IPm_SIV8BmXrI#OVkc<-~QЄf+ R(4㵨mAglvB])c>q~JumZ83ڮxX$h]MBC`RnAG䬿|'ui*Q/b^ ah 2 .ұz5wfǩx'#Af/[Ls :|1|wXd?BWwzt=Lֶbc3鐑3f1^sFd܅ rnendstream endobj 366 0 obj << /Filter /FlateDecode /Length 181 >> stream x]= wN [)CĒ.ZUm/@/8I҇s1њȋGpȵ*'eU͕xUXgpr6zv8AY_Z0IM'HӕH()a)a)aK5 a m(vNk,.gOb_L_9endstream endobj 367 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 952 >> stream xemL[eӗ{/Ca6˥ѹ-$s[" EЮ Z垧-:%ŔPu/|lɖ8YbX2cb$Fsӂ#?9Q1 5DWJn7W-aIMjAfjjÏBs;VpvQwsVq5uƢaçXv0Q/ "()0FBecNCթSo#W M4O#[/|(Wܽ>3(p\(Od<8?ԛm{jqLC7 Q`I)BW-:U .H}z|zz즶A $}ropp_ic H׷уPg`XO<zgEIVbicJt{pq`Mcm>qRĘw_(D lͻ/5}tl7KWf1$5D,a^RH6;z$mQF$S!B2G!gRJSxH| %iHqe:w)b4ܛug\)W rbJS3D1@F=ZM:5ai|8iYV/Î#lT=w~wrfyٶfHr!uGq,ky٪x$"H3)8x%f.ss޽w1rgQ)kK-+[{ cC,zf^Y H9)<3t ɖ(f'u:F}uODp?E>\}wendstream endobj 368 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 912 >> stream xUmL[u/^ {[Vu/&2Q7'cuBKA)PS+i2Ǎ2eXeH}ؔ~pɲ00̼us>$ONcNC1QݾloS?Y]V\f VZkם#Z~5j,nM}' ቅAb%"I!FzyęǘY٥J(V::W]9] aw[1F pS4ap;ADBub!1 ĥ{ ߹kJ=")g }ŵ6 0 ] &VVڿè|SL,-i  jf}.og|L퉞{"3S/Q(I-6~;=! 
UgK#C]Olyn HWIBB5޿Q J}Kj|+\ymy en˯8;6; '.'X&/-sK{[~h܇sq$Lj~xGADU\aG|>rbq-1]Vj/U|cV k"p\__a7剎*C銡@!EO1ff$5^ _)lCA/D3UN+uqWR3 .U;R`l&@ݝs{-I[?x>]Q43cÁtr-^f,;-uܠv=O5UхzdXzmkCGG,1L_4on qm֮3"fb\j]b]>V6ΙDnOi('iBn2Rendstream endobj 369 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 351 >> stream xTMSBM10SA  R3fQ*|{d|fZsYH᱋ ' y{yzĐYYRxzyzRUkl&JmX$xU}h lx2~{w|pysF8* 7 VTendstream endobj 370 0 obj << /Filter /FlateDecode /Length 176 >> stream x]O LIå^z(, a6ݙ lz@:a[T'CڪauED W`Ou5 Df@=7F pěa2?:&2ec<_ ~}q'j\=j:V >@oYcendstream endobj 371 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 344 >> stream x%JQUʐ D MEk  Ű1 fmѫ5VE7Ƞ]qF(8p6|y\c'iv/9^_NL"0T,i18{,|};{?&BnZC<{w_9뎥P[kD0:6o*FY4Ul ezW/+JYmj7jvA\B/&:K9GHVif_ɊClI$LFR$ xt7(bzgK^l\մ3v٤D`NXK; Ͻ?hjIrpAfendstream endobj 372 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1266 >> stream xkLSgO);7-dNb"ܲl.ld#t@X/h)m*0ĂxM%3a;cbl~}>,4XL`}9ϼed~-*; -!UH5)EP,x9+r1=%y/K3LA1w>aqM6B6c^̗]H$ 8:0ԪGjRb25`t4'e"[u5TW*?XI-ZA:Y1@|r/( _8f1# smU6؂ ՠ4cRYuMCpq]1p~}Km!Q b/vZj~9xog/D)^zM5ŕz^;xtИ[#iZq&hD_(9?:Ja&;aR@H{vOgK&&ssI}W#iUtn@J:Ep!{X'wcA$c5TEeт 0ڍmЩU<0GC1;8!4FoGq̣+kgK~:A^;4AIH3;tjAo*h;uyp\˸"7JD p=B[R`"sf{WutaڄESKeœgNz"Ҭ}!hҪMu`1HBs> stream xcd`ab`dd M3 JM/I, Jtw|Se '0-3##Kk3%K1to1rK2۔=c]+ۻsO/{Vw\B ۡ݇fvu39?_`]Z]#׃pQLAl3u/]ŲVo[m PnY,lp=lqqpvpup;8O7>mHendstream endobj 374 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1774 >> stream xU{PT{Hݛ-H{5`Q2A^ ",yoey] FB:11ld;ژ6FmOfKIfߙs}}GHI\jÊZ\rX-{*ȫVuY' G!"H$^"{h"O> UV~,d$IB67G]R%w=-M\S,ҒRoOٝ_㍧WeכYEL69Q? 00a_S AТf۷n] 7\Q]v6Ahz2 $gjr ɄSACHngw3!8^3lmל;lG6g6z(te=v,c":Zm=G,>joEQam](g T8HFʶSi=տjEÀI!2x{/sUl=a6nw'R4nߥ5j5.ʲXuMo Ļ endstream endobj 375 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 659 >> stream xKSu+nq!}B!>BQf0fJG:ۜt6ӋGKLL Hz1"ʇs4AQ4JYEs>}RQKZaK(?poyfa$- mB%Bj RddYg!=P<9!c'C h<8x)\MQh!+hvSQg)lA`# ñfl%$=&Dk>p=?',g9"һ-eu̬,zW/t`u Ͼ,E y+M&)u?~9hăF~赂f<0M=9^Xj}I0?e|OEUSZ )a<̫s>522AD-rw1<"&u) $s!B&m ϋZ wqvcv; Qaw<>;%)B!WK`ZJ[J +L/&&"XTRwZclx qSz絪?vT=NA.3)#3!3qƛׇ"_=endstream endobj 376 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 6454 >> stream xY |SU~) ,i-P(PhٺЕn钤M4Mr,mkYT6ي\:#BFGe>2ߗڔΔ-{99wnݨ#(77k6Oyn3#Scw& O&$ F9Ȅyqh؏}^ݷ|EU)~f~otPLl\OȬ۟ 6Wy弬 &/dً/x^SS@Ij5@MFj5 6SUj+CP+g*9j5GͣPQ ?@-&S(Oa%j 25Q(/j;RԣT%S#( NIǩ'(j4I5ŌyU<_F֊&21,柣F?;Gǟ=O><1}jƍza͏~IwZհyoj:;R_ZIƵ@0G`|A: CgNJO5!p~Ǟq~qX+m^1%? 
o"lM0)LTfQ]`*V6B#Ӹ`4jۓ)4DǴ$C8բTCf?wH@&}7Xu >8o HfC6r;ؑ}'Q-#>|ӧJ.]veiP3T^|O)7@"_~ى5vI8k}8wde>d"͎j|sCl67P{{< '짳^#/fUX1x^S>\a1/}mG?y+Z&t% [&f4=?}ĂLzXokD9c92@>3)ugfbRX^_Ǐ_yK'г6$v'G(qM˰ORmfЬz^ba5TZ^P=hWN 7@oˬRK֬i7]qJ>>*Um LՉΈh&`?ThIhФh6Z (Eoin,IÌ6&O\ved{\Eg9dfg 0~EЄ]krVp턁YFzб-k )l| ''Wk+M~eEM-_gHg2;;WHdS4 R\)~YMy;6X}KaNV*}+pjgCoۼ(þ绐Flnpkhޤc6Xf4=ݏ׉0e4edaxp+!z^#uVw>xg1()I\<Rv>0ehh ŝRt%劍VkVW.NXA=HjI|iI_QP;7p 3Bt|RdE{T{8}CiQj5k4@I &Cv0~.'4V50t~p Ukͬ2KUqbP?N,p l^^ Tvڬ1bK?Ox }q&r6kQoaqAA]aEA9I?xs[VRuj?~ѩ>n8 m%lg(֥vf{B!WQݖ{SLV<m53VohC[Hi\Ev}xgYxh7ͶcK3`L^!s]$vwe=.-_n0֙x~(gA/y< #x/A2um~lt2lGבZfCl^7D 7 om6BQ:J(+ Ȏ^'s iD4(ZQdv$5`zQqZb5 zWcA BfRxP^a(8R&EM=2ie'jv}~g)Er>1K=[*;ύ4ͭGѫrm{+.>~ =$AV6m61eWB(`X'(a??Mw3V;؆.'#jV bt]Jq(W ; $op=t"C8l07:'rq{'*24RMCeqiɂXd?M{H-4n?Q참]҄sFA' j;QѠ͋uDxK¡BHN3t Bҍcrx}"de?7 .To=*ۇOh FZ٤  ?":giưyz 1۳A%rT_g^uo|c(|]^v/iNӥ4*״hvd_ϮD' tEQUCrk"hnonm,k2UkД@54jCX>8,6c #JN}JzX [c:LUomk)5p(8H`7ǽ ueO!P˽P8 Vٗ]@ m_m7IW=wN!R /ΐȄ(>*S%ksK/يW|&[1 Bwv%ޟP+O$RU07fy}3(n.0:>yfnCw`n`^ Y?xv~Ǡ2D)ÒPhv}7M  ڄVh#}Q{1 ;L|_y]O>S%%Be!2Ƃ‚"2Uˤ!2^z gD0CÜͮ&҉{, s ݥ:au~a7 o] GnzؼZP^Dޓ_ɛfO%16)!Q`|TfTq y5p&h`auT<(00' Qj+]"VMϊ*҆l˥scu!<$;~~@qټC˛= ȦH8;j܉Q&2g4Ȑ n.)`lG8DcPZ+'LN9OH(4ߛ]h/kݻR_fȃ m@EۘjRҀf)ަ'\X[x4( !X6 >(<Ž:CT%VBu;(IV1&(A:eJ?'m|뾳}$$9!r# :[0Մ4H2]lA:1#ܒm蕒"9^xi;ë~4 nΙژ *fW,Qn\C bΗ_g)W=tZ"Tii4ש"4{X'$ZicM9~C .z/#oowĖ(&BLZX& &ȳlFz7>>[KHU0q<+#@=~lbnHh›EŅ[*]VWeVA#~;;:S__|~Êl7rq!9_5丗\. 1oO5F5ht|Yxfepo-M^ө:S-E-1qXMDKb'+ s" +&a)#p׏F#9Tg,ģ_$v(݌f4i_~O'~D/7>4XlnK*P-o;txK.ssbj+gn&("upby O5Jŀ(f>?rFsB/И4/x`KWcJ!7[p*MϰvA&'Uk+r{Kôi(/})F/@~g AO;`N5RM- VӟDԵw];A_kx 4gra^D-զ2}ynQgI y|p-E@'Wz\އp%1SbrJNQ"WW]]gw̭8U+49.my}_Om$ZѼ.E~s3W?& CWju?[ +KtގKBގ ɢR#u ;(ZCs^; Ҍcs1iMM$"_CcyۤUVeWɉhs&˕kbI)T]^^&'BYJH]]U%&QRP`q7e'h/o{DߌBCEKW^1JMhLÇ8UAKˋ.t"2n1S"a]ZivM v"!I4F Æ")@w#(PMhj'Z!0nhprCt)$fkIW5 UQ.] DpE:s[Z Ĩ(]^!q[47~&Al=ۤ&t3_)gqa]b 0[uW8Xp}uKRzPݚZΆۼ5vGtچV 빊^}OLDw˵DL#y}`rL:$Y2̗ġ6YO}pS'B^|PZEy"Wj/nѵ9Z:gKgA:Īd8+@J]\c&:wO{,|bc8 ?]D~eEZ1LNb3n.'OKgYTV :ùruJ8]^"(=Ou9x_JijɤhY4u(I?KԀ>o JE$ 粭:B> /Filter /FlateDecode /Height 427 /Subtype /Image /Width 826 /Length 57847 >> stream xw\G7Aņ XDQTE[4{1Dވ KP *J"<}g{w5{μg0 0 *1 0 2k X9NäIpdʔ>vBKú0EΓ'S0i˗/W\ک`, 0?ȝ;S0i~/̶vzu~ݻw(Q%7ʺ0Ef0LtΝ7oJյjժ]p2J[ܺulٲ"0E_vr&m?|7m0L:uaUe6j|&M%5a=k cV5lƁM:Ub]c&ú0Ff 1 c/1Pגɔ)Svܩ?$5a51SZXXXƍ߼y_=zT`5k k c׵?|fY׌ua{ua 5}vxƍ7 Y׌ua{ua8]ۿ_xxXҪU`u(XXRk/^lܸqLLXRjC9;;ӠRkF0frt䨘3eD?~? @RRE( Θ1.P:%k8RW^Sλwh rbŊaaa 0ieɒEMr .cnժݻ1Qtw^o߾4}rʉ*W D"## abccs)Oʗ/vIڑ#Gvs3000O<`&5,7dxz*#45a5k]ܹs4ɓ'>Go޼t}Yf &2g @Z^duϜ9SfM1[NӧOo98ZPPиqܹ#kn_|:*>]ӯMbElΝmڴQ93{E;ggg1+W.vgϞ=rHEٳgÇϟ?_ 44Y<::nw)3gк;k kɓ'E)SvD Cz im]vϣZvm޽iŋ'???>r䈧_ͨ]d.]۵k߼yS,i۶_|y]QYWאVZi ,JF^^^4W_8q]|Ŋ{<*ܥԨQCBtTua۶m&[Ø}ᾪ^zN5Ӳjժ~a"W\\|L+C }!s4=ydT4{-Z.]Z*&PL`Yf_PA+ *}kԺv)8GddSnݟ9|65nX 7ǎSӣk .NBL899z k͝;W<9_쑘ԩB*t "xAƆ ^MHHʬk cF^xѦMӧO2]˗/5ҤICIOP :^:;;pٲe R,>}:@[J. 
u>Ur~~~ ,СC-ZD߾}-[_]rE\Z *T8wPmI넁sN<VgɒTx:BaM9DE ]6mԭ[7nРi޽ &uaR.K\߿/n˔)se iV 2e$ԯ_~fŗd?0..Uh)i^Eg$5jPXdYfQ%...ڳg/^,iq]p߿?{GELF>?dϟ?/YP@֭g8SZ98ϯ2c yP@ ,SZ"5-;715IMPʕQ۪UPYU#/_͛W׿8ŷۥJwyr~zﲋ-*-SxqҾ^z\* oܹsy葤Nk֬&iL'AG-[~{ w7݆Zj䖏UΜ9#ZkfoGSKvCCCqgr.]lڴI(\ײeF&Sua!!!♁q5 Q+X5rʕ*UHK]߸qC|o.Z%[YAEꖾ%R " nѢ)*H`6>>´l2;k](@MF-M6t Q`ͪqM pI隢@ժU靬Fx!C2g[knaf@^p4_D_ҥ{3Z;Z`]3!۶mk߾=M) 00O)L<ׯww/z*W\٘P75L2"LΝeWhׯ%뚼SJjttbCbv޽M4رcF kL 4><0 ٺC}iThǘ 0`Ŋ'C w;={4/P鉰\rT*Ujժ[n4J-yCu&G}O?dO;N!0F#k\ooo莞+ 6Q#W+'i*BT׬YF%B6k֌NTuZBB=G}v҅W˗/߳gkTK.ժUKXZjZۊrP >yAO #$]wӒ5j={VFk➤H"ᰮB׊jvrlw]rsAaנ ׯ/_47ruNhQȑd"o['iq]vݼy=::tAts%z@ٳgƍ)ok%JJ*A,$YɤdBlذxgŁ[i–u 7G~Xٙړᬣ!7I`Q#vI:tM>ZӦME΀/_5yk5oɱQn޼YBLlذ^ٳw7N}hpp0 ŋ>Nہzg]KjÇu}LJiӦszH4e4 oe˖n30** 88##0pwpA,]Xb4k]]UVZTt7o^:`cf \ ÙCi5$eQfWJdd$ 2A)SX;96'IfZ`n¤~yFpͧm-CO5X= ĠOz:&_|EǤ[nm@tĸВw]Ԕ<FPN<6;vD_XW״B"m+e4sR ͪ_MlTk'P1:=jX 5CЪkjėRN7oވ'ú0鏔}QF;FԩӖ-[$5`Em-YhZ7-njXJ#Q<-]Լɵ%X&aN~&5Su9V.g]/X @]k-ӧO3gά5Pzua5IYk֬)35u;`]3#tMJ;hfua5کSH5#`]/X 8]c$5IJ׈;w޽{)`]/X uhX&aZ]35k! ä?Xl5uX׌uak}a*]Kz *͚5Dllg$(%McȔ0Rkwuu=u9f1Lu`]/յٳZdɒ4-| u1 XvmϞ=_?Zy?rn߾]\9FOzNUV?~HNab//̙3UQ׵2e`赜a=y&z!OOOluM}{Nx2ڵk4-D48s玉Yh0 x9ESrLk_rFԅv2 ۺu됫 2f̘k'a3g;v,&]fu.!z`z͚5 յʕ+cZϮ{_ä?kfR@~ 5hѢaÆٖ-[[:tَwnժbZuM$ӵjՒ4ʖ-[޽m6ŋ%̓䄮1MC`]c뚝úf 5!um .e˖u<3&L Hۛ7oѣGϚ5Ku@_"pFSHEhjs5jg+d]cua]vM,p]{E| ͛/f}}}o.i/^!OV]Qϟ9Է ͚5c?5+X`\\ַ6?ϟ~Ib]cua]ojP9YdqvvCȿj߾=̓'N~իӣU0qĉQF]tIu{d>>zFۄua뚝úf̡kZuKZ#%|XXׯWPAk\R. 899ɓ8]:8m tr3Vua]k`Qj갮ٽ{۷K*е'O? hѣG0&&& H%$$|wIqiǬYgMŁػwUVn]ZxĉڵknnnZSuVXc۵k8d(ڴis: ]vժUvڒ6^}5iҤ|"K.:u ԩ#uy֭9R\-Zpر˗/gɒwP mܸŋ]tA"%M/</F`)"0L2aJ.M ϟ4S۷_lT4-NI(Nc]P "WtW(={xEp¡Cb1 gϞѱ&]SvG1c`>CSɍ7vU;w@XDlmӦ\ר>Mxxiܗ;vmۖ G2+TO>Ѱ9kVu 5mI4DM24#Z5wޑpܾ}\rkKwu#izz_51u%MtRy`${z]kA`:"H'tt У R!$:޷o_ͱ#&$8**ߡlЈ#XlOԿ% gRH&bH|VA!?mCٲe bC$7BDBtMgm޼s']S<ϝ;Wq41b-\Va0aBHZר#G|>u0ݼumg&`+buԳkv`]3]F7(4hp̙FbזI뺆< RkE(`5'BEkAyՎnݺ⭨˗/={LkA*VH}Hw)tmӦM8i]5jԬY-UvSΦ8.}p7U֭\2NK"υm6((uI=kV@x¨Q-[ 0 e4b*[#ڱcjԨ#{-[)յU^tIT8dkNNNQZTsN%//_ƍ4={vJuܹsO| ;  ݻ$ݺ|=|PrCt_~]k rvY5E8UrH*pGCאRG`` Ȗ0_~90bZn)q^>c]cֺuk\|:+i,A!zimm۶$W^)]%͛a05n޼٨Q޽{vRk@t n@_S`XTvMB&DKHPqqqX5ľF}PCtMڡC!̢4:u*VZu>LYql]_H :Qc=z(H+x:dĺƘ5+ t I3(FmOsjWN5k׮"EP?rh111I6lb ]ӠB5*HZp" ܹsXϟ?r/]&i>GQڵkgΜ)ҥK``ѣ} &_ԶVZWºm!}*iTnݺd NV@(g|||ի'FӪkVdʕuٰa`Ɓ"ڥKΙ3O@N:ȑ#O8!FDD [>޽ ]@+n׼ysׯ)Ysk4 뚽aok _?~YjHk(P7jСC/^U40U P+uݻwQbߛ(tM<8DYBs{訡H\GEEʗ/n:zoAZlϓי7o&>uTk-[RNŹ>'''պm>|ѣ܅!SkB)S&25Ϝ?>OOOUN… ?~XkX?ƙAVO r'd@Hl TR52Isڴip;U­EGwz4øq!$D~~~˗/Cl9rdƍ6mEF[]KP`ka]#\\\bbb8kvܹs! VLOJe̘Q|sڵkCΞ=kK>uMk`f|9k=>j>Y\]]X-[ Z>515;u޹tRժU%M_ 6[41K|r߾}:tX55k ckv뚽Ӽysȍdɒ46Sll,$کK󰮩a]c#`]sXk׮UXQҴ0xg}6dȐ%KHڴicyX԰19kvjM\4 +F'dƊ&5g.1LJ`]sX엧O>bQ̙{6ojKXQנeʔ <))Ν;Qy%n$I"EM3gΜ'OSU/ 111?~D0 akvRB7obB MTcE]K,?(VΎ'җ>hM՚a]sXXgggL_xQ.]k K/XEJ,I,eLJu-,,f͚ իWZ7aӎ`'OĽYquuU\߸qǏt V5`]Sʖ-K/7..5΍\rX#蚍#nRkgϞU( Bl׮]Z2"ѣWትYVujƺOJ֬Y_~`m#?.Z{իZôm6((hkjh~eB*A|NAԱ wlwqq10aMJJǹ}v__߄3bv…ÇÊժU׊GxҥأbŊm޼vں6 HDՅ9*7o^LW+VرcǾ}-J]- k6H_zeNXTgΜ4h+XLmauMyũ끖o=ZnԄQ6ӽ{7*{VZ&!8\ha~|||v)o̓d߶m[fnܸQz8iv wY5@ǎ;s#"1%@YÇ.MlApU\7߿g]c.u%Ykٲ={0`aÆY(} 55FڡC4iݻ,Yկk6mҥgϞ+V… ):vuVEDŽۡ!C,Zs5jdTk?~ÇixZ_.1^L'O֩S'EX[еRJ޴mR\۷o1fiӦ4}亹Lh\R}+Wh\Mg3͛7!O.κ0;wWIJe Ϻu1##nXX*׊,\0MْRXkRֻzYuq䧱-C;t/777nd z]ҥ˦Mh5jѣgΜ)ɓ۷4|7. @bŊ=x@L?|`끁:uX&58?~Ínk Xs|YJM3f֭[۷ooSXkCXK֬Y ɜ9Ŋc@{udA?~̖-b9"ǯ#~`Eؖ͐!u&_N Jl+D `X.tNDCrEeʔItYYLoq^Pխ[˗t5\з̒ŐN$Z$M-\IL1ZƞI={[nku1 /^ȟ?I… x55k c|`hoo{Jku1 'NT.'))Z-[xzB)RdŊM^:uM AJu-44ʕ+bIquɽXRc>~HtuM v֭[߻wO,qss۳gObhѮEM9OwA(SbxҥKIruuPQ4+ +\rt X\iܹsa NNuM !]vTTX%_'f]}kgϞk׮G%j`b@сGgs<6j#fcbb`c֭ۆ t+1ւuLa]c#ЯkW^%AAAEUGe&}*9iZdII3ZZΜ9Z###J׮]GզM]vYu 4oޜ> cX51]vȑ#GbX9{l=] [BBB3A=<|XXf͚mܸŋGnIq֭\5GGGbRd f5k 887bv͚5zRGخ];v7>|pDTR{'aku`]3kjX,ɓ'ի<7s5jj_е˗/?{ ^"E 5 #F2o<Ù kז,t(@qTTVaBBk.7oBRk>|1ցuL1BVZuʕ ,\ɓ'͛1ӳYf1ߡCyӹիWɓw... 
rJ@8QX 8pب7Xcm۶ƢSE]Ν; :thE9 q֬Y}I =zeK3f|=Ew޽RJF 7qD)GZlYF yH5k{1?p±c`4<2A7ni:[9?nݺAQ,]Q~ .|H7lܸq̘1O>a{3g,X\jذ#G z*O`umٲe R,2ew}NB 83eʄ!>>z_:uk922x6o߾MJcX#t & ?WdѣGQWrd׆ pa*`e˖E~-4ooѢ҃2p={J(-[6ʼn^^^/qPBBB/>R|yaQ?^כA909s`p"s$Nyzz1$3/_a/ȍ7h:unݺRToVXl/** Yqu={lӦ &vE裢AߨQ#l.!Ǐ#wdb`&'tu|r|D8 ie(4TtxS1gΜgΜYcp2]ݻ'<-amjLJk`]3kj57ܸFƊvpq)VXyGɚ5&Hg;n:((H 6bĈsUVM+Wjpႇkxx8" 8{D4ra>"rL.]6oެE %Zt! N8W_yF1N˗/T)44 JEr|F2 V;f; ]l.a!`\Rt`?&GsذaQD׮]qVAU5zHFӗ/_F!ALشFσ;N3\~兌5: 55FZll,%IW(v]`xsuΝ;#trݻw,___~EĉYTģGp,D4naҧ:lC(Q"""Bd5ZDՀY!k׮ݻ7IW^FNX52yd1#֬YS-?_W&0\nHP2HQ5: 55Fk^zYFϛ7O֎;ڵk'2~̀lfRJW\⩒P<ؓ>wnժoMw +5 &ѵL2L>}ĉzj #t {"#o~ 1+Wj!ޡ;.N/` 4pVnddBQt\،ehNr꒦ |EukfuMѺ 2D7oS;Vn+UT^=CrLK&OL46:i$Ct瑷@-[6E {=Jh53bYܹ#κFTpyCA59z2'9uuyj)1ւuL1Z (j&)) vʕ/^(h5IH~ޥKB |n֬YP%U$M+Vo UPׯ_c 4/_-`J8="FVy!J|^Llp {"i8 K._<|͚51YÁ|f1ցuLIͷk5j@~‡kPu j3f}EϞ=W43glҤ -߮yzz?]vWVĉخ:`De˖6lPW^c]K ku`]3kjL 0-z]4 BCCqA cX51~޽WO0 uMtmȑ cn`4V 55FZzN>>N:uј>}:Uzh֬oܸQ|y&$$8::-[֭[`z76$@ZΞ=X(D {~z߾}9Ҍ3(XD Ȋ/^k7o%M%HL0ᅲZ y}]TTaÆi/YbJ*`˗/70vk82f̈_I?&p߿ki5[uMC)RΝ;(˕+zJ#[lLWR͛X޻wk׮^{iٲe.]ȴhYGzI8D"M|\+L`9 "uy˖-?.\0!Ǹ^z/;Á`Hݽ{wVN<)wGJdH ._~_cǎF:g%zZ2e {;rȪU8mڴ t rJſjҤ sNo֭^̒>}۱cG۶mS ;׵۷b?<<:Ǖk'Bg]KEӢTfv%ujb]eXk,hĂ$JWj߾}b׷o_(-Am-#] I*RJr/A...L?~ Q9;;îvڅY$*Çϟ??EyR`gϞQAPPIy~~~fJQ'NhyB&_;::,iY E(3gΨQt֦ժU5:ٳ' 鄚H+},(vk޿/iA+W4275K~aQOUjcǎѴHRBB39rPyԮ]m۶&*5[uMѺ&0+2Sș3'-9w\5``05Eu9yE`Daɒ%^ZbE f͚=}@) 0f̘@-[ta_ ZSc!|=foz<&2]bϺFg&:uqc"<<gLtMi& EWH-Z5[uMt_~+V؂5ꚇ,DG+U͛7˕+'I=\h8(===k׮=iҤ/^)R!F߮k?׵UVʷ^~}션e]C]˟?TTqt[&z'O\/N%Hú&U >זuM о}R-úh]sN2e0{ 77Yf (V]pBjtHk f=zL%KDj]{iBP s)>Qj*BbX`ѵ5kK] nݺƍv5m(@F"]┈]_|9Xdح-X.(oo={88us>Ka+fxa$[k&u͖a]Scچ<᪯Wޙ3g޾}+ԪkxV"E Y=铮(Q7x$j"tܬ8պ'>>ކ zG#,Yϟ@R!:tǠ!v>\ Xn:CrLKײdRzu|ȗ/lիʕ… %Ɩ-[:w '֭:1}#*}ʕ?|pF?~\hʕ+C|Y5S]>| Lu ]C6y[|(֭ۮ];j!).!C,Zq+WHg`&MvOv~글Q0[ϑ#z]Cm7p(0[f̀T7y47 ߋ/¡Cj߾ݻw\t)֝={6JUIA4lVZ7oF͡N[ID3xISF 孥䰮2kjֵnԩŹ JԨQԟ#a4388+EG-S. ߗ*U %ձcGc@TB۷oS. QUcŁїS*Yp@фE Ş5͢CY//cF *B>?xZNDÆ ϔ.k8oߞLHاmܸ7NE\k;TVHI^Z&k('.]\j`E06a0(C'N#hj]۷oփ*>yBG1oN?~ E 5*PLߟ8a`&Evvm̘1؄x Boj.&$լY֫W6ZWAZFM)n *=gUq iM tBӅ mH}E}0rPt=|dOF}%bBQe޹sG\A'iu D۶mw!_~vna%<~X,ن$@"OOCtDDhKFFװ![Oހ4)) Bb$֤I x']׶n C a54`C ,K,cO]-CE -ʕ+*Uҵˬk k *eTTrRkn+DIb:N$kM#h+Wnذ!n8I7k  ä77~~~rիG_7^tJ*J1A>}{C8bccյd?2a;RKo>}zj]K61ݺuߜ1cƄ h6(t #ovNaaaիW>uH] u Xl55 k'aIII%KMqzB4,Xnݢys~Ez1ey\pa׮]zH6bĈ9s7"VtijQr}K"jM41MS蚐$lT][ |EӧO+S_ׄ3I2@אW1F1Mn2k 0)eݺu)`Ϟ=׮]lxQ޻wOk3t@֩S'gveʔKwu͖a]c1XĀ$M{@WDE>x`ƍ͕D+a1CMu`]eXa 'wG*#Q`]K2k 0sND%zO7yXl5aC믿ƎK}z i:ԩ`%K>|hUlHז,Yd[-5[uaƴÝ5k9si6kq2k 0tFgϞvZ,Zu͖a]c1-kki5[uaƴIXl5ab׺(Yb>>>I/ º0 cZZטt º0 cZXט º0 cZTroܸQ\Ǐ²fjkfuaƴةp>ɱ_=~zuMR52^w95 5al*r+kŮYxСC%5IϟٳgLúf&XXlֵ[]fʔ vb>kfuaہuF`] 9sVDž Xl530f#jGaVXѯ_?ˤ'Mf&XXlֵ``/oS뚙`]cv`]XҬY3s'&f&XXlֵ`!̎;ڴic X0뚍e 5al5u$kle&`]3k ݻSkI|嗰 k"f P#x}ٓyIB,$ۇuL)2dȒ%K & Z^=k"a]tIW X+'iԧDr/|Ix񢃃|"B1114;wE4n_~-9::b׵"tFǣֵ'Oh]r7jX뚂Ç/\ک`'OSS0D?U mܸk׮߇Pjժ;wNW+S Mg˖ou-ғ~XCBBFgt!6wʕ+X rS뚙`]S tmٲevrڵk'OX̃%u ʕ4=f̘.Qăh)]bYdy~~~NNuMеOOFpFIk!E_ 5_ŋ\r^^:͖(Q޽{a]Hֺ>-A08 RUV]vѶm[y`AAA/_D)ԱcL2?>K,kԨQٲeC0ݻѣG;wD7n\B۷o,XSNVX7|?#LBBfS9ӧc˫f͚N@:tPbEk'HX1a]3+50mڴN0C >|iΞ=΄ϟ˗OT8l d#͛7Y5ciF/CaWX uuM׿Ν. 5_:߹sL29w\Z8o< cAAAb5Ԑ`#$M b C9sرc%f~:{ɦGO_i*O5r5C`]c]D̙߿+5kV cÇ~jժ/苜1 TQݻEbIkC{pZӧOɒ%_|nݺ={_8 kTTi5k5C`]c]Ä#dKW0uM?8swgggז/iڴC$,\l{``ɓ,X@ [8&)>N-[*pppF}}&ըQ!'2GOF#>[{$I'&ό3nj#GHԵ;v޽CcKkР۷i-w5WX4Ppu jj)2KgYX$;8\RFF t.kr!`?+a!˗/WCHa8RNϴ4ssskuɻt)SH$t 6 {G`J, \jJ bӧnf-Jf&fP!2$Q2Tx\T)~u-_O`]@5ԗ/_>.ϺfDXX$u5T=@#""$p?_AJѣݺuSO$$$J*(MOO뚼"͛75j$!ȉN}Ey_lSPyuNKK]ݻG#yuMuĉW\)}|D?abB!umݺu;Ef(to߾hhUϞ=i. 
ֵ| Z>uMB"Lw]#F?&&F  _ipG)))е6mВN:;uaԩP.yMw%M߮猓T'l+>ݻw8uuM2ɕDFi !Yb{=88X$3]@3x'O'tpAԊ/>gΜE.Z(<<\R#zl*BawwwD~ ':шPWWǏSXcW4M4pJF`_ƍc]c]t5CR젛 1 ^pbqQGܱ7B-[Ķ|eTNq<== {.-9vX׮]%YlPtXI7lB [n]dIz-fQר*hdE(>]>}466v̙ݺu^J>+VD{%ޟúP׹`>úVZK K5Yh$]+qeŤ=jD13`)O]=88X%} pҪz.YDt/y?4ºƺ&tMNkԨQ7n%+WJ۷4h *ASB9::*q ,p𤥥΀v5_[___EZ0}]ˇ-,,pKÃ_ƋGSLp}bKлwoj EGGg=b۷?s"'(2.NKFX ͝;Nn޼Yp1}:piӦJ*:8۶mK}6˖-+ٳM6 ĭl'D$VXqҥX#8/gdNmz=*/BժUi$B+ྈ ]3@wޡDB.]v*UX-=zHR}6 4wLֵ}z… ׯ_nebe#G5k-YYY!m:44H$&7nadTcQ55ºdFͩŋqQxLֵ<@G]+䰮15F#O>Qz~-736Sδa]Xtuu)1=x@ ]JM-29Z k&kPqqqeFMZ%:?1L`` }_M6l痧yqX5]`]+֬Yu\R*kL25$IKK C PV-и5H[e1a]Xtutg̘aÆ޽{O:uذaV>&dƁp u.'22B _ZDDbJ*!PFbZ5IqUT}6b$ CDS0YDY5]`]+&ɆYw^ݺukGa鎙a\z}RfP!1VӧO5 pww2yf2kVtݻw-Zhݺq?" e=C95k֤Waʇ9::Zrֵ<@ڌ3`lbuF#0wkEp`?zFfvss[pk7n,ɺhuŕ)SHWWW߼޾}hUжVsֵ<@Z^g`VPtM0}ʕ+:T}5FIY=Z#pHcy(z ky^ S״hu!Ĥ:>0ce(ºt=իW:t萑yLzz̙3.)`]cú۷?wπjժU{5k,g밮1KKKZ~a]Shu=rssCDqqq:nT'9`]c u~^dɔ)SL#túuuvqImSG*Wkך6mS XטCg|H5kvX՚lu^1buֽw^Ne1/`]c?ʇZHNNk Ǝn:u5e˖W\~:5j~:5D֭/\1%kgΜqppȁ6E8p@b]c2uݻ%K%+%#%&&IbA[-,,m۶q߿xyp2u50Ûkk\Ό@_]{9T"lٲ4Z8t-{F^2ƍ[v۰aöm۶x >~(6a/3$$J*/^\22`aaanncǎOS"2 ;ֽ{wlق]a̙3.\W@1a,Ď7nhܸ< 5F;kL1ڵk?|P{>< 5F;kTu.r4k 4kCs-3& 5F;k|yd#`ffVHNViѢիW3[Ds0úu뚾,\_~\09ZW^GfهuM5}yar&''v2{Q`]ShuM_qmv2ӵkk\Όu뚾]c@8vlXhoo'11XdGr6Dm~uM_XL5HNNV\CsI~L 5zڽ{ׯ/~Y Y۟;wN}7_U"ݻ/2' *Txj)κ/k&_p>*ÞuF<,q/X@K3g/8t*p`I&-[LbŊg:u߿p6m.^r 8_͑cǎQL(;tÐ5 zNʕ+uuu/+^8n իW葼m۶oݺe]^juM_XL53AAAh;w9s4qtt|%0zڵk{{{O}sB] ={ݻwQv\z n߾ݠAQ7oޔ.]{733? 5}a]3yXטϤ(kK.2e8kSމ*ggg{PSNP.Z #tmժUp5`]S`D]ۻwo~(ʔ)_Q{GU?#(FaLш3L]>ܣG-KOq#FHky.e?Ӈ  SJ]++W<}Çz:c _:j($κ|FիǍ1)0L:vxűݹsfKHH(^xV 1)r֭/]d]zyyaװ kɳ"3]opI$L]zŊZvuԩSY?rFmHkyOײ5 TZŋ|W1# (_ۡCgRNJ* Uf^͛NOOϱcDXK2DtѣNʕCCCKϺ|FΝxbt >I>78b,w^ݺu%4nv---qТeǿ] :Tmoלpi믿ͽN+"\8EfffӦMC˗ .})dZ!*5.SR]84Fׯ޽{)S Ȥh q/rEEE"YEi}1͝;w޼y+(#S]T{೮'$%%)R$---99^~YfDԾ}hWT;6IHH>JO-gϨ$vS,BR>../յcǎuޝ^3fҥ:r ab-ZdGFFJAѪgdd&rh.\hݺR&kR>p 1.pPXkjjEԘk1nhtG F5$)yy\T8u.Xטϰk 5ӓ'ORRRp6iD,ǝkXXlٲG"#\6 .#Fl8ȑBai4\6mJωGGG)SzU׮]C Qx"&&טի_zI]A߾} [h0NAmHH.v\1t:r"QQQH .C14U鋾{~)֊A:|887i @ZrN:QXkPhMXwX< u]w{ŋ퍤z舺_6FXט0˗%5]tR 뚂LCM<<}45}WD^k` ,0ݻ6lؖ-[D4kK (nqlCn߾M뚤v(>Rqٳ2gii٪U+dB vԨQׯ2y&* >|޽IIIUTYjW -y?Nw5W\iٲz͠P{kz*6 ?Q=5_NYטУGFt'55kjyUXqǎΝ?#}XE_]9آE *1ݕ-[."_hcc;gBm۶%C Y\_BhoI;6l(@ËLR7u =\xQ'j֬ID_zUQDPPk7_}իWP8ZquQi|OtuMAnG\ɞ={VF 6l&?ú/vM .-k+ ~]i!hŊKӧO^VݺuKM4ITȌ34iC u5I%R5'affB%&&҆j9 x?4] -[qdӱcNJa]ck)M]c "k`]߿0B)tzꁁȓgϞ=<)) v%dE$d%uCnݺbG Ku > M wJ,IKΟ?ߦM wСC"K /s˗j,)ֵuM5}W/^L ^Vn]E''AS6l0zh*22rʔ)K.?~]tM1(]XXru>;C:iii4ƛv11Į-[F2DXr5kvX%?mglv߿5I7 666uM,|e6mpkHDX(|V:I]{Ij#H5I6z˺ֵuM5}]ID(LC.]DK5IQv.t5zG 'ꫯy[[[ ]l׮ B#k?Ƈa]ShuM_rH*V(O^ڬYʕ+Ӓƍ߸q٪U+KK={Dgz8"4Q< reddx{{Ϝ9]Q-e o۶ 'e*%Hܹs/^@^:RJ4LZ")`]cú/9k/^| >>>[ŋM&i5u5<4+iJ̲-]DlE=Y̛ecluMZLV p7۲eK+/kAKIIf 8꒒$F5I5:IuCz +ʣٳG0ydooo)OuB b|-$$"&+`]cº/k\u7hR:/8p>i$GrpEqAÆ q9|rXEG];{,4KRyM۷i1h"44//@ 8/C͚5oݺE6#&3P'((bLZ\alEY44m&}ֶZjأxfgђ-wssc133C"bxJ*\M,uC\rꫯ䍀ܼysd#::J߅#b6???z_ժU3 /^,F7flfc]54Ŋw:/v@f`ؤtҊ뚾k&-3 J)S>}s<͠"]G }/HIkU[܃d㏤$۷o%8bԩS*T*n<<<>իW5-/޽{h޽[\5jPjb)f~g>|н{w\'l+Y ĝwJ!P.Gd;;;IvE%jW2@CE3g@L6m |eA0QEppD+<3]C(H 3-sBB"򏜋<9E`ȶzb{ftuM_X333h%MR48qGkua u` 5,lA_7P̙3;v+-7 O ZۧO'OBM֭ ]{{聿ƍ!.ؐEGGC~W1@ƗpAġN 3AZeIՠU:u| B._ܪUH7ᗡ 5YT8p+ kٹsڷom|'аpC>|v4 RhӦÇь@ 1"e[(P[Zj1c֭['֭[ /a_eׂaVjiӦ!!7XL5151LD3Eŋ_^tG%{zz ?ÆݻwW ٳ'Kړ'O`K RPL C,i58_@@urj06y=z6leiG_]8p :/zӜ7XL515ɦ={zPϟ{HM̾,Pxٳg*v];zh%OZ<)F]q9j(U8T%K`Q N8|rg>}/߄u-_f1a&"O4iŊ_^hRװ&99~0`u nl/HΜ9Ӿ}{}u366VS@I8 .RƑ2S|tMD[{KJ*`]k&|5/P_(551LP׮]5kիGA:ui?uܹsh V0?DvʕŋϘ1cر.>|sأ/^&U^zXXc۰aÔ)Sږ-[ }E/]DW[^ULfBR&"Q7oR 8p`%;v 4H$HO(/^HSSn5}v)~f=&f]c}a]3t ҩS>̜9s)Lh޽kѢYvmDCRy@Ðmۆ@,*44! 
77RWB3dȐϟ׬Y XjUuX^P+V̝;{yyawvvvr{ӦM;x`ZZ|*FD3`I5>\6mpJhS۸Sݻ799J*M1<4lذ}O_AXEcڰ1ްL6]FoeX 9Qdr=A5}7o :Lú}wə[n_-,,p]1cȫWNMMTװI&ɻkbɓV}鋜~X>Búf0kLuM_BBBctXr˖ڞ={:t;{׬Y#ޞ 6l˖-bՂ fϞMo߾=97 Q%ݢEW"PBW^7qa]35F;k ֵGɿR@֭/]D+Wʕ ŸRihBF#ºV1)º0a]q了NLL%tF ?SSS̬ @$ 59k X1FXriIVЃΝ;C/_N?IT#X/>>:t<΁;+ɓ'Cڗ/_J>}3m4X۲elmm _Fhu5Zr%777ONNϟ 1b!{Cǎ#Hk5F;k ֵGɿQ@^JOOO___B( K,IUK!z쉺RāBmܸϟ?Vׯ_?zhE]\\Oj[1( hѢd03fXp!PK*ݛ~tX1FXrk&º@_xqFݺuuuttLNN}ׯ0 ݝ"8;;׬Y366ڵkD5EcS?ٳw҅ծ]{VRŋ-^Xދ.W,Ǵ@uúhua4ºhj`]tI>|oO?#@wׯX5gΜyIr44.|CY0'ӵRJuqqi16))I߮U٘1cӛ7ok׮D[B"hIk5F;k ֵ''t J.'_ŹSNOT͂4ceeN:0%%FKk<6ZJEf͚5|޽{קpBBKKn%ֵO5[n:tXҥK'OqٲeK.}bu@Ϻ0a]q@Rv\fbbĪU GJT=6mZ~%'':\vF$ђZκV)(ֲe+W|G駟Hʖ-%|5郻wKaڵУ'^~%+Wn۶8n߾ 768^x188w>:tښ5khzI&i gȑ#[lYjc˖-e<4P* [m޼YO;vŋ"G}ܽ{OѢE)3=99C||rD9rOKKCZ4zذaq_|8 =uرc}}}>|X\9dLKYF#k9v ,%{.c 4'kZp /}vɒ%aaaXsN12d7X\^&֯_h߱dQ/$%Eo)ZAG_]@:vx5TGp[ZZR:)>>>H"sιeddCEҧwkƱ 'صkеzA_W?PCBBTUB/Dr5<@ȧ\uU>/‹/֫uqqAYuÆ aXB\6m`"5wwwSQ^ //;99#wމb -n 8?%_ѷ8 _ҥ߼y#'o tZ ?C!pA,Y 'ˬȬk ֵLjk=SޗSR5h1)|4ˉjrׁ͙3G=cqnҵ)SIq@\p-T/|vTֵaVT)z @xʕb:.\Hb@O6m¡iT8כ5k&v.] ;",/t\r"4M.RlxzN6f,ŗ/_nժվ})kyZVk>||R4<Ё9隘_~hIfe]c8h%[l5۷oK,)A񥗤E Zy¥f͚bC$zϞ=111hє'ž33ڹs/ۣfZ^, 9sٳgq9Dz5wܲeˊ8J#>kc:p`Ez|qhggD=ZKJJO?.&]!O%d]c*P7>͵l䓲 k>4t["܂ƛU¿ҥK4󇃃CttC];x`>}詳:90];~82#Iw;RV6eu]w^xQreHא%oӦM#Gd]c}a]ca]3tmϞ=}%5~jѵzɿjǒe4Hn6ҧuѵSNuY% P ?E׊-:kaaaNNNnqb 551LڵkwY)RwbܢYfwKbbb迶z &}-uѵ *n… %dC?o<={F um>>>P"jʕ+͛7oܹL*U(pWhڵ ? dfܸqgua ky8^zmsss+D4 f!C$&&x xbRR\[ݭ\rĉzZ`_~pN+W|bŊRDoƍ4hpӧ>|E>|B Xd = #P!;vذa6e1lѓ<Cڷo߶m[DFm551L._ ay'-%P+AwEΜ9C`|@y/<^vĉNNN}#ܴiS؊k #uau`1vXFNе.]޽Z* uM ֯_ #?===: _0e]O09Ɖk 0L1e] rqq\0Lʕ_x%0 }LYBCCi%Kҫ+1 <@@̢k 0L1e]KHH(^ytx}b$`;vua>k3!g(0 1ް1 0  0 1ް1 0  0 1ް1 0  0 1ް1 0  0 e j/ȁ|1zk 0Lnº2j&iөSӧO緣uaMX4陔X>f̘כpCf͚uIӨ>k 0LnºSN6ctuaMX_|A$$$ȯH-%%%55DTi6mӧE9sΝ;/tΝ; ?lذ޽ŋk@ӦM_|)S >>>[ .[ݻ666yV]t~YN'Nyسg^%Ç;vľ+VL]vgϞEϣ6 aÆv_> R}'ݻtҫWľVZKNN>|#GPQ )j/##C=cǎ=t5ȌK0 &kʕ+VVV~o` 7nܷo$ՕQR?BaFZ~=mq[n*TѣǿÇkdb o d`fff...ƍ~_~=`RJb+x- &&L2Ey"Y)4f! С{7n| QQ7oDl۶ٳm۶ǏQbEjǎ ,>}E(A3`(],Yr9,]̝;9J KC߁ZB:(1 0 2)S,[L|…ś0$#͛7$ͥOq9qSݡ}P8D޻woSSS|5kŠ N^Zy&MR"5RQ@ -DZjA1K.6qM6&'Z߾}@yA<9l666QE3 k 0Lnºt HNN>|0-wޯ+++铮|oqջ{n2ebccӧ;u]DGGGCڶm;k֬Ӷ-[r{ާU^TQj-s "ԭ[ PHd~Ν… 6l $]!zN4,xQQQyEuaMX4!YŒ?xBkz=֪_;w9scǎ5 ҥK{!: .mC :vX׮]u/lfِ>ݻ 9;; ] 9"2l322"##)LYi5$ f1 uaMX4+W;oQR@*V>|0E\zz,RK,kVP1 05 dk_~۷%K m=ϟ?{l k׵GW:3]kѢ=ʢz:|0, ~A999w%2VhQGG@IQQQYڒ%KP!0Zx{{})lȳf͊.˗0 }ҥK ޓQjUE`!:Ҿ[iܸ۷vjnn|rDa_={W*V\y'jͯ_nAO8m RJ֭[-[hO?9|pիf +111=Hɺu6lA ݺu[hBa_]tA}S"H[!sb4$J P4'0Lt"x{{GFF3ݾ}8oy7o,Q6Ӱ1 0  0 1ް1 0  0 1ް1 0  0 1ް1 0  0 1ް1 0  0 ǀE%5-Y rzG[ܹ33Xa܄uM?RJ%5-ڦGGvzĉVks7\0LuMPW91e1u`GLn,Lb0ֵ ' 133#¸В(/X+ˆ">"cZEK4R_[! O#muԓZ;"c fjI{=NI u`X .&|`g ŋVL< 7n| ZKʗ/!SΡCzꅟŊKHHUÇߺuիǏ56lӧ-\e7N0!55E^znݺ/\pܹH7oXub/5u2^R%IPP5k ͛Q ,Xy#3/_-Z4s̀777Ϩ> ܶmsX4f0BܹS\8*Jk }~@rF3f̮]RRRq؆XBV~}\~n޼2d۶mX5b+FaC ״i3g)RÇ_2ebccB7D~9"o{z%'' %U)> С *j aEaÆݻ;88TXKP()ŋmڴ9<&%%@b(ŋϘ1#{{{,wvv9ׯ޽{9?~xnݠk^^^Ϻu :(е:;L֔,Yݻw)kwr=uTyeknP铮QXX\dIkԨu]5jE\e 40`X?VZC7)۷:TDj֬"Ԯ];22C)ƍGOF2G) qqqbI.ƺf0k5}q޽B7$:8u-::߈`oo#}5IkտAΠhΝ;gsŊȰ.eUHZݻwuѵ gggD[{Po޼LDkՃF]CWҹ 8X&?c)ek;v|lZ-߿OB_X`]3ֵX>9::%YZCCC-ZV͖v]#_hnn^Z51>>A=D25úh5777* !kܸS]נVٳ'E7oޡCE- BiK.M0T.U%8gΜ+]\\2p…'Rz;kúV`]cQw2D^߮_%oTc-`]3ֵXr]t) Zuak9뚩ºf0k5 ;w(kt͛7~mfk=<|`aaYC 9q?[hoXFoX u-..tz5k}r^XլYׯc6mܼysCrU/^իFΝwaiiI K#GlРr9s\$:p{o۶mb9AG)_~.\pƌ&簮1L~u3_~˗/!IgϞ`-ŋaÆb[/[uh-ϯ^zpuBh@N t SRR;ŊT;3k,dw޸ l޼RC M9. 
wܙ>}zBBGFF~7Ν;y]t)SD&ǏuURɓ'#;vѣb@r믿"ZjA:u%K[j\apSUT A&NӅeÇ0l7n4m}EDDGǢQEp{(IjXL005%Ρ*U ׬Y3~x___z45vXl 1)F]kܸ1Zڵk'%%kbFB]׮];&wllH/@!u͚5a?!ByV`70!.QbH%EyućEQ7'|H!ACccXXX5 ʋO5zN`l꺆Rêa3<<\X FhAᾈ9r{jZRJjժ`]cFwXהh5WW'O888XDի7bZvZCo Mݻ/^;~bŊIվ ?=:RI=|0JH 2TBٳgS?^Q0޼ySE(nٲeذaXYY}̬u(#EX`N$H#QѨkH~z /kcº3]vM6)N:ۛs̙;wĺ0>)Ѩkeʔ[.v߿?-ѨkhK*K޿0z׮]VWreG@@.'o.S޾}[tiְaCEjؤXbn$ňPz7n| ]ۺuСCBUzE /_0ZfF]C3g ..L~ӵ7o޴lRR5trrI)7o3QR8j^JӧOVuakJ4yk5N:ձcGq ]vݻwiԵ}AwnݺAKYJ&uƍfj X,Ѯkť%=z5]C{֮Pهu`1Yº05%aaapႼ/=ؿ>}Okx,Զm[l_xqUΝ\{СCzF]۶m۰a grRנVVVX tMwWG$ \,nj#~)Qd^Ѣk/[,66j&K]kذgĔŋ е߻wO,G8DU?yvŅ޺nٲ?~DR[`~t/]tٳgzH"E@1 &xbŊyRc ʖ- uw^$r:K/X um۶S&cޢ55a]+p䡮]|UVS&cޢ55a]+p]0`}NΝKchuau`X AvܙC60իĻ&-Z`]ca]3ֵO`]3b19.h3c„ WT]^яRJIkL^úff]cr5>{s΅H4uƙpc1y u!X4K[zu^iT<8jբ1.d]c䰮lYטuM;:uR״Ϙs%g3)Z-͛wuúff]cr5hѵc˗1zc86mzm a-[e+5F;um֭'O,AdlXT `쌌 l2^>ѣG2XPڐ!C~ &H5kdtf|Q,t-G7H<EK?Y0Uv?.h2X绻ӱ!TNNYlŵΝ; """vSaÆpӽz;a,s̘1 \FEׯ_?{lI.'''z؎k׮yyyBÇ߻w/;w_~ؠ@~5X7P[nN[lDD^ (cv <<Iǁ;v  cǎ%.AdeeuЁ<|a-92x`r!}}}E34>>>,,k/_ MLk1110Uԁ;w,kǎj$Q}踤{!t1˰!}#Ta%`ž}P+ (DPHpmd%%y#8K)6݃R,UV@3??.}ׯsrr?~j?7{-**իWÆ %:&F￯YpU6~ 6 2"\;~/w܉c#ɝΝ;k%, q߿?/M\Crq 8*Յkĵ8 שV\spp(++3)pM2Ȳgv>M6 >\? אs)qMjN_+..fpMh82??@`D8JI&iҥK|kjhJd *ӧ" ^ݻwGEc`$y5=(PJ;s~Wƚ/=zP.-ZFIzq6o\]*p-<<{ͦ;5$ך5k_pa0#ke@{3dK1k*d Q 3bo_.Ҹqcpf1잓đo+BQPߏ?^?ic BPj3\f)_ \h[w-008qFǎ;H9Iz!Y[[7dw233Qt <8888))q *+W@M[n8` {0޽{G}\vةZq F'++ olӦͫWފk'QHKK!> V\Uի)a^a;]]].\xu.cqTO2PƠCˊ(RVh|j\cǎMHHӺ54Q R{{{{TD/ЬOCd\Ӕ:\POٳx\ԩLUU26eC̠МIn;x>ltttdd$]UkztL5`&ݗlJ,kѣG+ʛ7o8\ ֮] ǂqРAԹo5:\CN"JSLQQQXX%y m #k]Mߵk<޽;>c|2=O%yƍٳ3g'Ak||'İ%p[xxݻ%yB||<`80h˖-KJJ~"N I[^BBBf̘p 5*d|t- b"XaZ(#Gf&Kk e!^(c4/뮨 ռwנPh!MQBJpɓ'njc86+kT_"LL"õm۶G/^nACpMk:\:<3˗/uf(*la3fh()p=U / 9@C\'"G֤zA𽷴jر&DLpͨeHU_j&N5kVYY+IYlAUkUٝK+]Okr)2~ʕTA7א%P'v') ʎVSY "P988DaY}T MHHBE]=zҥK]ZezZXµu͟?_Ȇ)0ҳgϘ>MX3OAЌ$OheYNp-00p߾}zlllECu*E>a p-(( fAb+=Q/pfѮM,U㚐B e"+?pwWsUZC(#fˏ(Cm4tQիWqq1 ݗӧO~wwnT޽{CLP*):C.͙3g(/d-Ӕ2GaD}Q }t' *WAAA[n9rdjj*[Gܚ!9yF>QQ |;vǎLi&GWdu :߈ [[[kZ۶mY/ri~~hz\7!Juk]Ƥ1қkز/IZ5pɉ'4/OIzqMX=IJJqu?Wh3\j)QDﲤ۷okpe]Fr,,YNgr kNτ="W^ׯ <==/\vd2\?%y<+HK#Mkl< /_m-ꉃK2$Ͻ{$r@{ujPׄ52\2u(JY ۃX!F^GäDEІ嚻+|N " -I4 ??]AHH^Fx@po9s&Mߊk:xGƏSnXZ JZkQ -g}AְaC&*fmmMkqk[Cɋ/f$!\%\3g !*Mϱ=@:w x7f6l3h8BHmw׬Y~ Opp0RRܻwoܸq̑v[ tO5c>?{l4QF=zVklZp\+..nժUlllXXwnݺ]z-z,//TFBDFFҪ $ `AUdʍPt[:@ \h \% \S5=S KKK5~ V\CW!$۷kMS܅Pju/B3$|[D[mdx?XkGGi VᚐeKF[P-Ij %$$L8Q#HgϞ߿?PaÆ!\%pb]kd>Lkׯ__~=f[lҤ ?ڷo$\lWKj \w)\#uAѲKׄx \hص \'akVTKZSQ&KF[5NOOܹsjj*.\+((X|C***<==7oW={6t3g8::Zp!lddd||>x`dGXuGZW QF1??ݽwΝo] Ν;w.]3a„M6 צL}vDٳltJJʆ .^ذaÀ ]>Ljժ-[Dk۷oh/V?::+VDDD=( ddC'NW"Zz'kBf"kmuqư0<zqpp0H &MiҤ?f͚JkӦ vh'N@:vx{\.SN?^kЧW^ptrr=6幗-[ *--m޼9{պukj׮ݻw#G̚5&\sqqmllg~AAA$5UVV6}\`¿zѣGaÆXZjѢdϞ=feeIxmeeggK2ر=<<ܹ{n<\C:<|-1#CxZv1vX]ԡC[nI,O*pPv 8}6`NhFիl=OHLjZii)j5U,Y#|򊑚u̙3f >Vy;;'Oԕ&$d|akk믿޿E`=zq$[޽{gff|݃}jPյiӦ?33 "nZpUR݁h0?wVVVtp?YqT73t͚5HQ@ ںI&]v}̙3Νk!1hNNN˗ p?<ٱcocctDFFFcbb̙C9)77ܣQ)6]fU(yv5vXd@dB3--)Sk{#FܹsGu3` \2 \ӮB`_>|СC@=\PPP\\iѢEt׮]CxL"AP44nҹsgviݺu!t@' l׮]#G*BXUU\CҌf֭?H=}4K.'GAJNN:1݋7zzzH0 geesNP ?/BX;vx… 7Y|o"TH!C|O)UT6pԬ)5jTbb#j&]]\,pMH޽{/Y 3| jd+ ՠPK׌:\|?~LC3\Q5jhʔ)eeeǎ{)NB.@EW[?LՈPx#λwfggo>##zΕ+Wࡋ MԩSKKK) LK fH ^x"dff[a;:*bӦM}}}U'5!!sПׄjI׌ \1[n]^^ek666/_pzulAAA۶m'C5hwUU )))r`&f3Q5k;}|(..~-y "VVV~ZoܸA6#-R@1ZA<|؎FDDƶhѢ:{ \2 \R/kX\KMM `k>%PՔtڴiswww©uT[q tE\=:x`0sL-5jMl֎P*===o߾MzqӧO7־}> stream xM"|Yo2 nС]̚6$7_Cߛ(jcbKg޼OLL/ri26?8$yfY"6ɥ^nlZI3u]yX᫴65]^y³IrjFн"܌{b,N;fA6]*3V8Hv;3_ce dr! 
_P 0/FRlG&,ĆdU.߰_Vk-3 >>~9>~e%՜Z0\}U؀E90[ ;e;Bk3horvL:\--?V-:Jg"ť?L6j:X4)ѤS61DUJAqs*QgX4%jFA38Kg/8hRQUȱ_!bu=:&7ȗ^NU|s\QH^.FۇHS^PmmA!eaϋj9|30'gQW2>䄈#ٷ0|Z[~ܵ_'J)\[yub$@fOdG:~5j M#%,$c=t\gL;$Wd.ŀcX(F<8q+ ~3=y`N%%uuda^ EAVuzW5e!g=<ʨ} w [b1\QCӁ6S :IKWqwΧPC5wsaSa@(o'~L"O¦&~ I.`~Ǡ$@x*T2gk-\P dl e"(f|!}05ʠֹj HEwݼmIfPn&UDk94 '/[wyBڲ2 "#l?,|J/ I7 7b $+ߟޝkXٖaHM\JdJ(Q jO> 544SOt6-%N:qe.] {KNH5q+teCO*H6"VYWXeUXmcC)]T+Ѹѿs[_|$5w"%.n,M4D<&4_^q $C$?lֲfZ=B%X+W2&xe d HEH5jeIMbfv5(Ũƶ (} h!X꜃X_̈́sdnUG>ڸvmq$^b] YϢ]RsH('&*~Y5h3> stream x[mo8rp1@oHa77H@NּۧR")w|>,VSbX?[q^ߛY}p3AO?7gN_ݟ+حSjVvs n\ݞz\ڴmOͥRj[m_Vn]T GceRm4< ѭtFoFT7))` 'sT݀8m#M(p?baMS}?5էi`Y>xtۦk;: eEZj6av؍~UZzLGɟa65 <^y}%-sjMۇM00jkM#-m113L_+WBmvdziRjXonb \0se[} كVAN041:j4uz;̧?ތ')`bYFa 㬁]m8X *4qkg%ez{ jk48}FBuaIC꺥zxww6{:1{ڋ?\39Yиu DPZbG=<[E v+N HE6-Yހ5EތaH>6޴o<wۜHfCAV]+pLF)LlJ~MW3˾Og 'AR?蒲Dr:7@Τ㈜'yP?QF( HFg͇!οbEG(qT"\4ޑ<ҘǠ{HX+6%9Hp1Pmjג$As2, *Wu\ ixhjEYPҁʍSx{2_$HM?Z-$Tq DK6ZzMH4(b"ҧ_aq (W7xt8ʦKLDr17l!w 1#=Z`]]!t_b1*C ) mW]aW: LkCaL"!~TFNEw Lr!}D^Z3 @9ƥc7l%< C(,qHS37ď4 4N vXXR$|Z̟/ >1we?rĶmsl*&NiNqpV2: ! j&GnClA 0 q<nhpoq8PRP\"+8ms^%Fc,oXAcGWCtJT>堰r@4dP*q^< ;x4,ruK`H6+buǘ`ߥoQ#g6h/ O2է 9r䏣_<$MVdgJDq=>g+ u@Hocft$liX|9.Q%yMP#cR͆>/'0s\Z:=a">\2*'U<Ţ>Z]#-!%zrKҊ_AV~Y8fTjrGW2&qj凯Adf3JjR/R"Ln7Bݒe:,;uM>๶vw/A =.ΙX08&.>'61j&XRNiM0xؼRnICwZՄ_*^r♍ 8vfTI12#:3n3IOmD0zVʀeQtt/~2sN5e!od@() @+rQCzߚalVR6# jl CW8b\7vf5*].3LUlK!v )r6K! zKY+ .OFoJ)A4~`vTɈMkG'dkz$_sRR&K*z-ڱHQCbYV0y}fUT..ۙ3l=Gs8F2B,Um+Pt{Zq ‰p >;sXt;y&IC*jrqi"ZV2WGZ-x`~ m*oyQcR&'_'+[/ȓiji7ib/R˞ 9Dګ+ݶʆK#oջ5Wᢧ@_D}aaxw;_ϗw~~{D߸dC<㉢xAI&:wOYkԛ&<"dYRMdyM&ߩ 2^8-䗗Cl!!}7ȿ6n$3]Әit7?luPLAI4%% x_I7 0RG̠]ٟ_ct{šSZvS5;_ 6 ҈uqy{Q}A5w{G.)J/B)ɑx=$j zz8P~?^$JfHӺs 5jL"yXPC 4,OTN~PiU9 M{ݙl/,˾Y=ǍhW.L"_5 rQZZZrٶs*zا$ipsSpHe pJO9(Bv#wҞ0tMLLbOR'5Q cZ ZBFrRq/pTCԄ@ۃӍ- o'/U7OFE)GN3#vꭆwCkJuq TV[+?d %1`>g{kmpSC p'0(>-F~G?j]< &i5.j$ǎ hbٖRHx'Zo V͗1lM\lz?pzYӹ5'q{c d[loUÿbAgSU5t+NW&b1 2ˮп߅t ĚZWȶ1Z :iшZɿQx'M8%'A-³z[\ӟiM»3Gd>atDNȚ%'-.QKLzIjo9ąZdyVy~W-FnS. 0_|Q$=)L`Oٹw@Rfwni mC!=\{;oBM6U'KS> stream xe{P8LwӌX=Q|˺UE BA@ szFA[@ WkdC)7ruhvu.)#sww~UiDRnܴ5Pbi=`']1QWl6[o?#wQG'm:dBBI,#H$YK֑d=YN$):ċ!3*}UGGzdhF*ɌaaJlfJYZ11@\*Ns帵hpG z;M5HbaNW 61wh&8 V~z EbIIue5D9fżBȳ 4Έ 'v|aU0M=A@c)K[kmLa"HMAN"HQ?B "kоqc鸔ҫQŀ@#8f1(b[`WU1waJ\odQ@U#4ܟ  0Ή}7 C1C( 51T{ѳ \\!@~rl,*-ab^ e#0a}"D?*#Qb1ڗyd^H!el=@~#{ok5,|3n}=n帖c+jCN^쎎^AybJq!]a,s]=)e|\]w4ƝbIpLѫm*d!T!h`R 1"@ `t}h+՞`3꥜:]X%V7L\e]g@sy ~H,BLc5\0xd|3v&9?Cjk-.9[`~!:|Ώܕ)`?? S{|/:{m\qɩ)7N4c8lVٷW׃> מ!}㞜 7gYgcG ߺre[nXCYѠ0\?\"絶S!y$?VrGo5SD~DlAfbj,uï,-ͪxm, Rs܂+!* B2X~g.䉹TMfND[[+KB%YS%k[\Ϡ/7Nٍރn\Rua1S˅99PWZr"ݳ5%Z1g ~k!;-w >vm_-_>q.sPg`=-bWīMm`{Y̺Uq<_G4*b/^|Jg3m"W"Yd4 {m鏮أXv,õݗQDI_U4 z86ѯYa~f ]Լ0T`个oq#bVJWު1B'izə`2%$8Mnhմ*ysmbȼ#LKN!by;>>#/(endstream endobj 381 0 obj << /Filter /FlateDecode /Length 161 >> stream x]O ytKVU D 1@Hg|wYudA>E0um &-m1'. 9ܔ/:MWHAL3O*g;mTR48on!47I,|J*S$endstream endobj 382 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 193 >> stream xcd`ab`ddM,M)6 JM/I,IItw0k7s7B7_ɱe2000003012X~_1w/PSڜEyY%e-ruwtwK֮n2e”I}r~M7{$_~ -ߴm?~n9.p}nnzzfL02O7CWendstream endobj 383 0 obj << /Filter /FlateDecode /Length 5182 >> stream x\KGro  ̱VV3I谒hz-s#@Q$ev-#"3U3z@tgeFGCϮ/p}w+F^m|ӻ7Wv͹9dP׷ׯo6C?(ˬ4?aƛ//RNZ}d? 
S  3[>{`:?z¥N9N/r@K6n7BVkZ_ɸ@+mzz-~Ö#-w.;'3Ⱦ?1|f b~ uyV7^IOn[*BAV;f?|H50^;Nh7)5|vm"SOh-ӴK\઴ρt\(~&t%X>#,=M}q%7=z+!QEKT/IH]X V=X 96㽵w^ܴL`ؖ3+ tKr&߿Q-l|Ybg+-AZMP~1DEg똼;^: Ⓗfe[mQ\bf e ANi57+(ȫ)?~7cFu *Mh@TL`yX.A`֦,p,7;= L/ iEQ{;O Sjw&pc(A0if;Ug}%iE;k~)pѧx:/[ֻvV^9oQ' L-$ڑiʩ.W؇1-c)Q< ωv7ψ(pm`t ;X {43=7p*y1[}gp)\06 ,|Kv$:'Ύۻa:e6>MSo1aZ"t1.vݩ@@(D~EyT45Yos+*â?}(T>!W(l쵖t<7b$A bZCkQv<Ğb]ЀH\D\$~JhhE7Q{^̢"s>+=.²^JjM f&g yWpa3Q$Ji<!Eޟa r;>$ʓszFP)y#@އ`.Bw]GĞnԊlRc@R$d&`!۟F]5Kq 4^LWHMyX(/"MaJzОE郧HB`UNѭhO|X e>;$|R5qlu9 rcXm8{*Pv1nF/9LZT^cX\7 3wnneigI]* H 6ц L8aa$0dxH{ۇ!u,2\TڇȨ`!*8|t]v=a p`TИluGDP/Rc@6'SzvD ?26[5_Q *q=I4<.CdG 5MԪ,RO U*aaa/t&BX!|vUK Yyh,s3I"ex~+H pMϭ<{_$ǕhjYC/ t#3Tx9c.i⮎w5E1.oGM~~ȝQ6hP%1C(HE$$D,nO$If,i uw ||h2 GWekC\pS:/4 wpMjᒙ-:]8h T!eaP3,5T52,cpm{-0bXk;^FaL@<ҌҲὔWQLܻ6qqu21e6,oș5h7eMSq5 0'Y3 ϤC1Si%ZE;k7Q̛ @lg󸗌'Jj'*& YTő4J,Eǒ{hSETB[,Wiݸf5)cTP7HbHc8_2AC(w+d9瘁duḱLoR=O`$D6PbH߃j B[+@^cG2jCJPީi+m6%E=SV2 ζ[$bnk,j|œAQ)qV)}ڝxr( c9NC, #\:ʼn,tDB#_Q*E.}m]}ϙ5w̫K8~f"G$jဳetugH(] J̆/ʗV㬪0ݚOg IJ/<CPOR%YFPRcVЫfH8VǒxZWc8UD LNGv/!<wim&[ydr:M㶅}~y CXޕ8:C ]Hl=R%Two~aiȿ1a2Yn,:\]еWן>kf+|q`3X8 шkW^ZJ%efyUaЎCy(zf+Xn?u||iiƑ:x]م@htjPp,xa@(q,u@G C4T,B> stream x\ݏ6qXinMy,vCۙh=sUIL ?XQ*eײ./ 9\~޴svi٥QuB]<\4?-MZ2˷?]SYmO vBlWY!i7 ~ԢW_ۅ_NaTCT^ By3YfQ3$t\0DnNُ۸: Wme>L 1Gn;'rxr{r2B8yE% ǹ+S Pso| Dw_NumY3> VVO48wsUւ E]Zx+j%* 93y1&GȕMQ;aT˥G{lU*e"¦8AtN+r[|8y&j|$yd1X!3?F4mb&" IIe/YzqkCǏ֦/( ·a`$.|i=# b¬L8tqjT}\B k94#Z2.@n̉!s;aF7y (TNc0Ge`V"A9LU!L}mi39A7X@_) ǰveq 5.O%CY +] T0gPTv Dɬ r<yQGl(i !2OxLGys~~p@)%۴ A z(-^=:)"Kz X(^ V~i@vprrX\ɒ9bǰ\KLYR% eXv>M}G?>E.4Uʇܕx74 H >}Le?&@J!#u?*oP%!G9EraHt[1qdk&d3#ED?zm5dmIB%Dxu.@a׋/-5`)L}!؁`1P`3 5:__m@ Ub9ה ~7yG<;dE`", [IDێb&-Z ,xOCd,$Kmu /,~L U'kǾ#"ڣN=+º@gƸ\Wjf2&r%YŽEvYWd>$1W[!PU=ƹX<|8-zuPDd*ZʅlUCz[S)iyLĜt g,fx[-: -ҖjJkR;جS.0< S,FͳO>K-kaX_EE:JX*jaP^%v>.kE=0QP!9 z(poOUUFGVr;W)u5ʔYDR vLtNk`24I}G ZI΍y|Q Yi/DbYX9X~h gE%ٴE|7C*vi;ڂ ҆o0v?YY6 YZb"`IP$J͘)#euݐ0,qh62Ԅ46&aWqs`V<j oFJ( -*]UWŵ:T S͟2r IH@پS:@3D3v?1>"aGNA3sm㣌 yAJ90q 3u{YV؍eT B}ijh0TҴҢf{ՍYvNҬvUQӑ˛~دQ{9]I n4CIpXPR9_75_ӜnpG[:ZD۩Ym*ק`es`9)s&:BA:Y82𗝈 <,3ǧ:r[Y\tha1tuss| 1?&ś<~h?j 2{;+_2Z+%bm:W3{V>sִQ[64nc5ƔhiW .iAX!5:%ؐ r=ZwNO4[:Lưlhr/Or:ʍ!i;OlV/Oo{bMӖϛ|ҮY+Jf&߂6Q@waVz Yen` A 43AyF;Y!udH}4&Ԡ]C~Oj`uNʐP<<GPoMXIVr>=ggw^Px*J*YjPfxI[/N:J鄄@xo%(Պ/P{zZUT `2i5:e&s}J,c^B\tXJ:U{ma,ЍQ@XD҆ UqU?RAH9\D[(c]d{(׸S>n N}*1[$ܬ 4XQgu. XXT Ӏ3*mzQ?0zE4-s6Q5c!a߆P d?GOO@x2jՆUXII\ ~v'4D>@V> Z΍{js@e \,QM>rL#pʼ|G[?Y"Pm3GE{HڼZgPƍ}SaZ@T$n· )%"UEWE;j"qEZQ3g\Z6:V7k"H(˂k $6y`s8Rh{xs骊EU.G|Vi jT !$ӔU XGElbrOqhuH7PAx,*5e3ټWbP׸֧,K99P18R}^EoeÜPhܨ*h` #SB:O}9L+ߝ-21uEU|hōSvcjʿ \R6< |ye1e55q gJ  : *XBiȭlk9"}H.@aVH@P)yiSqVz>"S흜@-?T@t}HVtm 7IL 6 WqD*Ľ2]zQey[| SV~ u`D׾"X@ sk 'N Ж2=Hp0x_1z3>$(OJ%k0IaKMvJ-87~mnl)t!m(>r;I:+AYK P0d:bbw`erN[ƙf[#OgI؄>JYyW$lR8RVԑr)gMkrkS9n++ ;`t}.)#>4$#mx9'xǂA0\JQ\="xcC𱫻V)SoG;p m݅̍xsҔhǁJa%A{wZ2 cu3남_Ci^TġBχʪ\c.dݽ M ¤@ kdO+C1*E?Pkʚ͇qg&KBwsY= ~>9[IjGC_/dYB%.Zm;q8w\VCv/DHRL=Db=As1Vcf:My"y~o 4BpG0?m<!@oi`gxeW4k^7z::ivҧ>/7Y zv('3w*0~bNX,j3vsqBC@Ge5oeiYk.y^ZU _sX4y HB`vV`6.-].B)rF2Gw̦le:J2{~_Ka7bCƑEfw0;\咋jct |EuJM397` wWj|u,Dy0hu};h`|JLYiji3߇$%`(p_C[m]ɩ\项`Ow]C7EObكglzʞ{0}j(C dB mD2Do*H_endstream endobj 385 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 365 >> stream xcd`ab`dd M34uI It7XӉeB`fO/C-m!FFoϸE{׏'Wb.7ۏ'v'uhvfwRGw?O~Űo{۽voTа~wM?jw -]껌KEwgtut [I*ofFy&,ʒޓ\nw+;wG>eq]ft+ 6tv4tusg}g7W7Ib`]endstream endobj 386 0 obj << /Filter /FlateDecode /Length 3769 >> stream x[#GCF)'~ @"( nA28m1=w7:SSM:0~Xs;fj~4]>Bzty\+qv^b3k9_| b,vF+WW'uL;k/c܉;)ۊh_m lo}|1uT9:! 
F(M/7/݃qia+-Vtr)ȼWwЁkix4 1a$4rS>\" >mڟGZNJ'tiK$F14VBAOL ׼PNtƕoM~2;%UZ)X+BE:#JRɓ VZp& :*Ti u(cݼK(ǵi&k42)aE;Udf 3dn9'je!YE΂m"ƹrJw^!9G39Oet' 7JV̈́`㮴W9Le/iN{uD֝c''eBdP 턼OH:YbTY &$$_w.=&U S{諢0JU\딉|JYT%|zV| Ny {~4(sI@Ȃ*{| gE D>[հl Cph8Z3suVcS^S% R Te*h_pr ְPh?l.^|'.W*щzBYZsJKFnX T\hM^[(/Di%IjfP=#EnAfٓ4E8+>86%i1 > lOS#U}j+eM֔CUZ T)_(: eQ^(2DBIl %Q±(/DOE%ԑΫC9z/>'|"`d)S詊`<Tu1*>kɈxOh?ʝCm4 ҡ:iQFQoN,_EIU%2ޢȽH9 ^ Oq}&*t1h㴷/W9OP(}Ӊ'۷sRi|Fe0/G}3^|ULFWpqSoj ázTD(]OoOC39Jv2tL)__GxxS?"]|_2aadtp?(P n>i0'K,~g_B ?6r 2}!X 4Nظ^] -t#$^Ibjoatt+~&$1ꄊNGmJ1%[qB-}μJQ@heDs }˘iu%rc1-ȷ#P8vԢ: z3~N%uҴFM $Tj>BI6##cu'~B'$OqOLiI~gԜvᖛHt?Q"헇.n?hhطwo~Z\Th_F=sltd5 UwJE sg p #%`Ka@Bs1g;nŏ;.xUtMZoyVPR5lsUI:jkKgCooFMqReu1w Ʀ= nd?nKշq1v]ǘSNFiG }"dD'#O:& 9J~T!Y5v7״ aD* WQcyH3`&nP4riL)dIs@dL8oY}}= 41 z$"&餦<&e5x{t]Gt?bc '-jC7Q. Ѯe@0Q*yw1mLQLMuJSۺ=u;U1Pu7qfh(s]~Ygi@sߢ }1a`[gzh[l$PjQGJcr׋)Ӊ&X`)P鰃Aa`l Al1:5vǁrA*W)dzMC%U\ն(ɩPerU|_zslF8`ct9Qjq*vzJ qD7$++rYİQ ΠMqn& ZjăN^SݤS$Lj#]&voce&Z( ǰuG e*|-zkȺɱ 窻j*[ -LJQLe J29]P&B8+ Y2܄Un|$e&+qLNWAYn_n׫!By&]]}t>Ȇ*4#$Sez|m9xG)_ nx[\0CP:;lCAP̞rmD=[!V9&v}1jb*fݡKK_'֣6-9Pד{u8O9i9>F/s8Iendstream endobj 387 0 obj << /Filter /FlateDecode /Length 162 >> stream x]O10 UX @p' aKtp'w'ezv =(ul"-~H0XT5 㬃]'l3祮ʪC -A#EFO:=-Pj?%Λkĩ4-Mr{&SA|-S0endstream endobj 388 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 221 >> stream xcd`ab`dd74 JM/I, Jtw?ew+;7  KKR3sRJ YDzʅ>?+. +tstUUupOSܞcySمzpgr| ~8On; r\,!<<\<gO 't10Osendstream endobj 389 0 obj << /Filter /FlateDecode /Length 7033 >> stream x]mk7n>Eaәbѻh 4&"Z Y,w~(Iul}I$cII#R:>ffj<\wOo~*e)fsUmb6W)_}3}z}K&<7Hd2mosbU(;LAk-i(O_ vZ5ퟠ{d td\?`t2K06m6KQX7y06y61f-˜}M 0Ƥ}3ookfx7??+c]&bgPL<^E ܅SBB90%zi✗%Ep#sZ˅9.A/kn\66oBKK$+Gjǜ[0p0@snVK GK8%A V$|RRosTŕ y^8Z*{^8T<3L !()38T̴DsC=lA*wah\43H*8H1GK`W GK|  %j>a99<y6San2ױ>Df`B33Y{}cWo<,'>y:tUKSbZ\l,貀ɡ@qli*Vv[m @qCj_ylh- e/HZ;hmZFj0!^a8|@+< 9mZY DD|@j̆MSA /J w4 <[ln 'ChW.eQK ey]/nimg!e7[m06؋&f c?jߌ#Lp:g5E;DnIx1v!Q2* BWB8xX&1؂hhA}sx0'b+F$U$>u\|W4diQGn ؀}h({A=QWI &?rGA,fp19:%+䮡VDl +/XyVѶ$KXIaLM식&M)Ph=@I( Vx$GQ0b[b(1iKHۉ,vhht` ;Ъ&f c?@h<F2wW>*A\km}4,ٚ'<VX.Z",isW5.BлK6t ]BK6C@ p ncs3` Ow$` i]0b9عl!4^t R4lqh M1 6B߁| s]A}S $Kp)PBL0&؉&> WP=a ꓐBVςWēS/e5 &mRxAT=liko!2Pil!.D5 r%$ -ՄtF)ސM4]mP @ 6]vA46'DBdc/g7Ԃ%dLx0t 1{_J3U83^ZC퍩k_il 8(.P B-T. j 75koAY!J,&FIx4]m` x[ĸnB`%kX P$^IѤ"{`DR% p!fWko)D8!h{A=sW^4k,s.;Јp"v ,+LV $_Isk-(/x .P lbjy804]6I`抒@s^i±JM` | "Q^|Ei>v",͞`Q80A./Hd%IF* {#EwyR8л'B2A$Z KqNڎ׹ bHBK&9Lb־<+U=Wx[J}hUW6D<(t8 gP t$#iUcu7 BWԂ9PC&F7PiDB\cSgfz_sT9!D9bu(5ZΘ뱝 {= ^刕9H5P9b 9bȠ$\@-'MsDC)~5@]Q%v vR Y"┃?DҒ8"z7:O81ozs.y7~(cT=Hc4&~Ѥ/2ETOirL!.RG ?$e lH?i "n`Mc/ꔙILXR#* WHPO]J`>Lf6]e&* E؋\-AO? 
[binary PDF stream data (compressed page-content streams and embedded Type1C font objects) omitted -- not reproducible as text]
a!ω =Ӯ N:9s}/4^ 74~ l/klQX\NP;t(rp,~%ףPR~FL7>`;KqgvpdK6$$QNAiaJ7;ֹ{YJivbn0-`/4eO)eh_:(am Bf@`?h8M#)%3T/44ޣVMħ Ԕi 7f텆޿ܿaaX 1!~yOqdU43BCOݘ2㍹~KtrL'ʵ{$*N֤S?өjYijKb7yj  $_c^ x r@&ݫ54{t@(Dx3=Jof@ Z,AYyݾqDiXQW TPVk~cipBW']KEhK,f8=.utM׾9Ʊ3L#Ͷ(}^ߏS"œbIMN&4Оsy> stream x>LMRoman10-ItalicaJ  &byCZE`-fp_OOeivki(x}(be7G *'Hr`EXrjַRkap nKa@-ËȚ3wmnBncOP^Ȭj`.h(ŋqsp}dqEqAW4ne9A_Y[}{ydSWiuCs  To ^endstream endobj 412 0 obj << /Type /XRef /Length 335 /Filter /FlateDecode /DecodeParms << /Columns 5 /Predictor 12 >> /W [ 1 3 1 ] /Info 3 0 R /Root 2 0 R /Size 413 /ID [<4bc35b504b508f8d5611bf95b901ec81><3a4d4b8117b95ab20ad0a8e160115815>] >> stream x-KQTwZ [dAѠD   aa6AhQ[LkEqg s}90u_QFl 8V_֣7S*kܛhw;3=yƹ ͺBOb &VD,]2!mzDqaI\Pl; ߡfN.^jsŽ ]~&#xg [b/]ߓKX,+b7bx#3Nחb6jܑ4^Kgl؅BIepۯX1A_ ,> endstream endobj startxref 310305 %%EOF flexmix/inst/doc/mixture-regressions.R0000644000176200001440000003131214404662015017633 0ustar liggesusers### R code from vignette source 'mixture-regressions.Rnw' ################################################### ### code chunk number 1: mixture-regressions.Rnw:62-72 ################################################### options(width=60, prompt = "R> ", continue = "+ ", useFancyQuotes = FALSE) library("graphics") library("stats") library("flexmix") library("lattice") ltheme <- canonical.theme("postscript", FALSE) lattice.options(default.theme=ltheme) data("NPreg", package = "flexmix") data("dmft", package = "flexmix") source("myConcomitant.R") ################################################### ### code chunk number 2: mixture-regressions.Rnw:499-502 ################################################### par(mfrow=c(1,2)) plot(yn~x, col=class, pch=class, data=NPreg) plot(yp~x, col=class, pch=class, data=NPreg) ################################################### ### code chunk number 3: mixture-regressions.Rnw:509-517 ################################################### suppressWarnings(RNGversion("3.5.0")) set.seed(1802) library("flexmix") data("NPreg", package = "flexmix") Model_n <- FLXMRglm(yn ~ . + I(x^2)) Model_p <- FLXMRglm(yp ~ ., family = "poisson") m1 <- flexmix(. ~ x, data = NPreg, k = 2, model = list(Model_n, Model_p), control = list(verbose = 10)) ################################################### ### code chunk number 4: mixture-regressions.Rnw:558-559 ################################################### print(plot(m1)) ################################################### ### code chunk number 5: mixture-regressions.Rnw:598-600 ################################################### m1.refit <- refit(m1) summary(m1.refit, which = "model", model = 1) ################################################### ### code chunk number 6: mixture-regressions.Rnw:605-612 ################################################### print(plot(m1.refit, layout = c(1,3), bycluster = FALSE, main = expression(paste(yn *tilde(" ")* x + x^2))), split= c(1,1,2,1), more = TRUE) print(plot(m1.refit, model = 2, main = expression(paste(yp *tilde(" ")* x)), layout = c(1,2), bycluster = FALSE), split = c(2,1,2,1)) ################################################### ### code chunk number 7: mixture-regressions.Rnw:643-648 ################################################### Model_n2 <- FLXMRglmfix(yn ~ . + 0, nested = list(k = c(1, 1), formula = c(~ 1 + I(x^2), ~ 0))) m2 <- flexmix(. 
~ x, data = NPreg, cluster = posterior(m1), model = list(Model_n2, Model_p)) m2 ################################################### ### code chunk number 8: mixture-regressions.Rnw:653-654 ################################################### c(BIC(m1), BIC(m2)) ################################################### ### code chunk number 9: mixture-regressions.Rnw:672-676 ################################################### data("betablocker", package = "flexmix") betaGlm <- glm(cbind(Deaths, Total - Deaths) ~ Treatment, family = "binomial", data = betablocker) betaGlm ################################################### ### code chunk number 10: mixture-regressions.Rnw:693-696 ################################################### betaMixFix <- stepFlexmix(cbind(Deaths, Total - Deaths) ~ 1 | Center, model = FLXMRglmfix(family = "binomial", fixed = ~ Treatment), k = 2:4, nrep = 5, data = betablocker) ################################################### ### code chunk number 11: mixture-regressions.Rnw:705-706 ################################################### betaMixFix ################################################### ### code chunk number 12: mixture-regressions.Rnw:713-715 ################################################### betaMixFix_3 <- getModel(betaMixFix, which = "BIC") betaMixFix_3 <- relabel(betaMixFix_3, "model", "Intercept") ################################################### ### code chunk number 13: mixture-regressions.Rnw:728-729 ################################################### parameters(betaMixFix_3) ################################################### ### code chunk number 14: mixture-regressions.Rnw:737-750 ################################################### library("grid") betablocker$Center <- with(betablocker, factor(Center, levels = Center[order((Deaths/Total)[1:22])])) clusters <- factor(clusters(betaMixFix_3), labels = paste("Cluster", 1:3)) print(dotplot(Deaths/Total ~ Center | clusters, groups = Treatment, as.table = TRUE, data = betablocker, xlab = "Center", layout = c(3, 1), scales = list(x = list(cex = 0.7, tck = c(1, 0))), key = simpleKey(levels(betablocker$Treatment), lines = TRUE, corner = c(1,0)))) betaMixFix.fitted <- fitted(betaMixFix_3) for (i in 1:3) { seekViewport(trellis.vpname("panel", i, 1)) grid.lines(unit(1:22, "native"), unit(betaMixFix.fitted[1:22, i], "native"), gp = gpar(lty = 1)) grid.lines(unit(1:22, "native"), unit(betaMixFix.fitted[23:44, i], "native"), gp = gpar(lty = 2)) } ################################################### ### code chunk number 15: mixture-regressions.Rnw:769-775 ################################################### betaMix <- stepFlexmix(cbind(Deaths, Total - Deaths) ~ Treatment | Center, model = FLXMRglm(family = "binomial"), k = 3, nrep = 5, data = betablocker) betaMix <- relabel(betaMix, "model", "Treatment") parameters(betaMix) c(BIC(betaMixFix_3), BIC(betaMix)) ################################################### ### code chunk number 16: mixture-regressions.Rnw:795-796 ################################################### print(plot(betaMixFix_3, nint = 10, mark = 1, col = "grey", layout = c(3, 1))) ################################################### ### code chunk number 17: mixture-regressions.Rnw:805-806 ################################################### print(plot(betaMixFix_3, nint = 10, mark = 2, col = "grey", layout = c(3, 1))) ################################################### ### code chunk number 18: mixture-regressions.Rnw:820-821 ################################################### table(clusters(betaMix)) 
################################################### ### code chunk number 19: mixture-regressions.Rnw:826-828 ################################################### predict(betaMix, newdata = data.frame(Treatment = c("Control", "Treated"))) ################################################### ### code chunk number 20: mixture-regressions.Rnw:834-836 ################################################### betablocker[c(1, 23), ] fitted(betaMix)[c(1, 23), ] ################################################### ### code chunk number 21: mixture-regressions.Rnw:846-847 ################################################### summary(refit(betaMix)) ################################################### ### code chunk number 22: mixture-regressions.Rnw:858-865 ################################################### ModelNested <- FLXMRglmfix(family = "binomial", nested = list(k = c(2, 1), formula = c(~ Treatment, ~ 0))) betaMixNested <- flexmix(cbind(Deaths, Total - Deaths) ~ 1 | Center, model = ModelNested, k = 3, data = betablocker, cluster = posterior(betaMix)) parameters(betaMixNested) c(BIC(betaMix), BIC(betaMixNested), BIC(betaMixFix_3)) ################################################### ### code chunk number 23: mixture-regressions.Rnw:876-877 ################################################### data("bioChemists", package = "flexmix") ################################################### ### code chunk number 24: mixture-regressions.Rnw:908-912 ################################################### data("bioChemists", package = "flexmix") Model1 <- FLXMRglm(family = "poisson") ff_1 <- stepFlexmix(art ~ ., data = bioChemists, k = 1:3, model = Model1) ff_1 <- getModel(ff_1, "BIC") ################################################### ### code chunk number 25: mixture-regressions.Rnw:929-931 ################################################### print(plot(refit(ff_1), bycluster = FALSE, scales = list(x = list(relation = "free")))) ################################################### ### code chunk number 26: mixture-regressions.Rnw:938-942 ################################################### Model2 <- FLXMRglmfix(family = "poisson", fixed = ~ kid5 + mar + ment) ff_2 <- flexmix(art ~ fem + phd, data = bioChemists, cluster = posterior(ff_1), model = Model2) c(BIC(ff_1), BIC(ff_2)) ################################################### ### code chunk number 27: mixture-regressions.Rnw:950-951 ################################################### summary(refit(ff_2)) ################################################### ### code chunk number 28: mixture-regressions.Rnw:958-962 ################################################### Model3 <- FLXMRglmfix(family = "poisson", fixed = ~ kid5 + mar + ment) ff_3 <- flexmix(art ~ fem, data = bioChemists, cluster = posterior(ff_2), model = Model3) c(BIC(ff_2), BIC(ff_3)) ################################################### ### code chunk number 29: mixture-regressions.Rnw:970-971 ################################################### print(plot(refit(ff_3), bycluster = FALSE, scales = list(x = list(relation = "free")))) ################################################### ### code chunk number 30: mixture-regressions.Rnw:981-987 ################################################### Model4 <- FLXMRglmfix(family = "poisson", fixed = ~ kid5 + mar + ment) ff_4 <- flexmix(art ~ 1, data = bioChemists, cluster = posterior(ff_2), concomitant = FLXPmultinom(~ fem), model = Model4) parameters(ff_4) summary(refit(ff_4), which = "concomitant") BIC(ff_4) ################################################### ### code 
chunk number 31: mixture-regressions.Rnw:996-1000 ################################################### Model5 <- FLXMRglmfix(family = "poisson", fixed = ~ kid5 + ment + fem) ff_5 <- flexmix(art ~ 1, data = bioChemists, cluster = posterior(ff_2), model = Model5) BIC(ff_5) ################################################### ### code chunk number 32: mixture-regressions.Rnw:1006-1013 ################################################### pp <- predict(ff_5, newdata = data.frame(kid5 = 0, mar = factor("Married", levels = c("Single", "Married")), fem = c("Men", "Women"), ment = mean(bioChemists$ment))) matplot(0:12, sapply(unlist(pp), function(x) dpois(0:12, x)), type = "b", lty = 1, xlab = "Number of articles", ylab = "Probability") legend("topright", paste("Comp.", rep(1:2, each = 2), ":", c("Men", "Women")), lty = 1, col = 1:4, pch = paste(1:4), bty = "n") ################################################### ### code chunk number 33: mixture-regressions.Rnw:1362-1367 ################################################### data("dmft", package = "flexmix") Model <- FLXMRziglm(family = "poisson") Fitted <- flexmix(End ~ log(Begin + 0.5) + Gender + Ethnic + Treatment, model = Model, k = 2 , data = dmft, control = list(minprior = 0.01)) summary(refit(Fitted)) ################################################### ### code chunk number 34: refit (eval = FALSE) ################################################### ## print(plot(refit(Fitted), components = 2, box.ratio = 3)) ################################################### ### code chunk number 35: mixture-regressions.Rnw:1396-1397 ################################################### print(plot(refit(Fitted), components = 2, box.ratio = 3)) ################################################### ### code chunk number 36: mixture-regressions.Rnw:1442-1449 ################################################### Concomitant <- FLXPmultinom(~ yb) MyConcomitant <- myConcomitant(~ yb) set.seed(1234) m2 <- flexmix(. ~ x, data = NPreg, k = 2, model = list(Model_n, Model_p), concomitant = Concomitant) m3 <- flexmix(. 
~ x, data = NPreg, k = 2, model = list(Model_n, Model_p), cluster = posterior(m2), concomitant = MyConcomitant) ################################################### ### code chunk number 37: mixture-regressions.Rnw:1451-1453 ################################################### summary(m2) summary(m3) ################################################### ### code chunk number 38: mixture-regressions.Rnw:1458-1462 ################################################### determinePrior <- function(object) { object@concomitant@fit(object@concomitant@x, posterior(object))[!duplicated(object@concomitant@x), ] } ################################################### ### code chunk number 39: mixture-regressions.Rnw:1465-1467 ################################################### determinePrior(m2) determinePrior(m3) ################################################### ### code chunk number 40: mixture-regressions.Rnw:1509-1513 ################################################### SI <- sessionInfo() pkgs <- paste(sapply(c(SI$otherPkgs, SI$loadedOnly), function(x) paste("\\\\pkg{", x$Package, "} ", x$Version, sep = "")), collapse = ", ") flexmix/inst/doc/bootstrapping.Rnw0000644000176200001440000004660014404637307017051 0ustar liggesusers\documentclass[nojss]{jss} \usepackage{amsfonts,bm,amsmath,amssymb} %%\usepackage{Sweave} %% already provided by jss.cls %%%\VignetteIndexEntry{Finite Mixture Model Diagnostics Using Resampling Methods} %%\VignetteDepends{flexmix} %%\VignetteKeywords{R, finite mixture model, resampling, bootstrap} %%\VignettePackage{flexmix} \title{Finite Mixture Model Diagnostics Using Resampling Methods} <>= options(useFancyQuotes = FALSE) digits <- 3 Colors <- c("#E495A5", "#39BEB1") critical_values <- function(n, p = "0.95") { data("qDiptab", package = "diptest") if (n %in% rownames(qDiptab)) { return(qDiptab[as.character(n), p]) }else { n.approx <- as.numeric(rownames(qDiptab)[which.min(abs(n - as.numeric(rownames(qDiptab))))]) return(sqrt(n.approx)/sqrt(n) * qDiptab[as.character(n.approx), p]) } } library("graphics") library("flexmix") combine <- function(x, sep, width) { cs <- cumsum(nchar(x)) remaining <- if (any(cs[-1] > width)) combine(x[c(FALSE, cs[-1] > width)], sep, width) c(paste(x[c(TRUE, cs[-1] <= width)], collapse= sep), remaining) } prettyPrint <- function(x, sep = " ", linebreak = "\n\t", width = getOption("width")) { x <- strsplit(x, sep)[[1]] paste(combine(x, sep, width), collapse = paste(sep, linebreak, collapse = "")) } @ \author{Bettina Gr{\"u}n\\ Wirtschaftsuniversit{\"a}t Wien \And Friedrich Leisch\\ Universit\"at f\"ur Bodenkultur Wien} \Plainauthor{Bettina Gr{\"u}n, Friedrich Leisch} \Address{ Bettina Gr\"un\\ Institute for Statistics and Mathematics\\ Wirtschaftsuniversit{\"a}t Wien\\ Welthandelsplatz 1\\ 1020 Wien, Austria\\ E-mail: \email{Bettina.Gruen@R-project.org}\\ Friedrich Leisch\\ Institut f\"ur Angewandte Statistik und EDV\\ Universit\"at f\"ur Bodenkultur Wien\\ Peter Jordan Stra\ss{}e 82\\ 1190 Wien, Austria\\ E-mail: \email{Friedrich.Leisch@boku.ac.at} } \Abstract{ This paper illustrates the implementation of resampling methods in \pkg{flexmix} as well as the application of resampling methods for model diagnostics of fitted finite mixture models. Convenience functions to perform these methods are available in package \pkg{flexmix}. The use of the methods is illustrated with an artificial example and the \code{seizure} data set. 
} \Keywords{\proglang{R}, finite mixture models, resampling, bootstrap} \Plainkeywords{R, finite mixture models, resampling, bootstrap} %%------------------------------------------------------------------------- %%------------------------------------------------------------------------- \begin{document} \SweaveOpts{engine=R, echo=true, height=5, width=8, eps=FALSE, keep.source=TRUE} \setkeys{Gin}{width=0.95\textwidth} \section{Implementation of resampling methods}\label{sec:implementation} The proposed framework for model diagnostics using resampling \citep{mixtures:gruen+leisch:2004} equally allows to investigate model fit for all kinds of mixture models. The procedure is applicable to mixture models with different component specific models and does not impose any limitation such as for example on the dimension of the parameter space of the component specific model. In addition to the fitting step different component specific models only require different random number generators for the parametric bootstrap. The \code{boot()} function in \pkg{flexmix} is a generic \proglang{S4} function with a method for fitted finite mixtures of class \code{"flexmix"} and is applicable to general finite mixture models. The function with arguments and their defaults is given by: <>= cat(prettyPrint(gsub("boot_flexmix", "boot", prompt(flexmix:::boot_flexmix, filename = NA)$usage[[2]]), sep = ", ", linebreak = paste("\n", paste(rep(" ", 2), collapse = ""), sep= ""), width = 70)) @ The interface is similar to the \code{boot()} function in package \pkg{boot} \citep{mixtures:Davison+Hinkley:1997, mixtures:Canty+Ripley:2010}. The \code{object} is a fitted finite mixture of class \code{"flexmix"} and \code{R} denotes the number of resamples. The possible bootstrapping method are \code{"empirical"} (also available as \code{"ordinary"}) and \code{"parametric"}. For the parametric bootstrap sampling from the fitted mixture is performed using \code{rflexmix()}. For mixture models with different component specific models \code{rflexmix()} requires a sampling method for the component specific model. Argument \code{initialize\_solution} allows to select if the EM algorithm is started in the original finite mixture solution or if random initialization is performed. The fitted mixture model might contain weights and group indicators. The weights are case weights and allow to reduce the amount of data if observations are identical. This is useful for example for latent class analysis of multivariate binary data. The argument \code{keep\_weights} allows to indicate if they should be kept for the bootstrapping. Group indicators allow to specify that the component membership is identical over several observations, e.g., for repeated measurements of the same individual. Argument \code{keep\_groups} allows to indicate if the grouping information should also be used in the bootstrapping. \code{verbose} indicates if information on the progress should be printed. The \code{control} argument allows to control the EM algorithm for fitting the model to each of the bootstrap samples. By default the \code{control} argument is extracted from the fitted model provided by \code{object}. \code{k} allows to specify the number of components and by default this is also taken from the fitted model provided. The \code{model} argument determines if also the model and the weights slot for each sample are stored and returned. 
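For illustration, a typical call of \code{boot()} may be sketched as follows
(the chunk is not evaluated; \code{fittedModel} is only a placeholder for an
object of class \code{"flexmix"}, and the arguments shown are those described
above):
<<eval=FALSE>>=
## Sketch only: fittedModel stands for an already fitted object of
## class "flexmix"; draw 100 parametric bootstrap samples and refit
## the mixture to each of them.
fittedModel.bs <- boot(fittedModel, R = 100, sim = "parametric")
@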
The returned object is of class \code{"FLXboot"} and otherwise only contains the fitted parameters, the fitted priors, the log likelihoods, the number of components of the fitted mixtures and the information if the EM algorithm has converged. The likelihood ratio test is implemented based on \code{boot()} in function \code{LR\_test()} and returns an object of class \code{"htest"} containing the number of valid bootstrap replicates, the p-value, the double negative log likelihood ratio test statistics for the original data and the bootstrap replicates. The \code{plot} method for \code{"FLXboot"} objects returns a parallel coordinate plot with the fitted parameters separately for each of the components. \section{Artificial data set} In the following a finite mixture model is used as the underlying data generating process which is theoretically not identifiable. We are assuming a finite mixture of linear regression models with two components of equal size where the coverage condition is not fulfilled \citep{mixtures:Hennig:2000}. Hence, intra-component label switching is possible, i.e., there exist two parameterizations implying the same mixture distribution which differ how the components between the covariate points are combined. We assume that one measurement per object and a single categorical regressor with two levels are given. The usual design matrix for a model with intercept uses the two covariate points $\mathbf{x}_1 = (1, 0)'$ and $\mathbf{x}_2 = (1, 1)'$. The mixture distribution is given by \begin{eqnarray*} H(y|\mathbf{x}, \Theta) &=& \frac{1}{2} N(\mu_1, 0.1) + \frac{1}{2} N(\mu_2, 0.1), \end{eqnarray*} where $\mu_k(\mathbf{x}) = \mathbf{x}'\bm{\alpha}_k$ and $N(\mu, \sigma^2)$ is the normal distribution. Now let $\mu_1(\mathbf{x}_1) = 1$, $\mu_2(\mathbf{x}_1) = 2$, $\mu_1(\mathbf{x}_2) = -1$ and $\mu_2(\mathbf{x}_2) = 4$. As Gaussian mixture distributions are generically identifiable the means, variances and component weights are uniquely determined in each covariate point given the mixture distribution. However, as the coverage condition is not fulfilled, the two possible solutions for $\bm{\alpha}$ are: \begin{description} \item[Solution 1:] $\bm{\alpha}_1^{(1)} = (2,\phantom{-}2)'$, $\bm{\alpha}_2^{(1)} = (1,-2)'$, \item[Solution 2:] $\bm{\alpha}_1^{(2)} = (2,-3)'$, $\bm{\alpha}_2^{(2)} = (1,\phantom{-}3)'$. \end{description} We specify this artificial mixture distribution using \code{FLXdist()}. \code{FLXdist()} returns an unfitted finite mixture of class \code{"FLXdist"}. The class of fitted finite mixture models \code{"flexmix"} extends class \code{"FLXdist"}. Each component follows a normal distribution. The parameters specified in a named list therefore consist of the regression coefficients and the standard deviation. Function \code{FLXdist()} has an argument \code{formula} for specifying the regression in each of the components, an argument \code{k} for the component weights and \code{components} for the parameters of each of the components. <<>>= library("flexmix") Component_1 <- list(Model_1 = list(coef = c(1, -2), sigma = sqrt(0.1))) Component_2 <- list(Model_1 = list(coef = c(2, 2), sigma = sqrt(0.1))) ArtEx.mix <- FLXdist(y ~ x, k = rep(0.5, 2), components = list(Component_1, Component_2)) @ We draw a balanced sample with 50 observations in each covariate point from the mixture model using \code{rflexmix()} after defining the data points for the covariates. \code{rflexmix()} can either have an unfitted or a fitted finite mixture as input. 
For unfitted mixtures data has to be provided using the \code{newdata} argument. For already fitted mixtures data can be optionally provided, otherwise the data used for fitting the mixture is used. <<>>= ArtEx.data <- data.frame(x = rep(0:1, each = 100/2)) suppressWarnings(RNGversion("3.5.0")) set.seed(123) ArtEx.sim <- rflexmix(ArtEx.mix, newdata = ArtEx.data) ArtEx.data$y <- ArtEx.sim$y[[1]] ArtEx.data$class <- ArtEx.sim$class @ In Figure~\ref{fig:art} the sample is plotted together with the two solutions for combining $x_1$ and $x_2$, i.e., this illustrates intra-component label switching. \begin{figure} \centering <>= par(mar = c(5, 4, 2, 0) + 0.1) plot(y ~ x, data = ArtEx.data, pch = with(ArtEx.data, 2*class + x)) pars <- list(matrix(c(1, -2, 2, 2), ncol = 2), matrix(c(1, 3, 2, -3), ncol = 2)) for (i in 1:2) apply(pars[[i]], 2, abline, col = Colors[i]) @ \caption{Balanced sample from the artificial example with the two theoretical solutions.} \label{fig:art} \end{figure} We fit a finite mixture to the sample using \code{stepFlexmix()}. <<>>= set.seed(123) ArtEx.fit <- stepFlexmix(y ~ x, data = ArtEx.data, k = 2, nrep = 5, control = list(iter = 1000, tol = 1e-8, verbose = 0)) @ The fitted mixture can be inspected using \code{summary()} and \code{parameters()}. <<>>= summary(ArtEx.fit) parameters(ArtEx.fit) @ Obviously the fitted mixture parameters correspond to the parameterization we used to specify the mixture distribution. Using standard asymptotic theory to analyze the fitted mixture model gives the following estimates for the standard deviations. <<>>= ArtEx.refit <- refit(ArtEx.fit) summary(ArtEx.refit) @ The fitted mixture can also be analyzed using resampling techniques. For analyzing the stability of the parameter estimates where the possibility of identifiability problems is also taken into account the parametric bootstrap is used with random initialization. Function \code{boot()} can be used for empirical or parametric bootstrap (specified by the argument \code{sim}). The logical argument \code{initialize_solution} specifies if the initialization is in the original solution or random. By default random initialization is made. The number of bootstrap samples is set by the argument \code{R}. Please note that the arguments are chosen to correspond to those for function \code{boot} in package \pkg{boot} \citep{mixtures:Davison+Hinkley:1997}. <>= set.seed(123) ArtEx.bs <- boot(ArtEx.fit, R = 200, sim = "parametric") ArtEx.bs @ <>= if (file.exists("ArtEx.bs.rda")) { load("ArtEx.bs.rda") } else { set.seed(123) ArtEx.bs <- boot(ArtEx.fit, R = 200, sim = "parametric") save(ArtEx.bs, file = "ArtEx.bs.rda") } ArtEx.bs @ Function \code{boot()} returns an object of class \code{"\Sexpr{class(ArtEx.bs)}"}. The default plot compares the bootstrap parameter estimates to the confidence intervals derived using standard asymptotic theory in a parallel coordinate plot (see Figure~\ref{fig:plot.FLXboot-art}). Clearly two groups of parameter estimates can be distinguished which are about of equal size. One subset of the parameter estimates stays within the confidence intervals induced by standard asymptotic theory, while the second group corresponds to the second solution and clusters around these parameter values. \begin{figure}[h!] 
\centering <>= print(plot(ArtEx.bs, ordering = "coef.x", col = Colors)) @ \caption{Diagnostic plot of the bootstrap results for the artificial example.} \label{fig:plot.FLXboot-art} \end{figure} In the following the DIP-test is applied to check if the parameter estimates follow a unimodal distribution. This is done for the aggregated parameter esimates where unimodality implies that this parameter is not suitable for imposing an ordering constraint which induces a unique labelling. For the separate component analysis which is made after imposing an ordering constraint on the coefficient of $x$ rejection the null hypothesis of unimodality implies that identifiability problems are present, e.g.~due to intra-component label switching. <<>>= require("diptest") parameters <- parameters(ArtEx.bs) Ordering <- factor(as.vector(apply(matrix(parameters[,"coef.x"], nrow = 2), 2, order))) Comp1 <- parameters[Ordering == 1,] Comp2 <- parameters[Ordering == 2,] dip.values.art <- matrix(nrow = ncol(parameters), ncol = 3, dimnames=list(colnames(parameters), c("Aggregated", "Comp 1", "Comp 2"))) dip.values.art[,"Aggregated"] <- apply(parameters, 2, dip) dip.values.art[,"Comp 1"] <- apply(Comp1, 2, dip) dip.values.art[,"Comp 2"] <- apply(Comp2, 2, dip) dip.values.art @ The critical value for column \code{Aggregated} is \Sexpr{round(critical_values(nrow(parameters)), digits = digits)} and for the columns of the separate components \Sexpr{round(critical_values(nrow(Comp1)), digits = digits)}. The component sizes as well as the standard deviations follow a unimodal distribution for the aggregated data as well as for each of the components. The regression coefficients are multimodal for the aggregate data as well as for each of the components. While from the aggregated case it might be concluded that imposing an ordering constraint on the intercept or the coefficient of $x$ is suitable, the component-specific analyses reveal that a unique labelling was not achieved. \section{Seizure} In \cite{mixtures:Wang+Puterman+Cockburn:1996} a Poisson mixture regression is fitted to data from a clinical trial where the effect of intravenous gammaglobulin on suppression of epileptic seizures is investigated. The data used were 140 observations from one treated patient, where treatment started on the $28^\textrm{th}$ day. In the regression model three independent variables were included: treatment, trend and interaction treatment-trend. Treatment is a dummy variable indicating if the treatment period has already started. Furthermore, the number of parental observation hours per day were available and it is assumed that the number of epileptic seizures per observation hour follows a Poisson mixture distribution. The number of epileptic seizures per parental observation hour for each day are plotted in Figure~\ref{fig:seizure}. The fitted mixture distribution consists of two components which can be interpreted as representing 'good' and 'bad' days of the patients. The mixture model can be formulated by \begin{equation*} H(y|\mathbf{x}, \Theta) = \pi_1 P(\lambda_1) + \pi_2 P(\lambda_2), \end{equation*} where $\lambda_k = e^{\mathbf{x}'\bm{\alpha}_k}$ for $k = 1,2$ and $P(\lambda)$ is the Poisson distribution. The data is loaded and the mixture fitted with two components. 
<<>>= data("seizure", package = "flexmix") model <- FLXMRglm(family = "poisson", offset = log(seizure$Hours)) control <- list(iter = 1000, tol = 1e-10, verbose = 0) set.seed(123) seizMix <- stepFlexmix(Seizures ~ Treatment * log(Day), data = seizure, k = 2, nrep = 5, model = model, control = control) @ The fitted regression lines for each of the two components are shown in Figure~\ref{fig:seizure}. \begin{figure}[h!] \begin{center} <>= par(mar = c(5, 4, 2, 0) + 0.1) plot(Seizures/Hours~Day, data=seizure, pch = as.integer(seizure$Treatment)) abline(v = 27.5, lty = 2, col = "grey") matplot(seizure$Day, fitted(seizMix)/seizure$Hours, type="l", add = TRUE, col = 1, lty = 1, lwd = 2) @ \caption{Seizure data with the fitted values for the \citeauthor{mixtures:Wang+Puterman+Cockburn:1996} model. The plotting character for the observed values in the base period is a circle and for those in the treatment period a triangle.} \label{fig:seizure} \end{center} \end{figure} The parameteric bootstrap with random initialization is used to investigate identifiability problems and parameter stability. The diagnostic plot is given in Figure~\ref{fig:plot.FLXboot-seiz}. The coloring is according to an ordering constraint on the intercept. Clearly the parameter estimates corresponding to the solution where the bad days from the base period are combined with the good days from the treatement period and vice versa for the good days of the base period can be distinguished and indicate the slight identifiability problems of the fitted mixture. <>= set.seed(123) seizMix.bs <- boot(seizMix, R = 200, sim = "parametric") seizMix.bs @ <>= if (file.exists("seizMix.bs.rda")) { load("seizMix.bs.rda") } else { set.seed(123) seizMix.bs <- boot(seizMix, R = 200, sim = "parametric") save(seizMix.bs, file = "seizMix.bs.rda") } seizMix.bs @ \begin{figure}[h!] \centering <>= print(plot(seizMix.bs, ordering = "coef.(Intercept)", col = Colors)) @ \label{fig:plot.FLXboot-seiz} \caption{Diagnostic plot of the bootstrap results for the \code{seizure} data.} \end{figure} <<>>= parameters <- parameters(seizMix.bs) Ordering <- factor(as.vector(apply(matrix(parameters[,"coef.(Intercept)"], nrow = 2), 2, order))) Comp1 <- parameters[Ordering == 1,] Comp2 <- parameters[Ordering == 2,] @ For applying the DIP test also an ordering constraint on the intercept is used. The critical value for column \code{Aggregated} is \Sexpr{round(critical_values(nrow(parameters)), digits = digits)} and for the columns of the separate components \Sexpr{round(critical_values(nrow(Comp1)), digits = digits)}. <<>>= dip.values.art <- matrix(nrow = ncol(parameters), ncol = 3, dimnames = list(colnames(parameters), c("Aggregated", "Comp 1", "Comp 2"))) dip.values.art[,"Aggregated"] <- apply(parameters, 2, dip) dip.values.art[,"Comp 1"] <- apply(Comp1, 2, dip) dip.values.art[,"Comp 2"] <- apply(Comp2, 2, dip) dip.values.art @ For the aggregate results the hypothesis of unimodality cannot be rejected for the trend. For the component-specific analyses unimodality cannot be rejected only for the intercept (where the ordering condition was imposed on) and again the trend. For all other parameter estimates unimodality is rejected which indicates that the ordering constraint was able to impose a unique labelling only for the own parameter and not for the other parameters. This suggests identifiability problems. 
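Finally, the bootstrap machinery from Section~\ref{sec:implementation} also
provides the likelihood ratio test via \code{LR\_test()} for assessing the
number of components. The following chunk is only a sketch and is not
evaluated: it assumes that, analogously to \code{boot()}, the fitted mixture
is passed as first argument and the number of bootstrap replicates is
specified via \code{R}; as described above, the result is an object of class
\code{"htest"} containing among others the bootstrap p-value.
<<eval=FALSE>>=
## Sketch only (not run): bootstrap likelihood ratio test for seizMix.
## Assumption: the number of replicates is passed via R, as for boot().
seizMix.lrt <- LR_test(seizMix, R = 200)
seizMix.lrt
@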
%%------------------------------------------------------------------------- %%------------------------------------------------------------------------- \bibliography{mixture} \end{document} flexmix/inst/doc/mymclust.R0000644000176200001440000000205314404637307015460 0ustar liggesusersmymclust <- function (formula = .~., diagonal = TRUE) { retval <- new("FLXMC", weighted = TRUE, formula = formula, dist = "mvnorm", name = "my model-based clustering") retval@defineComponent <- function(para) { logLik <- function(x, y) { mvtnorm::dmvnorm(y, mean = para$center, sigma = para$cov, log = TRUE) } predict <- function(x) { matrix(para$center, nrow = nrow(x), ncol = length(para$center), byrow = TRUE) } new("FLXcomponent", parameters = list(center = para$center, cov = para$cov), df = para$df, logLik = logLik, predict = predict) } retval@fit <- function(x, y, w, ...) { para <- cov.wt(y, wt = w)[c("center", "cov")] df <- (3 * ncol(y) + ncol(y)^2)/2 if (diagonal) { para$cov <- diag(diag(para$cov)) df <- 2 * ncol(y) } retval@defineComponent(c(para, df = df)) } retval } flexmix/inst/doc/bootstrapping.pdf0000644000176200001440000034636314404662040017055 0ustar liggesusers%PDF-1.5 % 1 0 obj << /Type /ObjStm /Length 3390 /Filter /FlateDecode /N 53 /First 429 >> stream xrF}bTʜ{aيk)9RD"!`l{ )Pnmm40wO3IL@ YD4x@ $ze$ID֚pFPDhoA *x+x3QJK Q9DbRD0bdqb PJ"$ "u&B" J@ hPˆ0`QH1N$n,vD-_"-30YXD589@ (8 p!= f@ , L%,Lb1 GXDHY0/z8.b"nHe1KIT =$@YvG^S-i LɄ1%3.DK&ՋQYo\㍪%²ߨl@W"4EB^,Lr#eDz4Jnȗ)kH&||-ƹߏ+'*˶qY{%ۗsvOW`|r=Y0Y6~ l%74ˋ|H `rydT}"G {nvj!,H#9BdZd4W _y.?Ȗs pH qA][J]%cL915gZ{',)L&:E2q[%4ϾLKGz`.`ag@`6 )E6L ^&x~SH98[Gu=Rrab(re@|9!縶v;(uU7u)ҚxjR*$V~Gmۂ ''o?;;yOg>sr? }=et~No }0h>@JD@ >`Pve\{0ܲ鸘"@^,=K #ߺv`&C+I"lWT:pB7۞] =->e,lU N>V2AyxwS_CNA>=G =%}Oc:l&tB'¿;:){: }٘.hN ZLIB/]8MI+G,ߜW n9! yCVB>xGi ) @tnrA5jwJ.@8ΧVy%4I6]rLL9(OT̒IvR}}YL/>>Q}͓?aD+YWK;FC5[%۬}J |Eoʍcd'on~; elfRu̴LlfL-;_,ujY{Aj]W r9.-~~4-8OT' 5cP 8heW>/1WZY;~D>CcֲqG"f,DEZk՘ZzhiH+Qtݕ5d]LerM 7[P,(A)c2ؿtɃl r9fr̯|wr!ӳ*ƹbg Q0tC3 xUAeq˨+>6gFq2e'b7ͼh5p91{3QPf9fFl]cI @KV o:N}˛{gOs# / Ȃ QBj4tŊu.#ޢ@Ц|,v aC(i@NRMc<%66'ޑ?.OO&լhfλj$U!{YKZ#vciRM_؍0[5w2@usli_zދw7M|xDEaGe#̾VM<\k e}Юm:gQJZlV[4`;|v ֜7`ߎczC B`C`-s' a(E_^_] ^~f:3}ne|GxtX1{?Oarj]Ղ\"Oy]@ʎnѣ|==zzulӣ ȎK掞\u%pk&wnYC0ڟ ^/#mWC%gx9sH?ϻs!w=j0 5p6tKoSRv5Ί٨Ӣg8) p\Y{T$:V7)ցFm מU~uyͶC`TZ]vm>©@l M]. Cp>Bn8\DE P0 ޻p%Y~.ڻa68pc$}\~x\m{^LgJ{ˇEwo m?Z`7{ KT5)A@tQXt2I?-I_ʤa_u"ͻN67.]? 
[binary PDF stream data omitted -- the embedded XMP metadata identifies flexmix/inst/doc/bootstrapping.pdf as "Finite Mixture Model Diagnostics Using Resampling Methods" by Bettina Grün and Friedrich Leisch, keywords "R, finite mixture models, resampling, bootstrap", produced with LaTeX/hyperref and GPL Ghostscript 10.00.0, dated 2023-03-16]
Q2͆X+e(DVUbnշ)-DEN&rw6@iWrE|\uP΅oXWAk VCp3L*{ӿ!*r}27Ep5᰾'8E`5:d % rPhLEσ$Saw̗^1͵Zg AMy> stream x LMRoman10-BoldP  YxY^D<[ ދ/$\7-) \0L'zxg[\yxd\ P^   To O!_tendstream endobj 118 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 530 >> stream xU]hRqogzACBZ.ʨ1+%+*mԜ;ĩ^\b-EE]"D?X }C a=.3[[;X?@ 8p)kXlZX>Snj_I#U!JbZ@Io# ɓ҂[|$im<)ųcفukJJ쟌D,hrL1WI'UgHM|I]~uv/^ wXSͨ/Ky~3Ģn$(s7+߱ŗ7aǖ;.R5eZdZL%Ґ%]Sp#`u< 7&bS#~Ww(,P y(Wl: Y.j<&PډڞmE#hIt d$Sx<}=-mX(ȣ'.%kUpiӍR)jz3pBendstream endobj 119 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1244 >> stream xU{PTuew^`Y&fw#MQ^"1k| $CEaXqW1f5mAuIpuLS#0iM-\1c?9g{|)EӴreT̎4oL¶4 ф2PH@!=7]:'ǷLlbžx\}s.DQ ʃRRkGyQ::Jӏ%fAoQoÁ_C4Q 䠺9[ wj>q. "u+ߋI[a-kX!]s͌?|n:2{`XɊ.4ڻ44-O'F){ l"\<Ύo4Mb @w1b]:d_|!0.)J-.__?6xꅌYsִx|cmYE,"dT 87ř5~CCa#sз[5}_u~' hy'8:xU{nlټBϑ`/gs wũR'KKv X]TgdzDZ+"N ;-is[4 >_Qiv!CzDH@>KX/B,ݷ_օ5c.p^ڮtB{>>z5_>;nfQ9:noTec 3IBN 'x2W0tG#`[Vl*SGU)H`>UIA,/Oos t8)e>;B=Y'$HO2(?g z=ߢS%J>;A!v+Æ[qCn&>YN"/Kx#prXM|U RDp83=8NSHeXjL(2v4v.p00 (YQendstream endobj 120 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1882 >> stream x{pw-{Y8 . %@Iv&@&3P!,6e˖zY֮v%YO[%!ۘML] $aHdR& }CeIӴәw~߽$E$h֭CIAS+|}ʝEj5ճ.[Γwr WߢW.D swA(HrܼrZMFw츆 M(^v͋J.G|HNl!Ir$OÉl9Q,V_ɡrTQK)WxPޗE6䓣 eJǠ%Խ~H,+Ncl.kކ990"oG9Ό?MZ޾z\o7\H|'aտKd?D}9hڈS2k+96ˬN"E#D(2-h2h;[xt馚2i'4-P{:+[PC7L}`%L͂`vbe5 I}rضas1^6n-.:!IW^2pvWGNa35{g%7B3LCyh'MFG[zZ!t|Pﱌ]Kcg=%Lnwp cQ@{8F$=PJ/('p-3p=!H7J@;gBuP p*Ϝ]nbhuX pjej:" iᆀF -.A^1T׊D>!GK3WTOEvuV Z<ņUmV;ub@vw0;IR~4Eg6b^=a?ewHT.oQ(5^{ {}:6ɋrOr[3)O"ߡL0]0}X;* Ua|[?r 9zP6[ Z㏴f? 4~HfjldY/f~PY`wL?3cu9Opqayp n;~?gӂYzSXaPA H[X(F5ˍa3vlHpFCmH"&V_O7fO^JLx*-Z9CPcK tحY=-5VWV%C'dDApղ?/VZZ,L>2_5hפ2U]QQSSQ]Juw䂬}nC)Hf> o? tכ$"Ď)O<g!oջtvަ_jݿz ]I%ےY)Wz1stZ c(0@)jj™[06ѝ aF"ן9=m$'36Ef¥85hI#[b 'ܣ)g6;Ьnh7GP =2jPGkDnPD._TdndV({T-%ZAhawW2/Ь7kiMQ{ߟ 3A\%ҫPU_U~]@': v6IlL#@ٓSk(y_l 9o?~ 4*YU /R㛺=z- Gn?f~4.ߺwl^LE(A%J732w.΃[ŀmwwss!w!AJendstream endobj 121 0 obj << /Filter /FlateDecode /Length 161 >> stream x]O0  ]Z?eB%:t8K结ϲnr _XցV$h, x> stream xcd`ab`ddM,M) JM/I,IItw_0k7s7B7_ɱe2000003012X~_QWteƬܺ:k%V5M?mwV&L&8uB7-dɵ[9\{y_={&Lb`, Aendstream endobj 123 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 225 >> stream xcd`ab`ddM,M)64 JM/I,Ivew;0SeXb} F +~Ք3~w/+˻+jYvW̟=of ߫63':VVڊy3le9{<_~ -ߴm3fn9.p}nn}N׻GPtendstream endobj 124 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 350 >> stream xSLMMathItalic8-RegularJ  kL)@嚧e#y~\qH\q$XU[u|sYQW_RRQRr{#ltSuy}y"͇|z}R…xR~}P`ŵNjvCp  To Ⱦ6endstream endobj 125 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 302 >> stream x#LMMathItalic10-BoldTX  alpha T}~~nhr|xxxHmTulUWJ\`2Lݴafȋͭ|NhrOq_sSbͬɨ苖)r_szXr  w > stream xYo!#@RQ8HH $Ӊ͝(-y:qOg( wwv3˲\nԾ]Ww 4ty~pG1]0M | 7uYRS4n?yEKC՚s^D n4BkrDQYi|tMB::5W+0S[wႼM4}iBЃB$0 TJX}#K"Nֿ:Rb7%:WF z~qn;j W ^WG:tIt9zXerRm>'m' 0B:m5T}jA6LX}j+'[8<̬.< !\PZ#%_!{84Y9{nlPBec^Sqۗx *8P@ 5Af@+p?W{*# nMmWx?#LQ:RٮkGdx2m9DyԳ o-~Krtb;pzFs[J4$l05֌W]# zɬТ~Y̘r I|wO|9C:<@T{t9 LOt~-yяzh3uc=X3WgSO$vC"yix[\mk`,(W.cKuIiAtGO_$b, @A` L#D@,n9 օ֐GƋFF̰lA@:)J8xqN.ۂ4ĞüPs; #*9r Ȯ~a Ad=8kIe" "P5K48~@9ظ t%9QGJrTOEܸrs-0+b)J@Ԉ0.ܖ TG>YSCf#_ӄkG[ih"N ]ː 31p.;>XZG۠ybktM,#lN56Q2s::M$R/aF%I^s8<![OPSv#ȚP_90;8[t̐t]kC3 G<7dx(hG#ш0B$d>6f9:S&(\ [0\u5k3or1q6K}%f=HXm!U5\c4XLCh(P^&1<|ur[Bin'}m(D,t TmP n> stream xY |SU(Wle)Rh@nI$m&ifi}Ze1JXTQ\R93P~wϭ5t6f|\|Pg<6|OJDDǐ?V`;gzOĨGSŸmm K%-O^'mOݱ*mջD ^25t-=Œgʘ=yH=MR j2LMQR멩TZBm^6QSSRj5ZNYJj65ZEͥj5ZDS#Q/5(z |(wJB''q0ʃZF\B nY4{gC8Q|>,b9W{⿏>=xUOjIc6>1 s,7Ǐ_;ǧf='|9qT1/v)[;ZZ%BEkJD՜a27@p~SB2h ,C.*] `i`Csi= ].*9H0x, <'" =;UliK$2a~)E6D2b*)YB!,/XP ̻t>+ sDR˵"nIN->ku8q_kr\k:^ijG?G1{y`kqLOEhJe}:L걺:ix&UZ|,vD T@sZ#Su Q !iaH0'ZϱzN _n<q 
\|6E=l:,+}^Mn}G2X!,voW;oC֎$PXKzvnk@Sг̇,nBjuhg*77Bh}㚷Wo,,╱8zʾ6ljv]@sёxӡqJ3kǧ Bv0TJ kfkS0jHس0 ~P> 2ưKi+J^͂#W8Su$W~r8PqW;z_܄fc$d{JqAF$4h|CMQi ^)F">2 pUק$r48;>ĐLDU&sm$ݍkN!ţO%ǻ-ԏz('v-$k?F^j|˾)u9AkоXbGaBOltփS1|" Ge.p57١wA'kMU9SL\t' !VקtOkL$H` ]XW,>B/9o0?\lֽ564UNuY<#_8yh 72vI )ߕ_=*'-M}D /4%xV\|d><]A gbM%BcvZӵc{g~Dz_ /;Rd){X8_q~scwW 6 s&}hz͎0HszH zmQ3`3+ʼ* E ЋTN/IlVv無L8ׂ#̄f;vJs n[Z{]?54o1xaAT܀3FL͙4*idlG’XW>h8í$ce]1þތ/G;\> yMTJ`;nn`mZ湆 h)ћ$KdX9_4 jܤw`|=W*|nR;j;I0b7{Sś[Fg\:oBu"½` M[K-c%&ᮛ%ijPdkݟދ`!Pz=KۜPˡ)b}Q1/3ZwQ}.yh6lO(x ]x\c?R!zSN"j7s+\p:o@i7Fwg,,f~R#cej̸KŁ7ML.Re+Z^Põ{%0 hLo/Ӱ %*y=9~-; 7:4ͧp8&uTi Z:n8a}c*I -421}VȖCDun􌹿Q A,Tɛ2@M'9h":@H|<i-1 ѹ}1*}9{ixh'Ͷ^JO mPmy%TYQ(adi9 M;]q"$CIt,n$Mli-"ˠeg*̅˾-8r=$$QP֓0W&V0U̒yyhC|;*'= /K!Hvg8Aā2a2ʥQƩފVm}Av QIU.6miypƜ])(+(*5[5',лϠ7%~v)p\ڙtQ[8}H)AN7r(&Ɍz}6VK~]g5U낐q^8n+\Jx럕xf|Kx6nD 0-:7I8°Kh fKb+H8ouTS]?H|IrOQ^=瑳vĨwվR:2@ *UjFͥB7u&܂]&ږhiil+7B4¡؝t!a!"N0ǣO{}ܬ^X m2Loc,kn.)5-P鋖3IݜwX i⿹*žc`uGc'Dңn-MpkVNx(8$S tu-5E[K=-ztUS|4'E4%j.4aW7;%}d8{S|2~E݂z wY_=M AY rAmj/Rru) Ia[=Z .<$@Bo˕讂öc\rc)&q}ddgFt)Vt}rMd$C."#&V^e^E^et_Ga-`+-m=-?/\}}~TNn5MuI_b;K\nSl'8pȠw Ζ]<`vFBBȗ=͞ CCcG#,??q ~wu&;ΰz*FGI/HzTVCmKuUĪEf߇ǀAOBRAecT ^rq z[" =fz41X [a$ٛ'ȳLF]ď1K2ud0+DY1!ek .Gք7$sC~nA^FyfJ z +ѐgGGoaN+t q/ ]lͳh]fwcB;ֶ^z ɂk1_-_cTY!Rdg[e\l}Zqz5BaYA e\ay. ĵDy!%r'|_/ 4 =ފV2+ĉx\2"4MUN<'1H;)Bo݁ kBv&3d:Qj$Vʈzlp3 $ M:(67J;j1D]{哧Η|^F̺GBWnTUܜWl(ί=t?cbji`l*[+u1 ΨuG^տ6%K}G[C3j}-y9zSw%i.;{b) { kr!݁+dX5`ݷ(R@Os:D.Z15h oVm':kq^yfF.['WFTY%5bɊ}A^,'A>Y !_]YQ.$VXQ[Qk8D˓'4o@C2M*C[@BOXe'02mTM耐@i Wٕ" 61NQ'7(mv4c9F.Z+)GتٚҎrH3j- WqvVNiKT+ ~:["+hvrBxBo ib 7+/Ol}f;+TE*6u7 1(Q]*Gh%ZrL]e^>hC#ȘN\ :˦Nx. k7^K`+K3|S o@Yk-!h[޵?3GC!L$/V-{%hy<N9 s-8*/a7>?/.<] 0Πi3qzAIGs܀["E)v-gWG3MhבsXYAgKMBN[K2f }r?rrm8EjNR&K1XYkǬsYJW s4[]hvS W r>yU}UE@qp7 B{b~7Qw>kkgʾTYq"+/+0UҋdCQwrYZE>ՐaU4JyF᎚bs!or"UepJ*ΪORU:IQORzEt;ҩvO(zy%"u>>‰N ҥ9ԢB *~25}>9̹e6B;Bk)9dyeZ2 Twhzʢs^Yn:K1 dxxۋM.(FU75Yk21 f{;> stream x]O10 ,. 2D! }I$.|D/J`H_#49U a:8 d >LY]zCKHQDUkhgtJmyp8I.~r 6/0>S4endstream endobj 129 0 obj << /Filter /FlateDecode /Length 3495 >> stream xZK7Cpaٰ#AVv%Xje%?>_fza)VUWi-Fa%/W?dWOH E""Acv}uX fs#xޏAkW1 q؟o6[(ۿ;?hB7ỻVQHk"dpa-bTtyo}b&l SۥLrI_Q3]vqտ kZkgfFxʎZa׿ WҸ0v~}XI+(=(AڬoWO WQ"WB^;ą20~mc XLPF S/tWF3B:7zGpRwbb+֡9Jofu)t_iQmDI@bp[AUx#p9TLNC*(ݾ-F}gw.&P'0+Y!@|eB#\U[! rE ^ԎIggOp/jgw)4U^7_7/e^l^*Yu8ʾ r9nb}^Q+8*m;FBcuAC% 8 v,²"%f*լ' [>FH6 `:gbteb4gLrhJ0yٱ+7}NCc((<% EUH,TzgNenU5[¢m*k|1* 6mf*HEɤ]Z%&UbΤe~b&tJĤJ`&i^PH+jS@mqv)se-p qJqJj)v{Be}F;N]s~bz%bs Ws*rl$, kG_eaIt:ܕn4OvQ :^؞aݽMߞp13Ѻx\ !ȉ0$:tEACN4VE0(f^Wb<Ȑ3^=odJakȈ\LV:sM|()W.m7D!\F+Q(W#G[ߤXsC~NЂ葻W¾"}0AG zH;}5@bR0nO/qX\Z)˧WrU L-2UQ}2U JLU0EvqIA٦HcNtVnJX'`PO=Sͅ`-m)sڵ5@e.m٣; .sO٤3~"@FœI9P_O/ar~l # u&KFBfIwӟ^FŌH}AO0IyS˷zıqmG:|ݕ;&z#v3<\;,nqLOywhI<10n;&JȻK&`n3fe{ި0Q>](.gwnb:)&Lu6ι?Ә'%m ~M!EE/6[Z5wlb~&q4ĿNe4X@Ye$^ۢw7/g77i5UT45Wse 4Ws rGRxJbHRy1y\Q9. rU²mA?]NGlVRLg3+%xVp3IY}1J{-N?5]~7[tHs\Z71NN|!.cM6Iv`]ؼs$[wuhY.Q.WAY{_JP"re*bΉSjwtj7"ޭٽ ۻ(&wS,P )V " iѨ]R+{)UǵNrA/Dme=䟖+6p?iSUßkwd,Nt7֍xLonޤa$9lVt5[Z435> stream x}[oeq;Ba %@زdHH@?P==T##].gWY& Qw.Uշ|kƟܘO7_n?Fɹjo}{ow_nNݟZ9_>~3g}_?{&o|2P.Oywll~QlI?VwLJO=O//xk!}q߱ǿ<|\y o]a, 86kXdϕi ?w7??/~m?fO=ҟ?}~z~6_-Ju쒻  uɖso&C&ߟ@z 7݆|ƺlaA.?|oc9kfsQ04"Z5kSV޻q[[ `OyϹ;!ľwӯ~|~WX{kǮpzym7Xy[d$L4e WL:͋:5՞A|, rL"pςx!ٴ"=L.?!`OE0N>:g(`ϩngɜC_w Ăf\1 ?9|c7 {ޘh7Fc9~.FUȄKl)Bš;Pa ?{pķLJZ2*s՟+;[X XkoZC0%ed= "[I)MV\|J)Ąh`pϖk9F϶w.|Bt.]> V":yRTHITTVGRa =D0 -s,CE>KtZ4 m})3<Ѽ,:29T gw|aa=#Yz8nYFZrΪ~0'f2Rvے¢\bQA3W:k$tq6g rvǢX$\aJrٲg>R%f>vt+?GF3Bar9Z.L2# (#Y ;@[&9eI'"ZX$o^FYq Y+w-M<̷d x+FrͦbVSzl`yCb;l%9p#s? X<ox,. 
AX6*bZӟpV6=zIS4F!ob-.oR6upx2MüHkrPQ'@NUb V0m/7XgM-̝&5smS8p}7)) Fkf`+LE+GtxpASWq(t4g7V d@as]h+FX╙G`L4dGfIºZ1té1DZS9Q>,:1#`th+WYb8[Ħ5rm綠cBs#nU;!6 |}j-2?픰g&jW9ύ;66葳UEǭ$iWX+:8,AQFJӫ9J6^<ގ&cvIlᎻ7>L-81vQax^WlVB^9 "bżR`AvdQcpx,48X&oRABY;PsbT3NPdu!yKfIWzCa)]4pB.z]J!caBEs@ }*V4n:qNأ roFQ8$8PQb^^,4h)q<+CIqrw#=K"N¬|B1YN L"(dM6LX}V>9 Tp2p ;wEAV K"P;EbF \Z%{,E2G$ŅhGU񘶗\6IpĘ N-_=F`#aILFӬ0!U$'kր(.,L֦6r8 ΌX&$.`.4ȴRT x :*H#4fa]dacUH ]č۴Kin4 n(@7_D$ `0V4 [*@\ȠZZU=*׶ʢ7nX07 \xp܊ĽA`bAEP=SUT#F3D8́K),ܵ(tPP_pu[->>FF&a Y,X("I 9kiҶELQ٨rA3] +[E. 2_̋l I0p b\&YlJ'+!\nYw*cZ*)DTYbd $'jD9U#IcBgvU[Dt@UE]DlVrH9 ;YS|օˆPXvG`D.YTX[E`_KE>I\|kI1,F\"dS[`yhS^3TVjki+t{67:rvF-XcUSdeSXy!)zU onCV"gV£+UŠE)U$K\jF9K#*5T^$$`b/3C# #.{]kv-qwbsdL)@TMmuhR60aD" wY ̼h:pBsVjwU yY9X P{ g A,Is:N&$(iE:1o4FTmTa1841U`xՋs*c nE ]*ܺ$o%bhe *]0gQF]9ע/6 "Fil *r%<IJ\+ep؃µIѱ6vcT]³P$- p%i `.JF\1. l45 \PVc!oEwλ.$t$cUuX ߐs+ m%-x储 o>Oy]k΢-%EܞP)lQa-9KD hc9 y黽է!Qك\}o Szw[FtޮCPr9łV ua^xWĠǾ+7%' 4X ؋~UnJ'؂h~$o=L`n.X+,JZhDXv 4ttJ,}Voj)oOQb7fTR&ɝ {X gv@`׭7Y%?&Ả0l5-ȗ'7-xe5.{e  lT_;vyE>C2&,AG.J!V^5Z|miӊ)Hb@:IL'v'$SKS S 5Źo^X?&OI:|Z(5 @2h/)Vrvzg;)ĚKMB+ kH&č*m;Umhg,ˆiN6e%RYUdIˍXN/3&)Mvc"Qp$* ĶnWG*A)BԓyX/E25bROTz&*//uUdNo2; 帇(N҂er,$uIuWJ|B4(q& 0 .bwѱĴgI|lωHS;BXxOF!o`|TOQݮryABܺ1-KRY`h3)Y +6 |2} oQ H?M3h4pR`+6N7I^7 2`ˢҚbeۢ8~jRuIUC9;iXN$Q[2D{~Iq64) URBaƐEMYs@:$u$8:tD51UII7 O`qVO$sٯfAI [ {.l5Up:'blKuǚgMMXaD gqзFAJ2*p֞m/cnAhvM2llTcE106&Yy3}75߾=A5I3ȡsMٍKhª"b+% 6BOnq_\քy2MҥEhDBn:$,ȰJ/#d]p"K7"E0X&iʢ(e":$ ݢ\ ӻU?)+c 3yX%!ddbYgB/)U f_ iR,t'Y|:HXi"6h&Nb@n$]T'bԨaL2`@$,m)LRh2SL_WD@؀h&sh*+-Ra[nӅ,~rFTvZ,T 1] !ci&Х7zb(z# c$;$=ҊXx {EjiL2'^I4[ Yй0yЏQ-sRr%.h_) 4X&$玽DIHp )7IiF^y%wkmvwUepzE*!_St(5̆;}0!.dzy T$Te@ bK)l'$;Et~If`_46j$J*,bNdڥ>/A\$vxTuG痤[<%v%q/JVM;9k,Gz*A.IE:]ۭf2kRvUwK;1p,H`CI E$y,f4^X!"+h:{5IN/IG~@-e+/ӈjѺ+ءȟ,Vj37633DYFJWTMK| EN?_ ~-,o˒0uTfy$3 h"H&۷$գHRbc0F1y-4!IX*;9& 7% G⏏@lk.{#pf8 P}3&e:(<02A4`gda-Rh&80aK^ 4i(@8EcU*@wM|e)!tQU0(&+lPLbi՗ D NTMՍb.^h>Ɵ,~:N1I)g5 |{b 4%Io)QŬ7TcFiu8;FLE?bsU圲=;ƃaQK)A30>y&U+U1d>Dm)*xdMW$SFU:& ߆ ˉr葷MX8[]"D O͐4t$RNa &pdD{#48BsҌ4[ {|.|:?Yd;7Hɢu= |˃gR Bݟ<;AS*0x&Yt-<ďo5<8 5Y=R0X.WJ<, 1*N3IsMIO.dW  H`?\fSq4Ii86NI_eo~ӎ3jL2eZgo3Iȑ^ ^4k$SzM$x2HJvmDj,g:.wf9+7|bB.W!${s81I69&baճ-V_z+U-1IQݱH I:KP_"\ U5PS:;F+׋ G3W=ގﵱq]oĢLXtxlK+3F붝Z4v8 !H+ۧ_{b~: G{Ug9I&,1xؤLY% y˪e'd>WmIb[yjdNb[pH'F tIJDR#?dnK+Zxڳ}K>^8fXxѡީG`/7rf6N2I'iÄ'3X&V^;$ YڢLҁ! IJ\ë!ȶ!ފc=Y&1!"66- J[ѱҬLubDdRI>D :PI*5l{}$= "Z~&W)A2Ie5$ *eL2 [Bpp4:aFMK'rDD˖$UtĄ8s\AFd.A0}=hsĿ;F&;$}#~*x$}#vrLeuK=!c.%Xؽꕃd2ufv$]0ChpL3iEIL #%FNc$}W|wIEӉۺEPl1s+% tyt(:eoщ+ M*~;D[hF1& HME vg$5KqߠoM9(&Fu) IrvaM*4,ZوblaVh1IDr͍8H啃crtbN2*:5x'~X̉ M+{]unzSL2p+tʤd1笘g?dsU1O ?chݏ"૰3LWHḿK]7&!z 1kSik͑Txz'd`m^l.b#wU)F0IUŅ` meO.6t7>ɒY~kkgp6op_RemuvIKҧw2Mlt[\6kLxiLĹݹc7`KHH$=dZKIfX]IxzM\vIX&!jmeo"\H2%U^OLvI68 "EY)b@Fn%RWm$[T$f=ylgq:$up "@ FGIŶ&q=2qK:F0"BACj nݪ'`s:$d^}f؈Ŋ Grh.9 t[ z!*܉?&]NMrKҥc$CN~I鄓Ea?Jc{K Vl d jYV7b]5N5C׸``*4$w0_RL痔18DP_b!f3LfL~IJ#A1_RE v:$뛫)&ȑ I\ょV?KHnr &j0ɜ'$$]y1[Ty~`dZ 첳b :he)+GPon,(/PW־T,Qf-8X4s_8T\ 6Eo/6l󢥗mBsQ:ip!hԜQLQր'NfBP#g~IKYMdT@L~I)TBN~Ix 1%Yr$E8_~IA~IfUd&o28)aGQ*8%@K. 
Ty~;мL~I:PLtj痤:5]hy=XN #"^h^Ėy~,*/I5,H/Y* \'(&Hj+ W;@vLҫ؝` ;4eӃ].)` eVO1r/x;4\REhK2Z,0訪̒U8XR'GĒ,LvA,"sEJRBIFc:m`deU-OU u*ɐ: ﬒TYy;z*IWMX% y VI6k1IAb VI  64:tI(hV<̃j$LАx^"UM>Ui/+V̜\B-裸T, KS"L΍nJ2I/A*)`JMEb0Ar0F_ TQtTʴwJRNyEF%VXimTǾp^N*_Ķ!`%ڃSɪapJRi6˖vEi% 8jM/wcȦfgNdzMwdkkJ2I*5?E6QA\'u^vaa_Q`)=2dPwXcMeAQvAWG w*A,ɔGQu5bIfT> d($()+: &$aNtfX'd-K4;މ%#'$CeJ cU:E:N!McA켒UQ元HQ 4x% yUvjA+I';U鴒 ΢*&OS)N+ɌnJ2 qԠ8QP8A>(%urBXSrNbSھ̷:$5uI"KI]AԻ RIݘ9jT /hr>Uceebpk*SxSr#J20,Ī ih8!'>JC 2H%G$̪A*I¤]aJ2GvIM 8oo|ӪV pwI~کn_o?ݘA@Iyl ֙מ?XsrOnw%r޾.`Jr/?pݽ0GZ>}mlj==>x<+MO8=}/vz\uyO>ޡ7fOO7mȧחǓB]lIBe\V[zϯG~%\>=>|3i7m 2v/03w`ӧ珟^?~'.?xQԵwWlO=s׏?~ݼ-0#_ f{hcpg.8ClPoι] 8#!Zg =={sb b}>z Iqc\(oIŤdY,~dl~}釴`-x,v$4 ܁^H(Ò;<|Y${fP2n /e5c&@^z0.u;yc'y33Buz6CcaazJa,徟(GOTy\1 5ʕ;T^*Q|G2Bx'ݮKAl3uO_H7`5A3z͸/˸61Zwk?y \.}\_^!_S\>?kFa='_ufC:p'@N#?w{ 1iw+ 4 pzI%SmxqmA`~Q!Wazp_?L;e"ǶK[U;@FxJhn ?p-z7,.xF П%p5< DeAn&վc_Y?Oہ F፰`s-5̓`^3hB13XjNCw]w՟I^^i!Xl>p7$gdʸ\maa*pὸ7 `po@,.#8U,AMm*(Iq-DSO1`H,X^م+Ǵp>ӑxף6~&^>h{qѣcVm{[JorBͧ_M9^Lkh6MOD;KąbD/& g3 !6n&rxef> stream xumL[eǟK{ "nnQ ɲddƒeTx)*e[06D^)မe8h(`2MD7bp#c2oA<_?? цab,tsT'~|“! k kY:0hݘ"-rW>MH%TF &j]%ed,aΰǚz  2hL4kV %>Žh`hm#y%N(ǥ7\G{Ʈx&!vuW@>0-Hn=ɘs|O/h\.]6(Faȵﮢ쳭z'Ǐ|M,ѭYIX,WeF銕pԎsG9m&YEnz?o݋DwSfiD*|Ύ8f&PQUܾ֎EZhbS_HsP11{YBB۟M6&oSܔ|3ខ8ZYD;5 O5xb\C\`rᪧW9Pb0bx({&5_a?VbpZny2kAO13v~]'THJiЂ<d}rw6-ZmWueqzwf1Xx1VNW9 jhlt.M(xʪqYr^sNʣcjVWrC.kt_E{Zb4|C5Kj, ^+rg  76drP%_SΠ $%kj?w]hPah1'f_մ uULE 4uHpxJݣujh35 BƳJV7 r>i2!  `076Cendstream endobj 132 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 5005 >> stream xXy\Wb$~ *kmk.6.uq)uW\ #;$dy$l$Ȏ ֺZkk[i3ә;'N^ ?Hsy}>x֭ߜ'e3$%D x`4F(6f')RHxm_f֚=k#Gl ߜekRYs2e*EM6RS! &j FmQ;tjzISQljKQSdj JQCM4vj8=an' wO`1nt?8'x"e1ucۓKX(~*˜ݧWbՠs}7>r0er-. hZ"P)R|vP"TV@uEpnhQ7m,j:NӃJҪ(  l}B-lQ1chG7g!%uzf8y3 |'y*4*V6uڊR՜A,}{s7JXűLQrndY 2idתvATUF-Pj)Wl7gtjNa U/ЇwQ(N.lzwSh% ?o9Q}3wJeU}+Jј?!1ŗ"))ݍJ |%PɡOhF|%{߾,\Dv3!)c]m3//~c$0M]C~hܘyO*-v ٯ=&f<$f$m^zq"96FYmnj6۸3".!-<nx B(AH尠?\$WGkE cнkV5Dnx&kG ' <4 MAS9<b 3"wIvbdix Z7'IãRpq)k6fY;|39ƶ,+=]syE^A:C/b>F/U'B1-)u9]>{čܼ޹|o'CA _y3Ԝy\Ug\)-HWȂ5 4 Zmd@i>F0 mhylޫ B%n9`/Ι>u:ӊ|(ו8>J}F-4qgP| [,M5Y]_{Vc,&i *P_dj9-WMJWĴ/dm2'pP-)Lp6]r'{Qhy> )X!hlQK7 f47q~wUZ-t)%'72dD\pKwkU*aN! r?l_]8}n6KɊK:}MM^Hk帷84EanjJnËp󆫜XxrnYm6u>Y8KN_)O67׳m nG<3y+{5;;)U{Ԑ.ylT ڢPTAK7oHoz0lmpeC+OG]~/$̖^Vj,af_(;sZwg4VN̔&[nH$]8)?2+41@zm7&JjH#5܌sl4i`x}m\mxቐi`IpkIUWA 5N;`OWL8E?*_]{ U Z6Pbe"ИGxkusm ^׿]sqkwdC*4JR`>)/ϧ)|oX5C8^ ͅn<5C3ꪯIhYuPEUFEv2x1u+%+-rĐ_ 㾠躥bRpzEL(4D.K]\7DRM`)5YZ 8T"1Z i7Px--,/g(?{檙<ΤqQDm eel ^lwЯ;ӛuhoJVdc-= o|i, ;86Je3ޖ{T]G+z2j*"ÔA)띝":>m[z꓿VKC|{oxs"&x:9 /m853Ÿ귋kr6Ȥ67Mf̮rTL%Hi .Eh6<^?G : s;7@@HAKAC{>&7aI;qcĪlFaO괎TȚ֪4Jz4'ZL;5w:FK oVl+`bNO <^6[p-`.-妲\ޅ <4sO]w'A _3{pKttS~TwbiUI{1 O鿧hRiiAumYBˆ2iiNhi9tI?JSNtCVydD-.T[NV7 g3Z8pMjzޣo8^.am3ĤĬ--v$YMhZ,]PeZ$B_.jI"c1un?ƙNe8dE]g۲ 9KSgl4؈ي@Y$ǐ&3Ҩ2uz]-ֲm NMzACL2 (ғ40 oTA2- V鬮#蹦Z*B)"c"EED[#'NwNO  jH5@9M%-G ΢o*'Aiː p= IPf{RAo#O8 cv46Um@tCw`[w6 ۽;ZaބrvCgv)R4E\TtÀ7 ?y㽟I*7ɀj&շ) r~͟.1 l#ꆌE僸4lrt#!ˢsil 5?拂װw/~I.B J:a4ĞGo]$m asB>E&>Qz#p( $eqmT1؇B8y=|QTHO@:0 x *`82.1NQmj|W@IXD2rIC h|Lw1b.,W: 0yAz ;m5+lG(Љ5ݾ]:nvӞvY0n>4ef]3;݉5$rтpMGFZmYK@ Eh/.bh4b%x"&{ (j(I[JaM%(SbP(36]BИ#i L a@P#zb&G;IgДӐsguMчۆeO9Þ|OJ9cTTy 2p*ȇbaZb]tdez݃_YꑤX|ȂBD,,7(3^> stream xuLMSans10-Regularu`  RS434T vZҋ#J2PwDKP $$PD+uQlu\j/Q{mo߻jlTTB4)(`aa^}vrzj`ITI0b17|:T}ʋv?> stream xcd`ab`dd74 JM/I, Jtw?ew+;7  KKR3sRJ YDzʅ>?+. 
+tstUUupOSܞcySمzpgr| ~8On; r\,!<<\<gO 't10Osendstream endobj 135 0 obj << /Filter /FlateDecode /Length 3772 >> stream xZYo~o0A/hhE6 ;@;zY\HK/GԒCyHUu7C.;M~?$=9ywxW$EZӋ aaixrɾ;lijMfXWS<-XJ) v.1$M3X[Emu(r(b?G.\4l~uD!pŋ ӛaᒭRt+ub W]OsͶ..6[odܤPIaI$ ɾE"[/^xAL*'lnK>zI{мL/ rq/bn1uj2V}[E!3-mE 컾-I(BApi(B 7G/U}UUFe_5p\sr(po~s.Da~im IB4cGY);9[Hr(i+ul0|r͢P&jwpBj+ǩR+1壕XV5۱.$M<]Xq7HGŰrEfބrNByJpWn(nz,b,CD B0w䮃~,cੲ'dXFwAȽU"V;6E#(Az1O2B$EXMO'gBEb{F6DC&rVy^FojQ1Z&XےsFe%?9i\EQ=窈=콐ZIe; pit[YYdz'jāt!3>)l0Ƅє%c$/j'- 5+.B5cuxmJT$ &5M|Qb,yaGp#L}q)ǒq@!z@$kh"+t-oS%q"8/C@:yf C86iބ%߄+lӘ@Хهkn]OLglV G6{^2 7:WeqMBƍk_|A>Iw8w '꿶םoIv|9]s]0gh}tX)r'p9֘ZeN9˲:ȳM9Gtbpnя{7tuqNMTP,xH[C:@!?2hMuBk*f_ۃ8A.;qS[w!-E54 2tJdSdzlquA U RW KA!"AȜDL *D( _H7z,%/#g3j$:ʓPq$N?l")e{P[}g UUiNEkIeB5PYtj]{<}d!}Swzm]ňdlY#x ޖ!q[AUajQ:ȬOaj;qkd m>p3bC[]-﷎UdahG_'C~d|2EmbxhB;4ӉHKu/4idZ>6vdzY<EGp=s'j6!_}VqvZQqAג,z뛛7x3ޱ1=6"j[a:]$_SZmaf-?H̑K1 =O ,I'OTW]IC܏kLJU}9~lJC= RSXBKP4Yvi.d3o' .)N3pʑUZDƇzȴӱ||S,cv2=,_cpjBXK$ٷ=y_Z*Erلr v9 : AA#D2P6L`j^{QM!D(j`u95ڬu"m)ZT-FIfJOd}Z^#r!D˶O |@?Ե P%j~ #ˢ?p9z$X,SIN[B rBֲ/OH5}=(h ;K#Ć1~w66AfWEG(0,Nr^*x (! Ѿگ֬}$ 89)|(N'2#䴇wcg?tY0 ܈v<&H)Q8[뺼vv6vp|:{u>vݶǑp´ۜC%q>o-^ psED[3~jX5=PPl+&< nWMJЮD͙<_JW%-}[Cmcp9+0b,+$_@ۓٺc^GRVGv'ҕko@s |Li'O<0iendstream endobj 136 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 332 >> stream xcd`ab`dd M̳ JM/I, Jt78Ӌe}BgO` POfFFGgkи>Bߙ:~{_  ewIK~8[k~k~aa2HgRߜO+w 3|<&?O5˶gZ=uMvwְ{w^~?qlpv\lk6f%g{\}\˺Wto'f%ĪlYN^:KH>g7W70wn!͒> stream xZKorT9a GCl' 7:@iFgDvW>琪~$g8d_=?-.rogbwwa'/; ]\ޞW肱"c]h3pvE>.Wy˂ ]W-'47\8YN{#F@""/^PE*ỉm% [pAN^ژzT13M x9#zyJXqJ+R_[&\6,y23n(^A "Q~BA%4I/RA%oVLll*J{@[hC@ZLڗ}7~)HBZ$CaGSFlH0Z GQr ֲ#3XKȑ#n`7:/@:wH0VĚߧ+.Kk M5U aS5y(( GO < TA ^3T6cRF׶#((ʊt=H:At.ט?FLMm9ߚ8>vf5BA7U/[.u訴 ;,W"n 7`59oyr C,\oQLPo{ysr?myIw828m@Ez&^?|Y?4z'gﺦqerENظ >{ K^ 8<,z J 3NdD4z#O"|NZy~%A z6p{]΍*=I1mNH8ͳvV[Irhqu2XrSs%gK8[ o&'r"<906iɬ?1~Lm_{r%M!\)Sۄ_fVxV! 5Cux,Iaw\֠wT8qBǚ)pZ*0ϟIaglU"ķ J'Ik qU;;zWGGp,nrTn&Bs>>ŪǧG7ؤy?ntC^6V6B;'$nRJ>36tFc;i=׿v< Ӌ7*Y 7/+oMnFSH7aVNbn2Lą!Pnk#JQ8ׂo'JԊ; /S ^%}'GiVx1.Bzj8}r\ݹ}>v3n ɓJq8>~N`);9|b?CJs=>LQ~:/>=mpUQcSa#FC)M[2L[Uq;N3`\G#? w"Z87GN'NQhd19C@Tq^̩ڙma.t&cףW:hvj$ ۑv_^8T8 c&vúc5}O9[/+vv>,N'揟>&Ct?ĔL,v3ցbؿ0?q|0;9%M1 Juz@>/O>)6#cln_6G<<_G?9gBkLo˛n֐[y\hTB ^]lM7n29C0A ٲ`ڷsu鹕 s%g[WHIҌ+*{iMmoqirՏ:8ȸ9~$ߝ1wa.}וFdLƟ4U3*s0r|}lª<: їBI~@2rl8ynlq@;Ry8Kr*,sܷLǑ^I%!(ȿ+r(($ @8mOxSv>\l ߝ,mpendstream endobj 138 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 270 >> stream xcd`ab`dd M̳uIHtw#e\G 0cC$##ˌk3u0tb?b]~.]|%vuw7uvgqO-ofu~E]Vmhhl햬 g[շ{n:8WtMhU;o>p}3tς૝#`Sg-ýo'gt10^Xjendstream endobj 139 0 obj << /Filter /FlateDecode /Length 163 >> stream x]O0 ЅjqP(_Ct;,ֳ ":ր#MEY' ݕ`,S(endstream endobj 140 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 193 >> stream xcd`ab`ddM,M)6 JM/I,IItw0k7s7B7_ɱe2000003012X~_1w/PSڜEyY%e-ruwtwK֮n2e”I}r~M7{$_~ -ߴm?~n9.p}nnzzfL02O7CWendstream endobj 141 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 345 >> stream xNLMMathItalic6-RegularK  kL陫$y}zgo?Xe4>‹h|ovkkZLP[c]W^lqf} wz~s|{ {r:Ά{P|Ie&vCp  To Qendstream endobj 142 0 obj << /Filter /FlateDecode /Length 8127 >> stream x\M]q7ddq;=_~A | ^V#/RkInۑ9E1YEa/w_W^U?y_ʩ]J*՛W[~[ e[ʩQWl?xvt{:pc~vOSn[Dy٧׾v~G ngm5ϧǧwkG۶z<~/?^ꝟsAm?;p=fx՛[4Ċ?Dp:Fv`x햾f1b,iw m{= ;[])a:7sۻ?G/_*{)pG8ʱMW=b`+jvr,G8R;5(X]yi{0JOvBeMhc?ݫ_%SQRquLI)?Yf5RXj4҂ re'GE( ^qхq UJѝBquB ϐQ(:2DgҨ]cgQ.aNH9ZpT҇D х jRS}PFka;)w'_FrbJe1"`eFaU- j^VcH[-p. 
l/YKA(ߘغoY#דK V>a:aZp>Räбat}26d" ww(5PT;6n6/ b0[Yg}`YB/k+N &*Ra:a0!jpQ#M K2-{[Bـ{d0 ,'C@bR=r\K 7p1 Y-Œ,AA@6ִ2 5g ,c;[A@ǎ"=R.!c+X@qGJXt}%Fqd~S5k,Kb`eF6NwJ7r_IK Z8C3:E}P6htUi,FblyimNC1Ҿ||~ MȠqd)j&+bJwւtc:}b08tlXqP:c+&Xtb56@2)$+05J} G5)I[ 59yj~ޯ8*8"r%cZ^g4K=~'`EL`;U?;rWk { шKO#_(5$ Bu^f5/5'u֕"P-#H1WMkL:&p_a|sP:dR3&ʘde[WYj"a`eB?Z |>zԥFrp-?8x:B3/Qv͊^}wbJ4>em Fl@]sal):&M{?)}[0\=dzR#2Ql{d0 -+kQz?{eSܤLQpdh%J!NA[z'< z&'>jd `e $-PRxG/wQQK9Z8CdžZk:qPdX~Д§Ӂ2-I,|ԨzV-XY QF;-&G/TQäбaqDI_sr뵌~P-j8n6`E8N{ZZߓ-e$xw8N>^ x^L|5B0ZA89SR]rיHk ){ 㐱9Eގ~R'*`$,[L̨` XQ{ Q.*h`2r^nK Z8BGSqY+Ba!jzD ?[+{\ͳQ|,7Ů7*x 4 yʘH>霰 2 8^f83YVEi63KZZ~aZȠNSQLyxN7?ՙݹMLogH|z^ϔ?- 2&CXiI-P*r,38O1" ӊ_i(#NTĠ(#Ý {eTFU+듢Ib”*hp2NrVvm8X K%Q12"| +vinu('-a:k"Dle Jܞ0)` 2bME9]{gbSp&Lp&&M:L7 3Q.Sŝl&w(/䙒sNN"nN'ׅ)™y$:rkVWv: ih\gϑlP/e=166Osk@BPɈj' [.? ޟT}N5L, z?Md JM~3^/20,LSX3{uv"bC;pBWY[Ł3r̪!. 9 CD= DJ{NNPK}5 1KgHKn^|0E9B+UeɇheP[Gj^rmgE׽6x.ͫW, LMLrxƱ t!vp™)[b&TyCUwePcHynh|WUtPWH 3iB%E IQF0 .~P-ʘ[!8q4.[<ރk S\<_( +#=gyr4.>0& K_1\#ip A9,eI'nLr*g&.՝~ &.Kf$v!iE|(* f@H9 '8d$rm m 3ҳotrV9T,z-h9U=QW,⺁vruC=4q ۱)zfR7)Iƭ 0LP|L ]n[9ʩ4.8^tTKޗ'N45>Y[WK>XB g^}wkʦ7"e2/KR(%dVL((C`i5PJ495ݥ0pk183gio;3ιa:H.:{4!(b0*6;U˅^Q&c#g):!,L\g$Tl–l8ɤK|O#LGFd.2Pp$#xN}癍(gI:1w2q%s1g,zqETqЫEɨ8:eӳanCo0zWurDG[ *gPF@̺yy>ʼVdF(.׊ RF5)uG#(VNؕ&M@ɘp>Crl}B+grj0GlVfCSHԊ='22K] r+:"f QdŋQr@zqx6{gIN|s."Ua$VW"ur(/݄[ 0P+2^òYj^4F+0' 'J㊞#5%/`9&(X3yN%'̋yng6E9s1 OxM]1prVM_~P{絘%o^wJ]2ʡּm:P0ZZ>VDp^s6(SyDdmqR Vv, Hkfj|eg΅9 *?Zw"+鈊WBW_r,Psr\Qr?`F;{Byj)O`ĎWӑÿf샋 }x1 A/]Wj<]N-BʹHv6/1W&b6iߚe#4!%74} "Wǵ@z L%IJ, ) W=t<.>C#M7u!f\IMp 3J>I/rW#xKUIZij`X4JʒrT%3Yύ-D$nYՓ!'ݗGҜ票jK!}aΰ&E9n„t?份K[gȞe0\ZROIG x(.ŹBJ\Y\*boEFJo+) %g.^msΌiM:=ʮ6;*z<%>7=J=B;(Q r]D#gؽ.g$ީlPwǁ¦7dA[8(7wɎӭ%crkSdPvmͫ'GRe ~Mjl&ٕ7D;2緾N3BVl@%N*3GS `id-iZzAlQ̫E~ql17]63]l+lQ(7l5 w --f z~lbz Pl>j-ꫴ/ro}x%=r0Y7kUN&[PR;pW ͆dX7Hvn 䲡4Pt~DžO_ |D⑘OnM(A)KO{3riuF;6c9mPΗ檱X4(=jXfҘbG28]3 X6 #/K"oỊFG#?o&>ch$4 D%)dgAO"yƠd6꼤btAb,[]=^wAoK%ŜB1O ⍴?kWӣ7ɑ5yX'ͼN4xi3%=} ,iq9@ѾFGK#%@ȟ}2-{~]OIK-iKb-rxC?P55o{ AO;:tq׼#vs^_gxNM그}w`L<]zKr*oPgKu=RwCezAF'/QW.Zv6mDz}B]"v'71+/y3ƃc ZPX _zxJ_Kxl<;~ RFq?vxwP<[wԚقP=p8?`X) {`9@IxD#f>߰{zp>gewO>n{{z>xN=dݧ[xstY|G`p-|7D/>Ix{|z7eڻW퇛_?<vz(Q|,/dMnStH/n޿ptzfpx4 B$lisCzߣ>lݧoOo|wI^ykH5v_;Of>F*mW;/"*Bk@"F GZcOn?B1K6w^b9J i%58̟'6ڹr ut.۹v!Skȓ"cd/4pB-b8ğ^W}ǖ1Wl: -h& ,q;ӣ֣F.7m>Rж/so{{6퇼[/ޜY~6q=쯇ǁ>Bbx8@}p1\9tΒxl>Khz#C5f?yq$|)K^g0}xYMaB2{z9 JS;f|A1 1^F_et8iqpD:t%Ba~Z>`K}f5jHRvD*G[ ,4O> stream xU]o0}Ϗ@yt5c{O[6 &B۵eK\@^~$N:!>${=Ӈ`럓UDyk?&xGo//XM:XX 5q%I~+J>qyLhTY2cJtV0vST)T{)YĄMp.UB. 
flexmix/inst/doc/mixture-regressions.Rnw0000644000176200001440000022017614404637307020214 0ustar liggesusers%
% Copyright (C) 2008 Bettina Gruen and Friedrich Leisch
% $Id: mixture-regressions.Rnw $
%
\documentclass[nojss]{jss}
\usepackage{amsfonts}
\title{FlexMix Version 2: Finite Mixtures with Concomitant Variables and Varying and Constant Parameters}
\Plaintitle{FlexMix Version 2: Finite Mixtures with Concomitant Variables and Varying and Constant Parameters}
\Shorttitle{FlexMix Version 2}
\author{Bettina Gr{\"u}n\\ Wirtschaftsuniversit{\"a}t Wien \And Friedrich Leisch\\ Universit\"at f\"ur Bodenkultur Wien}
\Plainauthor{Bettina Gr{\"u}n, Friedrich Leisch}
\Address{
Bettina Gr\"un\\
Institute for Statistics and Mathematics\\
Wirtschaftsuniversit{\"a}t Wien\\
Welthandelsplatz 1\\
1020 Wien, Austria\\
E-mail: \email{Bettina.Gruen@R-project.org}\\
Friedrich Leisch\\
Institut f\"ur Angewandte Statistik und EDV\\
Universit\"at f\"ur Bodenkultur Wien\\
Peter Jordan Stra\ss{}e 82\\
1190 Wien, Austria\\
E-mail: \email{Friedrich.Leisch@boku.ac.at}
}
\Abstract{
This article is a (slightly) modified version of \cite{mixtures:Gruen+Leisch:2008a}, published in the \emph{Journal of Statistical Software}.
\pkg{flexmix} provides infrastructure for flexible fitting of finite mixture models in \proglang{R} using the expectation-maximization (EM) algorithm or one of its variants. The functionality of the package was enhanced. Now concomitant variable models as well as varying and constant parameters for the component specific generalized linear regression models can be fitted. The application of the package is demonstrated on several examples, the implementation described and examples given to illustrate how new drivers for the component specific models and the concomitant variable models can be defined.
} \Keywords{\proglang{R}, finite mixture models, generalized linear models, concomitant variables} \Plainkeywords{R, finite mixture models, generalized linear models, concomitant variables} \usepackage{amsmath, listings} \def\argmax{\mathop{\rm arg\,max}} %% \usepackage{Sweave} prevent automatic inclusion \SweaveOpts{width=9, height=4.5, eps=FALSE, keep.source=TRUE} <>= options(width=60, prompt = "R> ", continue = "+ ", useFancyQuotes = FALSE) library("graphics") library("stats") library("flexmix") library("lattice") ltheme <- canonical.theme("postscript", FALSE) lattice.options(default.theme=ltheme) data("NPreg", package = "flexmix") data("dmft", package = "flexmix") source("myConcomitant.R") @ %%\VignetteIndexEntry{FlexMix Version 2: Finite Mixtures with Concomitant Variables and Varying and Constant Parameters} %%\VignetteDepends{flexmix} %%\VignetteKeywords{R, finite mixture models, model based clustering, latent class regression} %%\VignettePackage{flexmix} \begin{document} %%----------------------------------------------------------------------- %%----------------------------------------------------------------------- \section{Introduction}\label{sec:introduction} Finite mixture models are a popular technique for modelling unobserved heterogeneity or to approximate general distribution functions in a semi-parametric way. They are used in a lot of different areas such as astronomy, biology, economics, marketing or medicine. An overview on mixture models is given in \cite{mixtures:Everitt+Hand:1981}, \cite{mixtures:Titterington+Smith+Makov:1985}, \cite{mixtures:McLachlan+Basford:1988}, \cite{mixtures:Boehning:1999}, \cite{mixtures:McLachlan+Peel:2000} and \cite{mixtures:Fruehwirth-Schnatter:2006}. Version 1 of \proglang{R} package \pkg{flexmix} was introduced in \cite{mixtures:Leisch:2004}. The main design principles of the package are extensibility and fast prototyping for new types of mixture models. It uses \proglang{S}4 classes and methods \citep{mixtures:Chambers:1998} as implemented in the \proglang{R} package \pkg{methods} and exploits advanced features of \proglang{R} such as lexical scoping \citep{mixtures:Gentleman+Ihaka:2000}. The package implements a framework for maximum likelihood estimation with the expectation-maximization (EM) algorithm \citep{mixtures:Dempster+Laird+Rubin:1977}. The main focus is on finite mixtures of regression models and it allows for multiple independent responses and repeated measurements. The EM algorithm can be controlled through arguments such as the maximum number of iterations or a minimum improvement in the likelihood to continue. Newly introduced features in the current package version are concomitant variable models \citep{mixtures:Dayton+Macready:1988} and varying and constant parameters in the component specific regressions. Varying parameters follow a finite mixture, i.e., several groups exist in the population which have different parameters. Constant parameters are fixed for the whole population. This model is similar to mixed-effects models \citep{mixtures:Pinheiro+Bates:2000}. The main difference is that in this application the distribution of the varying parameters is unknown and has to be estimated. Thus the model is actually closer to the varying-coefficients modelling framework \citep{mixtures:Hastie+Tibshirani:1993}, using convex combinations of discrete points as functional form for the varying coefficients. 
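To give a first impression of how these different kinds of component specific parameters are requested from the package, a minimal sketch of suitable model drivers is given below. The drivers \code{FLXMRglm()} and \code{FLXMRglmfix()} as well as the arguments \code{fixed} and \code{nested} are described in detail in the following sections; the response \code{y} and the covariates \code{x} and \code{z} are only placeholders and do not refer to any dataset used in this paper.
<<eval=false>>=
## Sketch with placeholder variables y, x and z (not evaluated here).
## All regression coefficients vary over the components:
Model_varying <- FLXMRglm(y ~ x + z)
## The coefficient of z is constant over all components:
Model_constant <- FLXMRglmfix(y ~ x, fixed = ~ z)
## Nested varying parameters: the first group of two components shares an
## additional coefficient for z, the remaining component has no z term:
Model_nested <- FLXMRglmfix(y ~ x,
  nested = list(k = c(2, 1), formula = c(~ z, ~ 0)))
@
These drivers are passed to the fitting function \code{flexmix()} through its \code{model} argument.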
The extension to constant and varying parameters allows for example to fit varying intercept models as given in \cite{mixtures:Follmann+Lambert:1989} and \cite{mixtures:Aitkin:1999}. These models are frequently applied to account for overdispersion in the data where the components follow either a binomial or Poisson distribution. The model was also extended to include nested varying parameters, i.e.~this allows to have groups of components with the same parameters \citep{mixtures:Gruen+Leisch:2006, mixtures:Gruen:2006}. In Section~\ref{sec:model-spec-estim} the extended model class is presented together with the parameter estimation using the EM algorithm. In Section~\ref{sec:using-new-funct} examples are given to demonstrate how the new functionality can be used. An overview on the implementational details is given in Section~\ref{sec:implementation}. The new model drivers are presented and changes made to improve the flexibility of the software and to enable the implementation of the new features are discussed. Examples for writing new drivers for the component specific models and the concomitant variable models are given in Section~\ref{sec:writing-your-own}. This paper gives a short overview on finite mixtures and the package in order to be self-contained. A more detailed introduction to finite mixtures and the package \pkg{flexmix} can be found in \cite{mixtures:Leisch:2004}. All computations and graphics in this paper have been done with \pkg{flexmix} version \Sexpr{packageDescription("flexmix",fields="Version")} and \proglang{R} version \Sexpr{getRversion()} using Sweave \citep{mixtures:Leisch:2002}. The newest release version of \pkg{flexmix} is always available from the Comprehensive \proglang{R} Archive Network at \url{http://CRAN.R-project.org/package=flexmix}. An up-to-date version of this paper is contained in the package as a vignette, giving full access to the \proglang{R} code behind all examples shown below. See \code{help("vignette")} or \cite{mixtures:Leisch:2003} for details on handling package vignettes. %%----------------------------------------------------------------------- %%----------------------------------------------------------------------- \section{Model specification and estimation}\label{sec:model-spec-estim} A general model class of finite mixtures of regression models is considered in the following. The mixture is assumed to consist of $K$ components where each component follows a parametric distribution. Each component has a weight assigned which indicates the a-priori probability for an observation to come from this component and the mixture distribution is given by the weighted sum over the $K$ components. If the weights depend on further variables, these are referred to as concomitant variables. In marketing choice behaviour is often modelled in dependence of marketing mix variables such as price, promotion and display. Under the assumption that groups of respondents with different price, promotion and display elasticities exist mixtures of regressions are fitted to model consumer heterogeneity and segment the market. Socio-demographic variables such as age and gender have often been shown to be related to the different market segments even though they generally do not perform well when used to a-priori segment the market. The relationships between the behavioural and the socio-demographic variables is then modelled through concomitant variable models where the group sizes (i.e.~the weights of the mixture) depend on the socio-demographic variables. 
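Within the package such a concomitant variable model is requested through the \code{concomitant} argument of the fitting function \code{flexmix()} in combination with the concomitant variable model driver \code{FLXPmultinom()}, which implements the multinomial logit model introduced below; if the argument is omitted, constant component weights are used. The following call is only a hypothetical sketch with made-up variable and dataset names to illustrate this interface.
<<eval=false>>=
## Hypothetical sketch: the component weights of a mixture of regressions of
## choice on price depend on the concomitant variables age and gender.
m <- flexmix(choice ~ price, data = segmentation, k = 3,
  concomitant = FLXPmultinom(~ age + gender))
@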
The model class is given by \begin{align*} h(y|x, w, \psi) &= \sum_{k = 1}^K \pi_k(w, \alpha) f_k(y|x,\theta_{k})\\ &= \sum_{k = 1}^K \pi_k(w, \alpha) \prod_{d=1}^D f_{kd}(y_d|x_d,\theta_{kd}), \end{align*} where $\psi$ denotes the vector of all parameters for the mixture density $h()$ and is given by $(\alpha, (\theta_k)_{k=1,\ldots,K})$. $y$ denotes the response, $x$ the predictor and $w$ the concomitant variables. $f_k$ is the component specific density function. Multivariate variables $y$ are assumed to be dividable into $D$ subsets where the component densities are independent between the subsets, i.e.~the component density $f_k$ is given by a product over $D$ densities which are defined for the subset variables $y_d$ and $x_d$ for $d=1,\ldots,D$. The component specific parameters are given by $\theta_k = (\theta_{kd})_{d=1,\ldots,D}$. Under the assumption that $N$ observations are available the dimensions of the variables are given by $y = (y_d)_{d=1,\ldots,D} \in \mathbb{R}^{N \times \sum_{d=1}^D k_{yd}}$, $x = (x_d)_{d=1,\ldots,D} \in \mathbb{R}^{N \times \sum_{d=1}^D k_{xd}}$ for all $d = 1,\ldots,D$ and $w \in \mathbb{R}^{N \times k_w}$. In this notation $k_{yd}$ denotes the dimension of the $d^{\textrm{th}}$ response, $k_{xd}$ the dimension of the $d^{\textrm{th}}$ predictors and $k_w$ the dimension of the concomitant variables. For mixtures of GLMs each of the $d$ responses will in general be univariate, i.e.~multivariate responses will be conditionally independent given the segment memberships. For the component weights $\pi_k$ it holds $\forall w$ that \begin{equation}\label{eq:prior} \sum_{k=1}^K \pi_k(w,\alpha) = 1 \quad \textrm{and} \quad \pi_k(w, \alpha) > 0, \, \forall k, \end{equation} where $\alpha$ are the parameters of the concomitant variable model. For the moment focus is given to finite mixtures where the component specific densities are from the same parametric family, i.e.~$f_{kd} \equiv f_d$ for notational simplicity. If $f_d$ is from the exponential family of distributions and for each component a generalized linear model is fitted \citep[GLMs;][]{mixtures:McCullagh+Nelder:1989} these models are also called GLIMMIX models \citep{mixtures:Wedel+DeSarbo:1995}. In this case the component specific parameters are given by $\theta_{kd} = (\beta'_{kd}, \phi_{kd})$ where $\beta_{kd}$ are the regression coefficients and $\phi_{kd}$ is the dispersion parameter. The component specific parameters $\theta_{kd}$ are either restricted to be equal over all components, to vary between groups of components or to vary between all components. The varying between groups is referred to as varying parameters with one level of nesting. A disjoint partition $K_c$, $c = 1,\ldots,C$ of the set $\tilde{K} := \{1\ldots,K\}$ is defined for the regression coefficients. $C$ is the number of groups of the regression coefficients at the nesting level. The regression coefficients are accordingly split into three groups: \begin{align*} \beta_{kd} &= (\beta'_{1d}, \beta'_{2,c(k)d}, \beta'_{3,kd})', \end{align*} where $c(k) = \{c = 1,\ldots, C: k \in K_c\}$. Similar a disjoint partition $K_v$, $v = 1,\ldots,V$, of $\tilde{K}$ can be defined for the dispersion parameters if nested varying parameters are present. $V$ denotes the number of groups of the dispersion parameters at the nesting level. 
This gives: \begin{align*} \phi_{kd} &= \left\{\begin{array}{ll} \phi_{d} & \textrm{for constant parameters}\\ \phi_{kd} & \textrm{for varying parameters}\\ \phi_{v(k)d} & \textrm{for nested varying parameters} \end{array}\right. \end{align*} where $v(k) = \{v = 1,\ldots,V: k \in K_v\}$. The nesting structure of the component specific parameters is also described in \cite{mixtures:Gruen+Leisch:2006}. Different concomitant variable models are possible to determine the component weights \citep{mixtures:Dayton+Macready:1988}. The mapping function only has to fulfill condition \eqref{eq:prior}. In the following a multinomial logit model is assumed for the $\pi_k$ given by \begin{equation*} \pi_k(w,\alpha) = \frac{e^{w'\alpha_k}}{\sum_{u = 1}^K e^{w'\alpha_u}} \quad \forall k, \end{equation*} with $\alpha = (\alpha'_k)'_{k=1,\ldots,K}$ and $\alpha_1 \equiv 0$. %%------------------------------------------------------------------------- \subsection{Parameter estimation}\label{sec:estimation} The EM algorithm \citep{mixtures:Dempster+Laird+Rubin:1977} is the most common method for maximum likelihood estimation of finite mixture models where the number of components $K$ is fixed. The EM algorithm applies a missing data augmentation scheme. It is assumed that a latent variable $z_n \in \{0,1\}^K$ exists for each observation $n$ which indicates the component membership, i.e.~$z_{nk}$ equals 1 if observation $n$ comes from component $k$ and 0 otherwise. Furthermore it holds that $\sum_{k=1}^K z_{nk}=1$ for all $n$. In the EM algorithm these unobserved component memberships $z_{nk}$ of the observations are treated as missing values and the data is augmented by estimates of the component membership, i.e.~the estimated a-posteriori probabilities $\hat{p}_{nk}$. For a sample of $N$ observations $\{(y_1, x_1, w_1), \ldots, (y_N, x_N, w_N)\}$ the EM algorithm is given by: \begin{description} \item[E-step:] Given the current parameter estimates $\psi^{(i)}$ in the $i$-th iteration, replace the missing data $z_{nk}$ by the estimated a-posteriori probabilities \begin{align*} \hat{p}_{nk} & = \frac{\displaystyle \pi_k(w_n, \alpha^{(i)}) f(y_n| x_n, \theta_k^{(i)}) }{\displaystyle \sum_{u = 1}^K \pi_u(w_n, \alpha^{(i)}) f(y_n |x_n, \theta_u^{(i)}) }. \end{align*} \item[M-step:] Given the estimates for the a-posteriori probabilities $\hat{p}_{nk}$ (which are functions of $\psi^{(i)}$), obtain new estimates $\psi^{(i+1)}$ of the parameters by maximizing \begin{align*} Q(\psi^{(i+1)}|\psi^{(i)}) &= Q_1(\theta^{(i+1)} | \psi^{(i)}) + Q_2(\alpha^{(i+1)} | \psi^{(i)}), \end{align*} where \begin{align*} Q_1(\theta^{(i+1)} | \psi^{(i)}) &= \sum_{n = 1}^N \sum_{k = 1}^K \hat{p}_{nk} \log(f(y_n | x_n, \theta_k^{(i+1)})) \end{align*} and \begin{align*} Q_2(\alpha^{(i+1)}| \psi^{(i)}) &= \sum_{n = 1}^N \sum_{k = 1}^K \hat{p}_{nk} \log(\pi_k(w_n, \alpha^{(i+1)})). \end{align*} $Q_1$ and $Q_2$ can be maximized separately. The maximization of $Q_1$ gives new estimates $\theta^{(i+1)}$ and the maximization of $Q_2$ gives $\alpha^{(i+1)}$. $Q_1$ is maximized separately for each $d=1,\ldots,D$ using weighted ML estimation of GLMs and $Q_2$ using weighted ML estimation of multinomial logit models. \end{description} Different variants of the EM algorithm exist such as the stochastic EM \citep[SEM;][]{mixtures:Diebolt+Ip:1996} or the classification EM \citep[CEM;][]{mixtures:Celeux+Govaert:1992}. These two variants are also implemented in package \pkg{flexmix}. 
For both variants an additional step is made between the expectation and maximization steps. This step uses the estimated a-posteriori probabilities and assigns each observation to only one component, i.e.~classifies it into one component. For SEM this assignment is determined in a stochastic way while it is a deterministic assignment for CEM. For the SEM algorithm the additional step is given by: \begin{description} \item[S-step:] Given the a-posteriori probabilities draw \begin{align*} \hat{z}_n &\sim \textrm{Mult}((\hat{p}_{nk})_{k=1,\ldots,K}, 1) \end{align*} where $\textrm{Mult}(\theta, T)$ denotes the multinomial distribution with success probabilities $\theta$ and number of trials $T$. \end{description} Afterwards, the $\hat{z}_{nk}$ are used instead of the $\hat{p}_{nk}$ in the M-step. For the CEM the additional step is given by: \begin{description} \item[C-step:] Given the a-posteriori probabilities define \begin{align*} \hat{z}_{nk} &= \left\{\begin{array}{ll} 1&\textrm{if } k = \min\{ l : \hat{p}_{nl} \geq \hat{p}_{nk}\, \forall k=1,\ldots,K\}\\ 0&\textrm{otherwise}. \end{array}\right. \end{align*} \end{description} Please note that in this step the observation is assigned to the component with the smallest index if the same maximum a-posteriori probability is observed for several components. Both of these variants have been proposed to improve the performance of the EM algorithm, because the ordinary EM algorithm tends to converge rather slowly and only to a local optimum. The convergence behavior can be expected to be better for the CEM than ordinary EM algorithm, while SEM can escape convergence to a local optimum. However, the CEM algorithm does not give ML estimates because it maximizes the complete likelihood. For SEM good approximations of the ML estimator are obtained if the parameters where the maximum likelihood was encountered are used as estimates. Another possibility for determining parameter estimates from the SEM algorithm could be the mean after discarding a suitable number of burn-ins. An implementational advantage of both variants is that no weighted maximization is necessary in the M-step. It has been shown that the values of the likelihood are monotonically increased during the EM algorithm. On the one hand this ensures the convergence of the EM algorithm if the likelihood is bounded, but on the other hand only the detection of a local maximum can be guaranteed. Therefore, it is recommended to repeat the EM algorithm with different initializations and choose as final solution the one with the maximum likelihood. Different initialization strategies for the EM algorithm have been proposed, as its convergence to the optimal solution depends on the initialization \citep{mixtures:Biernacki+Celeux+Govaert:2003,mixtures:Karlis+Xekalaki:2003}. Proposed strategies are for example to first make several runs of the SEM or CEM algorithm with different random initializations and then start the EM at the best solution encountered. The component specific parameter estimates can be determined separately for each $d=1,\ldots,D$. For simplicity of presentation the following description assumes $D=1$. If all parameter estimates vary between the component distributions they can be determined separately for each component in the M-step. However, if also constant or nested varying parameters are specified, the component specific estimation problems are not independent from each other any more. 
Parameters have to be estimated which occur in several or all components and hence, the parameters of the different components have to be determined simultaneously for all components. The estimation problem for all component specific parameters is then obtained by replicating the vector of observations $y = (y_n)_{n=1,\ldots,N}$ $K$ times and defining the covariate matrix $X = (X_{\textrm{constant}}, X_{\textrm{nested}}, X_{\textrm{varying}})$ by
\begin{align*}
&X_{\textrm{constant}} = \mathbf{1}_K \otimes (x'_{1,n})_{n=1,\ldots,N}\\
&X_{\textrm{nested}} = \mathbf{J} \odot (x'_{2,n})_{n=1,\ldots,N}\\
&X_{\textrm{varying}} = \mathbf{I}_K \otimes (x'_{3,n})_{n=1,\ldots,N},
\end{align*}
where $\mathbf{1}_K$ is a vector of 1s of length $K$, $\mathbf{J}$ is the incidence matrix for each component $k=1,\ldots,K$ and each nesting group $c \in C$ and hence is of dimension $K \times |C|$, and $\mathbf{I}_K$ is the identity matrix of dimension $K \times K$. $\otimes$ denotes the Kronecker product and $\odot$ the Khatri-Rao product (i.e., the column-wise Kronecker product). $x_{m,n}$ are the covariates of the corresponding coefficients $\beta_{m,.}$ for $m=1,2,3$. Please note that the weights used for the estimation are the a-posteriori probabilities which are stacked for all components, i.e.~a vector of length $N K$ is obtained. Due to the replication of data in the case of constant or nested varying parameters, the amount of memory needed for fitting the mixture model to large datasets is substantially increased and it might be easier to fit only varying coefficients to these datasets. To overcome this problem, the implementation of special data structures could be considered in order to avoid storing the same data multiple times for large datasets. Before each M-step the average component sizes (over the given data points) are checked and components which are smaller than a given (relative) minimum size are omitted in order to avoid components which are too small and for which fitting problems might arise. This strategy has already been recommended for the SEM algorithm \citep{mixtures:Celeux+Diebolt:1988} because it allows the suitable number of components to be determined in an automatic way given that the a-priori specified number of components is large enough. This recommendation is based on the assumption that the redundant components will be omitted during the estimation process if the algorithm is started with too many components. If omission of small components is not desired, the minimum size required can be set to zero. All components will then be retained throughout the EM algorithm and a mixture with the number of components specified in the initialization will be returned. The algorithm is stopped if the relative change in the log-likelihood is smaller than a pre-specified $\epsilon$ or the maximum number of iterations is reached. For model selection different information criteria are available: AIC, BIC and ICL \citep[Integrated Complete Likelihood;][]{mixtures:Biernacki+Celeux+Govaert:2000}. They are of the form twice the negative log-likelihood plus the number of parameters times $k$, where $k=2$ for the AIC and $k$ equals the logarithm of the number of observations for the BIC. The ICL is the same as the BIC except that the complete likelihood (where the missing class memberships are replaced by the assignments induced by the maximum a-posteriori probabilities) instead of the likelihood is used.
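To make the construction of the stacked covariate matrix $X$ described above more concrete, the following sketch mimics this construction for a small artificial setting with $K=3$ components where components 1 and 2 form one nesting group and component 3 the other, and where each of the three parts contains a single covariate. The code is only an illustration of the structure of $X$ and is not taken from the package internals.
<<eval=false>>=
## Sketch of X = (X_constant, X_nested, X_varying) for K = 3 components,
## nesting partition {1, 2} and {3}, and one covariate per part.
N <- 5; K <- 3
x1 <- matrix(1, nrow = N)          # covariate of the constant coefficient
x2 <- matrix(rnorm(N), nrow = N)   # covariate of the nested varying coefficient
x3 <- matrix(runif(N), nrow = N)   # covariate of the varying coefficient
J <- cbind(c(1, 1, 0), c(0, 0, 1)) # incidence matrix of dimension K x |C|
X_constant <- kronecker(rep(1, K), x1)
X_nested <- do.call(rbind,
  lapply(seq_len(K), function(k) kronecker(t(J[k, ]), x2)))
X_varying <- kronecker(diag(K), x3)
X <- cbind(X_constant, X_nested, X_varying)
dim(X)   # N * K rows and 1 + |C| + K columns
@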
%%----------------------------------------------------------------------- %%----------------------------------------------------------------------- \section{Using the new functionality} \label{sec:using-new-funct} In the following model fitting and model selection in \proglang{R} is illustrated on several examples including mixtures of Gaussian, binomial and Poisson regression models, see also \cite{mixtures:Gruen:2006} and \cite{mixtures:Gruen+Leisch:2007a}. More examples for mixtures of GLMs are provided as part of the software package through a collection of artificial and real world datasets, most of which have been previously used in the literature (see references in the online help pages). Each dataset can be loaded to \proglang{R} with \code{data("}\textit{name}\code{")} and the fitting of the proposed models can be replayed using \code{example("}\textit{name}\code{")}. Further details on these examples are given in a user guide which can be accessed using \code{vignette("regression-examples", package="flexmix")} from within \proglang{R}. %%----------------------------------------------------------------------- \subsection{Artificial example}\label{sec:artificial-example} In the following the artificial dataset \code{NPreg} is used which has already been used in \cite{mixtures:Leisch:2004} to illustrate the application of package \pkg{flexmix}. The data comes from two latent classes of size \Sexpr{nrow(NPreg)/2} each and for each of the classes the data is drawn with respect to the following structure: \begin{center} \begin{tabular}{ll} Class~1: & $ \mathit{yn} = 5x+\epsilon$\\ Class~2: & $ \mathit{yn} = 15+10x-x^2+\epsilon$ \end{tabular} \end{center} with $\epsilon\sim N(0,9)$, see the left panel of Figure~\ref{fig:npreg}. The dataset \code{NPreg} also includes a response $\mathit{yp}$ which is given by a generalized linear model following a Poisson distribution and using the logarithm as link function. The parameters of the mean are given for the two classes by: \begin{center} \begin{tabular}{ll} Class~1: & $ \mu_1 = 2 - 0.2x$\\ Class~2: & $ \mu_2 = 1 + 0.1x$. \end{tabular} \end{center} This signifies that given $x$ the response $\mathit{yp}$ in group $k$ follows a Poisson distribution with mean $e^{\mu_k}$, see the right panel of Figure~\ref{fig:npreg}. \setkeys{Gin}{width=\textwidth} \begin{figure} \centering <>= par(mfrow=c(1,2)) plot(yn~x, col=class, pch=class, data=NPreg) plot(yp~x, col=class, pch=class, data=NPreg) @ \caption{Standard regression example (left) and Poisson regression (right).} \label{fig:npreg} \end{figure} This model can be fitted in \proglang{R} using the commands: <<>>= suppressWarnings(RNGversion("3.5.0")) set.seed(1802) library("flexmix") data("NPreg", package = "flexmix") Model_n <- FLXMRglm(yn ~ . + I(x^2)) Model_p <- FLXMRglm(yp ~ ., family = "poisson") m1 <- flexmix(. ~ x, data = NPreg, k = 2, model = list(Model_n, Model_p), control = list(verbose = 10)) @ If the dimensions are independent the component specific model for multivariate observations can be specified as a list of models for each dimension. The estimation can be controlled with the \code{control} argument which is specified with an object of class \code{"FLXcontrol"}. For convenience also a named list can be provided which is used to construct and set the respective slots of the \code{"FLXcontrol"} object. 
Elements of the control object are \code{classify} to select ordinary EM, CEM or SEM, \code{minprior} for the minimum relative size of components, \code{iter.max} for the maximum number of iterations and \code{verbose} for monitoring. If \code{verbose} is a positive integer the log-likelihood is reported every \code{verbose} iterations and at convergence together with the number of iterations made. The default is to not report any log-likelihood information during the fitting process. The estimated model \code{m1} is of class \code{"flexmix"} and the result of the default plot method for this class is given in Figure~\ref{fig:root1}. This plot method uses package \pkg{lattice} \citep{mixtures:Sarkar:2008} and the usual parameters can be specified to alter the plot, e.g.~the argument \code{layout} determines the arrangement of the panels. The returned object is of class \code{"trellis"} and the plotting can also be influenced by the arguments of its show method. The default plot prints rootograms (i.e., a histogram of the square root of counts) of the a-posteriori probabilities of each observation separately for each component. For each component the observations with a-posteriori probabilities less than a pre-specified $\epsilon$ (default is $10^{-4}$) for this component are omitted in order to avoid that the bar at zero dominates the plot \citep{mixtures:Leisch:2004a}. Please note that the labels of the y-axis report the number of observations in each bar, i.e.~the squared values used for the rootograms. \begin{figure} \centering <>= print(plot(m1)) @ \caption{The plot method for \code{"flexmix"} objects, here obtained by \code{plot(m1)}, shows rootograms of the posterior class probabilities.} \label{fig:root1} \end{figure} More detailed information on the estimated parameters with respect to standard deviations and significance tests can be obtained with function \code{refit()}. This function determines the variance-covariance matrix of the estimated parameters by using the inverted negative Hesse matrix as computed by the general purpose optimizer \code{optim()} on the full likelihood of the model. \code{optim()} is initialized in the solution obtained with the EM algorithm. For mixtures of GLMs we also implemented the gradient, which speeds up convergence and gives more precise estimates of the Hessian. Naturally, function \code{refit()} will also work for models which have been determined by applying some model selection strategy depending on the data (AIC, BIC, \ldots). The same caution is necessary as when using \code{summary()} on standard linear models selected using \code{step()}: The p-values shown are not correct because they have not been adjusted for the fact that the same data are used to select the model and compute the p-values. So use them only in an exploratory manner in this context, see also \cite{mixtures:Harrell:2001} for more details on the general problem. The returned object can be inspected using \code{summary()} with arguments \code{which} to specify if information for the component model or the concomitant variable model should be shown and \code{model} to indicate for which dimension of the component models this should be done. Selecting \code{model=1} gives the parameter estimates for the dimension where the response variable follows a Gaussian distribution. 
<<>>= m1.refit <- refit(m1) summary(m1.refit, which = "model", model = 1) @ \begin{figure} \centering <>= print(plot(m1.refit, layout = c(1,3), bycluster = FALSE, main = expression(paste(yn *tilde(" ")* x + x^2))), split= c(1,1,2,1), more = TRUE) print(plot(m1.refit, model = 2, main = expression(paste(yp *tilde(" ")* x)), layout = c(1,2), bycluster = FALSE), split = c(2,1,2,1)) @ \caption{The default plot for refitted \code{"flexmix"} objects, here obtained by \code{plot(refit(m1), model = 1)} and \code{plot(refit(m1), model = 2)}, shows the coefficient estimates and their confidence intervals.} \label{fig:refit} \end{figure} The default plot method for the refitted \code{"flexmix"} object depicts the estimated coefficients with corresponding confidence intervals and is given in Figure~\ref{fig:refit}. It can be seen that for the first model the confidence intervals of the coefficients of the intercept and the quadratic term of \code{x} overlap with zero. A model where these coefficients are set to zero can be estimated with the model driver function \code{FLXMRglmfix()} and the following commands for specifying the nesting structure. The argument \code{nested} needs input for the number of components in each group (given by \code{k}) and the formula which determines the model matrix for the nesting (given by \code{formula}). This information can be provided in a named list. For the restricted model the element \code{k} is a vector with two 1s because each of the components has different parameters. The formulas specifying the model matrices of these coefficients are \verb/~ 1 + I(x^2)/ for an intercept and a quadratic term of $x$ for component 1 and \code{~ 0} for no additional coefficients for component 2. The EM algorithm is initialized in the previously fitted model by passing the posterior probabilities in the argument \code{cluster}. <<>>= Model_n2 <- FLXMRglmfix(yn ~ . + 0, nested = list(k = c(1, 1), formula = c(~ 1 + I(x^2), ~ 0))) m2 <- flexmix(. ~ x, data = NPreg, cluster = posterior(m1), model = list(Model_n2, Model_p)) m2 @ Model selection based on the BIC would suggest the smaller model which also corresponds to the true underlying model. <<>>= c(BIC(m1), BIC(m2)) @ %%----------------------------------------------------------------------- \subsection{Beta-blockers dataset} \label{sec:beta-blockers} The dataset is analyzed in \cite{mixtures:Aitkin:1999, mixtures:Aitkin:1999a} using a finite mixture of binomial regression models. Furthermore, it is described in \citet[p.~165]{mixtures:McLachlan+Peel:2000}. The dataset is from a 22-center clinical trial of beta-blockers for reducing mortality after myocardial infarction. A two-level model is assumed to represent the data, where centers are at the upper level and patients at the lower level. The data is illustrated in Figure~\ref{fig:beta}. First, the center information is ignored and a binomial logit regression model with treatment as covariate is fitted using \code{glm}, i.e.~$K=1$ and it is assumed that the different centers are comparable: <<>>= data("betablocker", package = "flexmix") betaGlm <- glm(cbind(Deaths, Total - Deaths) ~ Treatment, family = "binomial", data = betablocker) betaGlm @ The residual deviance suggests that overdispersion is present in the data. In the next step the intercept is allowed to follow a mixture distribution given the centers. This signifies that the component membership is fixed for each center. 
This grouping is specified in \proglang{R} by adding \code{| Center} to the formula similar to the notation used in \pkg{nlme} \citep{mixtures:Pinheiro+Bates:2000}. Under the assumption of homogeneity within centers, identifiability of the model class can be ensured by the sufficient conditions for identifiability given in \cite{mixtures:Follmann+Lambert:1991} for binomial logit models with varying intercepts and \cite{mixtures:Gruen+Leisch:2008} for multinomial logit models with varying and constant parameters. In order to determine the suitable number of components, the mixture is fitted with different numbers of components. <<>>= betaMixFix <- stepFlexmix(cbind(Deaths, Total - Deaths) ~ 1 | Center, model = FLXMRglmfix(family = "binomial", fixed = ~ Treatment), k = 2:4, nrep = 5, data = betablocker) @ The returned object is of class \code{"stepFlexmix"} and printing the object gives the information on the number of iterations until termination of the EM algorithm, a logical indicating if the EM algorithm has converged, the log-likelihood and some model information criteria. The plot method compares the fitted models using the different model information criteria. <<>>= betaMixFix @ A specific \code{"flexmix"} model contained in the \code{"stepFlexmix"} object can be selected using \code{getModel()} with argument \code{which} to specify the selection criterion. The best model with respect to the BIC is selected with: <<>>= betaMixFix_3 <- getModel(betaMixFix, which = "BIC") betaMixFix_3 <- relabel(betaMixFix_3, "model", "Intercept") @ The components of the selected model are ordered with respect to the estimated intercept values. In this case a model with three components is selected with respect to the BIC. The fitted values for the model with three components are given in Figure~\ref{fig:beta} separately for each component and the treatment and control groups. The fitted parameters of the component specific models can be accessed with: <<>>= parameters(betaMixFix_3) @ Please note that the coefficients of variable \code{Treatment} are the same for all three components. \begin{figure} \centering <>= library("grid") betablocker$Center <- with(betablocker, factor(Center, levels = Center[order((Deaths/Total)[1:22])])) clusters <- factor(clusters(betaMixFix_3), labels = paste("Cluster", 1:3)) print(dotplot(Deaths/Total ~ Center | clusters, groups = Treatment, as.table = TRUE, data = betablocker, xlab = "Center", layout = c(3, 1), scales = list(x = list(cex = 0.7, tck = c(1, 0))), key = simpleKey(levels(betablocker$Treatment), lines = TRUE, corner = c(1,0)))) betaMixFix.fitted <- fitted(betaMixFix_3) for (i in 1:3) { seekViewport(trellis.vpname("panel", i, 1)) grid.lines(unit(1:22, "native"), unit(betaMixFix.fitted[1:22, i], "native"), gp = gpar(lty = 1)) grid.lines(unit(1:22, "native"), unit(betaMixFix.fitted[23:44, i], "native"), gp = gpar(lty = 2)) } @ \setkeys{Gin}{width=0.8\textwidth} \caption{Relative number of deaths for the treatment and the control group for each center in the beta-blocker dataset. The centers are sorted by the relative number of deaths in the control group. The lines indicate the fitted values for each component of the 3-component mixture model with varying intercept and constant parameters for treatment.} \label{fig:beta} \end{figure} The variable \code{Treatment} can also be included in the varying part of the model. 
This signifies that a mixture distribution is assumed where for each component different values are allowed for the intercept and the treatment coefficient. This mixture distribution can be specified using function \code{FLXMRglm()}. Again it is assumed that the heterogeneity is only between centers and therefore the aggregated data for each center can be used. <<>>= betaMix <- stepFlexmix(cbind(Deaths, Total - Deaths) ~ Treatment | Center, model = FLXMRglm(family = "binomial"), k = 3, nrep = 5, data = betablocker) betaMix <- relabel(betaMix, "model", "Treatment") parameters(betaMix) c(BIC(betaMixFix_3), BIC(betaMix)) @ The difference between model \code{betaMix} and \code{betaMixFix\_3} is that the treatment coefficients are the same for all three components for \code{betaMixFix\_3}, while they have different values for \code{betaMix}, which can easily be seen when comparing the fitted component specific parameters. The larger model \code{betaMix}, which also allows varying parameters for treatment, has a higher BIC and therefore the smaller model \code{betaMixFix\_3} would be preferred. The default plot for \code{"flexmix"} objects gives a rootogram of the posterior probabilities for each component. The argument \code{mark} can be used to inspect with which components the specified component overlaps: the observations which are assigned to this component based on the maximum a-posteriori probabilities are coloured in all panels. \begin{figure} \centering <>= print(plot(betaMixFix_3, nint = 10, mark = 1, col = "grey", layout = c(3, 1))) @ \caption{Default plot of \code{"flexmix"} objects where the observations assigned to the first component are marked.}\label{fig:default} \end{figure} \begin{figure} \centering <>= print(plot(betaMixFix_3, nint = 10, mark = 2, col = "grey", layout = c(3, 1))) @ \caption{Default plot of \code{"flexmix"} objects where the observations assigned to the second component are marked.}\label{fig:default-2} \end{figure} The rootogram indicates that the components are well separated. In Figure~\ref{fig:default} it can be seen that component 1 is completely separated from the other two components, while Figure~\ref{fig:default-2} shows that component 2 has a slight overlap with both other components. The cluster assignments using the maximum a-posteriori probabilities are obtained with: <<>>= table(clusters(betaMix)) @ The estimated probabilities of death for each component for the treated patients and those in the control group can be obtained with: <<>>= predict(betaMix, newdata = data.frame(Treatment = c("Control", "Treated"))) @ or by obtaining the fitted values for two observations (e.g.~rows 1 and 23) with the desired levels of the predictor \code{Treatment} <<>>= betablocker[c(1, 23), ] fitted(betaMix)[c(1, 23), ] @ A further analysis of the model is possible with function \code{refit()} which returns the estimated coefficients together with the standard deviations, z-values and corresponding p-values. Please note that the p-values are only approximate in the sense that they have not been corrected for the fact that the data has already been used to determine the specific fitted model. <<>>= summary(refit(betaMix)) @ Given the estimated treatment coefficients we now also compare this model to a model where the treatment coefficient is assumed to be the same for components 1 and 2. Such a model is specified using the model driver \code{FLXMRglmfix()}. 
As the first two components are assumed to have the same coefficients for treatment, while for the third component the coefficient for treatment is set to zero, the argument \code{nested} has \code{k = c(2, 1)} and \code{formula = c(\~{}Treatment, \~{}0)}. <<>>= ModelNested <- FLXMRglmfix(family = "binomial", nested = list(k = c(2, 1), formula = c(~ Treatment, ~ 0))) betaMixNested <- flexmix(cbind(Deaths, Total - Deaths) ~ 1 | Center, model = ModelNested, k = 3, data = betablocker, cluster = posterior(betaMix)) parameters(betaMixNested) c(BIC(betaMix), BIC(betaMixNested), BIC(betaMixFix_3)) @ The comparison of the BIC values suggests that the nested model with the same treatment effect for two components and no treatment effect for the third component is the best. %%----------------------------------------------------------------------- \subsection[Productivity of Ph.D. students in biochemistry]{Productivity of Ph.D.~students in biochemistry} \label{sec:bioChemists} <>= data("bioChemists", package = "flexmix") @ This dataset is taken from \cite{mixtures:Long:1990}. It contains \Sexpr{nrow(bioChemists)} observations from academics who obtained their Ph.D.~degree in biochemistry in the 1950s and 60s. It includes \Sexpr{sum(bioChemists$fem=="Women")} women and \Sexpr{sum(bioChemists$fem=="Men")} men. The productivity was measured by counting the number of publications in scientific journals during the three-year period ending the year after the Ph.D.~was received. In addition, data on the productivity and the prestige of the mentor and the Ph.D.~department were collected. Two measures of family characteristics were recorded: marital status and the number of children of age 5 and younger by the year of the Ph.D. First, mixtures with one, two and three components and only varying parameters are fitted, and the model minimizing the BIC is selected. This is based on the assumption that unobserved heterogeneity is present in the data due to latent differences between the students in their ability to be productive and achieve publications. Starting with the most general model to determine the number of components using information criteria and then checking for possible model restrictions once the number of components is fixed is a common strategy in finite mixture modelling \citep[see][]{mixtures:Wang+Puterman+Cockburn:1996}. Function \code{refit()} is used to determine confidence intervals for the parameters in order to choose suitable alternative models. However, it has to be noted that in the course of the procedure these confidence intervals will not be correct any more because the specific fitted models have already been determined using the same data. <<>>= data("bioChemists", package = "flexmix") Model1 <- FLXMRglm(family = "poisson") ff_1 <- stepFlexmix(art ~ ., data = bioChemists, k = 1:3, model = Model1) ff_1 <- getModel(ff_1, "BIC") @ The selected model has \Sexpr{ff_1@k} components. The estimated coefficients of the components are given in Figure~\ref{fig:coefficients-1} together with the corresponding 95\% confidence intervals using the plot method for objects returned by \code{refit()}. The plot shows that the confidence intervals of the parameters for \code{kid5}, \code{mar}, \code{ment} and \code{phd} overlap for the two components. In the next step a mixture with two components is therefore fitted where only a varying intercept and a varying coefficient for \code{fem} is specified and all other coefficients are constant. 
The EM algorithm is initialized with the fitted mixture model using \code{posterior()}. \begin{figure} \centering <>= print(plot(refit(ff_1), bycluster = FALSE, scales = list(x = list(relation = "free")))) @ \caption{Coefficient estimates and confidence intervals for the model with only varying parameters.}\label{fig:coefficients-1} \end{figure} <<>>= Model2 <- FLXMRglmfix(family = "poisson", fixed = ~ kid5 + mar + ment) ff_2 <- flexmix(art ~ fem + phd, data = bioChemists, cluster = posterior(ff_1), model = Model2) c(BIC(ff_1), BIC(ff_2)) @ If the BIC is used for model comparison the smaller model including only varying coefficients for the intercept and \code{fem} is preferred. The coefficients of the fitted model can be obtained using \code{refit()}: <<>>= summary(refit(ff_2)) @ It can be seen that the coefficient of \code{phd} does not differ significantly from zero for either component and might be omitted. This again improves the BIC. <<>>= Model3 <- FLXMRglmfix(family = "poisson", fixed = ~ kid5 + mar + ment) ff_3 <- flexmix(art ~ fem, data = bioChemists, cluster = posterior(ff_2), model = Model3) c(BIC(ff_2), BIC(ff_3)) @ The coefficients of the restricted model without \code{phd} are given in Figure~\ref{fig:coefficients-2}. \begin{figure}[t] \centering <>= print(plot(refit(ff_3), bycluster = FALSE, scales = list(x = list(relation = "free")))) @ \caption{Coefficient estimates and confidence intervals for the model with varying and constant parameters where the variable \code{phd} is not used in the regression.}\label{fig:coefficients-2} \end{figure} An alternative model would be to assume that gender does not directly influence the number of articles but has an impact on the segment sizes. <<>>= Model4 <- FLXMRglmfix(family = "poisson", fixed = ~ kid5 + mar + ment) ff_4 <- flexmix(art ~ 1, data = bioChemists, cluster = posterior(ff_2), concomitant = FLXPmultinom(~ fem), model = Model4) parameters(ff_4) summary(refit(ff_4), which = "concomitant") BIC(ff_4) @ This suggests that the proportion of women is lower in the second component, which is the more productive segment. The alternative modelling strategy where homogeneity is assumed at the beginning and a varying intercept is added if overdispersion is observed leads to the following model, which is the best with respect to the BIC. <<>>= Model5 <- FLXMRglmfix(family = "poisson", fixed = ~ kid5 + ment + fem) ff_5 <- flexmix(art ~ 1, data = bioChemists, cluster = posterior(ff_2), model = Model5) BIC(ff_5) @ \begin{figure} \centering \setkeys{Gin}{width=0.8\textwidth} <>= pp <- predict(ff_5, newdata = data.frame(kid5 = 0, mar = factor("Married", levels = c("Single", "Married")), fem = c("Men", "Women"), ment = mean(bioChemists$ment))) matplot(0:12, sapply(unlist(pp), function(x) dpois(0:12, x)), type = "b", lty = 1, xlab = "Number of articles", ylab = "Probability") legend("topright", paste("Comp.", rep(1:2, each = 2), ":", c("Men", "Women")), lty = 1, col = 1:4, pch = paste(1:4), bty = "n") @ \caption{The estimated productivity for each component for men and women.} \label{fig:estimated} \end{figure} \setkeys{Gin}{width=0.98\textwidth} In Figure~\ref{fig:estimated} the estimated distributions of productivity for model \code{ff\_5} are given separately for men and women as well as for each component, where the mean values are used for all other numeric variables and the most frequent category for the categorical variables. 
The two components differ in that component 1 contains the students who publish no article or only a single article, while the students in component 2 write on average several articles. With a constant coefficient for gender, women publish fewer articles than men in both components. This example shows that different optimal models are chosen for different modelling procedures. However, the distributions induced by the different variants of the model class may be similar and therefore it is not surprising that they then have similar BIC values. %%----------------------------------------------------------------------- %%----------------------------------------------------------------------- \section{Implementation}\label{sec:implementation} The new features extend the available model class described in \cite{mixtures:Leisch:2004} by providing infrastructure for concomitant variable models and for fitting mixtures of GLMs with varying and constant parameters for the component specific parameters. The implementation of the extensions of the model class made it necessary to define a better class structure for the component specific models and to modify the fit functions \code{flexmix()} and \code{FLXfit()}. An overview of the \proglang{S}4 class structure of the package is given in Figure~\ref{fig:class structure}. There is a class for unfitted finite mixture distributions given by \code{"FLXdist"} which contains a list of \code{"FLXM"} objects which determine the component specific models, a list of \code{"FLXcomponent"} objects which specify functions to determine the component specific log-likelihoods and predictions and which contain the component specific parameters, and an object of class \code{"FLXP"} which specifies the concomitant variable model. Class \code{"flexmix"} extends \code{"FLXdist"}. It represents a fitted finite mixture distribution and it contains the information about the fitting with the EM algorithm in the object of class \code{"FLXcontrol"}. Repeated fitting with the EM algorithm with different numbers of components is provided by function \code{stepFlexmix()} which returns an object of class \code{"stepFlexmix"}. Objects of class \code{"stepFlexmix"} contain the list of the fitted mixture models for each number of components in the slot \code{"models"}. \setkeys{Gin}{width=.9\textwidth} \begin{figure}[t] \centering \includegraphics{flexmix} \caption{UML class diagram \citep[see][]{mixtures:Fowler:2004} of the \pkg{flexmix} package.} \label{fig:class structure} \end{figure} \setkeys{Gin}{width=\textwidth} For the component specific model a virtual class \code{"FLXM"} is introduced which (currently) has two subclasses: \code{"FLXMC"} for model-based clustering and \code{"FLXMR"} for clusterwise regression, where predictor variables are given. Additional slots have been introduced to allow for data preprocessing and the construction of the components was separated from the fit and is implemented using lexical scoping \citep{mixtures:Gentleman+Ihaka:2000} in the slot \code{defineComponent}. \code{"FLXMC"} has an additional slot \code{dist} to specify the name of the distribution of the variable. In the future functionality shall be provided for sampling from a fitted or unfitted finite mixture. Using this slot observations can be generated by using the function which results from adding an \code{r} at the beginning of the distribution name. 
This allows to only implement the (missing) random number generator functions and otherwise use the same method for sampling from mixtures with component specific models of class \code{"FLXMC"}. For \code{flexmix()} and \code{FLXfit()} code blocks which are model dependent have been identified and different methods implemented. Finite mixtures of regressions with varying, nested and constant parameters were a suitable model class for this identification task as they are different from models previously implemented. The main differences are: \begin{itemize} \item The number of components is related to the component specific model and the omission of small components during the EM algorithm impacts on the model. \item The parameters of the component specific models can not be determined separately in the M-step and a joint model matrix is needed. \end{itemize} This makes it also necessary to have different model dependent methods for \code{fitted()} which extracts the fitted values from a \code{"flexmix"} object, \code{predict()} which predicts new values for a \code{"flexmix"} object and \code{refit()} which refits an estimated model to obtain additional information for a \code{"flexmix"} object. %%----------------------------------------------------------------------- \subsection{Component specific models with varying and constant parameters}\label{sec:comp-models-with} A new M-step driver is provided which fits finite mixtures of GLMs with constant and nested varying parameters for the coefficients and the dispersion parameters. The class \code{"FLXMRglmfix"} returned by the driver \code{FLXMRglmfix()} has the following additional slots with respect to \code{"FLXMRglm"}: \begin{description} \item[\code{design}:] An incidence matrix indicating which columns of the model matrix are used for which component, i.e.~$\mathbf{D}=(\mathbf{1}_K,\mathbf{J}, \mathbf{I}_K)$. \item[\code{nestedformula}:] An object of class \code{"FLXnested"} containing the formula for the nested regression coefficients and the number of components in each $K_c$, $c \in C$. \item[\code{fixed}:] The formula for the constant regression coefficients. \item[\code{variance}:] A logical indicating if different variances shall be estimated for the components following a Gaussian distribution or a vector specifying the nested structure for estimating these variances. \end{description} The difference between estimating finite mixtures including only varying parameters using models specified with \code{FLXMRglm()} and those with varying and constant parameters using function \code{FLXMRglmfix()} is hidden from the user, as only the specified model is different. The fitted model is also of class \code{"flexmix"} and can be analyzed using the same functions as for any model fitted using package \pkg{flexmix}. The methods used are the same except if the slot containing the model is accessed and method dispatching is made via the model class. New methods are provided for models of class \code{"FLXMRglmfix"} for functions \code{refit()}, \code{fitted()} and \code{predict()} which can be used for analyzing the fitted model. The implementation allows repeated measurements by specifying a grouping variable in the formula argument of \code{flexmix()}. Furthermore, it has to be noticed that the model matrix is determined by updating the formula of the varying parameters successively with the formula of the constant and then of the nested varying parameters. 
This ensures that if a mixture distribution is fitted for the intercept, the model matrix of a categorical variable includes only the remaining columns for the constant parameters to have full column rank. However, this updating scheme makes it impossible to estimate a constant intercept while allowing varying parameters for a categorical variable. For this model one big model matrix is constructed where the observations are repeated $K$ times and suitable columns of zeros are added. The coefficients of all $K$ components are determined simultaneously in the M-step, while if only varying parameters are specified the likelihood is maximized separately for each component. For large datasets the estimation of a combination of constant and varying parameters might therefore be more challenging than the estimation of only varying parameters. %% ----------------------------------------------------------------------- \subsection{Concomitant variable models}\label{sec:conc-vari-models} For representing concomitant variable models the class \code{"FLXP"} is defined. It specifies how the concomitant variable model is fitted using the concomitant variable model matrix as predictor variables and the current a-posteriori probability estimates as response variables. The object has the following slots: \begin{description} \item[\code{fit}:] A \code{function (x, y, ...)} returning the fitted values for the component weights during the EM algorithm. \item[\code{refit}:] A \code{function (x, y, ...)} used for refitting the model. \item[\code{df}:] A \code{function (x, k, ...)} returning the degrees of freedom used for estimating the concomitant variable model given the model matrix \code{x} and the number of components \code{k}. \item[\code{x}:] A matrix containing the model matrix of the concomitant variables. \item[\code{formula}:] The formula for determining the model matrix \code{x}. \item[\code{name}:] A character string describing the model, which is only used for print output. \end{description} Two constructor functions for concomitant variable models are provided at the moment. \code{FLXPconstant()} is for constant component weights without concomitant variables and for multinomial logit models \code{FLXPmultinom()} can be used. \code{FLXPmultinom()} has its own class \code{"FLXPmultinom"} which extends \code{"FLXP"} and has an additional slot \code{coef} for the fitted coefficients. The multinomial logit models are fitted using package \pkg{nnet} \citep{mixtures:Venables+Ripley:2002}. %%----------------------------------------------------------------------- \subsection{Further changes} The estimation of the model with the EM algorithm was improved by adapting the variants to correspond to the CEM and SEM variants as outlined in the literature. To make this more explicit it is now also possible to use \code{"CEM"} or \code{"SEM"} to specify an EM variant in the \code{classify} argument of the \code{"FLXcontrol"} object. Even though the SEM algorithm can in general not be expected to converge, the fitting procedure is also terminated for the SEM algorithm if the change in the relative log-likelihood is smaller than the pre-specified threshold. This is motivated by the fact that for well separated clusters the posteriors might converge to an indicator function with all weight concentrated in one component. The fitted model with the maximum likelihood encountered during the SEM algorithm is returned. For discrete data, a dataset in general contains multiple observations with the same values. 
A \code{weights} argument was added to the fitting function \code{flexmix()} in order to avoid repeating these observations in the provided dataset. The specification is through a \code{formula} in order to allow selecting a column of the data frame given in the \code{data} argument. Using weights avoids replicating the same observations and hence enables more efficient memory use in these applications. This possibility is especially useful in the context of model-based clustering for mixtures of Poisson distributions or latent class analysis with multivariate binary observations. In order to be able to apply different initialization strategies, such as, for example, first running several different random initializations with CEM and then switching to ordinary EM using the best solution found by CEM for initialization, a \code{posterior()} function was implemented. \code{posterior()} also takes a \code{newdata} argument and hence it is possible to apply subset strategies for large datasets as suggested in \cite{mixtures:Wehrens+Buydens+Fraley:2004}. The returned matrix of the posterior probabilities can be used to specify the \code{cluster} argument for \code{flexmix()} and the posteriors are then used as weights in the first M-step. The default plot methods now use trellis graphics as implemented in package \pkg{lattice} \citep{mixtures:Sarkar:2008}. Users familiar with the syntax of these graphics and with the plotting and printing arguments will find the application intuitive as many plotting arguments are passed to functions from \pkg{lattice} such as \code{xyplot()} and \code{histogram()}. In fact only new panel, pre-panel and group-panel functions were implemented. The returned object is of class \code{"trellis"} and the show method for this class is used to create the plot. Function \code{refit()} was modified and now has two different estimation methods: \code{"optim"} and \code{"mstep"}. The default method \code{"optim"} determines the variance-covariance matrix of the parameters from the inverse Hessian of the full log-likelihood. The general purpose optimizer \code{optim()} is used to maximize the log-likelihood and initialized with the solution obtained by the EM algorithm. For mixtures of GLMs there are also functions implemented to determine the gradient, which can be used to speed up convergence. The second method \code{"mstep"} is only a raw approximation. It performs an M-step where the a-posteriori probabilities are treated as given instead of estimated and returns nearly complete \code{"glm"} objects for the component specific models, which can be further analyzed. The advantage of this method is that the return value is basically a list of standard \code{"glm"} objects, such that the regular methods for this class can be used. %%----------------------------------------------------------------------- %%----------------------------------------------------------------------- \section{Writing your own drivers}\label{sec:writing-your-own} Two examples are given in the following to demonstrate how new drivers can be provided for concomitant variable models and for component specific models. Easy extensibility is one of the main implementation aims of the package and it can be seen that writing new drivers requires only a few lines of code for providing the constructor functions which include the fit functions. 
%%----------------------------------------------------------------------- \subsection{Component specific models: Zero-inflated models}\label{sec:component-models} \lstset{frame=trbl,basicstyle=\small\tt,stepnumber=5,numbers=left} In Poisson or binomial regression models it is often encountered that the observed number of zeros is higher than expected. A mixture with two components where one has mean zero can be used to model such data. These models are also referred to as zero-inflated models \citep[see for example][]{mixtures:Boehning+Dietz+Schlattmann:1999}. A generalization of this model class would be to fit mixtures with more than two components where one component has a mean fixed at zero. So this model class is a special case of a mixture of generalized linear models where (a) the family is restricted to Poisson and binomial and (b) the parameters of one component are fixed. For simplicity the implementation assumes that the component with mean zero is the first component. In addition we assume that the model matrix contains an intercept and, to have the first component absorb the excess zeros, the coefficient of the intercept is set to $-\infty$ and all other coefficients are set to zero. Hence, to implement this model using package \pkg{flexmix} an appropriate model class is needed with a corresponding convenience function for construction. During the fitting of the EM algorithm using \code{flexmix()} different methods for this model class are needed when determining the model matrix (to check the presence of an intercept), to check the model after a component is removed and for the M-step to account for the fact that the coefficients of the first component are fixed. For all other methods those available for \code{"FLXMRglm"} can be re-used. The code is given in Figure~\ref{fig:ziglm.R}. \begin{figure} \centering \begin{minipage}{0.98\textwidth} \lstinputlisting{ziglm.R} \end{minipage} \caption{Driver for a zero-inflated component specific model.} \label{fig:ziglm.R} \end{figure} The model class \code{"FLXMRziglm"} is defined as extending \code{"FLXMRglm"} in order to be able to inherit methods from this model class. For the construction of a \code{"FLXMRziglm"} object the convenience function \code{FLXMRziglm()} is used, which calls \code{FLXMRglm()}. The only differences are that the family is restricted to binomial or Poisson, that a different name is assigned and that an object of the correct class is returned. The presence of the intercept in the model matrix is checked in \code{FLXgetModelmatrix()} after using the method available for \code{"FLXMRglm"} models as indicated by the call to \code{callNextMethod()}. During the EM algorithm \code{FLXremoveComponent()} is called if one component is removed. For this model class it checks if the first component has been removed and, if this is the case, the model class is changed to \code{"FLXMRglm"}. In the M-step the coefficients of the first component are fixed and not estimated, while for the remaining components the M-step of \code{"FLXMRglm"} objects can be used. During the EM algorithm \code{FLXmstep()} is called to perform the M-step and returns a list of \code{"FLXcomponent"} objects with the fitted parameters. 
A new method for this function is needed for \code{"FLXMRziglm"} objects in order to account for the fixed coefficients in the first component, i.e.~for the first component the \code{"FLXcomponent"} object is constructed and concatenated with the list of \code{"FLXcomponent"} objects returned by using the \code{FLXmstep()} method for \code{"FLXMRglm"} models for the remaining components. Similar modifications are necessary in order to be able to use \code{refit()} for this model class. The code for implementing the \code{refit()} method using \code{optim()} for \code{"FLXMRziglm"} is not shown, but can be inspected in the source code of the package. \subsubsection{Example: Using the driver} This new M-step driver can be used to estimate a zero-inflated Poisson model to the data given in \cite{mixtures:Boehning+Dietz+Schlattmann:1999}. The dataset \code{dmft} consists of count data from a dental epidemiological study for evaluation of various programs for reducing caries collected among school children from an urban area of Belo Horizonte (Brazil). The variables included are the number of decayed, missing or filled teeth (DMFT index) at the beginning and at the end of the observation period, the gender, the ethnic background and the specific treatment for \Sexpr{nrow(dmft)} children. The model can be fitted with the new driver function using the following commands: <<>>= data("dmft", package = "flexmix") Model <- FLXMRziglm(family = "poisson") Fitted <- flexmix(End ~ log(Begin + 0.5) + Gender + Ethnic + Treatment, model = Model, k = 2 , data = dmft, control = list(minprior = 0.01)) summary(refit(Fitted)) @ Please note that \cite{mixtures:Boehning+Dietz+Schlattmann:1999} added the predictor \code{log(Begin + 0.5)} to serve as an offset in order to be able to analyse the improvement in the DMFT index from the beginning to the end of the study. The linear predictor with the offset subtracted is intended to be an estimate for $\log(\mathbb{E}(\textrm{End})) - \log(\mathbb{E}(\textrm{Begin}))$. This is justified by the fact that for a Poisson distributed variable $Y$ with mean between 1 and 10 it holds that $\mathbb{E}(\log(Y + 0.5))$ is approximately equal to $\log(\mathbb{E}(Y))$. $\log(\textrm{Begin} + 0.5)$ can therefore be seen as an estimate for $\log(\mathbb{E}(\textrm{Begin}))$. The estimated coefficients with corresponding confidence intervals are given in Figure~\ref{fig:dmft}. As the coefficients of the first component are restricted a-priori to minus infinity for the intercept and to zero for the other variables, they are of no interest and only the second component is plotted. The box ratio can be modified as for \code{barchart()} in package \pkg{lattice}. The code to produce this plot is given by: <>= print(plot(refit(Fitted), components = 2, box.ratio = 3)) @ \begin{figure} \centering \setkeys{Gin}{width=0.9\textwidth} <>= <> @ \caption{The estimated coefficients of the zero-inflated model for the \code{dmft} dataset. The first component is not plotted as this component captures the inflated zeros and its coefficients are fixed a-priori.} \label{fig:dmft} \end{figure} %%----------------------------------------------------------------------- \subsection{Concomitant variable models}\label{sec:concomitant-models} If the concomitant variable is a categorical variable, the multinomial logit model is equivalent to a model where the component weights for each level of the concomitant variable are determined by the mean values of the a-posteriori probabilities. 
The driver which implements this \code{"FLXP"} model is given in Figure~\ref{fig:myConcomitant.R}. A name for the driver has to be specified and a \code{fit()} function. In the \code{fit()} function the mean posterior probability for all observations with the same covariate points is determined, assigned to the corresponding observations and the full new a-posteriori probability matrix returned. By contrast \code{refit()} only returns the new a-posteriori probability matrix for the number of unique covariate points. \lstset{frame=trbl,basicstyle=\small\tt,stepnumber=5,numbers=left} \begin{figure} \centering \begin{minipage}{0.98\textwidth} \lstinputlisting{myConcomitant.R} \end{minipage} \caption{Driver for a concomitant variable model where the component weights are determined by averaging over the a-posteriori probabilities for each level of the concomitant variable.} \label{fig:myConcomitant.R} \end{figure} \subsubsection{Example: Using the driver} If the concomitant variable model returned by \code{myConcomitant()} is used for the artificial example in Section~\ref{sec:using-new-funct} the same fitted model is returned as if a multinomial logit model is specified. An advantage is that in this case no problems occur if the fitted probabilities are close to zero or one. <>= Concomitant <- FLXPmultinom(~ yb) MyConcomitant <- myConcomitant(~ yb) set.seed(1234) m2 <- flexmix(. ~ x, data = NPreg, k = 2, model = list(Model_n, Model_p), concomitant = Concomitant) m3 <- flexmix(. ~ x, data = NPreg, k = 2, model = list(Model_n, Model_p), cluster = posterior(m2), concomitant = MyConcomitant) @ <<>>= summary(m2) summary(m3) @ For comparing the estimated component weights for each value of $\mathit{yb}$ the following function can be used: <<>>= determinePrior <- function(object) { object@concomitant@fit(object@concomitant@x, posterior(object))[!duplicated(object@concomitant@x), ] } @ <<>>= determinePrior(m2) determinePrior(m3) @ Obviously the fitted values of the two models correspond to each other. %%----------------------------------------------------------------------- %%----------------------------------------------------------------------- \section{Summary and outlook}\label{sec:summary-outlook} Package \pkg{flexmix} was extended to cover finite mixtures of GLMs with (nested) varying and constant parameters. This allows for example the estimation of varying intercept models. In order to be able to characterize the components given some variables concomitant variable models can be estimated for the component weights. The implementation of these extensions have triggered some modifications in the class structure and in the fit functions \code{flexmix()} and \code{FLXfit()}. For certain steps, as e.g.~the M-step, methods which depend on the component specific models are defined in order to enable the estimation of finite mixtures of GLMs with only varying parameters and those with (nested) varying and constant parameters with the same fit function. The flexibility of this modified implementation is demonstrated by illustrating how a driver for zero-inflated models can be defined. In the future diagnostic tools based on resampling methods shall be implemented as bootstrap results can give valuable insights into the model fit \citep{mixtures:Gruen+Leisch:2004}. A function which conveniently allows to test linear hypotheses about the parameters using the variance-covariance matrix returned by \code{refit()} would be a further valuable diagnostic tool. 
The implementation of zero-inflated Poisson and binomial regression models is a first step towards relaxing the assumption that all component specific distributions are from the same parametric family. As mixtures with components which follow distributions from different parametric families can be useful, for example, to model outliers \citep{mixtures:Dasgupta+Raftery:1998,mixtures:Leisch:2008}, it is intended to also make this functionality available in \pkg{flexmix} in the future. %%----------------------------------------------------------------------- %%----------------------------------------------------------------------- \section*{Computational details} <>= SI <- sessionInfo() pkgs <- paste(sapply(c(SI$otherPkgs, SI$loadedOnly), function(x) paste("\\\\pkg{", x$Package, "} ", x$Version, sep = "")), collapse = ", ") @ All computations and graphics in this paper have been done using \proglang{R} version \Sexpr{getRversion()} with the packages \Sexpr{pkgs}. %%----------------------------------------------------------------------- %%----------------------------------------------------------------------- \section*{Acknowledgments} This research was supported by the Austrian Science Foundation (FWF) under grants P17382 and T351. Thanks also to Achim Zeileis for helpful discussions on implementation details and to an anonymous referee for asking a good question about parameter significance which initiated the new version of function \code{refit()}. %%----------------------------------------------------------------------- %%----------------------------------------------------------------------- \bibliography{mixture} %%----------------------------------------------------------------------- %%----------------------------------------------------------------------- \end{document} flexmix/inst/doc/flexmix-intro.Rnw0000644000176200001440000010256514404637307016766 0ustar liggesusers% % Copyright (C) 2004-2005 Friedrich Leisch % $Id: flexmix-intro.Rnw 5187 2020-06-25 17:59:39Z gruen $ % \documentclass[nojss]{jss} \title{FlexMix: A General Framework for Finite Mixture Models and Latent Class Regression in \proglang{R}} \Plaintitle{FlexMix: A General Framework for Finite Mixture Models and Latent Class Regression in R} \Shorttitle{FlexMix: Finite Mixture Models in \proglang{R}} \author{Friedrich Leisch\\Universit\"at f\"ur Bodenkultur Wien} \Plainauthor{Friedrich Leisch} \Address{ Friedrich Leisch\\ Institut f\"ur Angewandte Statistik und EDV\\ Universit\"at f\"ur Bodenkultur Wien\\ Peter Jordan Stra\ss{}e 82\\ 1190 Wien, Austria\\ E-mail: \email{Friedrich.Leisch@boku.ac.at} } \usepackage[utf8]{inputenc} \usepackage{listings} \newcommand{\R}{\proglang{R}} <>= suppressWarnings(RNGversion("3.5.0")) set.seed(1504) options(width=70, prompt = "R> ", continue = "+ ", useFancyQuotes = FALSE) grDevices::ps.options(family="Times") library("graphics") library("flexmix") data("NPreg") @ \Abstract{ This article was originally published as \cite{flexmix:Leisch:2004a} in the \emph{Journal of Statistical Software}. 
FlexMix implements a general framework for fitting discrete mixtures of regression models in the \R{} statistical computing environment: three variants of the EM algorithm can be used for parameter estimation, regressors and responses may be multivariate with arbitrary dimension, data may be grouped, e.g., to account for multiple observations per individual, the usual formula interface of the \proglang{S} language is used for convenient model specification, and a modular concept of driver functions allows to interface many different types of regression models. Existing drivers implement mixtures of standard linear models, generalized linear models and model-based clustering. FlexMix provides the E-step and all data handling, while the M-step can be supplied by the user to easily define new models. } \Keywords{\proglang{R}, finite mixture models, model based clustering, latent class regression} \Plainkeywords{R, finite mixture models, model based clustering, latent class regression} \Volume{11} \Issue{8} \Month{October} \Year{2004} \Submitdate{2004-04-19} \Acceptdate{2004-10-18} %%\usepackage{Sweave} %% already provided by jss.cls %%\VignetteIndexEntry{FlexMix: A General Framework for Finite Mixture Models and Latent Class Regression in R} %%\VignetteDepends{flexmix} %%\VignetteKeywords{R, finite mixture models, model based clustering, latent class regression} %%\VignettePackage{flexmix} \begin{document} \section{Introduction} \label{sec:introduction} Finite mixture models have been used for more than 100 years, but have seen a real boost in popularity over the last decade due to the tremendous increase in available computing power. The areas of application of mixture models range from biology and medicine to physics, economics and marketing. On the one hand these models can be applied to data where observations originate from various groups and the group affiliations are not known, and on the other hand to provide approximations for multi-modal distributions \citep{flexmix:Everitt+Hand:1981,flexmix:Titterington+Smith+Makov:1985,flexmix:McLachlan+Peel:2000}. In the 1990s finite mixture models have been extended by mixing standard linear regression models as well as generalized linear models \citep{flexmix:Wedel+DeSarbo:1995}. An important area of application of mixture models is market segmentation \citep{flexmix:Wedel+Kamakura:2001}, where finite mixture models replace more traditional cluster analysis and cluster-wise regression techniques as state of the art. Finite mixture models with a fixed number of components are usually estimated with the expectation-maximization (EM) algorithm within a maximum likelihood framework \citep{flexmix:Dempster+Laird+Rubin:1977} and with MCMC sampling \citep{flexmix:Diebolt+Robert:1994} within a Bayesian framework. \newpage The \R{} environment for statistical computing \citep{flexmix:R-Core:2004} features several packages for finite mixture models, including \pkg{mclust} for mixtures of multivariate Gaussian distributions \citep{flexmix:Fraley+Raftery:2002,flexmix:Fraley+Raftery:2002a}, \pkg{fpc} for mixtures of linear regression models \citep{flexmix:Hennig:2000} and \pkg{mmlcr} for mixed-mode latent class regression \citep{flexmix:Buyske:2003}. There are three main reasons why we have chosen to write yet another software package for EM estimation of mixture models: \begin{itemize} \item The existing implementations did not cover all cases we needed for our own research (mainly marketing applications). 
\item While all \R{} packages mentioned above are open source and hence can be extended by the user by modifying the source code, we wanted an implementation where extensibility is a main design principle to enable rapid prototyping of new mixture models. \item We include a sampling-based variant of the EM-algorithm for models where weighted maximum likelihood estimation is not available. FlexMix has a clean interface between E- and M-step such that variations of both are easy to combine. \end{itemize} This paper is organized as follows: First we introduce the mathematical models for latent class regression in Section~\ref{sec:latent-class-regr} and briefly discuss parameter estimation and identifiability. Section~\ref{sec:using-flexmix} demonstrates how to use FlexMix to fit models with the standard driver for generalized linear models. Finally, Section~\ref{sec:extending-flexmix} shows how to extend FlexMix by writing new drivers using the well-known model-based clustering procedure as an example. \section{Latent class regression} \label{sec:latent-class-regr} Consider finite mixture models with $K$ components of the form \begin{equation}\label{eq:1} h(y|x,\psi) = \sum_{k = 1}^K \pi_k f(y|x,\theta_k) \end{equation} \begin{displaymath} \pi_k \geq 0, \quad \sum_{k = 1}^K \pi_k = 1 \end{displaymath} where $y$ is a (possibly multivariate) dependent variable with conditional density $h$, $x$ is a vector of independent variables, $\pi_k$ is the prior probability of component $k$, $\theta_k$ is the component specific parameter vector for the density function $f$, and $\psi=(\pi_1,\ldots,\pi_K,\theta_1',\ldots,\theta_K')'$ is the vector of all parameters. If $f$ is a univariate normal density with component-specific mean $\beta_k'x$ and variance $\sigma^2_k$, we have $\theta_k = (\beta_k', \sigma_k^2)'$ and Equation~(\ref{eq:1}) describes a mixture of standard linear regression models, also called \emph{latent class regression} or \emph{cluster-wise regression} \citep{flexmix:DeSarbo+Cron:1988}. If $f$ is a member of the exponential family, we get a mixture of generalized linear models \citep{flexmix:Wedel+DeSarbo:1995}, known as \emph{GLIMMIX} models in the marketing literature \citep{flexmix:Wedel+Kamakura:2001}. For multivariate normal $f$ and $x\equiv1$ we get a mixture of Gaussians without a regression part, also known as \emph{model-based clustering}. The posterior probability that observation $(x,y)$ belongs to class $j$ is given by \begin{equation}\label{eq:3} \Prob(j|x, y, \psi) = \frac{\pi_j f(y | x, \theta_j)}{\sum_k \pi_k f(y | x, \theta_k)} \end{equation} The posterior probabilities can be used to segment data by assigning each observation to the class with maximum posterior probability. In the following we will refer to $f(\cdot|\cdot, \theta_k)$ as \emph{mixture components} or \emph{classes}, and the groups in the data induced by these components as \emph{clusters}. \subsection{Parameter estimation} \label{sec:parameter-estimation} The log-likelihood of a sample of $N$ observations $\{(x_1,y_1),\ldots,(x_N,y_N)\}$ is given by \begin{equation}\label{eq:4} \log L = \sum_{n=1}^N \log h(y_n|x_n,\psi) = \sum_{n=1}^N \log\left(\sum_{k = 1}^K \pi_kf(y_n|x_n,\theta_k) \right) \end{equation} and can usually not be maximized directly. 
The most popular method for maximum likelihood estimation of the parameter vector $\psi$ is the iterative EM algorithm \citep{flexmix:Dempster+Laird+Rubin:1977}: \begin{description} \item[Estimate] the posterior class probabilities for each observation \begin{displaymath} \hat p_{nk} = \Prob(k|x_n, y_n, \hat \psi) \end{displaymath} using Equation~(\ref{eq:3}) and derive the prior class probabilities as \begin{displaymath} \hat\pi_k = \frac1N \sum_{n=1}^N \hat p_{nk} \end{displaymath} \item[Maximize] the log-likelihood for each component separately using the posterior probabilities as weights \begin{equation}\label{eq:2} \max_{\theta_k} \sum_{n=1}^N \hat p_{nk} \log f(y_n | x_n, \theta_k) \end{equation} \end{description} The E- and M-steps are repeated until the likelihood improvement falls under a pre-specified threshold or a maximum number of iterations is reached. The EM algorithm is not only useful for mixture models, but rather provides a general framework for fitting models on incomplete data. Suppose we augment each observation $(x_n,y_n)$ with an unobserved multinomial variable $z_n = (z_{n1},\ldots,z_{nK})$, where $z_{nk}=1$ if $(x_n,y_n)$ belongs to class $k$ and $z_{nk}=0$ otherwise. The EM algorithm can be shown to maximize the likelihood on the ``complete data'' $(x_n,y_n,z_n)$; the $z_n$ encode the missing class information. If the $z_n$ were known, maximum likelihood estimation of all parameters would be easy, as we could separate the data set into the $K$ classes and estimate the parameters $\theta_k$ for each class independently from the other classes. If the weighted likelihood estimation in Equation~(\ref{eq:2}) is infeasible for analytical, computational, or other reasons, then we have to resort to approximations of the true EM procedure by assigning the observations to disjoint classes and doing unweighted estimation within the groups: \begin{displaymath} \max_{\theta_k} \sum_{n: z_{nk}=1} \log f(y_n | x_n, \theta_k) \end{displaymath} This corresponds to allowing only 0 and 1 as weights. Possible ways of assigning the data into the $K$ classes are \begin{itemize} \item \textbf{hard} \label{hard} assignment to the class with maximum posterior probability $p_{nk}$, the resulting procedure is called maximizing the \emph{classification likelihood} by \cite{flexmix:Fraley+Raftery:2002}. Another idea is to do \item \textbf{random} assignment to classes with probabilities $p_{nk}$, which is similar to the sampling techniques used in Bayesian estimation (although for the $z_n$ only). \end{itemize} Well known limitations of the EM algorithm include that convergence can be slow and occurs only to a local maximum of the likelihood surface. There can also be numerical instabilities at the margin of parameter space, and if a component gets to contain only a few observations during the iterations, parameter estimation in the respective component may be problematic. E.g., the likelihood of Gaussians increases without bound for $\sigma^2\to 0$. As a result, numerous variations of the basic EM algorithm described above exist, most of them exploiting features of special cases for $f$. \subsection{Identifiability} \label{sec:identifiability} The identifiability of many mixture models is still an open question. A comprehensive overview of this topic is beyond the scope of this paper; however, users of mixture models should be aware of the problem: \begin{description} \item[Relabelling of components:] Mixture models are only identifiable up to a permutation of the component labels. 
For EM-based approaches this only affects interpretation of results, but is no problem for parameter estimation itself. \item[Overfitting:] If a component is empty or two or more components have the same parameters, the data generating process can be represented by a smaller model with fewer components. This kind of unidentifiability can be avoided by requiring that the prior weights $\pi_k$ are not equal to zero and that the component specific parameters are different. \item[Generic unidentifiability:] It has been shown that mixtures of univariate normal, gamma, exponential, Cauchy and Poisson distributions are identifiable, while mixtures of discrete or continuous uniform distributions are not identifiable. A special case is the class of mixtures of binomial and multinomial distributions which are only identifiable if the number of components is limited with respect to, e.g., the number of observations per person. See \cite{flexmix:Everitt+Hand:1981}, \cite{flexmix:Titterington+Smith+Makov:1985}, \cite{flexmix:Grun:2002} and references therein for details. \end{description} FlexMix tries to avoid overfitting because of vanishing prior probabilities by automatically removing components where the prior $\pi_k$ falls below a user-specified threshold. Automated diagnostics for generic identifiability are currently under investigation. Relabelling of components is in some cases more of a nuisance than a real problem (``component 2 of the first run may be component 3 in the second run''). More serious are interactions of component relabelling and categorical predictor variables; see \cite{flexmix:Grun+Leisch:2004} for a discussion and how bootstrapping can be used to assess identifiability of mixture models. \pagebreak[4] \section{Using FlexMix} \label{sec:using-flexmix} \SweaveOpts{width=12,height=8,eps=FALSE,keep.source=TRUE} The standard M-step \texttt{FLXMRglm()} of FlexMix is an interface to R's generalized linear modelling facilities (the \texttt{glm()} function). As a simple example we use artificial data with two latent classes of size \Sexpr{nrow(NPreg)/2} each: \begin{center} \begin{tabular}{ll} Class~1: & $ y = 5x+\epsilon$\\ Class~2: & $ y = 15+10x-x^2+\epsilon$\\ \end{tabular} \end{center} with $\epsilon\sim N(0,9)$ and prior class probabilities $\pi_1=\pi_2=0.5$, see the left panel of Figure~\ref{fig:npreg}. We can fit this model in \R{} using the commands <<>>= library("flexmix") data("NPreg") m1 <- flexmix(yn ~ x + I(x^2), data = NPreg, k = 2) m1 @ and get a first look at the estimated parameters of mixture component~1 by <<>>= parameters(m1, component = 1) @ and <<>>= parameters(m1, component = 2) @ for component~2. The parameter estimates of both components are close to the true values. A cross-tabulation of true classes and cluster memberships can be obtained by <<>>= table(NPreg$class, clusters(m1)) @ The summary method <<>>= summary(m1) @ gives the estimated prior probabilities $\hat\pi_k$, the number of observations assigned to the corresponding clusters, the number of observations where $p_{nk}>\delta$ (with a default of $\delta=10^{-4}$), and the ratio of the latter two numbers. For well-separated components, a large proportion of observations with non-vanishing posteriors $p_{nk}$ should also be assigned to the corresponding cluster, giving a ratio close to 1. For our example data the ratios of both components are approximately 0.7, indicating the overlap of the classes at the intersection of line and parabola. 
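These quantities can also be derived directly from the matrix of posterior probabilities; the following lines are an illustrative sketch (not evaluated here) of how the cluster sizes, the counts of non-vanishing posteriors and their ratio relate to the output of \texttt{posterior()} and \texttt{clusters()}:
<<eval=FALSE>>=
post <- posterior(m1)          ## matrix of posterior probabilities p_nk
table(clusters(m1))            ## observations assigned to each cluster
colSums(post > 1e-4)           ## observations with posterior above delta
table(clusters(m1)) / colSums(post > 1e-4)  ## ratios as reported by summary(m1)
@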
\begin{figure}[htbp] \centering <>= par(mfrow=c(1,2)) plot(yn~x, col=class, pch=class, data=NPreg) plot(yp~x, col=class, pch=class, data=NPreg) @ \caption{Standard regression example (left) and Poisson regression (right).} \label{fig:npreg} \end{figure} Histograms or rootograms of the posterior class probabilities can be used to visually assess the cluster structure \citep{flexmix:Tantrum+Murua+Stuetzle:2003}; this is now the default plot method for \texttt{"flexmix"} objects \citep{flexmix:Leisch:2004}. Rootograms are very similar to histograms, the only difference being that the heights of the bars correspond to square roots of counts rather than the counts themselves; hence low counts are more visible and peaks less emphasized. \begin{figure}[htbp] \centering <>= print(plot(m1)) @ \caption{The plot method for \texttt{"flexmix"} objects, here obtained by \texttt{plot(m1)}, shows rootograms of the posterior class probabilities.} \label{fig:root1} \end{figure} Usually in each component a lot of observations have posteriors close to zero, resulting in a high count for the corresponding bin in the rootogram, which obscures the information in the other bins. To avoid this problem, all probabilities with a posterior below a threshold are ignored (we again use $10^{-4}$). A peak at probability 1 indicates that a mixture component is well separated from the other components, while no peak at 1 and/or significant mass in the middle of the unit interval indicates overlap with other components. In our simple example the components are moderately well separated, see Figure~\ref{fig:root1}. Tests for significance of regression coefficients can be obtained by <<>>= rm1 <- refit(m1) summary(rm1) @ Function \texttt{refit()} fits weighted generalized linear models to each component using the standard \R{} function \texttt{glm()} and the posterior probabilities as weights, see \texttt{help("refit")} for details. The data set \texttt{NPreg} also includes a response from a generalized linear model with a Poisson distribution and exponential link function. The two classes of size \Sexpr{nrow(NPreg)/2} each have parameters \begin{center} \begin{tabular}{ll} Class~1: & $ \mu_1 = 2 - 0.2x$\\ Class~2: & $ \mu_2 = 1 + 0.1x$\\ \end{tabular} \end{center} and given $x$ the response $y$ in group $k$ has a Poisson distribution with mean $e^{\mu_k}$, see the right panel of Figure~\ref{fig:npreg}. The model can be estimated using <>= options(width=55) @ <<>>= m2 <- flexmix(yp ~ x, data = NPreg, k = 2, model = FLXMRglm(family = "poisson")) summary(m2) @ <>= options(width=65) @ \begin{figure}[htbp] \centering <>= print(plot(m2)) @ \caption{\texttt{plot(m2)}} \label{fig:root2} \end{figure} Both the summary table and the rootograms in Figure~\ref{fig:root2} clearly show that the clusters of the Poisson response have much more overlap. For our simple low-dimensional example data the overlap of the classes is obvious by looking at scatterplots of the data. For data in higher dimensions this is not an option. The rootograms and summary tables for \texttt{"flexmix"} objects work off the densities or posterior probabilities of the observations and thus do not depend on the dimensionality of the input space. While we use simple 2-dimensional examples to demonstrate the techniques, they can easily be used on high-dimensional data sets or models with complicated covariate structures. 
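As a small illustration of such a dimension-independent summary (a sketch only, not evaluated here and not part of the original analysis), the distribution of the maximum posterior probability per observation can be compared between the two fitted models; values close to 1 indicate well-separated components:
<<eval=FALSE>>=
## maximum posterior probability of each observation for both models
summary(apply(posterior(m1), 1, max))
summary(apply(posterior(m2), 1, max))
@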
\subsection{Multiple independent responses}
\label{sec:mult-indep-resp}

If the response $y=(y_1,\ldots,y_D)'$ is $D$-dimensional and the $y_d$ are mutually independent, the mixture density in Equation~(\ref{eq:1}) can be written as
\begin{eqnarray*}
  h(y|x,\psi) &=& \sum_{k = 1}^K \pi_k f(y|x,\theta_k)\\
  &=& \sum_{k = 1}^K \pi_k \prod_{d=1}^D f_d(y_d|x,\theta_{kd})
\end{eqnarray*}
To specify such models in FlexMix we pass it a list of models, where each list element corresponds to one $f_d$, and each can have a different set of dependent and independent variables. To use the Gaussian and Poisson responses of data \texttt{NPreg} simultaneously, we use the model specification
\begin{Sinput}
> m3 = flexmix(~x, data=NPreg, k=2,
+        model=list(FLXMRglm(yn~.+I(x^2)),
+                   FLXMRglm(yp~., family="poisson")))
\end{Sinput}
<>=
m3 <- flexmix(~ x, data = NPreg, k = 2,
              model=list(FLXMRglm(yn ~ . + I(x^2)),
                         FLXMRglm(yp ~ ., family = "poisson")))
@
Note that now three model formulas are involved: an overall formula as first argument to function \texttt{flexmix()} and one formula per response. The latter ones are interpreted relative to the overall formula such that common predictors have to be specified only once, see \texttt{help("update.formula")} for details on the syntax. The basic principle is that the dots get replaced by the respective terms from the overall formula.

The rootograms show that the posteriors of the two-response model are shifted towards 0 and 1 (compared with either of the two univariate models); the clusters are now well-separated.
\begin{figure}[htbp]
\centering
<>=
print(plot(m3))
@
\caption{\texttt{plot(m3)}}
\label{fig:root3}
\end{figure}

\subsection{Repeated measurements}
\label{sec:repe-meas}

If the data are repeated measurements on $M$ individuals, and we have $N_m$ observations from individual $m$, then the log-likelihood in Equation~(\ref{eq:4}) can be written as
\begin{displaymath}
  \log L = \sum_{m=1}^M \sum_{n=1}^{N_m} \log h(y_{mn}|x_{mn},\psi),
  \qquad \sum_{m=1}^M N_m = N
\end{displaymath}
and the posterior probability that individual $m$ belongs to class $j$ is given by
\begin{displaymath}
  \Prob(j|m) = \frac{\pi_j \prod_{n=1}^{N_m} f(y_{mn} | x_{mn}, \theta_j)}{\sum_k \pi_k \prod_{n=1}^{N_m} f(y_{mn} | x_{mn}, \theta_k)}
\end{displaymath}
where $(x_{mn}, y_{mn})$ is the $n$-th observation from individual $m$.

As an example, assume that the data in \texttt{NPreg} are not 200 independent observations, but 4 measurements each from 50 persons such that $\forall m: N_m=4$. Column \texttt{id2} of the data frame encodes such a grouping and can easily be used in FlexMix:
<<>>=
m4 <- flexmix(yn ~ x + I(x^2) | id2, data = NPreg, k = 2)
summary(m4)
@
Note that convergence of the EM algorithm is much faster with grouping and the two clusters are now perfectly separated.

\subsection{Control of the EM algorithm}
\label{sec:control-em-algorithm}

Details of the EM algorithm can be tuned using the \texttt{control} argument of function \texttt{flexmix()}. E.g., to use a maximum number of 15 iterations, report the log-likelihood at every 3rd step and use hard assignment of observations to clusters (cf.
page~\pageref{hard}) the call is
<<>>=
m5 <- flexmix(yn ~ x + I(x^2), data = NPreg, k = 2,
              control = list(iter.max = 15, verbose = 3, classify = "hard"))
@
Another control parameter (\texttt{minprior}, see below for an example) is the minimum prior probability components are enforced to have; components falling below this threshold (the current default is 0.05) are removed during the EM iteration to avoid numerical instabilities for components containing only a few observations. Using a minimum prior of 0 disables component removal.

\subsection{Automated model search}

In real applications the number of components is unknown and has to be estimated. Tuning the minimum prior parameter allows for simplistic model selection, which works surprisingly well in some situations:
<<>>=
m6 <- flexmix(yp ~ x + I(x^2), data = NPreg, k = 4,
              control = list(minprior = 0.2))
m6
@
Although we started with four components, the algorithm converged to the correct two-component solution.

A better approach is to fit models with an increasing number of components and compare them using AIC or BIC. As the EM algorithm converges only to the next local maximum of the likelihood, it should be run repeatedly using different starting values. The function \texttt{stepFlexmix()} can be used to repeatedly fit models, e.g.,
<<>>=
m7 <- stepFlexmix(yp ~ x + I(x^2), data = NPreg,
                  control = list(verbose = 0), k = 1:5, nrep = 5)
@
runs \texttt{flexmix()} 5 times for $k=1,2,\ldots,5$ components, totalling 25 runs. It returns a list with the best solution found for each number of components; each list element is simply an object of class \texttt{"flexmix"}. To find the best model we can use
<<>>=
getModel(m7, "BIC")
@
and choose the number of components minimizing the BIC.

\section{Extending FlexMix}
\label{sec:extending-flexmix}

One of the main design principles of FlexMix was extensibility: users can provide their own M-step for rapid prototyping of new mixture models. FlexMix was written using S4 classes and methods \citep{flexmix:Chambers:1998} as implemented in \R{} package \pkg{methods}.

The central classes for writing M-steps are \texttt{"FLXM"} and \texttt{"FLXcomponent"}. Class \texttt{"FLXM"} specifies how the model is fitted using the following slots:
\begin{description}
\item[fit:] A \texttt{function(x,y,w)} returning an object of class \texttt{"FLXcomponent"}.
\item[defineComponent:] Expression or function constructing the object of class \texttt{"FLXcomponent"}.
\item[weighted:] Logical, specifies if the model may be fitted using weighted likelihoods. If \texttt{FALSE}, only hard and random classification are allowed (and hard classification becomes the default).
\item[formula:] Formula relative to the overall model formula, default is \verb|.~.|
\item[name:] A character string describing the model; this is only used for print output.
\end{description}
The remaining slots of class \texttt{"FLXM"} are used internally by FlexMix to hold data, etc., and are omitted here because they are not needed to write an M-step driver. The most important slot doing all the work is \texttt{fit}, which holds a function performing the maximum likelihood estimation described in Equation~(\ref{eq:2}). The \texttt{fit()} function returns an object of class \texttt{"FLXcomponent"} which holds a fitted component using the slots:
\begin{description}
\item[logLik:] A \texttt{function(x,y)} returning the log-likelihood for observations in matrices \texttt{x} and \texttt{y}.
\item[predict:] A \texttt{function(x)} predicting \texttt{y} given \texttt{x}.
\item[df:] The degrees of freedom used by the component, i.e., the number of estimated parameters.
\item[parameters:] An optional list containing model parameters.
\end{description}
In a nutshell, class \texttt{"FLXM"} describes an \emph{unfitted} model, whereas class \texttt{"FLXcomponent"} holds a \emph{fitted} model.

\lstset{frame=trbl,basicstyle=\small\tt,stepnumber=5,numbers=left}
\begin{figure}[tb]
  \centering
  \begin{minipage}{0.94\textwidth}
    \lstinputlisting{mymclust.R}
  \end{minipage}
  \caption{M-step for model-based clustering: \texttt{mymclust} is a simplified version of the standard FlexMix driver \texttt{FLXmclust}.}
  \label{fig:mymclust.R}
\end{figure}

\subsection{Writing an M-step driver}
\label{sec:writing-an-m}

Figure~\ref{fig:mymclust.R} shows an example driver for model-based clustering. We use function \texttt{dmvnorm()} from package \pkg{mvtnorm} for calculation of multivariate Gaussian densities. In line~5 we create a new \texttt{"FLXMC"} object named \texttt{retval}, which is also the return value of the driver. Class \texttt{"FLXMC"} extends \texttt{"FLXM"} and is used for model-based clustering. It contains an additional slot with the name of the distribution used. All drivers should take a formula as their first argument; this formula is directly passed on to \texttt{retval}. In most cases authors of new FlexMix drivers need not worry about formula parsing etc.; this is done by \texttt{flexmix} itself. In addition we have to declare whether the driver can do weighted ML estimation (\texttt{weighted=TRUE}) and give a name to our model.

The remainder of the driver creates a \texttt{fit()} function, which takes regressors \texttt{x}, response \texttt{y} and weights \texttt{w}. For multivariate Gaussians the maximum likelihood estimates correspond to the mean and covariance matrix; the standard R function \texttt{cov.wt()} returns a list containing estimates of the weighted covariance matrix and the mean for given data. Our simple example performs clustering without a regression part, hence $x$ is ignored. If \texttt{y} has $D$ columns, we estimate $D$ parameters for the mean and $D(D+1)/2$ parameters for the covariance matrix, giving a total of $(3D+D^2)/2$ parameters (line~11). As an additional feature we allow the user to specify whether the covariance matrix is assumed to be diagonal or a full matrix. For \texttt{diagonal=TRUE} we use only the main diagonal of the covariance matrix (line~14) and the number of parameters is reduced to $2D$.

In addition to parameter estimates, \texttt{flexmix()} needs a function calculating the log-likelihood of given data x and y, which in our example is the log-density of a multivariate Gaussian. We also have to provide a function predicting y given x, in our example simply the mean of the Gaussian. Finally we create a new \texttt{"FLXcomponent"} as return value of function \texttt{fit()}.

Note that our internal functions \texttt{fit()}, \texttt{logLik()} and \texttt{predict()} take only \texttt{x}, \texttt{y} and \texttt{w} as arguments, but none of the model-specific parameters like means and covariances, although they use them of course. \R{} uses \emph{lexical scoping} rules for finding free variables \citep{flexmix:Gentleman+Ihaka:2000}, hence it searches for them first in the environment where a function is defined.
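As a small generic illustration of this mechanism (the function name \texttt{make\_dens} below is made up for illustration and is not part of FlexMix), a function returned by another function keeps access to the variables of its creator:
\begin{Sinput}
> ## purely illustrative closure, not package code
> make_dens <- function(mean, sd) {
+   function(x) dnorm(x, mean = mean, sd = sd)
+ }
> f <- make_dens(0, 1)
> f(1.96)
\end{Sinput}
The inner function finds \texttt{mean} and \texttt{sd} in the environment of \texttt{make\_dens()}, exactly as the functions created by an M-step driver find the fitted parameters.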
E.g., the \texttt{fit()} function uses the variable \texttt{diagonal} in line~24, and finds it in the environment where the function itself was defined, which is the body of function \texttt{mymclust()}. Function \texttt{logLik()} uses the list \texttt{para} in lines~8 and 9, finding it in the body of \texttt{defineComponent()}.

Function \texttt{flexmix()}, on the other hand, never sees the model parameters; all it uses are function calls of the form \texttt{fit(x,y,w)} or \texttt{logLik(x,y)}, which are exactly the same for all kinds of mixture models. In fact, it would not be necessary to even store the component parameters in the \texttt{"FLXcomponent"} object; they are there only for convenience so that users can easily extract and use them after \texttt{flexmix()} has finished. Lexical scoping allows clean interfaces to be written in a very elegant way; the driver abstracts all model details from the FlexMix main engine.

\subsection{Example: Using the driver}
\label{sec:example:-model-based}

\SweaveOpts{width=12,height=6,eps=FALSE}
<>=
library("flexmix")
set.seed(1504)
options(width=60)
grDevices::ps.options(family="Times")
suppressMessages(require("ellipse"))
suppressMessages(require("mvtnorm"))
source("mymclust.R")
@

As a simple example we use the four 2-dimensional Gaussian clusters from data set \texttt{Nclus}. Fitting a wrong model with diagonal covariance matrix is done by
<<>>=
data("Nclus")
m1 <- flexmix(Nclus ~ 1, k = 4, model = mymclust())
summary(m1)
@
The result can be seen in the left panel of Figure~\ref{fig:ell}; it is ``wrong'' because we forced the ellipses to be parallel to the axes. The overlap between three of the four clusters can also be inferred from the low ratio statistics in the summary table (around 0.5 for components 1, 3 and 4), while the much better separated upper left cluster has a considerably higher ratio of 0.85. Using the correct model with a full covariance matrix can be done by setting \texttt{diagonal=FALSE} in the call to our driver \texttt{mymclust()}:
<<>>=
m2 <- flexmix(Nclus ~ 1, k = 4, model = mymclust(diagonal = FALSE))
summary(m2)
@

\begin{figure}[htbp]
\centering
<>=
par(mfrow=1:2)
plotEll(m1, Nclus)
plotEll(m2, Nclus)
@
\caption{Fitting a mixture model with diagonal covariance matrix (left) and full covariance matrix (right).}
\label{fig:ell}
\end{figure}

\pagebreak[4]

\section{Summary and outlook}
\label{sec:summary}

The primary goal of FlexMix is extensibility; this makes the package ideal for rapid development of new mixture models. There is no intent to replace packages implementing more specialized mixture models like \pkg{mclust} for mixtures of Gaussians; FlexMix should rather be seen as a complement to those. By interfacing R's facilities for generalized linear models, FlexMix allows the user to estimate complex latent class regression models.

Using lexical scoping to resolve model-specific parameters hides all model details from the programming interface; FlexMix can in principle fit almost arbitrary finite mixture models for which the EM algorithm is applicable. The downside of this is that FlexMix can in principle fit almost arbitrary finite mixture models, even models where no proper theoretical results for model identification etc.\ are available. We are currently working on a toolset for diagnostic checks on mixture models to test necessary identifiability conditions for those cases where results are available. We also want to implement newer variations of the classic EM algorithm, especially for faster convergence.
Another plan is to have an interactive version of the rootograms using \texttt{iPlots} \citep{flexmix:Urbanek+Theus:2003} such that the user can explore the relations between mixture components, possibly linked to background variables. Other planned extensions include covariates for the prior probabilities and to allow to mix different distributions for components, e.g., to include a Poisson point process for background noise. \section*{Computational details} <>= SI <- sessionInfo() pkgs <- paste(sapply(c(SI$otherPkgs, SI$loadedOnly), function(x) paste("\\\\pkg{", x$Package, "} ", x$Version, sep = "")), collapse = ", ") @ All computations and graphics in this paper have been done using \proglang{R} version \Sexpr{getRversion()} with the packages \Sexpr{pkgs}. \section*{Acknowledgments} This research was supported by the Austrian Science Foundation (FWF) under grant SFB\#010 (`Adaptive Information Systems and Modeling in Economics and Management Science'). Bettina Gr\"un has modified the original version to include and reflect the changes of the package. \bibliography{flexmix} \end{document} %%% Local Variables: %%% mode: latex %%% TeX-master: t %%% End: flexmix/inst/doc/regression-examples.R0000644000176200001440000004066314404662037017606 0ustar liggesusers### R code from vignette source 'regression-examples.Rnw' ################################################### ### code chunk number 1: regression-examples.Rnw:11-14 ################################################### library("stats") library("graphics") library("flexmix") ################################################### ### code chunk number 2: start ################################################### options(width=70, prompt = "R> ", continue = "+ ", useFancyQuotes = FALSE) suppressWarnings(RNGversion("3.5.0")) set.seed(1802) library("lattice") ltheme <- canonical.theme("postscript", FALSE) lattice.options(default.theme=ltheme) ################################################### ### code chunk number 3: NregFix ################################################### set.seed(2807) library("flexmix") data("NregFix", package = "flexmix") Model <- FLXMRglm(~ x2 + x1) fittedModel <- stepFlexmix(y ~ 1, model = Model, nrep = 3, k = 3, data = NregFix, concomitant = FLXPmultinom(~ w)) fittedModel <- relabel(fittedModel, "model", "x1") summary(refit(fittedModel)) ################################################### ### code chunk number 4: diffModel ################################################### Model2 <- FLXMRglmfix(fixed = ~ x2, nested = list(k = c(1, 2), formula = c(~ 0, ~ x1)), varFix = TRUE) fittedModel2 <- flexmix(y ~ 1, model = Model2, cluster = posterior(fittedModel), data = NregFix, concomitant = FLXPmultinom(~ w)) BIC(fittedModel) BIC(fittedModel2) ################################################### ### code chunk number 5: artificial-example ################################################### plotNregFix <- NregFix plotNregFix$w <- factor(NregFix$w, levels = 0:1, labels = paste("w =", 0:1)) plotNregFix$x2 <- factor(NregFix$x2, levels = 0:1, labels = paste("x2 =", 0:1)) plotNregFix$class <- factor(NregFix$class, levels = 1:3, labels = paste("Class", 1:3)) print(xyplot(y ~ x1 | x2*w, groups = class, data = plotNregFix, cex = 0.6, auto.key = list(space="right"), layout = c(2,2))) ################################################### ### code chunk number 6: refit ################################################### summary(refit(fittedModel2)) ################################################### ### code chunk number 7: beta-glm 
################################################### data("betablocker", package = "flexmix") betaGlm <- glm(cbind(Deaths, Total - Deaths) ~ Treatment, family = "binomial", data = betablocker) betaGlm ################################################### ### code chunk number 8: beta-fix ################################################### betaMixFix <- stepFlexmix(cbind(Deaths, Total - Deaths) ~ 1 | Center, model = FLXMRglmfix(family = "binomial", fixed = ~ Treatment), k = 2:4, nrep = 3, data = betablocker) betaMixFix ################################################### ### code chunk number 9: beta-fig ################################################### library("grid") betaMixFix_3 <- getModel(betaMixFix, "3") betaMixFix_3 <- relabel(betaMixFix_3, "model", "Intercept") betablocker$Center <- with(betablocker, factor(Center, levels = Center[order((Deaths/Total)[1:22])])) clusters <- factor(clusters(betaMixFix_3), labels = paste("Cluster", 1:3)) print(dotplot(Deaths/Total ~ Center | clusters, groups = Treatment, as.table = TRUE, data = betablocker, xlab = "Center", layout = c(3, 1), scales = list(x = list(draw = FALSE)), key = simpleKey(levels(betablocker$Treatment), lines = TRUE, corner = c(1,0)))) betaMixFix.fitted <- fitted(betaMixFix_3) for (i in 1:3) { seekViewport(trellis.vpname("panel", i, 1)) grid.lines(unit(1:22, "native"), unit(betaMixFix.fitted[1:22, i], "native"), gp = gpar(lty = 1)) grid.lines(unit(1:22, "native"), unit(betaMixFix.fitted[23:44, i], "native"), gp = gpar(lty = 2)) } ################################################### ### code chunk number 10: beta-full ################################################### betaMix <- stepFlexmix(cbind(Deaths, Total - Deaths) ~ Treatment | Center, model = FLXMRglm(family = "binomial"), k = 3, nrep = 3, data = betablocker) summary(betaMix) ################################################### ### code chunk number 11: default-plot ################################################### print(plot(betaMixFix_3, mark = 1, col = "grey", markcol = 1)) ################################################### ### code chunk number 12: parameters ################################################### parameters(betaMix) ################################################### ### code chunk number 13: cluster ################################################### table(clusters(betaMix)) ################################################### ### code chunk number 14: predict ################################################### predict(betaMix, newdata = data.frame(Treatment = c("Control", "Treated"))) ################################################### ### code chunk number 15: fitted ################################################### fitted(betaMix)[c(1, 23), ] ################################################### ### code chunk number 16: refit ################################################### summary(refit(getModel(betaMixFix, "3"))) ################################################### ### code chunk number 17: mehta-fix ################################################### data("Mehta", package = "flexmix") mehtaMix <- stepFlexmix(cbind(Response, Total - Response)~ 1 | Site, model = FLXMRglmfix(family = "binomial", fixed = ~ Drug), control = list(minprior = 0.04), nrep = 3, k = 3, data = Mehta) summary(mehtaMix) ################################################### ### code chunk number 18: mehta-fig ################################################### Mehta$Site <- with(Mehta, factor(Site, levels = Site[order((Response/Total)[1:22])])) clusters <- 
factor(clusters(mehtaMix), labels = paste("Cluster", 1:3)) print(dotplot(Response/Total ~ Site | clusters, groups = Drug, layout = c(3,1), data = Mehta, xlab = "Site", scales = list(x = list(draw = FALSE)), key = simpleKey(levels(Mehta$Drug), lines = TRUE, corner = c(1,0)))) mehtaMix.fitted <- fitted(mehtaMix) for (i in 1:3) { seekViewport(trellis.vpname("panel", i, 1)) sapply(1:nlevels(Mehta$Drug), function(j) grid.lines(unit(1:22, "native"), unit(mehtaMix.fitted[Mehta$Drug == levels(Mehta$Drug)[j], i], "native"), gp = gpar(lty = j))) } ################################################### ### code chunk number 19: mehta-full ################################################### mehtaMix <- stepFlexmix(cbind(Response, Total - Response) ~ Drug | Site, model = FLXMRglm(family = "binomial"), k = 3, data = Mehta, nrep = 3, control = list(minprior = 0.04)) summary(mehtaMix) ################################################### ### code chunk number 20: mehta-sub ################################################### Mehta.sub <- subset(Mehta, Site != 15) mehtaMix <- stepFlexmix(cbind(Response, Total - Response) ~ 1 | Site, model = FLXMRglmfix(family = "binomial", fixed = ~ Drug), data = Mehta.sub, k = 2, nrep = 3) summary(mehtaMix) ################################################### ### code chunk number 21: tribolium ################################################### data("tribolium", package = "flexmix") TribMix <- stepFlexmix(cbind(Remaining, Total - Remaining) ~ 1, k = 2:3, model = FLXMRglmfix(fixed = ~ Species, family = "binomial"), concomitant = FLXPmultinom(~ Replicate), data = tribolium) ################################################### ### code chunk number 22: wang-model ################################################### modelWang <- FLXMRglmfix(fixed = ~ I(Species == "Confusum"), family = "binomial") concomitantWang <- FLXPmultinom(~ I(Replicate == 3)) TribMixWang <- stepFlexmix(cbind(Remaining, Total - Remaining) ~ 1, data = tribolium, model = modelWang, concomitant = concomitantWang, k = 2) summary(refit(TribMixWang)) ################################################### ### code chunk number 23: tribolium ################################################### clusters <- factor(clusters(TribMixWang), labels = paste("Cluster", 1:TribMixWang@k)) print(dotplot(Remaining/Total ~ factor(Replicate) | clusters, groups = Species, data = tribolium[rep(1:9, each = 3) + c(0:2)*9,], xlab = "Replicate", auto.key = list(corner = c(1,0)))) ################################################### ### code chunk number 24: subset ################################################### TribMixWangSub <- stepFlexmix(cbind(Remaining, Total - Remaining) ~ 1, k = 2, data = tribolium[-7,], model = modelWang, concomitant = concomitantWang) ################################################### ### code chunk number 25: trypanosome ################################################### data("trypanosome", package = "flexmix") TrypMix <- stepFlexmix(cbind(Dead, 1-Dead) ~ 1, k = 2, nrep = 3, data = trypanosome, model = FLXMRglmfix(family = "binomial", fixed = ~ log(Dose))) summary(refit(TrypMix)) ################################################### ### code chunk number 26: trypanosome ################################################### tab <- with(trypanosome, table(Dead, Dose)) Tryp.dat <- data.frame(Dead = tab["1",], Alive = tab["0",], Dose = as.numeric(colnames(tab))) plot(Dead/(Dead+Alive) ~ Dose, data = Tryp.dat) Tryp.pred <- predict(glm(cbind(Dead, 1-Dead) ~ log(Dose), family = "binomial", data = trypanosome), 
newdata=Tryp.dat, type = "response") TrypMix.pred <- predict(TrypMix, newdata = Tryp.dat, aggregate = TRUE)[[1]] lines(Tryp.dat$Dose, Tryp.pred, lty = 2) lines(Tryp.dat$Dose, TrypMix.pred, lty = 3) legend(4.7, 1, c("GLM", "Mixture model"), lty=c(2, 3), xjust=0, yjust=1) ################################################### ### code chunk number 27: fabric-fix ################################################### data("fabricfault", package = "flexmix") fabricMix <- stepFlexmix(Faults ~ 1, model = FLXMRglmfix(family="poisson", fixed = ~ log(Length)), data = fabricfault, k = 2, nrep = 3) summary(fabricMix) summary(refit(fabricMix)) Lnew <- seq(0, 1000, by = 50) fabricMix.pred <- predict(fabricMix, newdata = data.frame(Length = Lnew)) ################################################### ### code chunk number 28: fabric-fix-nested ################################################### fabricMix2 <- flexmix(Faults ~ 0, data = fabricfault, cluster = posterior(fabricMix), model = FLXMRglmfix(family = "poisson", fixed = ~ log(Length), nested = list(k=c(1,1), formula=list(~0,~1)))) summary(refit(fabricMix2)) fabricMix2.pred <- predict(fabricMix2, newdata = data.frame(Length = Lnew)) ################################################### ### code chunk number 29: fabric-fig ################################################### plot(Faults ~ Length, data = fabricfault) sapply(fabricMix.pred, function(y) lines(Lnew, y, lty = 1)) sapply(fabricMix2.pred, function(y) lines(Lnew, y, lty = 2)) legend(190, 25, paste("Model", 1:2), lty=c(1, 2), xjust=0, yjust=1) ################################################### ### code chunk number 30: patent ################################################### data("patent", package = "flexmix") ModelPat <- FLXMRglm(family = "poisson") FittedPat <- stepFlexmix(Patents ~ lgRD, k = 3, nrep = 3, model = ModelPat, data = patent, concomitant = FLXPmultinom(~ RDS)) summary(FittedPat) ################################################### ### code chunk number 31: patent-fixed ################################################### ModelFixed <- FLXMRglmfix(family = "poisson", fixed = ~ lgRD) FittedPatFixed <- flexmix(Patents ~ 1, model = ModelFixed, cluster = posterior(FittedPat), concomitant = FLXPmultinom(~ RDS), data = patent) summary(FittedPatFixed) ################################################### ### code chunk number 32: Poisson ################################################### lgRDv <- seq(-3, 5, by = 0.05) newdata <- data.frame(lgRD = lgRDv) plotData <- function(fitted) { with(patent, data.frame(Patents = c(Patents, unlist(predict(fitted, newdata = newdata))), lgRD = c(lgRD, rep(lgRDv, 3)), class = c(clusters(fitted), rep(1:3, each = nrow(newdata))), type = rep(c("data", "fit"), c(nrow(patent), nrow(newdata)*3)))) } plotPatents <- cbind(plotData(FittedPat), which = "Wang et al.") plotPatentsFixed <- cbind(plotData(FittedPatFixed), which = "Fixed effects") plotP <- rbind(plotPatents, plotPatentsFixed) rds <- seq(0, 3, by = 0.02) x <- model.matrix(FittedPat@concomitant@formula, data = data.frame(RDS = rds)) plotConc <- function(fitted) { E <- exp(x%*%fitted@concomitant@coef) data.frame(Probability = as.vector(E/rowSums(E)), class = rep(1:3, each = nrow(x)), RDS = rep(rds, 3)) } plotConc1 <- cbind(plotConc(FittedPat), which = "Wang et al.") plotConc2 <- cbind(plotConc(FittedPatFixed), which = "Fixed effects") plotC <- rbind(plotConc1, plotConc2) print(xyplot(Patents ~ lgRD | which, data = plotP, groups=class, xlab = "log(R&D)", panel = "panel.superpose", type = plotP$type, panel.groups = 
function(x, y, type = "p", subscripts, ...) { ind <- plotP$type[subscripts] == "data" panel.xyplot(x[ind], y[ind], ...) panel.xyplot(x[!ind], y[!ind], type = "l", ...) }, scales = list(alternating=FALSE), layout=c(1,2), as.table=TRUE), more=TRUE, position=c(0,0,0.6, 1)) print(xyplot(Probability ~ RDS | which, groups = class, data = plotC, type = "l", scales = list(alternating=FALSE), layout=c(1,2), as.table=TRUE), position=c(0.6, 0.01, 1, 0.99)) ################################################### ### code chunk number 33: seizure ################################################### data("seizure", package = "flexmix") seizMix <- stepFlexmix(Seizures ~ Treatment * log(Day), data = seizure, k = 2, nrep = 3, model = FLXMRglm(family = "poisson", offset = log(seizure$Hours))) summary(seizMix) summary(refit(seizMix)) ################################################### ### code chunk number 34: seizure ################################################### seizMix2 <- flexmix(Seizures ~ Treatment * log(Day/27), data = seizure, cluster = posterior(seizMix), model = FLXMRglm(family = "poisson", offset = log(seizure$Hours))) summary(seizMix2) summary(refit(seizMix2)) ################################################### ### code chunk number 35: seizure ################################################### seizMix3 <- flexmix(Seizures ~ log(Day/27)/Treatment, data = seizure, cluster = posterior(seizMix), model = FLXMRglm(family = "poisson", offset = log(seizure$Hours))) summary(seizMix3) summary(refit(seizMix3)) ################################################### ### code chunk number 36: seizure ################################################### plot(Seizures/Hours~Day, pch = c(1,3)[as.integer(Treatment)], data=seizure) abline(v=27.5, lty=2, col="grey") legend(140, 9, c("Baseline", "Treatment"), pch=c(1, 3), xjust=1, yjust=1) matplot(seizure$Day, fitted(seizMix)/seizure$Hours, type="l", add=TRUE, lty = 1, col = 1) matplot(seizure$Day, fitted(seizMix3)/seizure$Hours, type="l", add=TRUE, lty = 3, col = 1) legend(140, 7, paste("Model", c(1,3)), lty=c(1, 3), xjust=1, yjust=1) ################################################### ### code chunk number 37: salmonella ################################################### data("salmonellaTA98", package = "flexmix") salmonMix <- stepFlexmix(y ~ 1, data = salmonellaTA98, k = 2, nrep = 3, model = FLXMRglmfix(family = "poisson", fixed = ~ x + log(x + 10))) ################################################### ### code chunk number 38: salmonella ################################################### salmonMix.pr <- predict(salmonMix, newdata=salmonellaTA98) plot(y~x, data=salmonellaTA98, pch=as.character(clusters(salmonMix)), xlab="Dose of quinoline", ylab="Number of revertant colonies of salmonella", ylim=range(c(salmonellaTA98$y, unlist(salmonMix.pr)))) for (i in 1:2) lines(salmonellaTA98$x, salmonMix.pr[[i]], lty=i) ################################################### ### code chunk number 39: regression-examples.Rnw:923-927 ################################################### SI <- sessionInfo() pkgs <- paste(sapply(c(SI$otherPkgs, SI$loadedOnly), function(x) paste("\\\\pkg{", x$Package, "} ", x$Version, sep = "")), collapse = ", ") flexmix/inst/doc/myConcomitant.R0000644000176200001440000000130314404637307016424 0ustar liggesusersmyConcomitant <- function(formula = ~ 1) { z <- new("FLXP", name = "myConcomitant", formula = formula) z@fit <- function(x, y, w, ...) 
{ if (missing(w) || is.null(w)) w <- rep(1, length(x)) f <- as.integer(factor(apply(x, 1, paste, collapse = ""))) AVG <- apply(w*y, 2, tapply, f, mean) (AVG/rowSums(AVG))[f,,drop=FALSE] } z@refit <- function(x, y, w, ...) { if (missing(w) || is.null(w)) w <- rep(1, length(x)) f <- as.integer(factor(apply(x, 1, paste, collapse = ""))) AVG <- apply(w*y, 2, tapply, f, mean) (AVG/rowSums(AVG)) } z } flexmix/inst/CITATION0000644000176200001440000000276314404661473014061 0ustar liggesuserscitHeader(sprintf("To cite package %s in publications use:", sQuote(meta$Package))) bibentry("Manual", other = unlist(citation(auto = meta), recursive = FALSE)) bibentry(bibtype = "Article", title = "{FlexMix}: A General Framework for Finite Mixture Models and Latent Class Regression in {R}", author = person(given = "Friedrich", family = "Leisch"), journal = "Journal of Statistical Software", year = "2004", volume = "11", number = "8", pages = "1--18", doi = "10.18637/jss.v011.i08" ) bibentry(bibtype = "Article", title = "Fitting Finite Mixtures of Generalized Linear Regressions in {R}", author = c(person(given = "Bettina", family = "Gr\\\"un"), person(given = "Friedrich", family = "Leisch")), journal = "Computational Statistics \\& Data Analysis", year = "2007", volume = "51", number = "11", pages = "5247--5252", doi = "10.1016/j.csda.2006.08.014" ) bibentry(bibtype = "Article", title = "{FlexMix} Version 2: Finite Mixtures with Concomitant Variables and Varying and Constant Parameters", author = c(person(given = "Bettina", family = "Gr\\\"un"), person(given = "Friedrich", family = "Leisch")), journal = "Journal of Statistical Software", year = "2008", volume = "28", number = "4", pages = "1--35", doi = "10.18637/jss.v028.i04" ) flexmix/inst/NEWS.Rd0000644000176200001440000004721214404661473013765 0ustar liggesusers\name{NEWS} \title{News for Package 'flexmix'} \section{Changes in flexmix version 2.3-19}{ \itemize{ \item Internal function names were changed to avoid having them look like S3 methods. \item Updated CITATION to use \code{bibentry()}. } } \section{Changes in flexmix version 2.3-18}{ \itemize{ \item The man page improved for the different general model classes such as `\code{FLXM}', `\code{FLXMC}', `\code{FLXMR}' and `\code{FLXMCsparse}'. Thanks to Jonas Hagenberg for pointing out that the class structure is not well documented. \item An issue fixed for `\code{FLXMRlmmc}' models. } } \section{Changes in flexmix version 2.3-17}{ \itemize{ \item A model class for sparse model-based clustering added with a suitable method to resolve the formula and extract the data. \item There was a bug with dimensions being dropped if only a single individual was used for grouping. Thanks to Adam Hulman for pointing this issue out. \item The \code{FLXgetModelmatrix} method for `\code{FLXMRlmm}' objects has been improved. Thanks to Adam Hulman for pointing out that this might be unnecessarily slow. } } \section{Changes in flexmix version 2.3-16}{ \itemize{ \item The vignette explaining model diagnostics using resampling methods was modified to use more replications. Thanks to Eric Book for pointing this issue out. } } \section{Changes in flexmix version 2.3-15}{ \itemize{ \item Modified the internal function \code{groupPosteriors} to be more efficient for a large number of groups. Thanks to Marnix Koops for pointing the problem out. \item Model driver \code{FLXMRlmer()} adapted for \pkg{lme4} (>= 1.1). Thanks to Mark Senior for pointing the problem out. 
\item Model driver \code{FLXMRmgcv()} adapted for \pkg{mgcv} (>= 1.8-27). \item Data set \code{Catsup} is now loaded from package \pkg{Ecdat} instead of \pkg{mlogit}. } } \section{Changes in flexmix version 2.3-14}{ \itemize{ \item A bug fixed for \code{FLXMRcondlogit()} to ensure that the formula for the strata is also stored and can be used for predicting, etc. new data. Thanks to Peter Fraser-Mackenziefor for pointing the problem out. \item A bug fixed for \code{FLXMRglmfix()} which occurred if several components were simultaneously omitted. Thanks to Moritz Berger for pointing the problem out. \item For mixtures of mixed effects models and with censored data only weighted ML estimation is implemented. This is now checked and an error is given if a different estimation method is requested. \item A generic function \code{sigma()} is available for R >= 3.3.0 and thus different versions of \code{sigma()} need to be called depending on the R version. Thanks also to Stephen Martin for pointing the issue out. \item Components are now generated using functions instead of expressions. \item Functions from the base packages \pkg{stats}, \pkg{graphics} and \pkg{grDevices} are now correctly imported before being used. \item Function \code{FLXMCdist1} implements model drivers for univariate mixtures of different distributions. } } \section{Changes in flexmix version 2.3-13}{ \itemize{ \item A model driver for mixtures of regression models with (adaptive) lasso and elastic net penalizations for the coefficients building on \pkg{glmnet} was provided by Frederic Mortier and Nicolas Picard. \item A bug in a coerce method to class \code{"FLXnested"} fixed. } } \section{Changes in flexmix version 2.3-12}{ \itemize{ \item Construction of model matrices changed to re-use levels of factors while fitting for prediction. Thanks to Robert Poos for pointing the problem out. \item Package mgcv of version at least 1.8-0 is required in order to avoid loading of the package for formula evaluation. } } \section{Changes in flexmix version 2.3-11}{ \itemize{ \item Examples changed to be less time consuming. \item Bug fixed in ungroupPriors() and getPriors() to work with a grouping where only one unique value is contained. Thanks to Christine Koehler for pointing the problem out. \item The \code{logLik()} method for \code{"flexmix"} objects now also has a \code{newdata} argument. \item In the M-step only the parameters of the previously fitted components are passed over to avoid nesting of environments. Thanks to Dominik Ernst for pointing the problem out. \item Function \code{boot()} was fixed to work correctly with grouped data if the groups are kept and if fixed effects are fitted. \item Authors@R used in DESCRIPTION. Deepayan Sarkar listed as contributor due to internal code copied from package lattice used for the plots in flexmix. \item Model driver \code{FLXMRlmer()} adapted for \pkg{lme4} (>= 1.0). } } \section{Changes in flexmix version 2.3-10}{ \itemize{ \item Parallel processing disabled in \code{stepFlexmix()}. } } \section{Changes in flexmix version 2.3-9}{ \itemize{ \item Package dependencies, imports and suggests modified suitably for R >= 2.15.0. \item NEWS file adapted to a NEWS.Rd file. \item Parallel processing is enabled in \code{stepFlexmix()}. See \pkg{flexclust} for details. \item New model drivers \code{FLXMRmultinom()} and \code{FLXMRcondlogit()} are included which allow to fit finite mixtures of multinomial logit and conditional logit models. 
Identifiability problems might arise for this model class. Details on sufficient identifiability conditions are given in Gruen and Leisch (2008). \item A bug in \code{FLXMRlmm()} was fixed which did not allow to specify restrictions on the variances of the random effects and / or the residuals. Thanks to Gregory Matthews for pointing the problem out. } } \section{Changes in flexmix version 2.3-8}{ \itemize{ \item The fit function in the M-step by default now is called with an argument containing the fitted component. This allows to use the parameter estimates from the previous step for initialization. Fit functions which do not require this now need a \code{...} argument. Thanks to Hannah Frick and Achim Zeileis for requesting this feature. \item Function \code{initFlexmix()} was added which is an alternative to \code{stepFlexmix()} where first several short runs of EM, SEM or CEM are performed followed by a long run of EM. } } \section{Changes in flexmix version 2.3-7}{ \itemize{ \item A bug fixed in \code{predict()} and \code{fitted()} if a concomitant variable model is specified and \code{aggregate = TRUE}. Thanks to Julia Schiffner for pointing this out. \item A bug fixed in \code{FLXMRmgcv()} if observations were removed in the M-step because their a-posterior probabilities were smaller than eps. Thanks to Ghislain Geniaux for pointing the problem out. } } \section{Changes in flexmix version 2.3-6}{ \itemize{ \item Vignettes moved from /inst/doc to /vignettes. \item \code{stepFlexmix()} can now be called with a concomitant variable model \code{FLXPmultinom()} for \code{k} starting with 1 without getting an error. The concomitant variable model is internally replaced by \code{FLXPconstant()}. \item The \code{boot()} method for \code{"flexmix"} objects is extended to mixture models with concomitant variables and mixtures of linear mixed models. \item A bug fixed in the \code{summary()} method for \code{"flexmix"} objects. The column post > 0 did not give the correct results if weights were used for fitting the mixture. \item A bug fixed in the \code{unique()} method for \code{"stepFlexmix"} objects. This only occurred if components were dropped as well as if the EM algorithm did not converge for all repetitions. Thanks to Sebastian Meyer for pointing this out. } } \section{Changes in flexmix version 2.3-5}{ \itemize{ \item A bug fixed in \code{posterior()}. Fixed priors were always used, also if a concomitant variable model was present. \item A method added for \code{prior()} such that if newdata is supplied and the object is of class \code{"flexmix"} the prior class probabilities for each observation are returned. } } \section{Changes in flexmix version 2.3-4}{ \itemize{ \item A generic method for \code{nobs()} is introduced in \pkg{stats4} for \R 2.13.0. \pkg{flexmix} now does not define this generic function and \code{logLik()}, \code{AIC()} and \code{BIC()} methods were modified to better exploit already available methods. Thanks to Prof. Brian D. Ripley for suggesting the modification. } } \section{Changes in flexmix version 2.3-3}{ \itemize{ \item A bug for \code{boot()} fixed for \code{"flexmix"} objects with an unbalanced grouping variable. Thanks to Laszlo Sandor for pointing this out. } } \section{Changes in flexmix version 2.3-2}{ \itemize{ \item A bug for \code{rflexmix()} fixed for \code{"flexmix"} objects with a concomitant variable model. Thanks to Greg Petroski for pointing this out. 
 }
}
\section{Changes in flexmix version 2.3-1}{
  \itemize{
\item Functionality for bootstrapping finite mixture models added.
 }
}
\section{Changes in flexmix version 2.2-11}{
  \itemize{
\item More generics and methods exported to use the \code{refit()}
  method when extending \pkg{flexmix} in other packages.
 }
}
\section{Changes in flexmix version 2.2-10}{
  \itemize{
\item For long formulas \code{FLXMRglmfix()} did not work properly due
  to the splitting of the formula into several parts by
  \code{deparse()}. This is fixed by pasting them together again. Thanks
  to Dustin Tingley for the bug report.
 }
}
\section{Changes in flexmix version 2.2-9}{
  \itemize{
\item A new model driver \code{FLXMRmgcv()} is added which allows to fit
  regularized linear models in the components.
\item More generics and methods exported to allow extending
  \pkg{flexmix} in other packages.
 }
}
\section{Changes in flexmix version 2.2-8}{
  \itemize{
\item The a-posteriori probabilities are now also determined for
  \code{refit()} in the same way as changed for \code{FLXfit()} in
  version 2.2-6.
\item Bug fixed for FLXfillconcomitant and refit when weights and
  grouping are present. A check was added that weights are identical
  within groups.
\item Function \code{group()} is now exported.
 }
}
\section{Changes in flexmix version 2.2-7}{
  \itemize{
\item Bug in the \code{FLXgetModelmatrix()} method for the
  \code{"FLXMRlmm"} class fixed when determining identical random
  effects covariates for the grouping.
\item A new model driver for finite mixtures of linear mixed effects
  models with left-censored observations is added.
 }
}
\section{Changes in flexmix version 2.2-6}{
  \itemize{
\item Determination of the a-posteriori probabilities made numerically
  more stable for small likelihoods. Thanks to Nicolas Picard for the
  code patch.
\item \code{summary()} for \code{"FLXMRmstep"} objects now returns
  similar output for \code{which = "concomitant"} as for \pkg{flexmix}
  version 2.0-1.
\item New demo driver \code{FLXMCnorm1()} for univariate Gaussian
  clustering.
\item Non-positive values for the maximum number of iterations for the
  \code{"FLXcontrol"} object are not valid. A validity check for this is
  now included.
 }
}
\section{Changes in flexmix version 2.2-5}{
  \itemize{
\item Model class \code{"FLXMRfix"} introduced which is a subclass of
  \code{"FLXMR"} and a superclass for \code{"FLXMRglmfix"} which also
  extends \code{"FLXMRglm"}.
\item Model driver \code{FLXMCfactanal()} added which allows to fit
  finite mixtures of Gaussian distributions where the variance-covariance
  matrix is estimated using factor analyzers.
\item Comparison of formulas now done using \code{identical()}.
 }
}
\section{Changes in flexmix version 2.2-4}{
  \itemize{
\item Model drivers \code{FLXMRlmer()} and \code{FLXMRlmm()} added for
  fitting finite mixtures of linear mixed effects models.
\item \code{EIC()} added as additional information criterion for
  assessing model fit.
\item Bug fixed in \code{plot()} method for \code{"flexmix"} objects
  introduced in version 2.2-3.
 }
}
\section{Changes in flexmix version 2.2-3}{
  \itemize{
\item New model driver \code{FLXMCmvcombi()} which is a combination of
  Gaussian and binary.
\item \code{parameters()} now also has a which argument in order to
  allow to access the parameters of the concomitant variable model.
\item Bug fixed in \code{refit()}.
\item \code{nobs()} now returns the number of rows in the data.frame and
  not the number of individuals (similar to, for example, lme).
} } \section{Changes in flexmix version 2.2-0}{ \itemize{ \item vignette describing Version 2 added \item isTRUE(\code{all.equal()}) replaced with \code{identical()}. \item Bug fixed for prior in \code{flexmix()}. \item New function \code{relabel()} to sort components (generic is in modeltools). \item New example data generator \code{ExLinear()}. \item Fixed a bug in handling groups (gave an error for empty design matrices). \item Added new model \code{FLXMRrobglm()} for robust estimation of GLMs. } } \section{Changes in flexmix version 2.1-0}{ \itemize{ \item Renamed \code{cluster()} to \code{clusters()} to avoid conflict with \code{cluster()} from package survival \item Bug fixed in internal functions using S4 generics and methods. } } \section{Changes in flexmix version 2.0-2}{ \itemize{ \item \code{refit()} now has a method argument. For method \code{"optim"} the variance-covariance matrix is determined using \code{optim()} to maximize the likelihood initialized in the solution found by the EM algorithm. Method \code{"mstep"} refits the component specific and concomitant models treating the posterior probabilities as given, i.e. performs an M-step of the EM algorithm. } } \section{Changes in flexmix version 2.0-1}{ \itemize{ \item \code{Lapply()} added which allows to apply a function to each component of a finite mixture \item \code{KLdiv()} modified to allow for determination with a discrete and a continuous version of the KL divergence } } \section{Changes in flexmix version 2.0-0}{ \itemize{ \item Model driver for zero-inflated component specific models. \item Latent class analysis for binary multivariate data is now possible to estimate for truncated data where the number of observations with pattern only zeros is missing. \item new argument newdata for \code{cluster()} \item new \code{unique()} method for \code{"stepFlexmix"} objects } } \section{Changes in flexmix version 1.9-0}{ \itemize{ \item New class definitions for component specific models and concomitant variable models. \item \code{fitted()} and \code{predict()} now have an aggregate argument in order to be able to determine the aggregated values over all components. \item The package has now a vignette presenting several applications of finite mixtures of regression models with varying and fixed effects on artificial and real data which can be a accessed using the command vignette("regression-examples"). \item The vignette "flexmix-intro" was adapted to reflect the changes made in the package. \item \code{stepFlexmix()} now returns an object of class \code{"stepFlexmix"} which has a \code{print()} and \code{plot()} method. In addition \code{getModel()} can be used to select an appropriate model. \item \code{flexmix()} now has a weights argument for multiple identical observations. \item New model drivers for latent class analysis with Bernoulli and Poisson distributed multivariate observations. \item Variants of the EM algorithm have been modified to correspond to CEM and SEM. These names can now also be used for specifying the classify slot of the \code{"FLXcontrol"} object. } } \section{Changes in flexmix version 1.8-1}{ \itemize{ \item The package can now fit concomitant variable models. \item New M-step driver for regression models with varying and fixed effects. \item ICL information criterion added. } } \section{Changes in flexmix version 1.1-2}{ \itemize{ \item Fixed a bug that made the log-likelihood infinity for observations where all posteriors are numerically zero. \item Fixed a bug for formulae with dots. 
\item \code{posterior()} now has a newdata argument.
\item New demo driver for model-based clustering of binary data.
\item Adapted to changes in \code{summary.glm()} of \R version 2.3.0.
 }
}
\section{Changes in flexmix version 1.1-1}{
  \itemize{
\item The \code{cluster} argument of \code{flexmix()} may now also be a
  matrix of posterior probabilities.
\item Fixed a bug to make size table work in case of empty clusters.
\item Fixed a bug in likelihood computation for grouped observations.
\item The artificial NPreg data now also have a binomial response, added
  example to help(\code{"flexmix"}).
 }
}
\section{Changes in flexmix version 1.1-0}{
  \itemize{
\item The \code{FLXglm()} driver now has an offset argument.
\item New data set seizure as example for a Poisson GLM with an offset.
\item \code{fitted()} can be used to extract fitted values from
  \code{"flexmix"} and \code{"FLXrefit"} objects.
\item New accessor methods \code{cluster()} and \code{posterior()}.
\item The package now uses lazy loading and has a namespace.
 }
}
\section{Changes in flexmix version 1.0-0}{
  \itemize{
\item The package has now an introductory vignette which can be accessed
  using the command vignette("flexmix-intro"). The vignette has been
  published in the Journal of Statistical Software, Volume 11, Issue 8
  (\doi{10.18637/jss.v011.i08}), and the paper should be used as citation
  for \pkg{flexmix}, run \code{citation("flexmix")} in \R 2.0.0 or newer
  for details.
\item Several typos in help pages have been fixed.
 }
}
\section{Changes in flexmix version 0.9-1}{
  \itemize{
\item Adjusted for \R 2.0.0.
\item Fixed a bug in the \code{summary()} and \code{plot()} methods of
  \code{"flexmix"} objects in case of empty clusters.
\item \code{stepFlexmix()} takes two new arguments: \code{compare}
  allows to find minimum AIC/BIC solutions in addition to maximum
  likelihood, \code{verbose} gives some information about progress.
\item Use a default of \code{verbose = 0} in \code{FLXcontrol()} (better
  in combination with \code{stepFlexmix()}).
 }
}
\section{Changes in flexmix version 0.9-0}{
  \itemize{
\item New \code{summary()} and \code{plot()} methods for \code{"flexmix"}
  objects.
\item \code{"FLXglm"} objects can now be automatically \code{refit()}ted
  to get table of significance tests for coefficients.
\item New function \code{stepFlexmix()} for more automated model search.
\item The artificial example data now have functions to create them and
  pre-stored data sets, new function \code{plotEll()} to plot 2d
  Gaussians with confidence ellipses.
\item New function \code{KLdiv()} to compute Kullback-Leibler divergence
  of components.
\item The calculation of the degrees of freedom for \code{FLXmclust()}
  was wrong.
 }
}
\section{Changes in flexmix version 0.7-1}{
  \itemize{
\item Fixed some codoc problems (missing aliases).
\item First version released on CRAN: 0.7-0
 }
}